diff --git a/SOURCES/bz1364242-ethmonitor-add-intel-omnipath-support.patch b/SOURCES/bz1364242-ethmonitor-add-intel-omnipath-support.patch
new file mode 100644
index 0000000..c7451ab
--- /dev/null
+++ b/SOURCES/bz1364242-ethmonitor-add-intel-omnipath-support.patch
@@ -0,0 +1,71 @@
+From 5e8f593b58409c8c1c7793576a3980eb56e8c200 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Thu, 2 Nov 2017 14:01:05 +0100
+Subject: [PATCH 1/2] ethmonitor: add intel omnipath support
+
+---
+ heartbeat/ethmonitor | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor
+index 7f5579f94..952a9f91f 100755
+--- a/heartbeat/ethmonitor
++++ b/heartbeat/ethmonitor
+@@ -219,7 +219,10 @@ infiniband_status()
+ 		device="${OCF_RESKEY_infiniband_device}:${OCF_RESKEY_infiniband_port}"
+ 	fi
+ 	
+-	ibstatus ${device} | grep -q ACTIVE 
++	case "${OCF_RESKEY_infiniband_device}" in
++		*mlx*) ibstatus ${device} | grep -q ACTIVE ;;
++		*hfi*) opainfo | grep -q Active ;;
++	esac
+ }
+ 
+ if_init() {
+@@ -291,8 +294,11 @@ if_init() {
+ 	fi
+ 
+ 	if [ -n "$OCF_RESKEY_infiniband_device" ]; then
+-		#ibstatus is required if an infiniband_device is provided
+-		check_binary ibstatus
++		#ibstatus or opainfo is required if an infiniband_device is provided
++		case "${OCF_RESKEY_infiniband_device}" in
++			*mlx*) check_binary ibstatus ;;
++			*hfi*) check_binary opainfo ;;
++		esac
+ 	fi
+ 	return $OCF_SUCCESS
+ }
+
+From 7e15a3ccfa0bd0e9dab92a6be21df968b073ec3d Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Tue, 7 Nov 2017 16:42:37 +0100
+Subject: [PATCH 2/2] ethmonitor: add /dev/ib* device to case-statement
+
+---
+ heartbeat/ethmonitor | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor
+index 952a9f91f..21bf12be7 100755
+--- a/heartbeat/ethmonitor
++++ b/heartbeat/ethmonitor
+@@ -220,7 +220,7 @@ infiniband_status()
+ 	fi
+ 	
+ 	case "${OCF_RESKEY_infiniband_device}" in
+-		*mlx*) ibstatus ${device} | grep -q ACTIVE ;;
++		*ib*|*mlx*) ibstatus ${device} | grep -q ACTIVE ;;
+ 		*hfi*) opainfo | grep -q Active ;;
+ 	esac
+ }
+@@ -296,7 +296,7 @@ if_init() {
+ 	if [ -n "$OCF_RESKEY_infiniband_device" ]; then
+ 		#ibstatus or opainfo is required if an infiniband_device is provided
+ 		case "${OCF_RESKEY_infiniband_device}" in
+-			*mlx*) check_binary ibstatus ;;
++			*ib*|*mlx*) check_binary ibstatus ;;
+ 			*hfi*) check_binary opainfo ;;
+ 		esac
+ 	fi
diff --git a/SOURCES/bz1436189-sybase.patch b/SOURCES/bz1436189-sybase.patch
new file mode 100644
index 0000000..7d99261
--- /dev/null
+++ b/SOURCES/bz1436189-sybase.patch
@@ -0,0 +1,920 @@
+diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
+--- a/doc/man/Makefile.am	2017-08-30 15:55:08.646159027 +0200
++++ b/doc/man/Makefile.am	2017-08-30 15:58:31.899477013 +0200
+@@ -140,6 +140,7 @@
+                           ocf_heartbeat_scsi2reservation.7 \
+                           ocf_heartbeat_sfex.7 \
+                           ocf_heartbeat_slapd.7 \
++                          ocf_heartbeat_sybaseASE.7 \
+                           ocf_heartbeat_symlink.7 \
+                           ocf_heartbeat_syslog-ng.7 \
+                           ocf_heartbeat_tomcat.7 \
+diff -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+--- a/heartbeat/Makefile.am	2017-08-30 15:55:08.646159027 +0200
++++ b/heartbeat/Makefile.am	2017-08-30 15:58:31.899477013 +0200
+@@ -131,6 +131,7 @@
+ 			SysInfo			\
+ 			scsi2reservation	\
+ 			sfex			\
++			sybaseASE		\
+ 			symlink			\
+ 			syslog-ng		\
+ 			tomcat			\
+diff -uNr a/heartbeat/sybaseASE b/heartbeat/sybaseASE
+--- a/heartbeat/sybaseASE	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/sybaseASE	2017-08-30 14:51:24.000000000 +0200
+@@ -0,0 +1,894 @@
++#!/bin/bash
++
++# 
++# Sybase Availability Agent for Red Hat Cluster v15.0.2 
++# Copyright (C) - 2007
++# Sybase, Inc. All rights reserved.
++#
++# Sybase Availability Agent for Red Hat Cluster v15.0.2 is licensed
++# under the GNU General Public License Version 2.
++#
++# Author(s):
++#    Jian-ping Hui <jphui@sybase.com>
++#
++# Description: Service script for starting/stopping/monitoring \
++#              Sybase Adaptive Server on: \
++#                            Red Hat Enterprise Linux 5 ES \
++#                            Red Hat Enterprise Linux 5 AS
++#
++# NOTES:
++#
++# (1) Before running this script, we assume that user has installed
++#     Sybase ASE 15.0.2 or higher version on the machine. Please
++#     customize your configuration in /etc/cluster/cluster.conf according
++#     to your actual environment. We assume the following files exist before
++#     you start the service:
++#         /$sybase_home/SYBASE.sh
++#         /$sybase_home/$sybase_ase/install/RUN_$server_name
++#
++# (2) You can customize the interval value in the meta-data section if needed:
++#                <action name="start" timeout="300" />
++#                <action name="stop" timeout="300" />
++#                
++#                <!-- Checks to see if it's mounted in the right place -->
++#                <action name="status"  interval="30" timeout="100" />
++#                <action name="monitor" interval="30" timeout="100" />
++#                
++#                <!--Checks to see if we can read from the mountpoint -->
++#                <action name="status" depth="10" timeout="100" interval="120" />
++#                <action name="monitor" depth="10" timeout="100" interval="120" />
++#                
++#                <action name="meta-data" timeout="5" />
++#                <action name="validate-all" timeout="5" />
++#     The timeout value is not supported by Redhat in RHCS5.0. 
++# 
++# (3) This script should be put under /usr/share/cluster. Its owner should be "root" with 
++#     execution permission.
++#
++
++#######################################################################
++# Initialization:
++
++. /etc/init.d/functions
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++
++#######################################################################
++
++# Default timeouts when we aren't using the rgmanager wrapper
++if ! ocf_is_true "$OCF_RESKEY_is_rgmanager_wrapper"; then
++	if [ -z "$OCF_RESKEY_CRM_meta_timeout" ]; then
++		case $1 in
++			start|stop) OCF_RESKEY_CRM_meta_timeout=300000 ;;
++			*) OCF_RESKEY_CRM_meta_timeout=100000 ;;
++		esac
++	fi
++	default_timeout=$(((${OCF_RESKEY_CRM_meta_timeout}/1000) - 5))
++	default_force_stop_timeout=$(((${OCF_RESKEY_CRM_meta_timeout}/1000) - 5))
++	: ${OCF_RESKEY_shutdown_timeout=${default_force_stop_timeout}}
++	: ${OCF_RESKEY_deep_probe_timeout=${default_timeout}}
++	: ${OCF_RESKEY_start_timeout=${default_timeout}}
++fi
++
++sybase_user_default="sybase"
++sybase_home_default="detect"
++ase_default="detect"
++ocs_default="detect"
++
++: ${OCF_RESKEY_sybase_user=${sybase_user_default}}
++: ${OCF_RESKEY_sybase_ase=${ase_default}}
++: ${OCF_RESKEY_sybase_ocs=${ocs_default}}
++: ${OCF_RESKEY_sybase_home=${sybase_home_default}}
++
++if [ "$OCF_RESKEY_sybase_home" = "detect" ]; then
++	if [ -d "/opt/sap" ]; then
++		OCF_RESKEY_sybase_home="/opt/sap"
++	elif [ -d "/opt/sybase" ]; then
++		OCF_RESKEY_sybase_home="/opt/sybase"
++	else
++		ocf_log err "sybaseASE: Unable to detect 'sybase_home'."
++		return $OCF_ERR_ARGS
++	fi
++fi
++
++sybase_env="$OCF_RESKEY_sybase_home/SYBASE.env"
++
++if [ "$OCF_RESKEY_sybase_ase" = "detect" ]; then
++	if [ -f "$sybase_env" ]; then
++		OCF_RESKEY_sybase_ase=$(grep "SYBASE_ASE" "$sybase_env" | cut -d= -f2)
++	else
++		ocf_log err "sybaseASE: Unable to detect 'sybase_ase'."
++		return $OCF_ERR_ARGS
++	fi
++fi
++
++if [ "$OCF_RESKEY_sybase_ocs" = "detect" ]; then
++	if [ -f "$sybase_env" ]; then
++		OCF_RESKEY_sybase_ocs=$(grep "SYBASE_OCS" "$sybase_env" | cut -d= -f2)
++	else
++		ocf_log err "sybaseASE: Unable to detect 'sybase_ocs'."
++		return $OCF_ERR_ARGS
++	fi
++fi
++
++
++interfaces_file_default="${OCF_RESKEY_sybase_home}/interfaces"
++: ${OCF_RESKEY_interfaces_file=${interfaces_file_default}}
++
++export LD_POINTER_GUARD=0
++
++#######################################################################################
++# Declare some variables we will use in the script. Please don't change their values. #
++#######################################################################################
++declare login_string=""
++declare RUNSERVER_SCRIPT=$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ase/install/RUN_$OCF_RESKEY_server_name
++declare CONSOLE_LOG=$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ase/install/$OCF_RESKEY_server_name.log
++
++##################################################################################################
++# This function will be called by rgmanager to get the meta data of resource agent "sybaseASE". #
++# NEVER CHANGE ANYTHING IN THIS FUNCTION.
++##################################################################################################
++meta_data()
++{
++	cat <<EOT
++<?xml version="1.0" ?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="sybaseASE" >
++	<version>1.0</version>
++
++	<longdesc lang="en">
++		Sybase ASE Failover Instance
++	</longdesc>
++	<shortdesc lang="en">
++		Sybase ASE Failover Instance
++	</shortdesc>
++
++	<parameters>
++		<parameter name="sybase_home">
++			<longdesc lang="en">
++				The home directory of sybase products
++			</longdesc>
++			<shortdesc lang="en">
++				SYBASE home directory
++			</shortdesc>
++			<content type="string" default="${sybase_home_default}"/>
++		</parameter>
++
++		<parameter name="sybase_ase">
++			<longdesc lang="en">
++				The directory name under sybase_home where ASE products are installed
++			</longdesc>
++			<shortdesc lang="en">
++				SYBASE_ASE directory name
++			</shortdesc>
++			<content type="string" default="$ase_default" />
++		</parameter>
++
++		<parameter name="sybase_ocs">
++			<longdesc lang="en">
++				The directory name under sybase_home where OCS products are installed, i.e. ASE-15_0
++			</longdesc>
++			<shortdesc lang="en">
++				SYBASE_OCS directory name
++			</shortdesc>
++			<content type="string" default="${ocs_default}" />
++		</parameter>
++
++		<parameter name="server_name" unique="1" required="1">
++			<longdesc lang="en">
++				The ASE server name which is configured for the HA service
++			</longdesc>
++			<shortdesc lang="en">
++				ASE server name
++			</shortdesc>
++			<content type="string" />
++		</parameter>
++
++		<parameter name="interfaces_file">
++			<longdesc lang="en">
++				The full path of interfaces file which is used to start/access the ASE server
++			</longdesc>
++			<shortdesc lang="en">
++				Interfaces file
++			</shortdesc>
++			<content type="string" default="$interfaces_file_default"/>
++		</parameter>
++
++		<parameter name="sybase_user">
++			<longdesc lang="en">
++				The user who can run ASE server
++			</longdesc>
++			<shortdesc lang="en">
++				Sybase user
++			</shortdesc>
++			<content type="string" default="$sybase_user_default" />
++		</parameter>
++
++		<parameter name="db_user" required="1">
++			<longdesc lang="en">
++				The database user required to login to isql.
++			</longdesc>
++			<shortdesc lang="en">
++				Sybase user
++			</shortdesc>
++			<content type="string"/>
++		</parameter>
++
++		<parameter name="db_passwd">
++			<longdesc lang="en">
++				The database user's password required to login to isql.
++			</longdesc>
++			<shortdesc lang="en">
++				Sybase user
++			</shortdesc>
++			<content type="string"/>
++		</parameter>
++
++	</parameters>
++
++	<actions>
++		<action name="start" timeout="300" />
++		<action name="stop" timeout="300" />
++		
++		<!-- Checks to see if it's mounted in the right place -->
++		<action name="status"  interval="30" timeout="100" />
++		<action name="monitor" interval="30" timeout="100" />
++		
++		<!--Checks to see if we can read from the mountpoint -->
++		<action name="status" depth="10" timeout="100" interval="120" />
++		<action name="monitor" depth="10" timeout="100" interval="120" />
++		
++		<action name="meta-data" timeout="5" />
++		<action name="validate-all" timeout="5" />
++	</actions>
++
++	<special tag="rgmanager">
++	</special>
++</resource-agent>
++EOT
++}
++
++ase_engine0_process()
++{
++	sed -n -e '/engine 0/s/^.*os pid \([0-9]*\).*online$/\1/p' $CONSOLE_LOG
++}
++
++ase_engine0_thread()
++{
++	sed -n -e 's/.*Thread.*LWP \([0-9]*\).*online as engine 0.*/\1/p' $CONSOLE_LOG
++}
++
++ase_engine_threadpool_pid()
++{
++	sed -n -e 's/.*Adaptive Server is running as process id \([0-9]*\).*/\1/p' $CONSOLE_LOG
++}
++
++ase_all_pids()
++{
++	local PIDS=$(sed -n -e '/engine /s/^.*os pid \([0-9]*\).*online$/\1/p' $CONSOLE_LOG)
++	if [ -z "$PIDS" ]; then
++		#engines are running in a threadpool
++		PIDS=$(ase_engine_threadpool_pid)
++	fi
++	echo $PIDS
++}
++
++##################################################################################################
++# Function Name: verify_all                                                                      #
++# Parameter: None                                                                                #
++# Return value:                                                                                  #
++#             0               SUCCESS                                                            #
++#             OCF_ERR_ARGS    Parameters are invalid                                             #
++# Description: Do some validation on the user-configurable stuff at the beginning of the script. #
++##################################################################################################
++verify_all() 
++{
++	ocf_log debug "sybaseASE: Start 'verify_all'"
++
++	check_binary "ksh"
++
++	# Check if the parameter 'sybase_home' is set.	
++	if [[ -z "$OCF_RESKEY_sybase_home" ]]
++	then
++		ocf_log err "sybaseASE: The parameter 'sybase_home' is not set."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'sybase_home' is a valid path.
++	if [[ ! -d $OCF_RESKEY_sybase_home ]]
++	then
++		ocf_log err "sybaseASE: The sybase_home '$OCF_RESKEY_sybase_home' doesn't exist."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the script file SYBASE.sh exists
++	if [[ ! -f $OCF_RESKEY_sybase_home/SYBASE.sh ]]
++	then
++		ocf_log err "sybaseASE: The file $OCF_RESKEY_sybase_home/SYBASE.sh is required to run this script. Failed to run the script."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'sybase_ase' is set.
++	if [[ -z "$OCF_RESKEY_sybase_ase" ]] 
++	then
++		ocf_log err "sybaseASE: The parameter 'sybase_ase' is not set."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the directory /$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ase exists.
++	if [[ ! -d $OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ase ]]
++	then
++		ocf_log err "sybaseASE: The directory '$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ase' doesn't exist."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'sybase_ocs' is set.
++	if [[ -z "$OCF_RESKEY_sybase_ocs" ]] 
++	then
++		ocf_log err "sybaseASE: The parameter 'sybase_ocs' is not set."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the directory /$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ocs exists.
++	if [[ ! -d $OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ocs ]]
++	then
++		ocf_log err "sybaseASE: The directory '$OCF_RESKEY_sybase_home/$OCF_RESKEY_sybase_ocs' doesn't exist."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'server_name' is set.	
++	if [[ -z "$OCF_RESKEY_server_name" ]]
++	then
++		ocf_log err "sybaseASE: The parameter 'server_name' is not set."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the Run_server file exists.
++	if [[ ! -f $RUNSERVER_SCRIPT ]]
++	then
++		ocf_log err "sybaseASE: The file $RUNSERVER_SCRIPT doesn't exist. The sybase directory may be incorrect."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the user 'sybase_user' exist
++	id -u $OCF_RESKEY_sybase_user
++	if [[ $? != 0 ]]
++	then
++		ocf_log err "sybaseASE: The user '$OCF_RESKEY_sybase_user' doesn't exist in the system."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'interfaces_file' is set
++	if [[ -z "$OCF_RESKEY_interfaces_file" ]]
++	then
++		ocf_log err "sybaseASE: The parameter 'interfaces_file' is not set."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the file 'interfaces_file' exists
++	if [[ ! -f $OCF_RESKEY_interfaces_file ]]
++	then
++		ocf_log err "sybaseASE: The interfaces file '$OCF_RESKEY_interfaces_file' doesn't exist."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'db_user' is set
++	if [[ -z "$OCF_RESKEY_db_user" ]]
++	then
++		ocf_log err "sybaseASE: The parameter 'db_user' is not set."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'shutdown_timeout' is a valid value
++	if [[ $OCF_RESKEY_shutdown_timeout -eq 0 ]]
++	then
++		ocf_log err "sybaseASE: The parameter 'shutdown_timeout' is not set. Its value cannot be zero."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'start_timeout' is a valid value
++	if [[ $OCF_RESKEY_start_timeout -eq 0 ]]
++	then
++		ocf_log err "sybaseASE: The parameter 'start_timeout' is not set. Its value cannot be zero."
++		return $OCF_ERR_ARGS
++	fi
++
++	# Check if the parameter 'deep_probe_timeout' is a valid value
++	if [[ $OCF_RESKEY_deep_probe_timeout -eq 0 ]]
++	then
++		ocf_log err "sybaseASE: The parameter 'deep_probe_timeout' is not set. Its value cannot be zero."
++		return $OCF_ERR_ARGS
++	fi
++
++	ocf_log debug "sybaseASE: End 'verify_all' successfully."
++
++	return $OCF_SUCCESS
++}
++
++set_login_string()
++{
++	tmpstring=""
++	login_string=""
++
++	login_string="-U$OCF_RESKEY_db_user -P$OCF_RESKEY_db_passwd"
++	return 0
++}
++
++##############################################################################################
++# Function name: ase_start                                                                   #
++# Parameter: None                                                                            #
++# Return value:                                                                              #
++#             0  SUCCESS                                                                     #
++#             1  FAIL                                                                        #
++# Description: This function is used to start the ASE server in primary or secondary server. #
++##############################################################################################
++ase_start()
++{
++	ocf_log debug "sybaseASE: Start 'ase_start'"
++
++	# Check if the server is running. If yes, return SUCCESS directly. Otherwise, continue the start work.
++	ase_is_running
++	if [[ $? = 0 ]]
++	then
++		# The server is running. 
++		ocf_log info "sybaseASE: Server is running. Start is success."
++		return $OCF_SUCCESS
++	fi
++
++	# The server is not running. We need to start it.
++	# If the log file existed, delete it.
++	if [[ -f $CONSOLE_LOG ]]
++	then
++		rm -f $CONSOLE_LOG
++	fi
++		
++	ocf_log debug "sybaseASE: Starting '$OCF_RESKEY_server_name'..."
++
++	# Run runserver script to start the server. Since this script will be run by root and ASE server
++	# needs to be run by another user, we need to change the user to sybase_user first. Then, run
++	# the script to start the server.
++	su $OCF_RESKEY_sybase_user -c ksh << EOF
++		# set required SYBASE environment by running SYBASE.sh.
++		. $OCF_RESKEY_sybase_home/SYBASE.sh
++		# Run the RUNSERVER_SCRIPT to start the server.
++                . $RUNSERVER_SCRIPT > $CONSOLE_LOG 2>&1 &
++EOF
++
++	# Monitor every 1 seconds if the server has
++	# recovered, until RECOVERY_TIMEOUT.
++	t=0
++	while [[ $t -le $OCF_RESKEY_start_timeout ]]
++	do
++		grep -s "Recovery complete." $CONSOLE_LOG > /dev/null 2>&1
++		if [[ $? != 0 ]]
++		then
++			# The server has not completed the recovery. We need to continue to monitor the recovery
++			# process.
++			t=`expr $t + 1`
++		else
++			# The server has completed the recovery.
++			ocf_log info "sybaseASE: ASE server '$OCF_RESKEY_server_name' started successfully."
++			break
++		fi
++		sleep 1
++	done
++
++	# If $t is larger than start_timeout, it means the ASE server cannot start in given time. Otherwise, it 
++	# means the ASE server has started successfully.
++	if [[ $t -gt $OCF_RESKEY_start_timeout ]]
++	then
++		# The server cannot start in specified time. We think the start is failed.
++		ocf_log err "sybaseASE: Failed to start ASE server '$OCF_RESKEY_server_name'. Please check the server error log $CONSOLE_LOG for possible problems."
++		return $OCF_ERR_GENERIC
++	fi
++
++	ase_is_running
++	if [ $? -ne 0 ]; then
++		ocf_log err "sybaseASE: ase_start could not detect database initialized properly."
++
++		return $OCF_ERR_GENERIC
++	fi
++	ocf_log debug "sybaseASE: End 'ase_start' successfully."
++	return $OCF_SUCCESS
++}
++
++#############################################################################################
++# Function name: ase_stop                                                                   #
++# Parameter: None                                                                           #
++# Return value:                                                                             #
++#             0  SUCCESS                                                                    #
++#             1  FAIL                                                                       #
++# Description: This function is used to stop the ASE server in primary or secondary server. #
++#############################################################################################
++ase_stop()
++{
++	ocf_log debug "sybaseASE: Start 'ase_stop'"
++
++	# Check if the ASE server is still running.
++	ase_is_running
++	if [[ $? != 0 ]]
++	then
++		# The ASE server is not running. We need not to shutdown it.
++		ocf_log info "sybaseASE: The dataserver $OCF_RESKEY_server_name is not running."
++		return $OCF_SUCCESS
++	fi
++
++	set_login_string
++
++	# Just in case things are hung, start a process that will wait for the
++	# timeout period, then kill any remaining processes.  We'll need to
++	# monitor this process (set -m), so we can terminate it later if it is
++	# not needed.
++	set -m
++	kill_ase $OCF_RESKEY_shutdown_timeout &
++	KILL_PID=$!     # If successful, we will also terminate watchdog process
++
++	# Run "shutdown with nowait" from isql command line to shutdown the server
++	su $OCF_RESKEY_sybase_user -c ksh << EOF
++		# set required SYBASE environment by running SYBASE.sh.
++		. $OCF_RESKEY_sybase_home/SYBASE.sh
++		# Run "shutdown with nowait" to shutdown the server immediately.
++		(echo "use master" ; echo go ; echo "shutdown with nowait"; echo go) | \
++		\$SYBASE/\$SYBASE_OCS/bin/isql $login_string -S$OCF_RESKEY_server_name -I$OCF_RESKEY_interfaces_file  &
++EOF
++
++	sleep 5
++
++	# Check if the server has been shut down successfully
++	t=0
++	while [[ $t -lt $OCF_RESKEY_shutdown_timeout ]]
++	do
++		# Search "ueshutdown: exiting" in the server log. If found, it means the server has been shut down. 
++		# Otherwise, we need to wait.
++		tail $CONSOLE_LOG | grep "ueshutdown: exiting" > /dev/null 2>&1
++		if [[ $? != 0 ]]
++		then
++			# The shutdown is still in processing. Wait...
++			sleep 2
++			t=`expr $t+2`
++		else
++			# The shutdown is success.
++			ocf_log info "sybaseASE: ASE server '$OCF_RESKEY_server_name' shutdown with isql successfully."
++			break
++		fi
++	done
++
++	# If $t is larger than shutdown_timeout, it means the ASE server cannot be shut down in given time. We need
++	# to wait for the background kill process to kill the OS processes directly.
++	if  [[ $t -ge $OCF_RESKEY_shutdown_timeout ]]
++	then
++		ocf_log err "sybaseASE: Shutdown of '$OCF_RESKEY_server_name' from isql failed.  Server is either down or unreachable."
++	fi
++
++	# Here, the ASE server has been shut down by isql command or killed by background process. We need to do
++	# further check to make sure all processes have gone away before saying shutdown is complete. This stops the
++	# other node from starting up the package before it has been stopped and the file system has been unmounted.
++	
++	# Get all processes ids from log file
++	declare -a ENGINE_ALL=$(ase_all_pids)
++
++	typeset -i num_procs=${#ENGINE_ALL[@]}
++
++	# We cannot find any process id from log file. It may be because the log file is corrupted or be deleted.
++	# In this case, we determine the shutdown is failed.
++	if [[ "${ENGINE_ALL[@]}" = "" ]]
++	then
++		ocf_log err "sybaseASE: Unable to find the process id from $CONSOLE_LOG."
++		ocf_log err "sybaseASE: Stop ASE server failed."
++		return $OCF_ERR_GENERIC
++	fi
++
++	# Monitor the system processes to make sure all ASE related processes have gone away.
++	while true
++	do
++		# To every engine process, search it in system processes list. If it is not in the
++		# list, it means this process has gone away. Otherwise, we need to wait for it is
++		# killed by background process.
++		for i in ${ENGINE_ALL[@]}
++		do
++			ps -fu $OCF_RESKEY_sybase_user | awk '{print $2}' | grep $i | grep -v grep
++			if [[ $? != 0 ]]
++			then
++				ocf_log debug "sybaseASE: $i process has stopped."
++				c=0
++				while (( c < $num_procs ))
++				do
++					if [[ ${ENGINE_ALL[$c]} = $i ]]
++					then
++						unset ENGINE_ALL[$c]
++						c=$num_procs
++					fi
++					(( c = c + 1 ))	
++				done
++			fi
++		done
++		
++		# To here, all processes should have gone away. 
++		if [[ ${ENGINE_ALL[@]} = "" ]]
++		then
++			#
++			# Looks like shutdown was successful, so kill the
++			# script to kill any hung processes, which we started earlier.
++			# Check to see if the script is still running.  If jobs
++			# returns that the script is done, then we don't need to kill
++			# it.
++			#
++			job=$(jobs | grep -v Done)
++			if [[ ${job} != "" ]]
++			then
++				ocf_log debug "sybaseASE: Killing the kill_ase script."
++
++				kill -15 $KILL_PID > /dev/null 2>&1
++			fi
++			break
++	        fi
++		sleep 5
++	done
++
++	ocf_log debug "sybaseASE: End 'ase_stop'."
++
++	return $OCF_SUCCESS
++}
++
++####################################################################################
++# Function name: ase_is_running                                                    #
++# Parameter: None                                                                  #
++# Return value:                                                                    #
++#             0   ASE server is running                                            #
++#             1   ASE server is not running or there are errors                    #
++# Description: This function is used to check if the ASE server is still running . #
++####################################################################################
++ase_is_running()
++{
++	local PID
++	local THREAD
++	# If the error log doesn't exist, we can say there is no ASE is running.
++	if [[ ! -f $CONSOLE_LOG ]]
++	then
++		ocf_log debug "could not find console log $CONSOLE_LOG"
++		return $OCF_NOT_RUNNING
++	fi
++
++	# The error log file exists. Check if the engine 0 is alive.
++	PID=$(ase_engine0_process)
++	if [ -n "$PID" ]; then
++		kill -s 0 $PID > /dev/null 2>&1
++		if [ $? -eq 0 ]; then
++			# The engine 0 is running.
++			ocf_log debug "Found engine 0 pid $PID to be running"
++			return $OCF_SUCCESS
++		fi
++		# The engine 0 is not running.
++		return $OCF_NOT_RUNNING
++	fi
++
++	PID=$(ase_engine_threadpool_pid)
++	THREAD=$(ase_engine0_thread)
++	if [ -n "$PID" ] && [ -n "$THREAD" ]; then
++		ps -AL | grep -q "${PID}[[:space:]]*${THREAD} "
++		if [ $? -eq 0 ]; then
++			# engine 0 thread is running
++			ocf_log debug "Found engine 0 thread $THREAD in pid $PID to be running"
++			return $OCF_SUCCESS
++		fi
++		# The engine 0 is not running.
++		return $OCF_NOT_RUNNING
++	fi
++	return $OCF_ERR_GENERIC
++}
++
++####################################################################################
++# Function name: kill_ase                                                          #
++# Parameter:                                                                       #
++#             DELAY  The seconds to wait before killing the ASE processes. 0 means #
++#                    kill the ASE processes immediately.                           #
++# Return value: 0 on success, or an error code if no process ids were found.       #
++#                                                                                  #
++# Description: This function is used to kill all ASE server processes.             #
++####################################################################################
++kill_ase()
++{
++	ocf_log debug "sybaseASE: Start 'kill_ase'."
++
++	DELAY=$1
++
++	# Wait for sometime before sending a kill signal.  
++	t=0
++        while [[ $t -lt $DELAY ]]
++        do
++     		sleep 1
++		t=`expr $t+1`
++        done
++
++	# Get the process ids from log file
++	declare -a ENGINE_ALL=$(ase_all_pids)
++
++	# If there is no process id found in the log file, we need not to continue.
++	if [[ "${ENGINE_ALL[@]}" = "" ]]
++	then
++		ocf_log err "sybaseASE: Unable to find the process id from $CONSOLE_LOG."
++		return $OCF_ERR_GENERIC
++	fi
++
++	# Kill the datasever process(es)
++	for pid in ${ENGINE_ALL[@]}
++	do
++		kill -9 $pid > /dev/null 2>&1
++		if [[ $? != 0 ]]
++		then
++			ocf_log info "sybaseASE: kill_ase function did NOT find process $pid running."
++		else
++			ocf_log info "sybaseASE: kill_ase function did find process $pid running.  Sent SIGTERM."
++		fi
++	done
++
++	ocf_log debug "sybaseASE: End 'kill_ase'."
++	return $OCF_SUCCESS
++}
++
++#####################################################################################
++# Function name: ase_status                                                         #
++# Parameter:                                                                        #
++#             0   Level 0 probe. In this level, we just check if engine 0 is alive  #
++#             10  Level 10 probe. In this level, we need to probe if the ASE server #
++#                 still has response.                                               #              
++# Return value:                                                                     #
++#             0   The server is still alive                                         #
++#             1   The server is down                                                #
++# Description: This function is used to check if the ASE server is still running.   #
++#####################################################################################
++ase_status()
++{
++	local rc
++	ocf_log debug "sybaseASE: Start 'ase_status'."
++
++	# Step 1: Check if the engine 0 is alive
++	ase_is_running
++	rc=$?
++	if [ $rc -ne 0 ]; then
++		# ASE is down. Return fail to Pacemaker to trigger the failover process.
++		ocf_log err "sybaseASE: ASE server is down."
++		return $rc
++	fi
++
++	# ASE process is still alive. 
++	# Step2: If this is level 10 probe, We need to check if the ASE server still has response.
++	if [[ $1 -gt 0 ]]
++	then
++		ocf_log debug "sybaseASE: Need to run deep probe."
++		# Run deep probe
++		deep_probe
++		if [[ $? = 1 ]]
++		then
++			# Deep probe failed. This means the server has been down.
++			ocf_log err "sybaseASE: Deep probe found the ASE server is down."
++			return $OCF_ERR_GENERIC
++		fi
++	fi
++
++	ocf_log debug "sybaseASE: End 'ase_status'."
++
++	return $OCF_SUCCESS
++}
++
++####################################################################################
++# Function name: deep_probe                                                        #
++# Parameter: None                                                                  #
++# Return value:                                                                    #
++#             0   ASE server is alive                                              #
++#             1   ASE server is down                                               #
++# Description: This function is used to run deep probe to make sure the ASE server #
++#              still has response.                                                 #
++####################################################################################
++deep_probe()
++{
++	declare -i rv
++	
++	ocf_log debug "sybaseASE: Start 'deep_probe'."	
++
++	# Declare two temporary files which will be used in this probe.
++	tmpfile1="$(mktemp /tmp/sybaseASE.1.XXXXXX)"
++	tmpfile2="$(mktemp /tmp/sybaseASE.2.XXXXXX)"
++	
++	set_login_string
++
++	rm -f $tmpfile1
++	rm -f $tmpfile2
++
++	# The login file is correct. We have gotten the login account and password from it.
++	# Run isql command in background.
++	su $OCF_RESKEY_sybase_user -c ksh << EOF
++		# set required SYBASE environment by running SYBASE.sh.
++		. $OCF_RESKEY_sybase_home/SYBASE.sh
++		# Run a very simple SQL statement to make sure the server is still ok. The output will be put to
++		# tmpfile1.
++		(echo "select 1"; echo "go") |
++		\$SYBASE/\$SYBASE_OCS/bin/isql $login_string -S$OCF_RESKEY_server_name -I$OCF_RESKEY_interfaces_file -t $OCF_RESKEY_deep_probe_timeout -e -o$tmpfile1 &
++		# Record the isql command process id to temporary file. If the isql is hung, we need this process id
++                # to kill the hung process.
++		echo \$! > $tmpfile2	
++EOF
++	
++	declare -i t=0
++		
++	# Monitor the output file tmpfile1.
++	while [[ $t -lt $OCF_RESKEY_deep_probe_timeout ]]
++	do
++		# If the SQL statement is executed successfully, we will get the following output:
++		# 1> select 1
++		# 
++		# -----------
++		#           1
++		# 
++		# (1 row affected)
++		# So, we determine if the execution is success by searching the keyword "(1 row affected)".
++		grep "(1 row affected)" $tmpfile1
++		if [[ $? = 0 ]]
++		then
++			ocf_log debug "sybaseASE: Deep probe success."
++			break
++		else
++			sleep 1
++			t=`expr $t+1`
++		fi
++	done	
++
++	# If $t is larger than deep_probe_timeout, it means the isql command line cannot finish in given time.
++	# This means the deep probe failed. We need to kill the isql process manually.
++	if [[ $t -ge $OCF_RESKEY_deep_probe_timeout ]]
++	then
++		ocf_log err "sybaseASE: Deep probe fail. The dataserver has no response."		
++
++		# Read the process id of isql process from tmpfile2
++		pid=`cat $tmpfile2 | awk '{print $1}'`
++
++		rm -f $tmpfile1
++		rm -f $tmpfile2
++
++		# Kill the isql process directly.
++		kill -9 $pid
++		return 1
++	fi
++
++	rm -f $tmpfile1
++	rm -f $tmpfile2
++
++	ocf_log debug "sybaseASE: End 'deep_probe'."
++
++	return 0
++}
++
++#############################
++# Do some real work here... #
++#############################
++case $1 in
++	start)
++		verify_all || exit $OCF_ERR_GENERIC
++		ase_start
++		exit $?
++		;;
++	stop)
++		verify_all || exit $OCF_ERR_GENERIC
++		ase_stop
++		exit $?
++		;;
++	status | monitor)
++		verify_all || exit $OCF_ERR_GENERIC
++		ase_status $OCF_CHECK_LEVEL
++		exit $?
++		;;
++	meta-data)
++		meta_data
++		exit $OCF_SUCCESS
++		;;
++	validate-all)
++		verify_all
++		exit $?
++		;;
++	*)
++		echo "Usage: $SCRIPT {start|stop|monitor|status|validate-all|meta-data}"
++		exit $OCF_ERR_UNIMPLEMENTED
++		;;
++esac
++exit 0
++
diff --git a/SOURCES/bz1445628-findif-improve-IPv6-NIC-detection.patch b/SOURCES/bz1445628-findif-improve-IPv6-NIC-detection.patch
new file mode 100644
index 0000000..caeb02b
--- /dev/null
+++ b/SOURCES/bz1445628-findif-improve-IPv6-NIC-detection.patch
@@ -0,0 +1,44 @@
+From 7629514ec332fbcb72c420683b1a1b5437ff60a6 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 15 Sep 2017 11:25:40 +0200
+Subject: [PATCH] findif: improve IPv6 NIC detection
+
+---
+ heartbeat/findif.sh | 17 ++++++-----------
+ 1 file changed, 6 insertions(+), 11 deletions(-)
+
+diff --git a/heartbeat/findif.sh b/heartbeat/findif.sh
+index a643da119..019098360 100644
+--- a/heartbeat/findif.sh
++++ b/heartbeat/findif.sh
+@@ -233,6 +233,8 @@ findif()
+     fi
+     case $1 in
+     */*) : OK ;;
++    # "ip route" doesnt show netmask for IPv6 /128
++    *:*:*) : OK ;;
+     *)
+       ocf_exit_reason "Unable to find cidr_netmask."
+       return $OCF_ERR_GENERIC ;;
+@@ -240,17 +242,10 @@ findif()
+   fi
+   [ -z "$nic" ] && nic=$3
+   [ -z "$netmask" ] && netmask=${1#*/}
+-  if [ $family = "inet" ] ; then
+-    if [ -z "$brdcast" ] ; then
+-      if [ -n "$7" ] ; then
+-        set -- `ip -o -f $family addr show | grep $7`
+-        [ "$5" = brd ] && brdcast=$6
+-      fi
+-    fi
+-  else
+-    if [ -z "$OCF_RESKEY_nic" -a "$netmask" != "${1#*/}" ] ; then
+-      ocf_exit_reason "Unable to find nic, or netmask mismatch."
+-      return $OCF_ERR_GENERIC
++  if [ -z "$brdcast" ] ; then
++    if [ -n "$7" ] ; then
++      set -- `ip -o -f $family addr show | grep $7`
++      [ "$5" = brd ] && brdcast=$6
+     fi
+   fi
+   echo "$nic netmask $netmask broadcast $brdcast"
diff --git a/SOURCES/bz1451933-LVM-update-metadata-on-start-relocate.patch b/SOURCES/bz1451933-LVM-update-metadata-on-start-relocate.patch
deleted file mode 100644
index 2514a34..0000000
--- a/SOURCES/bz1451933-LVM-update-metadata-on-start-relocate.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-From 850ee793c5c575898528ab4bd6815431e963d22d Mon Sep 17 00:00:00 2001
-From: Oyvind Albrigtsen <oalbrigt@redhat.com>
-Date: Mon, 22 May 2017 15:01:16 +0200
-Subject: [PATCH] LVM: use vgscan --cache to update metadata during
- start/relocate
-
----
- heartbeat/LVM | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/heartbeat/LVM b/heartbeat/LVM
-index 5b265f58f..583b9a2bd 100755
---- a/heartbeat/LVM
-+++ b/heartbeat/LVM
-@@ -431,7 +431,7 @@ LVM_start() {
- 	if [ "$LVM_MAJOR" -eq "1" ]; then
- 		ocf_run vgscan $vg
- 	else
--		ocf_run vgscan
-+		ocf_run vgscan --cache
- 	fi
- 
- 	case $(get_vg_mode) in
diff --git a/SOURCES/bz1454699-LVM-status-check-for-missing-VG.patch b/SOURCES/bz1454699-LVM-status-check-for-missing-VG.patch
deleted file mode 100644
index 06eb832..0000000
--- a/SOURCES/bz1454699-LVM-status-check-for-missing-VG.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From e587a7dbc17c24de14098a1b56b6de48ded9d8ba Mon Sep 17 00:00:00 2001
-From: Oyvind Albrigtsen <oalbrigt@redhat.com>
-Date: Wed, 24 May 2017 13:03:47 +0200
-Subject: [PATCH] LVM: status check for missing VG
-
----
- heartbeat/LVM | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
-
-diff --git a/heartbeat/LVM b/heartbeat/LVM
-index 5b265f58f..0e5b14d72 100755
---- a/heartbeat/LVM
-+++ b/heartbeat/LVM
-@@ -320,6 +320,18 @@ LVM_status() {
- 			fi
- 		fi
- 	fi
-+
-+	# Check if VG is still available (e.g. for multipath where the device
-+	# doesn't disappear)
-+	if [ "$LVM_MAJOR" -eq "1" ]; then
-+		output=$(vgscan $vg 2>&1)
-+	else
-+		output=$(vgscan --cache 2>&1)
-+	fi
-+	if ! echo "$output" | grep -q "Found.*\"$1\""; then
-+		ocf_exit_reason "LVM Volume $1 is not available"
-+		return $OCF_ERR_GENERIC
-+	fi
- 	
- 	if [ -d /dev/$1 ]; then
- 		test "`cd /dev/$1 && ls`" != ""
diff --git a/SOURCES/bz1455305-VirtualDomain-fix-sed-migrate_options.patch b/SOURCES/bz1455305-VirtualDomain-fix-sed-migrate_options.patch
new file mode 100644
index 0000000..9c7c1ca
--- /dev/null
+++ b/SOURCES/bz1455305-VirtualDomain-fix-sed-migrate_options.patch
@@ -0,0 +1,22 @@
+From 43a6e76f6e685a35db9ddb23c651ab4eed0affae Mon Sep 17 00:00:00 2001
+From: Dejan Muhamedagic <dejan@suse.de>
+Date: Thu, 29 Jan 2015 17:54:05 +0100
+Subject: [PATCH] Dev: VirtualDomain: fix sed expression
+
+---
+ heartbeat/VirtualDomain | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain
+index 17eb94afd..0f6b0bc4f 100755
+--- a/heartbeat/VirtualDomain
++++ b/heartbeat/VirtualDomain
+@@ -664,7 +664,7 @@ VirtualDomain_Migrate_To() {
+ 			migrateuri=`echo "$migrate_opts" |
+ 				sed "s/.*--migrateuri=\([^ ]*\).*/\1/;s/%n/$target_node/g"`
+ 			migrate_opts=`echo "$migrate_opts" |
+-				sed "s/\(.*\)--migrateuri=[^ ]*\(.*\)/\1\3/"`
++				sed "s/\(.*\)--migrateuri=[^ ]*\(.*\)/\1\2/"`
+ 		else
+ 			migrateuri=`mk_migrateuri`
+ 		fi
diff --git a/SOURCES/bz1457382-portblock-suppress-dd-output.patch b/SOURCES/bz1457382-portblock-suppress-dd-output.patch
new file mode 100644
index 0000000..087b3a4
--- /dev/null
+++ b/SOURCES/bz1457382-portblock-suppress-dd-output.patch
@@ -0,0 +1,22 @@
+From 1e7921fe7b257973b4c27c30627e9bdb4b1a8ae2 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 7 Jul 2017 15:27:50 +0200
+Subject: [PATCH] portblock: dont log dd "0+0 records in/out"
+
+---
+ heartbeat/portblock | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/portblock b/heartbeat/portblock
+index 776ad17e4..a518f49fe 100755
+--- a/heartbeat/portblock
++++ b/heartbeat/portblock
+@@ -253,7 +253,7 @@ save_tcp_connections()
+ 		netstat -tn |awk -F '[:[:space:]]+' '
+ 			$8 == "ESTABLISHED" && $4 == "'$OCF_RESKEY_ip'" \
+ 			{printf "%s:%s\t%s:%s\n", $4,$5, $6,$7}' |
+-			dd of="$statefile".new conv=fsync && 
++			dd of="$statefile".new conv=fsync status=none &&
+ 			mv "$statefile".new "$statefile"
+ 	else
+ 		netstat -tn |awk -F '[:[:space:]]+' '
diff --git a/SOURCES/bz1462802-systemd-tmpfiles.patch b/SOURCES/bz1462802-systemd-tmpfiles.patch
new file mode 100644
index 0000000..6ebb047
--- /dev/null
+++ b/SOURCES/bz1462802-systemd-tmpfiles.patch
@@ -0,0 +1,59 @@
+diff -uNr a/configure.ac b/configure.ac
+--- a/configure.ac	2017-09-01 15:04:40.575443547 +0200
++++ b/configure.ac	2017-09-01 15:05:26.542004352 +0200
+@@ -80,6 +80,14 @@
+       [AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])])
+ AM_CONDITIONAL([HAVE_SYSTEMD], [test "x$with_systemdsystemunitdir" != "xno"])
+ 
++AC_ARG_WITH([systemdtmpfilesdir],
++     AS_HELP_STRING([--with-systemdtmpfilesdir=DIR], [Directory for systemd tmp files]),
++     [], [with_systemdtmpfilesdir=$($PKGCONFIG --variable=tmpfilesdir systemd)])
++     if test "x$with_systemdtmpfilesdir" != xno; then
++         AC_SUBST([systemdtmpfilesdir], [$with_systemdtmpfilesdir])
++     fi
++AM_CONDITIONAL(HAVE_SYSTEMD, [test -n "$with_systemdtmpfilesdir" -a "x$with_systemdtmpfilesdir" != xno ])
++
+ dnl 
+ dnl AM_INIT_AUTOMAKE([1.11.1 foreign dist-bzip2 dist-xz])
+ dnl
+diff -uNr a/resource-agents.spec.in b/resource-agents.spec.in
+--- a/resource-agents.spec.in	2017-09-01 15:04:40.576443537 +0200
++++ b/resource-agents.spec.in	2017-09-01 15:06:23.343461633 +0200
+@@ -174,6 +174,12 @@
+ %configure \
+ 	%{?conf_opt_rsctmpdir:%conf_opt_rsctmpdir} \
+ 	%{conf_opt_fatal} \
++%if %{defined _unitdir}
++    --with-systemdsystemunitdir=%{_unitdir} \
++%endif
++%if %{defined _tmpfilesdir}
++    --with-systemdtmpfilesdir=%{_tmpfilesdir} \
++%endif
+ 	--with-pkg-name=%{name} \
+ 	--with-ras-set=%{rasset}
+ 
+@@ -234,6 +240,9 @@
+ %if %{defined _unitdir}
+ %{_unitdir}/resource-agents-deps.target
+ %endif
++%if %{defined _tmpfilesdir}
++%{_tmpfilesdir}/%{name}.conf
++%endif
+ 
+ %dir %{_datadir}/%{name}
+ %dir %{_datadir}/%{name}/ocft
+diff -uNr a/systemd/Makefile.am b/systemd/Makefile.am
+--- a/systemd/Makefile.am	2017-09-01 15:04:40.577443527 +0200
++++ b/systemd/Makefile.am	2017-09-01 15:05:26.543004342 +0200
+@@ -20,4 +20,6 @@
+ 
+ if HAVE_SYSTEMD
+ dist_systemdsystemunit_DATA = resource-agents-deps.target
++
++dist_systemdtmpfiles_DATA = resource-agents.conf
+ endif
+diff -uNr a/systemd/resource-agents.conf b/systemd/resource-agents.conf
+--- a/systemd/resource-agents.conf	1970-01-01 01:00:00.000000000 +0100
++++ b/systemd/resource-agents.conf	2017-09-01 15:05:26.543004342 +0200
+@@ -0,0 +1 @@
++d /var/run/resource-agents/ 1755 root root
diff --git a/SOURCES/bz1465822-OCF-improve-locking.patch b/SOURCES/bz1465822-OCF-improve-locking.patch
new file mode 100644
index 0000000..707db71
--- /dev/null
+++ b/SOURCES/bz1465822-OCF-improve-locking.patch
@@ -0,0 +1,185 @@
+From 738577dd30b782104057496bf01f09e28216892b Mon Sep 17 00:00:00 2001
+From: Dejan Muhamedagic <dejan@hello-penguin.com>
+Date: Mon, 26 Jun 2017 15:56:01 +0200
+Subject: [PATCH 1/2] Medium: ocf-shellfuncs: improve locking (ocf_take_lock)
+
+This change improves locking by ocf_take_lock(). It uses mkdir(1)
+to prevent two instances from creating the same directory (named
+by the lock).
+
+The major difficulty is to prevent a race when a stale lock is
+discovered. If two processes try to remove the stale lock at
+about the same time, the one which runs slightly later can remove
+the lock which just got created by the one which run slightly
+earlier. The probability of this race is significantly reduced by
+testing for stale lock twice with a random sleep in between.
+
+Though this change does not exclude a race entirely, it makes it
+extremely improbable. In addition, stale locks are result of only
+abnormal circumstances and occur seldom.
+
+The function providing random numbers has been modified to use
+either /dev/urandom or awk (with the process pid as the seed).
+
+It was thoroughly tested with both stale lock simulation and
+without, by running 64 instances of processes trying to get the
+lock on a workstation with 4 cpus.
+---
+ heartbeat/ocf-shellfuncs.in | 74 ++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 57 insertions(+), 17 deletions(-)
+
+diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
+index ebc221d5f..615f5b4b8 100644
+--- a/heartbeat/ocf-shellfuncs.in
++++ b/heartbeat/ocf-shellfuncs.in
+@@ -72,10 +72,11 @@ ocf_is_root() {
+ }
+ 
+ ocf_maybe_random() {
+-	local rnd="$RANDOM"
+-	# Something sane-ish in case a shell doesn't support $RANDOM
+-	[ -n "$rnd" ] || rnd=$$
+-	echo $rnd
++	if test -c /dev/urandom; then
++		od -An -N4 -tu4 /dev/urandom | tr -d '[:space:]'
++	else
++		awk -v pid=$$ 'BEGIN{srand(pid); print rand()}' | sed 's/^.*[.]//'
++	fi
+ }
+ 
+ # Portability comments:
+@@ -465,24 +466,63 @@ ocf_pidfile_status() {
+     return 1
+ }
+ 
+-ocf_take_lock() {
+-    local lockfile=$1
+-    local rnd=$(ocf_maybe_random)
++# mkdir(1) based locking
++# first the directory is created with the name given as $1
++# then a file named "pid" is created within that directory with
++# the process PID
+ 
+-    sleep 0.$rnd
+-    while 
+-	ocf_pidfile_status $lockfile
+-    do
+-	ocf_log info "Sleeping until $lockfile is released..."
+-	sleep 0.$rnd
+-    done
+-    echo $$ > $lockfile
++ocf_get_stale_pid() {
++	local piddir=$1
++	local pid
++	[ -z "$piddir" ] && return 2
++	pid=`cat $piddir/pid 2>/dev/null`
++	[ -z "$pid" ] && return 1 # no process
++	kill -0 $pid >/dev/null 2>&1 && return 1 # not stale
++	echo $pid
+ }
+ 
++# There is a race when the following two functions to manage the
++# lock file (mk and rm) are invoked in parallel by different
++# instances. It is up to the caller to reduce probability of that
++# taking place (see ocf_take_lock() below).
++
++ocf_mk_pid() {
++	mkdir $1 2>/dev/null && echo $$ > $1/pid
++}
++ocf_rm_pid() {
++	rm -f $1/pid
++	rmdir $1 2>/dev/null
++}
++
++# Testing and subsequently removing a stale lock (containing the
++# process pid) is inherently difficult to do in such a way as to
++# prevent a race between creating a pid file and removing it and
++# its directory. We reduce the probability of that happening by
++# checking if the stale lock persists over a random period of
++# time.
++
++ocf_take_lock() {
++	local lockdir=$1
++	local rnd
++	local stale_pid
++
++	# we don't want it too short, so strip leading zeros
++	rnd=$(ocf_maybe_random | sed 's/^0*//')
++	stale_pid=`ocf_get_stale_pid $lockdir`
++	if [ -n "$stale_pid" ]; then
++		sleep 0.$rnd
++		# remove "stale pid" only if it persists
++		[ "$stale_pid" = "`ocf_get_stale_pid $lockdir`" ] &&
++			ocf_rm_pid $lockdir
++	fi
++	while ! ocf_mk_pid $lockdir; do
++		ocf_log info "Sleeping until $lockdir is released..."
++		sleep 0.$rnd
++	done
++}
+ 
+ ocf_release_lock_on_exit() {
+-    local lockfile=$1
+-    trap "rm -f $lockfile" EXIT
++	trap "ocf_rm_pid $1" EXIT
+ }
+ 
+ # returns true if the CRM is currently running a probe. A probe is
+
+From 46e6f1d0e736e68c7a48c94083d7037e590365b4 Mon Sep 17 00:00:00 2001
+From: Dejan Muhamedagic <dejan@hello-penguin.com>
+Date: Mon, 26 Jun 2017 20:29:06 +0200
+Subject: [PATCH 2/2] Dev: ocf-shellfuncs: handle empty lock directories
+
+---
+ heartbeat/ocf-shellfuncs.in | 34 ++++++++++++++++++++++++++++------
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
+index 615f5b4b8..817b2a557 100644
+--- a/heartbeat/ocf-shellfuncs.in
++++ b/heartbeat/ocf-shellfuncs.in
+@@ -470,15 +470,37 @@ ocf_pidfile_status() {
+ # first the directory is created with the name given as $1
+ # then a file named "pid" is created within that directory with
+ # the process PID
+-
++# stale locks are handled carefully, the inode of a directory
++# needs to match before and after test if the process is running
++# empty directories are also handled appropriately
++# we relax (sleep) occasionally to allow for other processes to
++# finish managing the lock in case they are in the middle of the
++# business
++
++relax() { sleep 0.5; }
+ ocf_get_stale_pid() {
+-	local piddir=$1
+-	local pid
++	local piddir pid dir_inode
++
++	piddir="$1"
+ 	[ -z "$piddir" ] && return 2
++	dir_inode="`ls -di $piddir 2>/dev/null`"
++	[ -z "$dir_inode" ] && return 1
+ 	pid=`cat $piddir/pid 2>/dev/null`
+-	[ -z "$pid" ] && return 1 # no process
+-	kill -0 $pid >/dev/null 2>&1 && return 1 # not stale
+-	echo $pid
++	if [ -z "$pid" ]; then
++		# empty directory?
++		relax
++		if [ "$dir_inode" = "`ls -di $piddir 2>/dev/null`" ]; then
++			echo $dir_inode
++		else
++			return 1
++		fi
++	elif kill -0 $pid >/dev/null 2>&1; then
++		return 1
++	elif relax && [ -e "$piddir/pid" ] && [ "$dir_inode" = "`ls -di $piddir 2>/dev/null`" ]; then
++		echo $pid
++	else
++		return 1
++	fi
+ }
+ 
+ # There is a race when the following two functions to manage the
diff --git a/SOURCES/bz1465827-mysql-fix-master-score-maintenance.patch b/SOURCES/bz1465827-mysql-fix-master-score-maintenance.patch
new file mode 100644
index 0000000..ef4b0fd
--- /dev/null
+++ b/SOURCES/bz1465827-mysql-fix-master-score-maintenance.patch
@@ -0,0 +1,121 @@
+From 7fe4d007da92381c692b5ae47cec7f63e06b1a6a Mon Sep 17 00:00:00 2001
+From: vaLentin chernoZemski <valentin@siteground.com>
+Date: Thu, 13 Oct 2016 13:17:59 +0300
+Subject: [PATCH 1/2]     heartbeat/mysql - Fixed bug where crm_admin is never
+ called, leaving master scores to -1 in certain conditions.
+
+    Consider the following scenario:
+
+    - crm got mysql master slave resource configured without providing check_level and test_table in the config
+    - crm is put into maintenance mode
+    - mysql replication is adjusted automatically or by hand
+    - crm is restarted on all nodes
+    - crm resources are reprobed
+    - crm is put into live mode
+    - at this point all nodes are working as expected but NONE of them got any master-mysql score set thus defaulting to -1. monitor of the resource never called crm_master.
+    - master fails
+    - crm will refuse to elect any slaves with the following error
+
+            failednode.com pengine: debug: master_color: mysql:0 master score: -1
+
+	When ms_mysql resource is configured master-mysql attribute/score for each node is not set by default thus returning -1. This translates to 'never promote this service as master on this machine'
+
+    master-mysql should be set to positive value by the resource agent when RA decides that this machine is suitable for master.
+
+    In the configuration set specified above if crm never did any operations on the mysql service such as start/stop/promote/demote score on particular node score remains -1 for that node. It just never called crm_master.
+
+    When current master fails and new one needs to be promoted/elected crm is unable to choose new master with following error:
+
+        failednode.com pengine: debug: master_color: mysql:1 master score: 0 ---> because node that hosts mysql:1 is down
+        failednode.com pengine: debug: master_color: mysql:0 master score: -1 --> because the current live node got initial default value
+
+    Respectively we fail to promote new master node for the particular service.
+
+        failednode.com pengine: info: master_color: ms_mysql: Promoted 0 instances of a possible 1 to master
+
+    When failover procedure is started crm calls resource agents (read: the ocf 'init' script) with action 'monitor' on all live nodes that have the particular master/slave resource started.
+
+    This monitor operation is expected to return master-mysql scorenum here. But it did not due to specific conditions and configurations.
+
+    To solve this issue we modified the mysql resource agent to always export master-mysql scores depending on the response if called with 'monitor'.
+
+    Scores are exported by calling:
+
+        crm_master -l reboot -v SCORE - if status is success. The higher the score, the better the chance to elect this node,
+        crm_master -l reboot -D - if monitor operation fails thus instructing the engine that the current node can not be used as master as it got some issues.
+---
+ heartbeat/mysql | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/mysql b/heartbeat/mysql
+index be914d3b2..707bff33c 100755
+--- a/heartbeat/mysql
++++ b/heartbeat/mysql
+@@ -719,13 +719,22 @@ mysql_monitor() {
+     fi
+  
+     mysql_common_status $status_loglevel
+-
+     rc=$?
+ 
+     # TODO: check max connections error
+ 
+     # If status returned an error, return that immediately
+     if [ $rc -ne $OCF_SUCCESS ]; then
++        if ( ocf_is_ms ); then
++            # This is a master slave setup but monitored host returned some errors.
++            # Immediately remove it from the pool of possible masters by erasing its master-mysql key
++            # When new mysql master election is started and node got no or negative master-mysql attribute the following is logged
++            #   nodename.com pengine: debug: master_color: mysql:0 master score: -1
++            # If there are NO nodes with positive vaule election of mysql master will fail with
++            #   nodename.com pengine: info: master_color: ms_mysql: Promoted 0 instances of a possible 1 to master
++            $CRM_MASTER -D
++        fi
++
+         return $rc
+     fi
+ 
+@@ -742,13 +751,20 @@ mysql_monitor() {
+         rc=$?
+ 
+         if [ $rc -ne 0 ]; then
++            # We are master/slave and test failed. Delete master score for this node as it is considered unhealthy because of this particular failed check.
++            ocf_is_ms && $CRM_MASTER -D
+             ocf_exit_reason "Failed to select from $test_table";
+             return $OCF_ERR_GENERIC;
+         fi
++    else
++        # In case no exnteded tests are enabled and we are in master/slave mode _always_ set the master score to 1 if we reached this point
++        ocf_is_ms && $CRM_MASTER -v 1
+     fi
+ 
+     if ocf_is_ms && ! get_read_only; then
+         ocf_log debug "MySQL monitor succeeded (master)";
++        # Always set master score for the master
++        $CRM_MASTER -v 2
+         return $OCF_RUNNING_MASTER
+     else
+         ocf_log debug "MySQL monitor succeeded";
+
+From 8ba16bcd7ff23be983570df0afe447beabd1c682 Mon Sep 17 00:00:00 2001
+From: vaLentin chernoZemski <valentin@siteground.com>
+Date: Mon, 23 Jan 2017 10:46:52 +0200
+Subject: [PATCH 2/2] heartbeat/mysql - don't run ocf_is_ms check in a subshell
+
+---
+ heartbeat/mysql | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/mysql b/heartbeat/mysql
+index 707bff33c..9e779e4f9 100755
+--- a/heartbeat/mysql
++++ b/heartbeat/mysql
+@@ -725,7 +725,7 @@ mysql_monitor() {
+ 
+     # If status returned an error, return that immediately
+     if [ $rc -ne $OCF_SUCCESS ]; then
+-        if ( ocf_is_ms ); then
++        if ocf_is_ms ; then
+             # This is a master slave setup but monitored host returned some errors.
+             # Immediately remove it from the pool of possible masters by erasing its master-mysql key
+             # When new mysql master election is started and node got no or negative master-mysql attribute the following is logged
diff --git a/SOURCES/bz1466187-SAPInstance-IS_ERS-parameter-for-ASCS-ERS-Netweaver.patch b/SOURCES/bz1466187-SAPInstance-IS_ERS-parameter-for-ASCS-ERS-Netweaver.patch
new file mode 100644
index 0000000..bbadce1
--- /dev/null
+++ b/SOURCES/bz1466187-SAPInstance-IS_ERS-parameter-for-ASCS-ERS-Netweaver.patch
@@ -0,0 +1,73 @@
+From 2118e5324917938ee2e00926778cfe5159043165 Mon Sep 17 00:00:00 2001
+From: Fabian Herschel <fabian.herschel@suse.com>
+Date: Thu, 27 Apr 2017 12:47:37 +0200
+Subject: [PATCH] Medium: SAPInstance: Add IS_ERS parameter (bsc#1036486)
+
+If IS_ERS is true, mark a per cluster attribute for a
+specific ASCS/ERS pair describing which node is the
+best place to failover a failed ASCS.
+---
+ heartbeat/SAPInstance | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/heartbeat/SAPInstance b/heartbeat/SAPInstance
+index 49e60aa30..871cbcf3d 100755
+--- a/heartbeat/SAPInstance
++++ b/heartbeat/SAPInstance
+@@ -31,6 +31,7 @@
+ #	OCF_RESKEY_POST_START_USEREXIT	(optional, lists a script which can be executed after the resource is started)
+ #	OCF_RESKEY_PRE_STOP_USEREXIT	(optional, lists a script which can be executed before the resource is stopped)
+ #	OCF_RESKEY_POST_STOP_USEREXIT	(optional, lists a script which can be executed after the resource is stopped)
++#	OCF_RESKEY_IS_ERS               (needed for ENQ/REPL NW 740)
+ #
+ #  TODO: - Option to shutdown sapstartsrv for non-active instances -> that means: do probes only with OS tools (sapinstance_status)
+ #        - Option for better standalone enqueue server monitoring, using ensmon (test enque-deque)
+@@ -195,6 +196,15 @@ The name of the SAP START profile. Specify this parameter, if you have changed t
+   <shortdesc lang="en">Path to a post-start script</shortdesc>
+   <content type="string" default="" />
+  </parameter>
++ <parameter name="IS_ERS" unique="0" required="0">
++  <longdesc lang="en">Only used for ASCS/ERS SAP Netweaver installations without implementing a master/slave resource to
++    allow the ASCS to 'find' the ERS running on an other cluster node after a resource failure. This parameter should be set
++    to true 'only' for the ERS instance for implementations following the SAP NetWeaver 7.40 HA certification (NW-HA-CLU-740). This includes also
++    systems for NetWeaver less than 7.40, if you like to impelemnt the NW-HA-CLU-740 scenario.
++  </longdesc>
++  <shortdesc lang="en">Mark SAPInstance as ERS instance</shortdesc>
++  <content type="boolean" default="false" />
++ </parameter>
+ </parameters>
+ 
+ <actions>
+@@ -342,6 +352,12 @@ sapinstance_init() {
+     currentSTART_PROFILE=$OCF_RESKEY_START_PROFILE
+   fi
+ 
++  if [ -z "$OCF_RESKEY_IS_ERS" ]; then
++      is_ers="no"
++  else
++      is_ers="$OCF_RESKEY_IS_ERS"
++  fi
++
+   if [ -z "$currentSTART_PROFILE" ]
+   then
+     SAPSTARTPROFILE="$DIR_PROFILE/START_${InstanceName}_${SAPVIRHOST}"
+@@ -568,9 +584,11 @@ sapinstance_start() {
+     ocf_log info "SAP Instance $SID-$InstanceName started: $output"
+     rc=$OCF_SUCCESS
+     sapuserexit POST_START_USEREXIT "$OCF_RESKEY_POST_START_USEREXIT"
++    if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 1 -l reboot; fi
+   else
+     ocf_log err "SAP Instance $SID-$InstanceName start failed: $output"
+     rc=$OCF_NOT_RUNNING
++    if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 0 -l reboot; fi
+   fi
+ 
+   return $rc
+@@ -628,6 +646,7 @@ sapinstance_stop() {
+   fi
+ 
+   sapuserexit POST_STOP_USEREXIT "$OCF_RESKEY_POST_STOP_USEREXIT"
++  if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 0 -l reboot;  fi
+ 
+   return $rc
+ }
diff --git a/SOURCES/bz1484473-ethmonitor-vlan-fix.patch b/SOURCES/bz1484473-ethmonitor-vlan-fix.patch
new file mode 100644
index 0000000..066771f
--- /dev/null
+++ b/SOURCES/bz1484473-ethmonitor-vlan-fix.patch
@@ -0,0 +1,25 @@
+From 5fae12629fcfbd00ef2433071d1c09503829624b Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 25 Aug 2017 13:03:10 +0200
+Subject: [PATCH] ethmonitor: fix for VLAN interfaces
+
+---
+ heartbeat/ethmonitor | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/ethmonitor b/heartbeat/ethmonitor
+index 81a7c0b75..6628c474d 100755
+--- a/heartbeat/ethmonitor
++++ b/heartbeat/ethmonitor
+@@ -206,9 +206,9 @@ is_interface() {
+ 	#
+ 	# List interfaces but exclude FreeS/WAN ipsecN virtual interfaces
+ 	#
+-	local iface=`$IP2UTIL -o -f link addr show | grep " $1:" \
++	local iface=`$IP2UTIL -o -f link addr show | grep " $1\(@[A-Za-z0-9\.]*\)\?:" \
+ 		| cut -d ' ' -f2 | sort -u | grep -v '^ipsec[0-9][0-9]*$' \
+-		| sed -e 's/:$//'`
++		| sed -e 's/\(@.*\)\?:$//'`
+ 		[ "$iface" != "" ]
+ }
+ 
diff --git a/SOURCES/bz1489734-1-support-per-host-per-bundle-attribs.patch b/SOURCES/bz1489734-1-support-per-host-per-bundle-attribs.patch
new file mode 100644
index 0000000..aef9b6d
--- /dev/null
+++ b/SOURCES/bz1489734-1-support-per-host-per-bundle-attribs.patch
@@ -0,0 +1,94 @@
+From 708e11c13ac25e1db5a4552db699a652f4e32353 Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Thu, 7 Sep 2017 18:56:24 +0200
+Subject: [PATCH 1/2] Introduce helper functions for container-attribute-target
+
+In this change we introduce the ocf_attribute_target() function that helps
+RAs decide where to store per-node attributes. The rationale is that
+when an OCF resource runs in a bundle (e.g. rabbitmq-bundle-0) the
+NODENAME will point to the bundle name and not to the physical node
+running the bundle. Since a bundle can run on any cluster node, this
+is not ideal in the situations in which an RA wants to remember on which
+*host* a bundle was running (this is typically the case when there is no
+shared storage)
+
+The way this new ocf_attribute_target() function works is the following:
+A) When the meta-attr 'container-attribute-target' == 'host' and the
+   function is called without arguments it will return the physical
+   hostname the resource is running on.
+B) When the meta-attr 'container-attribute-target' != 'host' and the
+   function is called without arguments it will return the NODENAME
+   (default)
+C) When the meta-attr 'container-attribute-target' == 'host' and the
+   function is called with an argument it will return the physical
+   hostname on which the corresponding argument is running on.
+D) When the meta-attr 'container-attribute-target' != 'host' and the
+   function is called with an argument it will return the NODENAME
+   (default)
+
+The basic idea is that if resources need to store per-host attributes
+you will set the meta attribute 'container-attribute-target' equal to
+host (the no-shared storage case). If resources need to store attributes
+on a per-bundle basis (because they access data from shared-storage)
+then no change is needed on meta attributes (this is the default
+behaviour).
+
+Signed-off-by: Andrew Beekhof <abeekhof@redhat.com>
+Tested-by: Michele Baldessari <michele@acksyn.org>
+Tested-by: Damien Ciabrini <dciabrin@redhat.com>
+---
+ heartbeat/ocf-shellfuncs.in | 38 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
+diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
+index 9b6b99f88a56..ddd6854e9487 100644
+--- a/heartbeat/ocf-shellfuncs.in
++++ b/heartbeat/ocf-shellfuncs.in
+@@ -989,6 +989,44 @@ ocf_stop_trace() {
+ 	set +x
+ }
+ 
++# Helper functions to map from nodename/bundle-name and physical hostname
++# list_index_for_word "node0 node1 node2 node3 node4 node5" node4 --> 5
++# list_word_at_index "NA host1 host2 host3 host4 host5" 3      --> host2
++
++# list_index_for_word "node1 node2 node3 node4 node5" node7 --> ""
++# list_word_at_index "host1 host2 host3 host4 host5" 8      --> ""
++
++# attribute_target node1                                    --> host1
++list_index_for_word() {
++	echo $1 | tr ' ' '\n' | awk -v x="$2" '$0~x {print NR}'
++}
++
++list_word_at_index() {
++	echo $1 | tr ' ' '\n' | awk -v n="$2" 'n == NR'
++}
++
++ocf_attribute_target() {
++	if [ x$1 = x ]; then
++		if [ x$OCF_RESKEY_CRM_meta_container_attribute_target = xhost -a x$OCF_RESKEY_CRM_meta_physical_host != x ]; then
++			echo $OCF_RESKEY_CRM_meta_physical_host
++		else
++			echo $OCF_RESKEY_CRM_meta_on_node
++		fi
++		return
++	elif [ x"$OCF_RESKEY_CRM_meta_notify_all_uname" != x ]; then
++		index=$(list_index_for_word "$OCF_RESKEY_CRM_meta_notify_all_uname" $1)
++		mapping=""
++		if [ x$index != x ]; then
++			mapping=$(list_word_at_index "$OCF_RESKEY_CRM_meta_notify_all_hosts" $index)
++		fi
++		if [ x$mapping != x -a x$mapping != xNA ]; then
++			echo $mapping
++			return
++		fi
++	fi
++	echo $1
++}
++
+ __ocf_set_defaults "$@"
+ 
+ : ${OCF_TRACE_RA:=$OCF_RESKEY_trace_ra}
+-- 
+2.13.5
+
diff --git a/SOURCES/bz1489734-2-support-per-host-per-bundle-attribs.patch b/SOURCES/bz1489734-2-support-per-host-per-bundle-attribs.patch
new file mode 100644
index 0000000..fa16395
--- /dev/null
+++ b/SOURCES/bz1489734-2-support-per-host-per-bundle-attribs.patch
@@ -0,0 +1,146 @@
+From 9bd94137d77f770967d35db5de716590cfaf0435 Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Thu, 7 Sep 2017 21:07:45 +0200
+Subject: [PATCH 2/2] Make use of ocf_attribute_target in rabbitmq/redis/galera
+
+Instead of using NODENAME directly use the newly-introduced
+ocf_attribute_target function. This allows the operator to decide if an RA
+running inside a bundle should use per-host properties or per-bundle
+properties in a resource. This can be done by setting the meta-attribute
+'container-attribute-target' to 'host' in the former case and leave the
+defaults as is in the latter case.
+
+This change has been tested in the following scenarios (for rabbit/redis
+and galera):
+1) A deployment without bundles and without the container-attribute-target meta attr set.
+2) A deployment with the resources running in bundles without the meta-attr set
+3) A deployment with the resources running in bundles with the meta-attr set to 'host'
+
+Additionally we successfully tested restarting of each resource, banning
+of each resource from a node and rebooting a cluster node hosting the
+resource.
+
+Signed-off-by: Andrew Beekhof <abeekhof@redhat.com>
+Signed-off-by: Michele Baldessari <michele@acksyn.org>
+Signed-off-by: Damien Ciabrini <dciabrin@redhat.com>
+---
+ heartbeat/galera           | 16 +++++++++-------
+ heartbeat/rabbitmq-cluster |  4 ++--
+ heartbeat/redis            |  5 +++--
+ 3 files changed, 14 insertions(+), 11 deletions(-)
+
+diff --git a/heartbeat/galera b/heartbeat/galera
+index dc681a47079a..ab121a4be5a4 100755
+--- a/heartbeat/galera
++++ b/heartbeat/galera
+@@ -68,6 +68,8 @@
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+ . ${OCF_FUNCTIONS_DIR}/mysql-common.sh
+ 
++NODENAME=$(ocf_attribute_target)
++
+ # It is common for some galera instances to store
+ # check user that can be used to query status
+ # in this file
+@@ -279,7 +281,7 @@ get_status_variable()
+ 
+ set_bootstrap_node()
+ {
+-    local node=$1
++    local node=$(ocf_attribute_target $1)
+ 
+     ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -v "true"
+ }
+@@ -307,7 +309,7 @@ clear_no_grastate()
+ 
+ is_no_grastate()
+ {
+-    local node=$1
++    local node=$(ocf_attribute_target $1)
+     ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -Q 2>/dev/null
+ }
+ 
+@@ -323,7 +325,7 @@ set_last_commit()
+ 
+ get_last_commit()
+ {
+-    local node=$1
++    local node=$(ocf_attribute_target $1)
+ 
+     if [ -z "$node" ]; then
+        ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -Q 2>/dev/null
+@@ -413,7 +415,7 @@ master_exists()
+ 
+ clear_master_score()
+ {
+-    local node=$1
++    local node=$(ocf_attribute_target $1)
+     if [ -z "$node" ]; then
+         $CRM_MASTER -D
+     else 
+@@ -423,7 +425,7 @@ clear_master_score()
+ 
+ set_master_score()
+ {
+-    local node=$1
++    local node=$(ocf_attribute_target $1)
+ 
+     if [ -z "$node" ]; then
+         $CRM_MASTER -v 100
+@@ -542,7 +544,7 @@ detect_first_master()
+ 
+         greater_than_equal_long "$last_commit" "$best_commit"
+         if [ $? -eq 0 ]; then
+-            best_node=$node
++            best_node=$(ocf_attribute_target $node)
+             best_commit=$last_commit
+         fi
+ 
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index 1e78d9ecab98..362556d3f644 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -37,7 +37,7 @@ RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia"
+ RMQ_PID_DIR="/var/run/rabbitmq"
+ RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid"
+ RMQ_LOG_DIR="/var/log/rabbitmq"
+-NODENAME=$(ocf_local_nodename)
++NODENAME=$(ocf_attribute_target)
+ 
+ # this attr represents the current active local rmq node name.
+ # when rmq stops or the node is fenced, this attr disappears
+@@ -340,7 +340,7 @@ rmq_notify() {
+ 
+ 	# forget each stopped rmq instance in the provided pcmk node in the list.
+ 	for node in $(echo "$node_list"); do
+-		local rmq_node="$(${HA_SBIN_DIR}/crm_attribute -N $node -l forever --query --name $RMQ_CRM_ATTR_COOKIE_LAST_KNOWN -q)"
++		local rmq_node="$(${HA_SBIN_DIR}/crm_attribute -N $(ocf_attribute_target $node) -l forever --query --name $RMQ_CRM_ATTR_COOKIE_LAST_KNOWN -q)"
+ 		if [ -z "$rmq_node" ]; then
+ 			ocf_log warn "Unable to map pcmk node $node to a known rmq node."
+ 			continue	
+diff --git a/heartbeat/redis b/heartbeat/redis
+index 708ce84e6184..bc97f14096a6 100755
+--- a/heartbeat/redis
++++ b/heartbeat/redis
+@@ -188,7 +188,8 @@ function last_known_master()
+ }
+ 
+ function crm_master_reboot() {
+-	"${HA_SBIN_DIR}/crm_master" -l reboot "$@"
++	local node=$(ocf_attribute_target)
++	"${HA_SBIN_DIR}/crm_master" -N $node -l reboot "$@"
+ }
+ 
+ function calculate_score()
+@@ -545,7 +546,7 @@ function validate() {
+ 	fi
+ }
+ 
+-NODENAME=$(ocf_local_nodename)
++NODENAME=$(ocf_attribute_target)
+ if [ -f "$REDIS_CONFIG" ]; then
+ 	clientpasswd="$(cat $REDIS_CONFIG | sed -n -e  's/^\s*requirepass\s*\(.*\)\s*$/\1/p' | tail -n 1)"
+ fi
+-- 
+2.13.5
+
diff --git a/SOURCES/bz1493915-1-support-per-host-per-bundle-attribs.patch b/SOURCES/bz1493915-1-support-per-host-per-bundle-attribs.patch
deleted file mode 100644
index aef9b6d..0000000
--- a/SOURCES/bz1493915-1-support-per-host-per-bundle-attribs.patch
+++ /dev/null
@@ -1,94 +0,0 @@
-From 708e11c13ac25e1db5a4552db699a652f4e32353 Mon Sep 17 00:00:00 2001
-From: Michele Baldessari <michele@acksyn.org>
-Date: Thu, 7 Sep 2017 18:56:24 +0200
-Subject: [PATCH 1/2] Introduce helper functions for container-attribute-target
-
-In this change we introduce the ocf_attribute_target() function that helps
-RAs decide where to store per-node attributes. The rationale is that
-when an OCF resource runs in a bundle (e.g. rabbitmq-bundle-0) the
-NODENAME will point to the bundle name and not to the physical node
-running the bundle. Since a bundle can run on any cluster node, this
-is not ideal in the situations in which an RA wants to remember on which
-*host* a bundle was running (this is typically the case when there is no
-shared storage)
-
-The way this new ocf_attribute_target() function works is the following:
-A) When the meta-attr 'container-attribute-target' == 'host' and the
-   function is called without arguments it will return the physical
-   hostname the resource is running on.
-B) When the meta-attr 'container-attribute-target' != 'host' and the
-   function is called without arguments it will return the NODENAME
-   (default)
-C) When the meta-attr 'container-attribute-target' == 'host' and the
-   function is called with an argument it will return the physical
-   hostname on which the corresponding argument is running on.
-D) When the meta-attr 'container-attribute-target' != 'host' and the
-   function is called with an argument it will return the NODENAME
-   (default)
-
-The basic idea is that if resources need to store per-host attributes
-you will set the meta attribute 'container-attribute-target' equal to
-host (the no-shared storage case). If resources need to store attributes
-on a per-bundle basis (because they access data from shared-storage)
-then no change is needed on meta attributes (this is the default
-behaviour).
-
-Signed-off-by: Andrew Beekhof <abeekhof@redhat.com>
-Tested-by: Michele Baldessari <michele@acksyn.org>
-Tested-by: Damien Ciabrini <dciabrin@redhat.com>
----
- heartbeat/ocf-shellfuncs.in | 38 ++++++++++++++++++++++++++++++++++++++
- 1 file changed, 38 insertions(+)
-
-diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
-index 9b6b99f88a56..ddd6854e9487 100644
---- a/heartbeat/ocf-shellfuncs.in
-+++ b/heartbeat/ocf-shellfuncs.in
-@@ -989,6 +989,44 @@ ocf_stop_trace() {
- 	set +x
- }
- 
-+# Helper functions to map from nodename/bundle-name and physical hostname
-+# list_index_for_word "node0 node1 node2 node3 node4 node5" node4 --> 5
-+# list_word_at_index "NA host1 host2 host3 host4 host5" 3      --> host2
-+
-+# list_index_for_word "node1 node2 node3 node4 node5" node7 --> ""
-+# list_word_at_index "host1 host2 host3 host4 host5" 8      --> ""
-+
-+# attribute_target node1                                    --> host1
-+list_index_for_word() {
-+	echo $1 | tr ' ' '\n' | awk -v x="$2" '$0~x {print NR}'
-+}
-+
-+list_word_at_index() {
-+	echo $1 | tr ' ' '\n' | awk -v n="$2" 'n == NR'
-+}
-+
-+ocf_attribute_target() {
-+	if [ x$1 = x ]; then
-+		if [ x$OCF_RESKEY_CRM_meta_container_attribute_target = xhost -a x$OCF_RESKEY_CRM_meta_physical_host != x ]; then
-+			echo $OCF_RESKEY_CRM_meta_physical_host
-+		else
-+			echo $OCF_RESKEY_CRM_meta_on_node
-+		fi
-+		return
-+	elif [ x"$OCF_RESKEY_CRM_meta_notify_all_uname" != x ]; then
-+		index=$(list_index_for_word "$OCF_RESKEY_CRM_meta_notify_all_uname" $1)
-+		mapping=""
-+		if [ x$index != x ]; then
-+			mapping=$(list_word_at_index "$OCF_RESKEY_CRM_meta_notify_all_hosts" $index)
-+		fi
-+		if [ x$mapping != x -a x$mapping != xNA ]; then
-+			echo $mapping
-+			return
-+		fi
-+	fi
-+	echo $1
-+}
-+
- __ocf_set_defaults "$@"
- 
- : ${OCF_TRACE_RA:=$OCF_RESKEY_trace_ra}
--- 
-2.13.5
-
diff --git a/SOURCES/bz1493915-2-support-per-host-per-bundle-attribs.patch b/SOURCES/bz1493915-2-support-per-host-per-bundle-attribs.patch
deleted file mode 100644
index fa16395..0000000
--- a/SOURCES/bz1493915-2-support-per-host-per-bundle-attribs.patch
+++ /dev/null
@@ -1,146 +0,0 @@
-From 9bd94137d77f770967d35db5de716590cfaf0435 Mon Sep 17 00:00:00 2001
-From: Michele Baldessari <michele@acksyn.org>
-Date: Thu, 7 Sep 2017 21:07:45 +0200
-Subject: [PATCH 2/2] Make use of ocf_attribute_target in rabbitmq/redis/galera
-
-Instead of using NODENAME directly use the newly-introduced
-ocf_attribute_target function. This allows the operator to decide if an RA
-running inside a bundle should use per-host properties or per-bundle
-properties in a resource. This can be done by setting the meta-attribute
-'container-attribute-target' to 'host' in the former case and leave the
-defaults as is in the latter case.
-
-This change has been tested in the following scenarios (for rabbit/redis
-and galera):
-1) A deployment without bundles and without the container-attribute-target meta attr set.
-2) A deployment with the resources running in bundles without the meta-attr set
-3) A deployment with the resources running in bundles with the meta-attr set to 'host'
-
-Additionally we successfully tested restarting of each resource, banning
-of each resource from a node and rebooting a cluster node hosting the
-resource.
-
-Signed-off-by: Andrew Beekhof <abeekhof@redhat.com>
-Signed-off-by: Michele Baldessari <michele@acksyn.org>
-Signed-off-by: Damien Ciabrini <dciabrin@redhat.com>
----
- heartbeat/galera           | 16 +++++++++-------
- heartbeat/rabbitmq-cluster |  4 ++--
- heartbeat/redis            |  5 +++--
- 3 files changed, 14 insertions(+), 11 deletions(-)
-
-diff --git a/heartbeat/galera b/heartbeat/galera
-index dc681a47079a..ab121a4be5a4 100755
---- a/heartbeat/galera
-+++ b/heartbeat/galera
-@@ -68,6 +68,8 @@
- . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
- . ${OCF_FUNCTIONS_DIR}/mysql-common.sh
- 
-+NODENAME=$(ocf_attribute_target)
-+
- # It is common for some galera instances to store
- # check user that can be used to query status
- # in this file
-@@ -279,7 +281,7 @@ get_status_variable()
- 
- set_bootstrap_node()
- {
--    local node=$1
-+    local node=$(ocf_attribute_target $1)
- 
-     ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -v "true"
- }
-@@ -307,7 +309,7 @@ clear_no_grastate()
- 
- is_no_grastate()
- {
--    local node=$1
-+    local node=$(ocf_attribute_target $1)
-     ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -Q 2>/dev/null
- }
- 
-@@ -323,7 +325,7 @@ set_last_commit()
- 
- get_last_commit()
- {
--    local node=$1
-+    local node=$(ocf_attribute_target $1)
- 
-     if [ -z "$node" ]; then
-        ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -Q 2>/dev/null
-@@ -413,7 +415,7 @@ master_exists()
- 
- clear_master_score()
- {
--    local node=$1
-+    local node=$(ocf_attribute_target $1)
-     if [ -z "$node" ]; then
-         $CRM_MASTER -D
-     else 
-@@ -423,7 +425,7 @@ clear_master_score()
- 
- set_master_score()
- {
--    local node=$1
-+    local node=$(ocf_attribute_target $1)
- 
-     if [ -z "$node" ]; then
-         $CRM_MASTER -v 100
-@@ -542,7 +544,7 @@ detect_first_master()
- 
-         greater_than_equal_long "$last_commit" "$best_commit"
-         if [ $? -eq 0 ]; then
--            best_node=$node
-+            best_node=$(ocf_attribute_target $node)
-             best_commit=$last_commit
-         fi
- 
-diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
-index 1e78d9ecab98..362556d3f644 100755
---- a/heartbeat/rabbitmq-cluster
-+++ b/heartbeat/rabbitmq-cluster
-@@ -37,7 +37,7 @@ RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia"
- RMQ_PID_DIR="/var/run/rabbitmq"
- RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid"
- RMQ_LOG_DIR="/var/log/rabbitmq"
--NODENAME=$(ocf_local_nodename)
-+NODENAME=$(ocf_attribute_target)
- 
- # this attr represents the current active local rmq node name.
- # when rmq stops or the node is fenced, this attr disappears
-@@ -340,7 +340,7 @@ rmq_notify() {
- 
- 	# forget each stopped rmq instance in the provided pcmk node in the list.
- 	for node in $(echo "$node_list"); do
--		local rmq_node="$(${HA_SBIN_DIR}/crm_attribute -N $node -l forever --query --name $RMQ_CRM_ATTR_COOKIE_LAST_KNOWN -q)"
-+		local rmq_node="$(${HA_SBIN_DIR}/crm_attribute -N $(ocf_attribute_target $node) -l forever --query --name $RMQ_CRM_ATTR_COOKIE_LAST_KNOWN -q)"
- 		if [ -z "$rmq_node" ]; then
- 			ocf_log warn "Unable to map pcmk node $node to a known rmq node."
- 			continue	
-diff --git a/heartbeat/redis b/heartbeat/redis
-index 708ce84e6184..bc97f14096a6 100755
---- a/heartbeat/redis
-+++ b/heartbeat/redis
-@@ -188,7 +188,8 @@ function last_known_master()
- }
- 
- function crm_master_reboot() {
--	"${HA_SBIN_DIR}/crm_master" -l reboot "$@"
-+	local node=$(ocf_attribute_target)
-+	"${HA_SBIN_DIR}/crm_master" -N $node -l reboot "$@"
- }
- 
- function calculate_score()
-@@ -545,7 +546,7 @@ function validate() {
- 	fi
- }
- 
--NODENAME=$(ocf_local_nodename)
-+NODENAME=$(ocf_attribute_target)
- if [ -f "$REDIS_CONFIG" ]; then
- 	clientpasswd="$(cat $REDIS_CONFIG | sed -n -e  's/^\s*requirepass\s*\(.*\)\s*$/\1/p' | tail -n 1)"
- fi
--- 
-2.13.5
-
diff --git a/SOURCES/bz1496393-NovaEvacuate-Instance-HA-OSP12.patch b/SOURCES/bz1496393-NovaEvacuate-Instance-HA-OSP12.patch
new file mode 100644
index 0000000..7d0c08a
--- /dev/null
+++ b/SOURCES/bz1496393-NovaEvacuate-Instance-HA-OSP12.patch
@@ -0,0 +1,183 @@
+diff -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+--- a/heartbeat/NovaEvacuate	2017-09-27 11:59:28.430326918 +0200
++++ b/heartbeat/NovaEvacuate	2017-09-27 12:04:21.599608299 +0200
+@@ -77,6 +77,24 @@
+ <content type="string" default="" />
+ </parameter>
+ 
++<parameter name="region_name" unique="0" required="0">
++<longdesc lang="en">
++Region name for connecting to nova.
++</longdesc>
++<shortdesc lang="en">Region name</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="insecure" unique="0" required="0">
++<longdesc lang="en">
++Explicitly allow client to perform "insecure" TLS (https) requests.
++The server's certificate will not be verified against any certificate authorities.
++This option should be used with caution.
++</longdesc>
++<shortdesc lang="en">Allow insecure TLS requests</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
+ <parameter name="no_shared_storage" unique="0" required="0">
+ <longdesc lang="en">
+ Disable shared storage recovery for instances. Use at your own risk!
+@@ -85,6 +103,14 @@
+ <content type="boolean" default="0" />
+ </parameter>
+ 
++<parameter name="verbose" unique="0" required="0">
++<longdesc lang="en">
++Enable extra logging from the evacuation process
++</longdesc>
++<shortdesc lang="en">Enable debug logging</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
+ </parameters>
+ 
+ <actions>
+@@ -143,16 +169,20 @@
+ 	need_evacuate=0
+ 
+ 	case $state in
+-	    "") ;;
+-	    no)  ocf_log debug "$node is either fine or already handled";;
+-	    yes) need_evacuate=1;; 
++	    "")
++	        ;;
++	    no)
++		ocf_log debug "$node is either fine or already handled"
++		;;
++	    yes) need_evacuate=1
++		;; 
+ 	    *@*)
+ 		where=$(echo $state | awk -F@ '{print $1}')
+ 		when=$(echo $state | awk -F@ '{print $2}')
+ 		now=$(date +%s)
+ 
+ 		if [ $(($now - $when)) -gt 60 ]; then
+-		    ocf_log info "Processing partial evacuation of $node by $where at $when"		
++		    ocf_log info "Processing partial evacuation of $node by $where at $when"
+ 		    need_evacuate=1
+ 		else
+ 		    # Give some time for any in-flight evacuations to either complete or fail
+@@ -163,9 +193,15 @@
+ 	esac
+ 
+ 	if [ $need_evacuate = 1 ]; then
+-	    ocf_log notice "Initiating evacuation of $node"
++	    fence_agent="fence_compute"
++
++	    if have_binary fence_evacuate
++	    then
++		fence_agent="fence_evacuate"
++	    fi
+ 
+-	    fence_compute ${fence_options} -o status -n ${node}
++	    ocf_log notice "Initiating evacuation of $node with $fence_agent"
++	    $fence_agent ${fence_options} -o status -n ${node}
+ 	    if [ $? = 1 ]; then
+ 		ocf_log info "Nova does not know about ${node}"
+ 		# Dont mark as no because perhaps nova is unavailable right now
+@@ -177,7 +213,7 @@
+ 		return $OCF_SUCCESS
+ 	    fi
+ 
+-	    fence_compute ${fence_options} -o off -n $node
++	    $fence_agent ${fence_options} -o off -n $node
+ 	    rc=$?
+ 
+ 	    if [ $rc = 0 ]; then
+@@ -211,7 +247,10 @@
+     rc=$OCF_SUCCESS
+     fence_options=""
+ 
+-    check_binary fence_compute
++    
++    if ! have_binary fence_evacuate; then
++       check_binary fence_compute
++    fi
+ 
+     # Is the state directory writable? 
+     state_dir=$(dirname $statefile)
+@@ -250,12 +289,29 @@
+ 
+     fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
+ 
++    if [ -n "${OCF_RESKEY_region_name}" ]; then
++        fence_options="${fence_options} \
++            --region-name ${OCF_RESKEY_region_name}"
++    fi
++
++    if [ -n "${OCF_RESKEY_insecure}" ]; then
++        if ocf_is_true "${OCF_RESKEY_insecure}"; then
++            fence_options="${fence_options} --insecure"
++        fi
++    fi
++
+     if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
+ 	if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
+ 	    fence_options="${fence_options} --no-shared-storage"
+ 	fi
+     fi
+ 
++    if [ -n "${OCF_RESKEY_verbose}" ]; then
++        if ocf_is_true "${OCF_RESKEY_verbose}"; then
++            fence_options="${fence_options} --verbose"
++        fi
++    fi
++
+     if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
+ 	case ${OCF_RESKEY_endpoint_type} in
+ 	    adminURL|publicURL|internalURL) ;;
+@@ -276,19 +332,32 @@
+ statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
+ 
+ case $__OCF_ACTION in
+-start)		evacuate_validate; evacuate_start;;
+-stop)		evacuate_stop;;
+-monitor)	evacuate_validate; evacuate_monitor;;
+-meta-data)	meta_data
+-		exit $OCF_SUCCESS
+-		;;
+-usage|help)	evacuate_usage
+-		exit $OCF_SUCCESS
+-		;;
+-validate-all)	exit $OCF_SUCCESS;;
+-*)		evacuate_usage
+-		exit $OCF_ERR_UNIMPLEMENTED
+-		;;
++    start)
++	evacuate_validate
++	evacuate_start
++	;;
++    stop)
++	evacuate_stop
++	;;
++    monitor)
++	evacuate_validate
++	evacuate_monitor
++	;;
++    meta-data)
++	meta_data
++	exit $OCF_SUCCESS
++	;;
++    usage|help)
++	evacuate_usage
++	exit $OCF_SUCCESS
++	;;
++    validate-all)
++	exit $OCF_SUCCESS
++	;;
++    *)
++	evacuate_usage
++	exit $OCF_ERR_UNIMPLEMENTED
++	;;
+ esac
+ rc=$?
+ ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
diff --git a/SOURCES/bz1497076-NovaEvacuate-Instance-HA-OSP12.patch b/SOURCES/bz1497076-NovaEvacuate-Instance-HA-OSP12.patch
deleted file mode 100644
index 7d0c08a..0000000
--- a/SOURCES/bz1497076-NovaEvacuate-Instance-HA-OSP12.patch
+++ /dev/null
@@ -1,183 +0,0 @@
-diff -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
---- a/heartbeat/NovaEvacuate	2017-09-27 11:59:28.430326918 +0200
-+++ b/heartbeat/NovaEvacuate	2017-09-27 12:04:21.599608299 +0200
-@@ -77,6 +77,24 @@
- <content type="string" default="" />
- </parameter>
- 
-+<parameter name="region_name" unique="0" required="0">
-+<longdesc lang="en">
-+Region name for connecting to nova.
-+</longdesc>
-+<shortdesc lang="en">Region name</shortdesc>
-+<content type="string" default="" />
-+</parameter>
-+
-+<parameter name="insecure" unique="0" required="0">
-+<longdesc lang="en">
-+Explicitly allow client to perform "insecure" TLS (https) requests.
-+The server's certificate will not be verified against any certificate authorities.
-+This option should be used with caution.
-+</longdesc>
-+<shortdesc lang="en">Allow insecure TLS requests</shortdesc>
-+<content type="boolean" default="0" />
-+</parameter>
-+
- <parameter name="no_shared_storage" unique="0" required="0">
- <longdesc lang="en">
- Disable shared storage recovery for instances. Use at your own risk!
-@@ -85,6 +103,14 @@
- <content type="boolean" default="0" />
- </parameter>
- 
-+<parameter name="verbose" unique="0" required="0">
-+<longdesc lang="en">
-+Enable extra logging from the evacuation process
-+</longdesc>
-+<shortdesc lang="en">Enable debug logging</shortdesc>
-+<content type="boolean" default="0" />
-+</parameter>
-+
- </parameters>
- 
- <actions>
-@@ -143,16 +169,20 @@
- 	need_evacuate=0
- 
- 	case $state in
--	    "") ;;
--	    no)  ocf_log debug "$node is either fine or already handled";;
--	    yes) need_evacuate=1;; 
-+	    "")
-+	        ;;
-+	    no)
-+		ocf_log debug "$node is either fine or already handled"
-+		;;
-+	    yes) need_evacuate=1
-+		;; 
- 	    *@*)
- 		where=$(echo $state | awk -F@ '{print $1}')
- 		when=$(echo $state | awk -F@ '{print $2}')
- 		now=$(date +%s)
- 
- 		if [ $(($now - $when)) -gt 60 ]; then
--		    ocf_log info "Processing partial evacuation of $node by $where at $when"		
-+		    ocf_log info "Processing partial evacuation of $node by $where at $when"
- 		    need_evacuate=1
- 		else
- 		    # Give some time for any in-flight evacuations to either complete or fail
-@@ -163,9 +193,15 @@
- 	esac
- 
- 	if [ $need_evacuate = 1 ]; then
--	    ocf_log notice "Initiating evacuation of $node"
-+	    fence_agent="fence_compute"
-+
-+	    if have_binary fence_evacuate
-+	    then
-+		fence_agent="fence_evacuate"
-+	    fi
- 
--	    fence_compute ${fence_options} -o status -n ${node}
-+	    ocf_log notice "Initiating evacuation of $node with $fence_agent"
-+	    $fence_agent ${fence_options} -o status -n ${node}
- 	    if [ $? = 1 ]; then
- 		ocf_log info "Nova does not know about ${node}"
- 		# Dont mark as no because perhaps nova is unavailable right now
-@@ -177,7 +213,7 @@
- 		return $OCF_SUCCESS
- 	    fi
- 
--	    fence_compute ${fence_options} -o off -n $node
-+	    $fence_agent ${fence_options} -o off -n $node
- 	    rc=$?
- 
- 	    if [ $rc = 0 ]; then
-@@ -211,7 +247,10 @@
-     rc=$OCF_SUCCESS
-     fence_options=""
- 
--    check_binary fence_compute
-+    
-+    if ! have_binary fence_evacuate; then
-+       check_binary fence_compute
-+    fi
- 
-     # Is the state directory writable? 
-     state_dir=$(dirname $statefile)
-@@ -250,12 +289,29 @@
- 
-     fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
- 
-+    if [ -n "${OCF_RESKEY_region_name}" ]; then
-+        fence_options="${fence_options} \
-+            --region-name ${OCF_RESKEY_region_name}"
-+    fi
-+
-+    if [ -n "${OCF_RESKEY_insecure}" ]; then
-+        if ocf_is_true "${OCF_RESKEY_insecure}"; then
-+            fence_options="${fence_options} --insecure"
-+        fi
-+    fi
-+
-     if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
- 	if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
- 	    fence_options="${fence_options} --no-shared-storage"
- 	fi
-     fi
- 
-+    if [ -n "${OCF_RESKEY_verbose}" ]; then
-+        if ocf_is_true "${OCF_RESKEY_verbose}"; then
-+            fence_options="${fence_options} --verbose"
-+        fi
-+    fi
-+
-     if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
- 	case ${OCF_RESKEY_endpoint_type} in
- 	    adminURL|publicURL|internalURL) ;;
-@@ -276,19 +332,32 @@
- statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
- 
- case $__OCF_ACTION in
--start)		evacuate_validate; evacuate_start;;
--stop)		evacuate_stop;;
--monitor)	evacuate_validate; evacuate_monitor;;
--meta-data)	meta_data
--		exit $OCF_SUCCESS
--		;;
--usage|help)	evacuate_usage
--		exit $OCF_SUCCESS
--		;;
--validate-all)	exit $OCF_SUCCESS;;
--*)		evacuate_usage
--		exit $OCF_ERR_UNIMPLEMENTED
--		;;
-+    start)
-+	evacuate_validate
-+	evacuate_start
-+	;;
-+    stop)
-+	evacuate_stop
-+	;;
-+    monitor)
-+	evacuate_validate
-+	evacuate_monitor
-+	;;
-+    meta-data)
-+	meta_data
-+	exit $OCF_SUCCESS
-+	;;
-+    usage|help)
-+	evacuate_usage
-+	exit $OCF_SUCCESS
-+	;;
-+    validate-all)
-+	exit $OCF_SUCCESS
-+	;;
-+    *)
-+	evacuate_usage
-+	exit $OCF_ERR_UNIMPLEMENTED
-+	;;
- esac
- rc=$?
- ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
diff --git a/SOURCES/bz1499677-galera-recover-from-empty-gvwstate.dat.patch b/SOURCES/bz1499677-galera-recover-from-empty-gvwstate.dat.patch
new file mode 100644
index 0000000..0d6f6aa
--- /dev/null
+++ b/SOURCES/bz1499677-galera-recover-from-empty-gvwstate.dat.patch
@@ -0,0 +1,40 @@
+From 8fef58405fbac15c0ea93f0d890b114c870de0cc Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini <dciabrin@redhat.com>
+Date: Wed, 8 Nov 2017 15:19:33 +0100
+Subject: [PATCH] galera: recover from empty gvwstate.dat
+
+While running, a galera node keeps track of the last known state of
+the cluster in a temporary file gvwstate.dat. This file is normally
+deleted once a node is shutdown gracefully.
+
+Some ungraceful shutdowns can leave an empty gvwstate.dat on
+disk. This will prevent galera to join the cluster if it is
+configured to attempt PC recovery. Removing that file makes the
+node fall back to the normal, unoptimized joining process next
+time it is restarted.
+---
+ heartbeat/galera | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/heartbeat/galera b/heartbeat/galera
+index ab121a4be..ee8451427 100755
+--- a/heartbeat/galera
++++ b/heartbeat/galera
+@@ -586,6 +586,17 @@ detect_last_commit()
+     local recovery_file_regex='s/.*WSREP\:.*position\s*recovery.*--log_error='\''\([^'\'']*\)'\''.*/\1/p'
+     local recovered_position_regex='s/.*WSREP\:\s*[R|r]ecovered\s*position.*\:\(.*\)\s*$/\1/p'
+ 
++    # codership/galera#354
++    # Some ungraceful shutdowns can leave an empty gvwstate.dat on
++    # disk. This will prevent galera to join the cluster if it is
++    # configured to attempt PC recovery. Removing that file makes the
++    # node fall back to the normal, unoptimized joining process.
++    if [ -f ${OCF_RESKEY_datadir}/gvwstate.dat ] && \
++       [ ! -s ${OCF_RESKEY_datadir}/gvwstate.dat ]; then
++        ocf_log warn "empty ${OCF_RESKEY_datadir}/gvwstate.dat detected, removing it to prevent PC recovery failure at next restart"
++        rm -f ${OCF_RESKEY_datadir}/gvwstate.dat
++    fi
++
+     ocf_log info "attempting to detect last commit version by reading ${OCF_RESKEY_datadir}/grastate.dat"
+     last_commit="$(cat ${OCF_RESKEY_datadir}/grastate.dat | sed -n 's/^seqno.\s*\(.*\)\s*$/\1/p')"
+     if [ -z "$last_commit" ] || [ "$last_commit" = "-1" ]; then
diff --git a/SOURCES/bz1500352-amazon-aws-agents.patch b/SOURCES/bz1500352-amazon-aws-agents.patch
new file mode 100644
index 0000000..c094553
--- /dev/null
+++ b/SOURCES/bz1500352-amazon-aws-agents.patch
@@ -0,0 +1,867 @@
+diff -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
+--- a/doc/man/Makefile.am	2018-02-21 13:07:56.172091057 +0100
++++ b/doc/man/Makefile.am	2018-02-21 13:05:37.589245986 +0100
+@@ -99,6 +99,9 @@
+                           ocf_heartbeat_anything.7 \
+                           ocf_heartbeat_apache.7 \
+                           ocf_heartbeat_asterisk.7 \
++                          ocf_heartbeat_aws-vpc-move-ip.7 \
++                          ocf_heartbeat_awseip.7 \
++                          ocf_heartbeat_awsvip.7 \
+                           ocf_heartbeat_clvm.7 \
+                           ocf_heartbeat_conntrackd.7 \
+                           ocf_heartbeat_db2.7 \
+diff -uNr a/heartbeat/awseip b/heartbeat/awseip
+--- a/heartbeat/awseip	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/awseip	2018-02-21 13:08:21.112887254 +0100
+@@ -0,0 +1,278 @@
++#!/bin/sh
++#
++#
++#    Manage Elastic IP with Pacemaker
++#
++#
++# Copyright 2016 guessi <guessi@gmail.com>
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#     http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++#
++
++#
++#  Prerequisites:
++#
++#  - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.)
++#  - a reserved secondary private IP address for EC2 instances high availability
++#  - IAM user role with the following permissions:
++#    * DescribeInstances
++#    * AssociateAddress
++#    * DisassociateAddress
++#
++
++#######################################################################
++# Initialization:
++
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++
++#######################################################################
++
++#
++# Defaults
++#
++OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_profile_default="default"
++OCF_RESKEY_api_delay_default="3"
++
++: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
++: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
++: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
++
++meta_data() {
++    cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="awseip">
++<version>1.0</version>
++
++<longdesc lang="en">
++Resource Agent for Amazon AWS Elastic IP Addresses.
++
++It manages AWS Elastic IP Addresses with awscli.
++
++Credentials needs to be setup by running "aws configure".
++
++See https://aws.amazon.com/cli/ for more information about awscli.
++</longdesc>
++<shortdesc lang="en">Amazon AWS Elastic IP Address Resource Agent</shortdesc>
++
++<parameters>
++
++<parameter name="awscli" unique="0">
++<longdesc lang="en">
++command line tools for aws services
++</longdesc>
++<shortdesc lang="en">aws cli tools</shortdesc>
++<content type="string" default="${OCF_RESKEY_awscli_default}" />
++</parameter>
++
++<parameter name="profile">
++<longdesc lang="en">
++Valid AWS CLI profile name (see ~/.aws/config and 'aws configure')
++</longdesc>
++<shortdesc lang="en">profile name</shortdesc>
++<content type="string" default="${OCF_RESKEY_profile_default}" />
++</parameter>
++
++<parameter name="elastic_ip" unique="1" required="1">
++<longdesc lang="en">
++reserved elastic ip for ec2 instance
++</longdesc>
++<shortdesc lang="en">reserved elastic ip for ec2 instance</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="allocation_id" unique="1" required="1">
++<longdesc lang="en">
++reserved allocation id for ec2 instance
++</longdesc>
++<shortdesc lang="en">reserved allocation id for ec2 instance</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="private_ip_address" unique="1" required="0">
++<longdesc lang="en">
++predefined private ip address for ec2 instance
++</longdesc>
++<shortdesc lang="en">predefined private ip address for ec2 instance</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="api_delay" unique="0">
++<longdesc lang="en">
++a short delay between API calls, to avoid sending API requests too quickly
++</longdesc>
++<shortdesc lang="en">a short delay between API calls</shortdesc>
++<content type="integer" default="${OCF_RESKEY_api_delay_default}" />
++</parameter>
++
++</parameters>
++
++<actions>
++<action name="start"        timeout="30" />
++<action name="stop"         timeout="30" />
++<action name="monitor"      timeout="30" interval="20" depth="0" />
++<action name="migrate_to"   timeout="30" />
++<action name="migrate_from" timeout="30" />
++<action name="meta-data"    timeout="5" />
++<action name="validate"     timeout="10" />
++<action name="validate-all" timeout="10" />
++</actions>
++</resource-agent>
++END
++}
++
++#######################################################################
++
++awseip_usage() {
++    cat <<END
++usage: $0 {start|stop|monitor|migrate_to|migrate_from|validate|validate-all|meta-data}
++
++Expects to have a fully populated OCF RA-compliant environment set.
++END
++}
++
++awseip_start() {
++    awseip_monitor && return $OCF_SUCCESS
++
++    if [ -n "${PRIVATE_IP_ADDRESS}" ]; then
++        NETWORK_INTERFACES_MACS="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/)"
++        for MAC in ${NETWORK_INTERFACES_MACS}; do
++            curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s |
++                grep -q "^${PRIVATE_IP_ADDRESS}$"
++            if [ $? -eq 0 ]; then
++                NETWORK_ID="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id)"
++            fi
++        done
++        $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address  \
++            --network-interface-id ${NETWORK_ID} \
++            --allocation-id ${ALLOCATION_ID} \
++            --private-ip-address ${PRIVATE_IP_ADDRESS}
++        RET=$?
++    else
++        $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address  \
++            --instance-id ${INSTANCE_ID} \
++            --allocation-id ${ALLOCATION_ID}
++        RET=$?
++    fi
++
++    # delay to avoid sending request too fast
++    sleep ${OCF_RESKEY_api_delay}
++
++    if [ $RET -ne 0 ]; then
++        return $OCF_NOT_RUNNING
++    fi
++
++    ocf_log info "elastic_ip has been successfully brought up (${ELASTIC_IP})"
++    return $OCF_SUCCESS
++}
++
++awseip_stop() {
++    awseip_monitor || return $OCF_SUCCESS
++
++    ASSOCIATION_ID=$($AWSCLI --profile $OCF_RESKEY_profile --output json ec2 describe-addresses \
++                         --allocation-id ${ALLOCATION_ID} | grep -m 1 "AssociationId" | awk -F'"' '{print$4}')
++    $AWSCLI --profile $OCF_RESKEY_profile ec2 disassociate-address  \
++        --association-id ${ASSOCIATION_ID}
++    RET=$?
++
++    # delay to avoid sending request too fast
++    sleep ${OCF_RESKEY_api_delay}
++
++    if [ $RET -ne 0 ]; then
++        return $OCF_NOT_RUNNING
++    fi
++
++    ocf_log info "elastic_ip has been successfully brought down (${ELASTIC_IP})"
++    return $OCF_SUCCESS
++}
++
++awseip_monitor() {
++    $AWSCLI --profile $OCF_RESKEY_profile ec2 describe-instances --instance-id "${INSTANCE_ID}" | grep -q "${ELASTIC_IP}"
++    RET=$?
++
++    if [ $RET -ne 0 ]; then
++        return $OCF_NOT_RUNNING
++    fi
++    return $OCF_SUCCESS
++}
++
++awseip_validate() {
++    check_binary ${AWSCLI}
++
++    if [ -z "$OCF_RESKEY_profile" ]; then
++        ocf_exit_reason "profile parameter not set"
++        return $OCF_ERR_CONFIGURED
++    fi
++
++    if [ -z "${INSTANCE_ID}" ]; then
++        ocf_exit_reason "instance_id not found. Is this a EC2 instance?"
++        return $OCF_ERR_GENERIC
++    fi
++
++    return $OCF_SUCCESS
++}
++
++case $__OCF_ACTION in
++    meta-data)
++        meta_data
++        exit $OCF_SUCCESS
++        ;;
++esac 
++
++AWSCLI="${OCF_RESKEY_awscli}"
++ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
++ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
++PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}"
++INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)"
++
++case $__OCF_ACTION in
++    start)
++        awseip_validate
++        awseip_start
++        ;;
++    stop)
++        awseip_stop
++        ;;
++    monitor)
++        awseip_monitor
++        ;;
++    migrate_to)
++        ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} to ${OCF_RESKEY_CRM_meta_migrate_target}."
++        awseip_stop
++        ;;
++    migrate_from)
++        ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} from ${OCF_RESKEY_CRM_meta_migrate_source}."
++        awseip_start
++        ;;
++    reload)
++        ocf_log info "Reloading ${OCF_RESOURCE_INSTANCE} ..."
++        ;;
++    validate|validate-all)
++        awseip_validate
++        ;;
++    usage|help)
++        awseip_usage
++        exit $OCF_SUCCESS
++        ;;
++    *)
++        awseip_usage
++        exit $OCF_ERR_UNIMPLEMENTED
++        ;;
++esac
++
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
+diff -uNr a/heartbeat/awsvip b/heartbeat/awsvip
+--- a/heartbeat/awsvip	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/awsvip	2018-02-21 13:08:21.112887254 +0100
+@@ -0,0 +1,245 @@
++#!/bin/sh
++#
++#
++#    Manage Secondary Private IP with Pacemaker
++#
++#
++# Copyright 2016 guessi <guessi@gmail.com>
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#     http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++#
++
++#
++#  Prerequisites:
++#
++#  - preconfigured AWS CLI running environment (AccessKey, SecretAccessKey, etc.)
++#  - a reserved secondary private IP address for EC2 instances high availability
++#  - IAM user role with the following permissions:
++#    * DescribeInstances
++#    * AssignPrivateIpAddresses
++#    * UnassignPrivateIpAddresses
++#
++
++#######################################################################
++# Initialization:
++
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++
++#######################################################################
++
++#
++# Defaults
++#
++OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_profile_default="default"
++OCF_RESKEY_api_delay_default="3"
++
++: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
++: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
++: ${OCF_RESKEY_api_delay=${OCF_RESKEY_api_delay_default}}
++
++meta_data() {
++    cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="awsvip">
++<version>1.0</version>
++
++<longdesc lang="en">
++Resource Agent for Amazon AWS Secondary Private IP Addresses.
++
++It manages AWS Secondary Private IP Addresses with awscli.
++
++Credentials needs to be setup by running "aws configure".
++
++See https://aws.amazon.com/cli/ for more information about awscli.
++</longdesc>
++<shortdesc lang="en">Amazon AWS Secondary Private IP Address Resource Agent</shortdesc>
++
++<parameters>
++
++<parameter name="awscli" unique="0">
++<longdesc lang="en">
++command line tools for aws services
++</longdesc>
++<shortdesc lang="en">aws cli tools</shortdesc>
++<content type="string" default="${OCF_RESKEY_awscli_default}" />
++</parameter>
++
++<parameter name="profile">
++<longdesc lang="en">
++Valid AWS CLI profile name (see ~/.aws/config and 'aws configure')
++</longdesc>
++<shortdesc lang="en">profile name</shortdesc>
++<content type="string" default="${OCF_RESKEY_profile_default}" />
++</parameter>
++
++<parameter name="secondary_private_ip" unique="1" required="1">
++<longdesc lang="en">
++reserved secondary private ip for ec2 instance
++</longdesc>
++<shortdesc lang="en">reserved secondary private ip for ec2 instance</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="api_delay" unique="0">
++<longdesc lang="en">
++a short delay between API calls, to avoid sending API requests too quickly
++</longdesc>
++<shortdesc lang="en">a short delay between API calls</shortdesc>
++<content type="integer" default="${OCF_RESKEY_api_delay_default}" />
++</parameter>
++
++</parameters>
++
++<actions>
++<action name="start"        timeout="30" />
++<action name="stop"         timeout="30" />
++<action name="monitor"      timeout="30" interval="20" depth="0" />
++<action name="migrate_to"   timeout="30" />
++<action name="migrate_from" timeout="30" />
++<action name="meta-data"    timeout="5" />
++<action name="validate"     timeout="10" />
++<action name="validate-all" timeout="10" />
++</actions>
++</resource-agent>
++END
++}
++
++#######################################################################
++
++awsvip_usage() {
++    cat <<END
++usage: $0 {start|stop|monitor|migrate_to|migrate_from|validate|validate-all|meta-data}
++
++Expects to have a fully populated OCF RA-compliant environment set.
++END
++}
++
++awsvip_start() {
++    awsvip_monitor && return $OCF_SUCCESS
++
++    $AWSCLI --profile $OCF_RESKEY_profile ec2 assign-private-ip-addresses \
++        --network-interface-id ${NETWORK_ID} \
++        --private-ip-addresses ${SECONDARY_PRIVATE_IP} \
++        --allow-reassignment
++    RET=$?
++
++    # delay to avoid sending request too fast
++    sleep ${OCF_RESKEY_api_delay}
++
++    if [ $RET -ne 0 ]; then
++        return $OCF_NOT_RUNNING
++    fi
++
++    ocf_log info "secondary_private_ip has been successfully brought up (${SECONDARY_PRIVATE_IP})"
++    return $OCF_SUCCESS
++}
++
++awsvip_stop() {
++    awsvip_monitor || return $OCF_SUCCESS
++
++    $AWSCLI --profile $OCF_RESKEY_profile ec2 unassign-private-ip-addresses \
++        --network-interface-id ${NETWORK_ID} \
++        --private-ip-addresses ${SECONDARY_PRIVATE_IP}
++    RET=$?
++
++    # delay to avoid sending request too fast
++    sleep ${OCF_RESKEY_api_delay}
++
++    if [ $RET -ne 0 ]; then
++        return $OCF_NOT_RUNNING
++    fi
++
++    ocf_log info "secondary_private_ip has been successfully brought down (${SECONDARY_PRIVATE_IP})"
++    return $OCF_SUCCESS
++}
++
++awsvip_monitor() {
++    $AWSCLI --profile $OCF_RESKEY_profile ec2 describe-instances --instance-id "${INSTANCE_ID}" | grep -q "${SECONDARY_PRIVATE_IP}"
++    RET=$?
++
++    if [ $RET -ne 0 ]; then
++        return $OCF_NOT_RUNNING
++    fi
++    return $OCF_SUCCESS
++}
++
++awsvip_validate() {
++    check_binary ${AWSCLI}
++
++    if [ -z "$OCF_RESKEY_profile" ]; then
++        ocf_exit_reason "profile parameter not set"
++        return $OCF_ERR_CONFIGURED
++    fi
++
++    if [ -z "${INSTANCE_ID}" ]; then
++        ocf_exit_reason "instance_id not found. Is this a EC2 instance?"
++        return $OCF_ERR_GENERIC
++    fi
++
++    return $OCF_SUCCESS
++}
++
++case $__OCF_ACTION in
++    meta-data)
++        meta_data
++        exit $OCF_SUCCESS
++        ;;
++esac
++
++AWSCLI="${OCF_RESKEY_awscli}"
++SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
++INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)"
++NETWORK_ID="$($AWSCLI --profile $OCF_RESKEY_profile --output json ec2 describe-instances --instance-id ${INSTANCE_ID} | grep -m 1 'eni' | awk -F'"' '{print$4}')"
++
++case $__OCF_ACTION in
++    start)
++        awsvip_validate
++        awsvip_start
++        ;;
++    stop)
++        awsvip_stop
++        ;;
++    monitor)
++        awsvip_monitor
++        ;;
++    migrate_to)
++        ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} to ${OCF_RESKEY_CRM_meta_migrate_target}."
++	awsvip_stop
++        ;;
++    migrate_from)
++        ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} from ${OCF_RESKEY_CRM_meta_migrate_source}."
++        awsvip_start
++        ;;
++    reload)
++        ocf_log info "Reloading ${OCF_RESOURCE_INSTANCE} ..."
++        ;;
++    validate|validate-all)
++        awsvip_validate
++        ;;
++    usage|help)
++        awsvip_usage
++        exit $OCF_SUCCESS
++        ;;
++    *)
++        awsvip_usage
++        exit $OCF_ERR_UNIMPLEMENTED
++        ;;
++esac
++
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
+diff -uNr a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+--- a/heartbeat/aws-vpc-move-ip	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/aws-vpc-move-ip	2018-02-21 13:05:37.580246065 +0100
+@@ -0,0 +1,306 @@
++#!/bin/sh
++#
++#
++# OCF resource agent to move an IP address within a VPC in the AWS
++#
++# Copyright (c) 2017 Markus Guertler (SUSE)
++# Based on code of Adam Gandelman (GitHub ec2-resource-agents/elasticip)
++# All Rights Reserved.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of version 2 of the GNU General Public License as
++# published by the Free Software Foundation.
++#
++# This program is distributed in the hope that it would be useful, but
++# WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
++#
++# Further, this software is distributed without any warranty that it is
++# free of the rightful claim of any third person regarding infringement
++# or the like.  Any license provided herein, whether implied or
++# otherwise, applies only to this software file.  Patent licenses, if
++# any, provided herein do not apply to combinations of this program with
++# other software, or any other product whatsoever.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write the Free Software Foundation,
++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++#
++
++
++#######################################################################
++# Initialization:
++
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++
++# Defaults
++OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_profile_default="default"
++OCF_RESKEY_monapi_default="false"
++
++: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
++: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
++: ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
++#######################################################################
++
++
++USAGE="usage: $0 {start|stop|status|meta-data}";
++###############################################################################
++
++
++###############################################################################
++#
++# Functions
++#
++###############################################################################
++
++
++metadata() {
++cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="aws-vpc-move-ip">
++<version>2.0</version>
++<longdesc lang="en">
++Resource Agent to move IP addresses within a VPC of the Amazon Webservices EC2
++by changing an entry in an specific routing table
++</longdesc>
++<shortdesc lang="en">Move IP within a VPC of the AWS EC2</shortdesc>
++
++<parameters>
++<parameter name="awscli">
++<longdesc lang="en">
++Path to command line tools for AWS
++</longdesc>
++<shortdesc lang="en">Path to AWS CLI tools</shortdesc>
++<content type="string" default="${OCF_RESKEY_awscli_default}" />
++</parameter>
++
++<parameter name="profile">
++<longdesc lang="en">
++Valid AWS CLI profile name (see ~/.aws/config and 'aws configure')
++</longdesc>
++<shortdesc lang="en">profile name</shortdesc>
++<content type="string" default="${OCF_RESKEY_profile_default}" />
++</parameter>
++
++<parameter name="ip" required="1">
++<longdesc lang="en">
++VPC private IP address
++</longdesc>
++<shortdesc lang="en">VPC private IP</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="routing_table" required="1">
++<longdesc lang="en">
++Name of the routing table, where the route for the IP address should be changed, i.e. rtb-...
++</longdesc>
++<shortdesc lang="en">routing table name</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="interface" required="1">
++<longdesc lang="en">
++Name of the network interface, i.e. eth0
++</longdesc>
++<shortdesc lang="en">network interface name</shortdesc>
++<content type="string" default="eth0" />
++</parameter>
++
++<parameter name="monapi">
++<longdesc lang="en">
++Enable enhanced monitoring using AWS API calls to check route table entry
++</longdesc>
++<shortdesc lang="en">Enhanced Monitoring</shortdesc>
++<content type="boolean" default="${OCF_RESKEY_monapi_default}" />
++</parameter>
++</parameters>
++
++<actions>
++<action name="start" timeout="180" />
++<action name="stop" timeout="180" />
++<action name="monitor" depth="0" timeout="30" interval="60" />
++<action name="validate-all" timeout="5" />
++<action name="meta-data" timeout="5" />
++</actions>
++</resource-agent>
++END
++}
++
++ec2ip_validate() {
++	for cmd in aws ip curl; do
++		check_binary "$cmd"
++	done
++
++	if [ -z "$OCF_RESKEY_profile" ]; then
++		ocf_exit_reason "profile parameter not set"
++		return $OCF_ERR_CONFIGURED
++	fi
++
++	EC2_INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)"
++
++	if [ -z "${EC2_INSTANCE_ID}" ]; then
++		ocf_exit_reason "Instance ID not found. Is this a EC2 instance?"
++		return $OCF_ERR_GENERIC
++	fi
++
++	return $OCF_SUCCESS
++}
++
++ec2ip_monitor() {
++	if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ]; then
++		ocf_log info "monitor: check routing table (API call)"
++		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table"
++		ocf_log debug "executing command: $cmd"
++		ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_ip | awk '{ print $3 }')"
++		if [ -z "$ROUTE_TO_INSTANCE" ]; then
++			ROUTE_TO_INSTANCE="<unknown>"
++		fi
++
++		if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ];then 
++			ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE"
++			return $OCF_NOT_RUNNING
++		fi
++	else
++		ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call"
++	fi
++
++	cmd="ping -W 1 -c 1 $OCF_RESKEY_ip"
++	ocf_log debug "executing command: $cmd"
++	$cmd > /dev/null
++	if [ "$?" -gt 0 ]; then
++		ocf_log warn "IP $OCF_RESKEY_ip not locally reachable via ping on this system"
++		return $OCF_NOT_RUNNING
++	fi
++
++	ocf_log debug "route in VPC and locally reachable"
++	return $OCF_SUCCESS
++}
++
++
++ec2ip_drop() {
++	cmd="ip addr delete ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface"
++	ocf_log debug "executing command: $cmd"
++	$cmd
++	rc=$?
++	if [ "$rc" -gt 0 ]; then
++		ocf_log warn "command failed, rc $rc"
++		return $OCF_ERR_GENERIC
++	fi
++
++	return $OCF_SUCCESS
++}
++
++ec2ip_get_and_configure() {
++	# Adjusting the routing table
++	cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile ec2 replace-route --route-table-id $OCF_RESKEY_routing_table --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID"
++	ocf_log debug "executing command: $cmd"
++	$cmd
++	rc=$?
++	if [ "$rc" != 0 ]; then
++		ocf_log warn "command failed, rc: $rc"
++		return $OCF_ERR_GENERIC
++	fi
++
++	# Reconfigure the local ip address
++	ec2ip_drop
++	ip addr add "${OCF_RESKEY_ip}/32" dev $OCF_RESKEY_interface
++	rc=$?
++	if [ $rc != 0 ]; then
++		ocf_log warn "command failed, rc: $rc"
++		return $OCF_ERR_GENERIC
++	fi
++
++	return $OCF_SUCCESS
++}
++
++ec2ip_stop() {
++	ocf_log info "EC2: Bringing down IP address $OCF_RESKEY_ip"
++
++	ec2ip_monitor
++	if [ $? = $OCF_NOT_RUNNING ]; then
++		ocf_log info "EC2: Address $OCF_RESKEY_ip already down"
++		return $OCF_SUCCESS
++	fi
++
++	ec2ip_drop
++	if [ $? != $OCF_SUCCESS ]; then
++		return $OCF_ERR_GENERIC
++	fi
++
++	ec2ip_monitor
++	if [ $? != $OCF_NOT_RUNNING ]; then
++		ocf_log error "EC2: Couldn't bring down IP address $OCF_RESKEY_ip on interface $OCF_RESKEY_interface."
++		return $OCF_ERR_GENERIC
++	fi
++
++	ocf_log info "EC2: Successfully brought down $OCF_RESKEY_ip"
++	return $OCF_SUCCESS
++}
++
++ec2ip_start() {
++	ocf_log info "EC2: Moving IP address $OCF_RESKEY_ip to this host by adjusting routing table $OCF_RESKEY_routing_table"
++
++	ec2ip_monitor
++	if [ $? = $OCF_SUCCESS ]; then
++		ocf_log info "EC2: $OCF_RESKEY_ip already started"
++		return $OCF_SUCCESS
++	fi
++
++	ocf_log info "EC2: Adjusting routing table and locally configuring IP address"
++	ec2ip_get_and_configure
++	rc=$?
++	if [ $rc != $OCF_SUCCESS ]; then
++		ocf_log error "Received $rc from 'aws'"
++		return $OCF_ERR_GENERIC
++	fi
++
++	ec2ip_monitor
++	if [ $? != $OCF_SUCCESS ]; then
++		ocf_log error "EC2: IP address couldn't be configured on this host (IP: $OCF_RESKEY_ip, Interface: $OCF_RESKEY_interface)"
++		return $OCF_ERR_GENERIC
++	fi
++
++	return $OCF_SUCCESS
++}
++
++###############################################################################
++#
++# MAIN
++#
++###############################################################################
++
++case $__OCF_ACTION in
++	meta-data)
++		metadata
++		exit $OCF_SUCCESS
++		;;
++	usage|help)
++		echo $USAGE
++		exit $OCF_SUCCESS
++		;;
++esac
++
++if ! ocf_is_root; then
++	ocf_log err "You must be root for $__OCF_ACTION operation."
++	exit $OCF_ERR_PERM
++fi
++
++ec2ip_validate
++
++case $__OCF_ACTION in
++	start)
++		ec2ip_start;;
++	stop)
++		ec2ip_stop;;
++	monitor)
++		ec2ip_monitor;;
++	validate-all)
++		exit $?;;
++	*)	
++		echo $USAGE
++		exit $OCF_ERR_UNIMPLEMENTED
++		;;
++esac
+diff -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+--- a/heartbeat/Makefile.am	2018-02-21 13:07:45.862175305 +0100
++++ b/heartbeat/Makefile.am	2018-02-21 13:05:37.589245986 +0100
+@@ -64,6 +64,9 @@
+ 			AoEtarget		\
+ 			apache			\
+ 			asterisk		\
++			aws-vpc-move-ip		\
++			awseip			\
++			awsvip			\
+ 			nginx			\
+ 			AudibleAlarm		\
+ 			clvm		\
diff --git a/SOURCES/bz1504112-nfsserver-allow-stop-to-timeout.patch b/SOURCES/bz1504112-nfsserver-allow-stop-to-timeout.patch
new file mode 100644
index 0000000..69a29ed
--- /dev/null
+++ b/SOURCES/bz1504112-nfsserver-allow-stop-to-timeout.patch
@@ -0,0 +1,18 @@
+diff -uNr a/heartbeat/nfsserver b/heartbeat/nfsserver
+--- a/heartbeat/nfsserver	2017-11-02 12:42:24.260248092 +0100
++++ b/heartbeat/nfsserver	2017-11-02 12:43:12.494802422 +0100
+@@ -874,10 +874,10 @@
+ 	if [ "$EXEC_MODE" -eq "2" ]; then
+ 		ocf_log info "Stop: threads"
+ 		tfn="/proc/fs/nfsd/threads"
+-		if [ -f "$tfn" ] && [ "$(cat $tfn)" -gt "0" ]; then
+-			ocf_exit_reason "NFS server failed to stop: /proc/fs/nfsd/threads"
+-			return $OCF_ERR_GENERIC
+-		fi
++		while [ -f "$tfn" ] && [ "$(cat $tfn)" -gt "0" ]; do
++			ocf_log err "NFS server failed to stop: /proc/fs/nfsd/threads"
++			sleep 1
++		done
+ 
+ 		nfs_exec stop rpc-statd > /dev/null 2>&1
+ 		ocf_log info "Stop: rpc-statd"
diff --git a/SOURCES/bz1508362-docker-improve-exit-reasons.patch b/SOURCES/bz1508362-docker-improve-exit-reasons.patch
new file mode 100644
index 0000000..e4e0d7a
--- /dev/null
+++ b/SOURCES/bz1508362-docker-improve-exit-reasons.patch
@@ -0,0 +1,26 @@
+diff -uNr a/heartbeat/docker b/heartbeat/docker
+--- a/heartbeat/docker	2017-11-01 13:57:09.742513891 +0100
++++ b/heartbeat/docker	2017-11-01 13:59:20.632338967 +0100
+@@ -303,11 +303,21 @@
+ 		# we already know at this point it wouldn't be running 
+ 		remove_container
+ 		ocf_log info "running container $CONTAINER for the first time"
+-		ocf_run docker run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd
++		output=`docker run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd 2>&1`
++		rc=$?
++
++		if [ $rc -ne 0 ]; then
++			reason=`echo $output | sed -e 's@See ./usr/bin.*@@' -e 's@.*Error response from daemon: @@' -e 's@[^\:]*:@@'`
++			ocf_exit_reason "$reason"
++			ocf_log err "$output"
++			ocf_run -info docker ps -a
++			return $OCF_ERR_GENERIC
++		fi
+ 	fi
+ 
+ 	if [ $? -ne 0 ]; then
+ 		ocf_exit_reason "docker failed to launch container"
++		ocf_run -info docker ps -a
+ 		return $OCF_ERR_GENERIC
+ 	fi
+ 
diff --git a/SOURCES/bz1508366-docker-dont-ignore-stopped-containers.patch b/SOURCES/bz1508366-docker-dont-ignore-stopped-containers.patch
new file mode 100644
index 0000000..7b43ed7
--- /dev/null
+++ b/SOURCES/bz1508366-docker-dont-ignore-stopped-containers.patch
@@ -0,0 +1,26 @@
+diff -uNr a/heartbeat/docker b/heartbeat/docker
+--- a/heartbeat/docker	2017-11-01 13:46:00.935405714 +0100
++++ b/heartbeat/docker	2017-11-01 13:54:20.896006649 +0100
+@@ -234,14 +234,16 @@
+ 
+ 	# retrieve the 'Running' attribute for the container
+ 	val=$(docker inspect --format {{.State.Running}} $CONTAINER 2>/dev/null)
+-	if [ $? -ne 0 ]; then
+-		#not running as a result of container not being found
+-		return $OCF_NOT_RUNNING
++	if [ $? -eq 0 ]; then
++		if ocf_is_true "$val"; then
++			# container exists and is running
++			return $OCF_SUCCESS
++		fi
+ 	fi
+ 
+-	if ocf_is_true "$val"; then
+-		# container exists and is running
+-		return $OCF_SUCCESS
++	# Known but in a stopped state
++	if ! ocf_is_true "$OCF_RESKEY_reuse"; then
++		return $OCF_ERR_GENERIC
+ 	fi
+ 
+ 	return $OCF_NOT_RUNNING
diff --git a/SOURCES/bz1512580-CTDB-fix-probe.patch b/SOURCES/bz1512580-CTDB-fix-probe.patch
new file mode 100644
index 0000000..207ca6f
--- /dev/null
+++ b/SOURCES/bz1512580-CTDB-fix-probe.patch
@@ -0,0 +1,22 @@
+From 88a2513d0e97fe31c83151c05e10762fb5b4753a Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Fri, 1 Dec 2017 09:57:26 +0100
+Subject: [PATCH] CTDB: fix initial probe
+
+---
+ heartbeat/CTDB | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/heartbeat/CTDB b/heartbeat/CTDB
+index 709dbc8e9..09f5ecf5f 100755
+--- a/heartbeat/CTDB
++++ b/heartbeat/CTDB
+@@ -706,6 +706,8 @@ ctdb_monitor() {
+ 			return $OCF_NOT_RUNNING
+ 		elif echo "$status" | grep -qs 'No such file or directory'; then
+ 			return $OCF_NOT_RUNNING
++		elif echo $status | grep -qs 'connect() failed'; then
++			return $OCF_NOT_RUNNING
+ 		else
+ 			ocf_exit_reason "CTDB status call failed: $status"
+ 			return $OCF_ERR_GENERIC
diff --git a/SOURCES/bz1512586-galera-recover-from-empty-gvwstate.dat.patch b/SOURCES/bz1512586-galera-recover-from-empty-gvwstate.dat.patch
deleted file mode 100644
index 0d6f6aa..0000000
--- a/SOURCES/bz1512586-galera-recover-from-empty-gvwstate.dat.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From 8fef58405fbac15c0ea93f0d890b114c870de0cc Mon Sep 17 00:00:00 2001
-From: Damien Ciabrini <dciabrin@redhat.com>
-Date: Wed, 8 Nov 2017 15:19:33 +0100
-Subject: [PATCH] galera: recover from empty gvwstate.dat
-
-While running, a galera node keeps track of the last known state of
-the cluster in a temporary file gvwstate.dat. This file is normally
-deleted once a node is shutdown gracefully.
-
-Some ungraceful shutdowns can leave an empty gvwstate.dat on
-disk. This will prevent galera to join the cluster if it is
-configured to attempt PC recovery. Removing that file makes the
-node fall back to the normal, unoptimized joining process next
-time it is restarted.
----
- heartbeat/galera | 11 +++++++++++
- 1 file changed, 11 insertions(+)
-
-diff --git a/heartbeat/galera b/heartbeat/galera
-index ab121a4be..ee8451427 100755
---- a/heartbeat/galera
-+++ b/heartbeat/galera
-@@ -586,6 +586,17 @@ detect_last_commit()
-     local recovery_file_regex='s/.*WSREP\:.*position\s*recovery.*--log_error='\''\([^'\'']*\)'\''.*/\1/p'
-     local recovered_position_regex='s/.*WSREP\:\s*[R|r]ecovered\s*position.*\:\(.*\)\s*$/\1/p'
- 
-+    # codership/galera#354
-+    # Some ungraceful shutdowns can leave an empty gvwstate.dat on
-+    # disk. This will prevent galera to join the cluster if it is
-+    # configured to attempt PC recovery. Removing that file makes the
-+    # node fall back to the normal, unoptimized joining process.
-+    if [ -f ${OCF_RESKEY_datadir}/gvwstate.dat ] && \
-+       [ ! -s ${OCF_RESKEY_datadir}/gvwstate.dat ]; then
-+        ocf_log warn "empty ${OCF_RESKEY_datadir}/gvwstate.dat detected, removing it to prevent PC recovery failure at next restart"
-+        rm -f ${OCF_RESKEY_datadir}/gvwstate.dat
-+    fi
-+
-     ocf_log info "attempting to detect last commit version by reading ${OCF_RESKEY_datadir}/grastate.dat"
-     last_commit="$(cat ${OCF_RESKEY_datadir}/grastate.dat | sed -n 's/^seqno.\s*\(.*\)\s*$/\1/p')"
-     if [ -z "$last_commit" ] || [ "$last_commit" = "-1" ]; then
diff --git a/SOURCES/bz1516180-db2-fix-hadr-promote-when-master-failed.patch b/SOURCES/bz1516180-db2-fix-hadr-promote-when-master-failed.patch
new file mode 100644
index 0000000..6f248cc
--- /dev/null
+++ b/SOURCES/bz1516180-db2-fix-hadr-promote-when-master-failed.patch
@@ -0,0 +1,31 @@
+From 051743955c4f1f5fe412875afba94edd2839008c Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 22 Nov 2017 12:25:41 +0100
+Subject: [PATCH] db2: fix HADR promote when master failed
+
+---
+ heartbeat/db2 | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/db2 b/heartbeat/db2
+index 63de31582..b67363ec5 100755
+--- a/heartbeat/db2
++++ b/heartbeat/db2
+@@ -617,7 +617,7 @@ db2_instance_status() {
+     if [ $pscount -ge 4 ]; then
+         return $OCF_SUCCESS;
+     elif [ $pscount -ge 1 ]; then
+-        return $OCF_GENERIC_ERR
++        return $OCF_ERR_GENERIC
+     fi
+     return $OCF_NOT_RUNNING
+ }
+@@ -767,7 +767,7 @@ db2_promote() {
+             # must take over 
+             ;;
+ 
+-            STANDBY/PEER/DISCONNECTED|Standby/DisconnectedPeer)
++            STANDBY/PEER/DISCONNECTED|STANDBY/DISCONNECTED_PEER/DISCONNECTED|Standby/DisconnectedPeer)
+             # must take over forced 
+             force="by force peer window only"
+             ;;
diff --git a/SOURCES/bz1516435-azure-lb.patch b/SOURCES/bz1516435-azure-lb.patch
new file mode 100644
index 0000000..354a65e
--- /dev/null
+++ b/SOURCES/bz1516435-azure-lb.patch
@@ -0,0 +1,255 @@
+From 771b49a128100a986ee6508c998f296162f7c197 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 29 Nov 2017 15:09:06 +0100
+Subject: [PATCH] azure-lb: new resource agent
+
+---
+ doc/man/Makefile.am   |   1 +
+ heartbeat/Makefile.am |   1 +
+ heartbeat/azure-lb    | 213 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 215 insertions(+)
+ create mode 100755 heartbeat/azure-lb
+
+diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
+index 03cdc8867..e3abdb5b7 100644
+--- a/doc/man/Makefile.am
++++ b/doc/man/Makefile.am
+@@ -100,6 +100,7 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
+                           ocf_heartbeat_aws-vpc-move-ip.7 \
+                           ocf_heartbeat_awseip.7 \
+                           ocf_heartbeat_awsvip.7 \
++                          ocf_heartbeat_azure-lb.7 \
+                           ocf_heartbeat_clvm.7 \
+                           ocf_heartbeat_conntrackd.7 \
+                           ocf_heartbeat_db2.7 \
+diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+index 1fde5e905..1e441b9c1 100644
+--- a/heartbeat/Makefile.am
++++ b/heartbeat/Makefile.am
+@@ -97,6 +97,7 @@ ocf_SCRIPTS	     =  ClusterMon		\
+ 			aws-vpc-move-ip		\
+ 			awseip			\
+ 			awsvip			\
++			azure-lb		\
+ 			nginx			\
+ 			AudibleAlarm		\
+ 			clvm		\
+diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb
+new file mode 100755
+index 000000000..f41e83c6d
+--- /dev/null
++++ b/heartbeat/azure-lb
+@@ -0,0 +1,213 @@
++#!/bin/sh
++#
++
++#  License:      GNU General Public License (GPL)
++#  (c) 2017 O. Albrigtsen
++#           and Linux-HA contributors
++#
++# -----------------------------------------------------------------------------
++#      O C F    R E S O U R C E    S C R I P T   S P E C I F I C A T I O N
++# -----------------------------------------------------------------------------
++#
++# NAME
++#       azure-lb : OCF resource agent script for Azure Load Balancer
++#
++# Initialization:
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++
++# Defaults
++OCF_RESKEY_nc_default="/usr/bin/nc"
++OCF_RESKEY_port_default="61000"
++
++: ${OCF_RESKEY_nc=${OCF_RESKEY_nc_default}}
++: ${OCF_RESKEY_port=${OCF_RESKEY_port_default}}
++
++process="$OCF_RESOURCE_INSTANCE"
++pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid"
++
++
++lb_usage() {
++	cat <<END
++    usage: $0 (start|stop|validate-all|meta-data|help|usage|monitor)
++    $0 manages service that answers Azure Load Balancer health probe requests as a OCF HA resource.
++    The 'start' operation starts the instance.
++    The 'stop' operation stops the instance.
++    The 'status' operation reports whether the instance is running
++    The 'monitor' operation reports whether the instance seems to be working
++    The 'validate-all' operation reports whether the parameters are valid
++END
++}
++
++lb_metadata() {
++cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="azure-lb">
++<version>1.0</version>
++<longdesc lang="en">
++Resource agent to answer Azure Load Balancer health probe requests
++</longdesc>
++<shortdesc lang="en">Answers Azure Load Balancer health probe requests</shortdesc>
++
++<parameters>
++
++<parameter name="nc">
++<longdesc lang="en">
++The full name of the nc binary.
++</longdesc>
++<shortdesc lang="en">Full path name of the nc binary</shortdesc>
++<content type="string" default="${OCF_RESKEY_nc_default}"/>
++</parameter>
++
++<parameter name="port">
++<longdesc lang="en">
++Port to listen to.
++</longdesc>
++<shortdesc lang="en">Listen to port</shortdesc>
++<content type="string" default="${OCF_RESKEY_port_default}"/>
++</parameter>
++
++</parameters>
++
++<actions>
++<action name="start"   timeout="20s" />
++<action name="stop"    timeout="20s" />
++<action name="monitor" depth="0"  timeout="20s" interval="10" />
++<action name="meta-data"  timeout="5" />
++<action name="validate-all"  timeout="5" />
++</actions>
++</resource-agent>
++END
++exit 0
++}
++
++getpid() {
++        grep -o '[0-9]*' $1
++}
++
++lb_monitor() {
++	if test -f "$pidfile"; then
++		if pid=`getpid $pidfile` && [ "$pid" ] && kill -s 0 $pid; then
++			return $OCF_SUCCESS
++		else
++			# pidfile w/o process means the process died
++			return $OCF_ERR_GENERIC
++		fi
++	else
++		return $OCF_NOT_RUNNING
++	fi
++}
++
++lb_start() {
++	cmd="$OCF_RESKEY_nc -l -k $OCF_RESKEY_port"
++	if ! lb_monitor; then
++		ocf_log debug "Starting $process: $cmd"
++		# Execute the command as created above
++		eval "$cmd & echo \$!" > $pidfile
++		if lb_monitor; then
++			ocf_log debug "$process: $cmd started successfully, calling monitor"
++			lb_monitor
++			return $?
++		else 
++			ocf_log err "$process: $cmd could not be started"
++			return $OCF_ERR_GENERIC
++		fi
++	else
++		# If already running, consider start successful
++		ocf_log debug "$process: $cmd is already running"
++		return $OCF_SUCCESS
++	fi
++}
++
++lb_stop() {
++	local rc=$OCF_SUCCESS
++
++        if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
++                # Allow 2/3 of the action timeout for the orderly shutdown
++                # (The origin unit is ms, hence the conversion)
++                stop_timeout=$((OCF_RESKEY_CRM_meta_timeout/1500))
++        else
++                stop_timeout=10
++        fi
++
++	if lb_monitor; then
++                pid=`getpid $pidfile`
++                kill $pid
++
++                i=0
++                while [ $i -lt $stop_timeout ]; do
++                        if ! lb_monitor; then
++                        	rm -f $pidfile
++                                return $OCF_SUCCESS
++                        fi
++                        sleep 1 
++                        i=$((i+1))
++                done
++
++                ocf_log warn "Stop with SIGTERM failed/timed out, now sending SIGKILL."
++                kill -s 9 $pid
++                while :; do
++                        if ! lb_monitor; then
++                                ocf_log warn "SIGKILL did the job."
++                                rc=$OCF_SUCCESS
++                                break
++                        fi
++                        ocf_log info "The job still hasn't stopped yet. Waiting..."
++                        sleep 1
++                done
++	fi
++	rm -f $pidfile 
++	return $rc
++}
++
++lb_validate() {
++	check_binary "$OCF_RESKEY_nc"
++
++	if ! ocf_is_decimal "$OCF_RESKEY_port"; then
++		ocf_exit_reason "$OCF_RESKEY_port is not a valid port"
++		exit $OCF_ERR_CONFIGURED
++	fi
++
++	return $OCF_SUCCESS
++}
++
++###############################################################################
++#
++# MAIN
++#
++###############################################################################
++
++case $__OCF_ACTION in
++	meta-data)
++		lb_metadata
++		exit $OCF_SUCCESS
++		;;
++	usage|help)
++		lb_usage
++		exit $OCF_SUCCESS
++		;;
++esac
++
++if ! ocf_is_root; then
++	ocf_log err "You must be root for $__OCF_ACTION operation."
++	exit $OCF_ERR_PERM
++fi
++
++case $__OCF_ACTION in
++	start)
++		lb_validate 
++		lb_start;;
++	stop)
++		lb_stop;;
++	monitor)
++		lb_monitor;;
++	validate-all)
++		lb_validate;;
++	*)	
++		echo $USAGE
++		exit $OCF_ERR_UNIMPLEMENTED
++		;;
++esac
++
++exit $?
diff --git a/SOURCES/bz1520574-ocf_attribute_target-fallback-fix.patch b/SOURCES/bz1520574-ocf_attribute_target-fallback-fix.patch
new file mode 100644
index 0000000..7225e55
--- /dev/null
+++ b/SOURCES/bz1520574-ocf_attribute_target-fallback-fix.patch
@@ -0,0 +1,35 @@
+From f0a7a64d644c604f84ec1668849e1cc5507a8ea8 Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini <dciabrin@redhat.com>
+Date: Tue, 5 Dec 2017 10:43:10 +0100
+Subject: [PATCH] Fix fallback name for ocf_attribute_target
+
+For bundles, various resource agents now use ocf_attribute_target to
+get the name of the pacemaker node to store attributes on.
+
+If a recent version of the resource agent is being run on a pacemaker
+version which does not support bundles, ocf_attribute_target will
+return an empty string as hostname.
+
+Provide a fallback path so the resource agent gets a valid name when
+the resource is not containerized.
+---
+ heartbeat/ocf-shellfuncs.in | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
+index ddd6854e9..2fa6f93f9 100644
+--- a/heartbeat/ocf-shellfuncs.in
++++ b/heartbeat/ocf-shellfuncs.in
+@@ -1010,7 +1010,11 @@ ocf_attribute_target() {
+ 		if [ x$OCF_RESKEY_CRM_meta_container_attribute_target = xhost -a x$OCF_RESKEY_CRM_meta_physical_host != x ]; then
+ 			echo $OCF_RESKEY_CRM_meta_physical_host
+ 		else
+-			echo $OCF_RESKEY_CRM_meta_on_node
++			if [ x$OCF_RESKEY_CRM_meta_on_node != x ]; then
++				echo $OCF_RESKEY_CRM_meta_on_node
++			else
++				ocf_local_nodename
++			fi
+ 		fi
+ 		return
+ 	elif [ x"$OCF_RESKEY_CRM_meta_notify_all_uname" != x ]; then
diff --git a/SOURCES/bz1521019-db2-fix-hadr-promote-when-master-failed.patch b/SOURCES/bz1521019-db2-fix-hadr-promote-when-master-failed.patch
deleted file mode 100644
index 6f248cc..0000000
--- a/SOURCES/bz1521019-db2-fix-hadr-promote-when-master-failed.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From 051743955c4f1f5fe412875afba94edd2839008c Mon Sep 17 00:00:00 2001
-From: Oyvind Albrigtsen <oalbrigt@redhat.com>
-Date: Wed, 22 Nov 2017 12:25:41 +0100
-Subject: [PATCH] db2: fix HADR promote when master failed
-
----
- heartbeat/db2 | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/heartbeat/db2 b/heartbeat/db2
-index 63de31582..b67363ec5 100755
---- a/heartbeat/db2
-+++ b/heartbeat/db2
-@@ -617,7 +617,7 @@ db2_instance_status() {
-     if [ $pscount -ge 4 ]; then
-         return $OCF_SUCCESS;
-     elif [ $pscount -ge 1 ]; then
--        return $OCF_GENERIC_ERR
-+        return $OCF_ERR_GENERIC
-     fi
-     return $OCF_NOT_RUNNING
- }
-@@ -767,7 +767,7 @@ db2_promote() {
-             # must take over 
-             ;;
- 
--            STANDBY/PEER/DISCONNECTED|Standby/DisconnectedPeer)
-+            STANDBY/PEER/DISCONNECTED|STANDBY/DISCONNECTED_PEER/DISCONNECTED|Standby/DisconnectedPeer)
-             # must take over forced 
-             force="by force peer window only"
-             ;;
diff --git a/SOURCES/bz1523953-CTDB-detect-new-config-path.patch b/SOURCES/bz1523953-CTDB-detect-new-config-path.patch
new file mode 100644
index 0000000..536b9ea
--- /dev/null
+++ b/SOURCES/bz1523953-CTDB-detect-new-config-path.patch
@@ -0,0 +1,24 @@
+From 522328ba28d2e362bf09a7b771ca32206d2dfb02 Mon Sep 17 00:00:00 2001
+From: pablomh <pablomh@gmail.com>
+Date: Fri, 8 Dec 2017 19:39:12 +0100
+Subject: [PATCH] Add new possible location for CTDB_SYSCONFIG
+
+When upgrading from Red Hat 7.3 to 7.4 the script stated that the location of the
+configuration file had moved from /etc/sysconfig/ctdb to /etc/ctdb/ctdbd.conf.
+---
+ heartbeat/CTDB | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/heartbeat/CTDB b/heartbeat/CTDB
+index 09f5ecf5f..1ee228e8b 100755
+--- a/heartbeat/CTDB
++++ b/heartbeat/CTDB
+@@ -361,6 +361,8 @@ elif [ -f /etc/default/ctdb ]; then
+ 	CTDB_SYSCONFIG=/etc/default/ctdb
+ elif [ -f "$OCF_RESKEY_ctdb_config_dir/ctdb" ]; then
+ 	CTDB_SYSCONFIG=$OCF_RESKEY_ctdb_config_dir/ctdb
++elif [ -f "$OCF_RESKEY_ctdb_config_dir/ctdbd.conf" ]; then
++	CTDB_SYSCONFIG=$OCF_RESKEY_ctdb_config_dir/ctdbd.conf
+ fi
+ 
+ # Backup paths
diff --git a/SOURCES/bz1524454-ocf_attribute_target-fallback-fix.patch b/SOURCES/bz1524454-ocf_attribute_target-fallback-fix.patch
deleted file mode 100644
index 7225e55..0000000
--- a/SOURCES/bz1524454-ocf_attribute_target-fallback-fix.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From f0a7a64d644c604f84ec1668849e1cc5507a8ea8 Mon Sep 17 00:00:00 2001
-From: Damien Ciabrini <dciabrin@redhat.com>
-Date: Tue, 5 Dec 2017 10:43:10 +0100
-Subject: [PATCH] Fix fallback name for ocf_attribute_target
-
-For bundles, various resource agents now use ocf_attribute_target to
-get the name of the pacemaker node to store attributes on.
-
-If a recent version of the resource agent is being run on a pacemaker
-version which does not support bundles, ocf_attribute_target will
-return an empty string as hostname.
-
-Provide a fallback path so the resource agent gets a valid name when
-the resource is not containerized.
----
- heartbeat/ocf-shellfuncs.in | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
-index ddd6854e9..2fa6f93f9 100644
---- a/heartbeat/ocf-shellfuncs.in
-+++ b/heartbeat/ocf-shellfuncs.in
-@@ -1010,7 +1010,11 @@ ocf_attribute_target() {
- 		if [ x$OCF_RESKEY_CRM_meta_container_attribute_target = xhost -a x$OCF_RESKEY_CRM_meta_physical_host != x ]; then
- 			echo $OCF_RESKEY_CRM_meta_physical_host
- 		else
--			echo $OCF_RESKEY_CRM_meta_on_node
-+			if [ x$OCF_RESKEY_CRM_meta_on_node != x ]; then
-+				echo $OCF_RESKEY_CRM_meta_on_node
-+			else
-+				ocf_local_nodename
-+			fi
- 		fi
- 		return
- 	elif [ x"$OCF_RESKEY_CRM_meta_notify_all_uname" != x ]; then
diff --git a/SOURCES/bz1533168-NovaEvacuate-add-support-for-keystone-v3-authentication.patch b/SOURCES/bz1533168-NovaEvacuate-add-support-for-keystone-v3-authentication.patch
new file mode 100644
index 0000000..43ad9fe
--- /dev/null
+++ b/SOURCES/bz1533168-NovaEvacuate-add-support-for-keystone-v3-authentication.patch
@@ -0,0 +1,55 @@
+From 121ec00c8ea0f2e8b0c6336bd78fcb58b0bd490c Mon Sep 17 00:00:00 2001
+From: Andrew Beekhof <andrew@beekhof.net>
+Date: Mon, 27 Nov 2017 13:35:18 +1100
+Subject: [PATCH] NovaEvacuate: Additional parameters for v3 keywstone
+ authentication
+
+Change-Id: I22d2733b17e5a6098b66c4644879b2e1255dbff5
+---
+ heartbeat/NovaEvacuate | 26 +++++++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+index b6dadce..ba19ca4 100644
+--- a/heartbeat/NovaEvacuate
++++ b/heartbeat/NovaEvacuate
+@@ -65,7 +65,23 @@ Password for connecting to keystone in admin context
+ Tenant name for connecting to keystone in admin context.
+ Note that with Keystone V3 tenant names are only unique within a domain.
+ </longdesc>
+-<shortdesc lang="en">Tenant name</shortdesc>
++<shortdesc lang="en">Keystone v2 Tenant or v3 Project Name</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="user_domain" unique="0" required="1">
++<longdesc lang="en">
++User's domain name. Used when authenticating to Keystone.
++</longdesc>
++<shortdesc lang="en">Keystone v3 User Domain</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="project_domain" unique="0" required="1">
++<longdesc lang="en">
++Domain name containing project. Used when authenticating to Keystone.
++</longdesc>
++<shortdesc lang="en">Keystone v3 Project Domain</shortdesc>
+ <content type="string" default="" />
+ </parameter>
+ 
+@@ -289,6 +305,14 @@ evacuate_validate() {
+ 
+     fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
+ 
++    if [ -n "${OCF_RESKEY_user_domain}" ]; then
++        fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}"
++    fi
++
++    if [ -n "${OCF_RESKEY_project_domain}" ]; then
++        fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}"
++    fi
++
+     if [ -n "${OCF_RESKEY_region_name}" ]; then
+         fence_options="${fence_options} \
+             --region-name ${OCF_RESKEY_region_name}"
diff --git a/SOURCES/bz1535394-NovaEvacuate-add-support-for-keystone-v3-authentication.patch b/SOURCES/bz1535394-NovaEvacuate-add-support-for-keystone-v3-authentication.patch
deleted file mode 100644
index 43ad9fe..0000000
--- a/SOURCES/bz1535394-NovaEvacuate-add-support-for-keystone-v3-authentication.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From 121ec00c8ea0f2e8b0c6336bd78fcb58b0bd490c Mon Sep 17 00:00:00 2001
-From: Andrew Beekhof <andrew@beekhof.net>
-Date: Mon, 27 Nov 2017 13:35:18 +1100
-Subject: [PATCH] NovaEvacuate: Additional parameters for v3 keywstone
- authentication
-
-Change-Id: I22d2733b17e5a6098b66c4644879b2e1255dbff5
----
- heartbeat/NovaEvacuate | 26 +++++++++++++++++++++++++-
- 1 file changed, 25 insertions(+), 1 deletion(-)
-
-diff --git a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
-index b6dadce..ba19ca4 100644
---- a/heartbeat/NovaEvacuate
-+++ b/heartbeat/NovaEvacuate
-@@ -65,7 +65,23 @@ Password for connecting to keystone in admin context
- Tenant name for connecting to keystone in admin context.
- Note that with Keystone V3 tenant names are only unique within a domain.
- </longdesc>
--<shortdesc lang="en">Tenant name</shortdesc>
-+<shortdesc lang="en">Keystone v2 Tenant or v3 Project Name</shortdesc>
-+<content type="string" default="" />
-+</parameter>
-+
-+<parameter name="user_domain" unique="0" required="1">
-+<longdesc lang="en">
-+User's domain name. Used when authenticating to Keystone.
-+</longdesc>
-+<shortdesc lang="en">Keystone v3 User Domain</shortdesc>
-+<content type="string" default="" />
-+</parameter>
-+
-+<parameter name="project_domain" unique="0" required="1">
-+<longdesc lang="en">
-+Domain name containing project. Used when authenticating to Keystone.
-+</longdesc>
-+<shortdesc lang="en">Keystone v3 Project Domain</shortdesc>
- <content type="string" default="" />
- </parameter>
- 
-@@ -289,6 +305,14 @@ evacuate_validate() {
- 
-     fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
- 
-+    if [ -n "${OCF_RESKEY_user_domain}" ]; then
-+        fence_options="${fence_options} -u ${OCF_RESKEY_user_domain}"
-+    fi
-+
-+    if [ -n "${OCF_RESKEY_project_domain}" ]; then
-+        fence_options="${fence_options} -P ${OCF_RESKEY_project_domain}"
-+    fi
-+
-     if [ -n "${OCF_RESKEY_region_name}" ]; then
-         fence_options="${fence_options} \
-             --region-name ${OCF_RESKEY_region_name}"
diff --git a/SOURCES/bz1536548-sap_redhat_cluster_connector-fix-unknown-gvi-function.patch b/SOURCES/bz1536548-sap_redhat_cluster_connector-fix-unknown-gvi-function.patch
new file mode 100644
index 0000000..b661edc
--- /dev/null
+++ b/SOURCES/bz1536548-sap_redhat_cluster_connector-fix-unknown-gvi-function.patch
@@ -0,0 +1,106 @@
+diff -uNr a/sap_redhat_cluster_connector-6353d27/sap_redhat_cluster_connector b/sap_redhat_cluster_connector-6353d27/sap_redhat_cluster_connector
+--- a/sap_redhat_cluster_connector-6353d27/sap_redhat_cluster_connector	2018-01-23 10:15:48.167424070 +0100
++++ b/sap_redhat_cluster_connector-6353d27/sap_redhat_cluster_connector	2018-01-22 10:19:06.001422855 +0100
+@@ -41,6 +41,11 @@
+ my $logident = "sap_redhat_cluster_connector";
+ my $logoptions = "pid";
+ my $logfacility = "LOG_USER";
++my $protocolVersion=1;
++my $haProd="RHEL HA add-on";
++my $haProdSAP="sap_redhat_cluster_connector";
++my $haProdDoc="https://access.redhat.com/solutions/963123";
++
+ #
+ # open syslog
+ #
+@@ -54,9 +59,10 @@
+       where cmd could be:  
+       help 
+       init
+-      cpa --res RES --act ACT 
+-      lsr --out FILE --sid SID --ino INO | --dbhost HOST --dbtype TYPE
++      gvi --out FILE
++      cpa --res RES --act ACT
+       fra --res RES --act ACT  [ --nod NODE ]
++      lsr --out FILE --sid SID --ino INO | --dbhost HOST --dbtype TYPE
+       lsn --out FILE --res RES
+ ";
+ }
+@@ -110,7 +116,7 @@
+ 
+ sub fire_resource_action {
+ 	my ($rsc, $act, $nod) = ("", "", "");
+-	($rsc, $act, $nod) = @_;
++	my ($rsc, $act, $nod) = @_;
+ 	my $rc=0;
+         my $sysconfig = "/etc/sysconfig/sap_redhat_cluster_connector";
+ 	$nowstring = localtime;
+@@ -349,10 +355,6 @@
+ 	printf "%s : lsn()\n", $nowstring;
+ 	# TODO: check implemented action
+ 	###############################################################################################
+-	###############################################################################################
+-	###############################################################################################
+-	###############################################################################################
+-	###############################################################################################
+ 	#
+ 	# 1. GET HOSTNAME WHERE FUNCTION WAS CALLED
+ 	#
+@@ -452,6 +454,26 @@
+ 	return $rc;
+ }
+ 
++sub get_version_info($)
++{
++	my ($outfile, $resource) = @_;
++	my $rc=0;
++	$nowstring = localtime;
++	printf "%s : gvi()\n", $nowstring;
++	if ( $outfile ne "" ) {
++		#HASCRIPTCO-VERS
++		##HAPROD
++		##HAPROD-SAP
++		##HAPROD-DOC
++		open OUTFILE, ">$outfile";
++		syslog("LOG_INFO", "gvi result: %s\n%s\n%s\n%s\n", $protocolVersion, $haProd, $haProdSAP, $haProdDoc);
++		printf OUTFILE "%s\n%s\n%s\n%s\n", $protocolVersion, $haProd, $haProdSAP, $haProdDoc;
++		close OUTFILE;
++	} else {
++		printf "%s\n%s\n%s\n%s\n", $protocolVersion, $haProd, $haProdSAP, $haProdDoc;
++	}
++	return $rc;
++}
+ 
+ #
+ # "main"
+@@ -492,7 +514,6 @@
+ 	syslog("LOG_INFO", "lsr call (out=%s,sid=%s,ino=%s)", $out, $sid, $ino);
+ 	$return_code=list_sap_resources($out, $sid, $ino);
+ 	
+-
+  } elsif ( $cmd eq "fra" ) {
+ 	open($DEBUG, ">>$logident" . ".log");
+ 	*STDOUT=*$DEBUG;
+@@ -518,6 +539,14 @@
+ 	syslog("LOG_INFO", "lsn call (out=%s,res=%s)", $out, $res);
+ 	$return_code=list_sap_nodes($out, $res);
+ 
++ } elsif ( $cmd eq "gvi" ) {
++	open($DEBUG, ">>$logident" . ".log");
++	*STDOUT=*$DEBUG;
++	$result = GetOptions ("out=s" => \$out,
++		) &&
++	checkavail(($out)) || paramproblem();
++	syslog("LOG_INFO", "gvi call (out=%s)", $out);
++	$return_code=get_version_info($out);
+ 
+  } else  {
+ 	open($DEBUG, ">>$logident" . ".log");
+@@ -530,7 +559,6 @@
+  	paramproblem()
+  }
+ 	
+- syslog("LOG_INFO", "TEST END");
+ closelog();
+ exit $return_code;
+ #
diff --git a/SOURCES/bz1537444-sap_redhat_cluster_connector-fix-unknown-gvi-function.patch b/SOURCES/bz1537444-sap_redhat_cluster_connector-fix-unknown-gvi-function.patch
deleted file mode 100644
index b661edc..0000000
--- a/SOURCES/bz1537444-sap_redhat_cluster_connector-fix-unknown-gvi-function.patch
+++ /dev/null
@@ -1,106 +0,0 @@
-diff -uNr a/sap_redhat_cluster_connector-6353d27/sap_redhat_cluster_connector b/sap_redhat_cluster_connector-6353d27/sap_redhat_cluster_connector
---- a/sap_redhat_cluster_connector-6353d27/sap_redhat_cluster_connector	2018-01-23 10:15:48.167424070 +0100
-+++ b/sap_redhat_cluster_connector-6353d27/sap_redhat_cluster_connector	2018-01-22 10:19:06.001422855 +0100
-@@ -41,6 +41,11 @@
- my $logident = "sap_redhat_cluster_connector";
- my $logoptions = "pid";
- my $logfacility = "LOG_USER";
-+my $protocolVersion=1;
-+my $haProd="RHEL HA add-on";
-+my $haProdSAP="sap_redhat_cluster_connector";
-+my $haProdDoc="https://access.redhat.com/solutions/963123";
-+
- #
- # open syslog
- #
-@@ -54,9 +59,10 @@
-       where cmd could be:  
-       help 
-       init
--      cpa --res RES --act ACT 
--      lsr --out FILE --sid SID --ino INO | --dbhost HOST --dbtype TYPE
-+      gvi --out FILE
-+      cpa --res RES --act ACT
-       fra --res RES --act ACT  [ --nod NODE ]
-+      lsr --out FILE --sid SID --ino INO | --dbhost HOST --dbtype TYPE
-       lsn --out FILE --res RES
- ";
- }
-@@ -110,7 +116,7 @@
- 
- sub fire_resource_action {
- 	my ($rsc, $act, $nod) = ("", "", "");
--	($rsc, $act, $nod) = @_;
-+	my ($rsc, $act, $nod) = @_;
- 	my $rc=0;
-         my $sysconfig = "/etc/sysconfig/sap_redhat_cluster_connector";
- 	$nowstring = localtime;
-@@ -349,10 +355,6 @@
- 	printf "%s : lsn()\n", $nowstring;
- 	# TODO: check implemented action
- 	###############################################################################################
--	###############################################################################################
--	###############################################################################################
--	###############################################################################################
--	###############################################################################################
- 	#
- 	# 1. GET HOSTNAME WHERE FUNCTION WAS CALLED
- 	#
-@@ -452,6 +454,26 @@
- 	return $rc;
- }
- 
-+sub get_version_info($)
-+{
-+	my ($outfile, $resource) = @_;
-+	my $rc=0;
-+	$nowstring = localtime;
-+	printf "%s : gvi()\n", $nowstring;
-+	if ( $outfile ne "" ) {
-+		#HASCRIPTCO-VERS
-+		##HAPROD
-+		##HAPROD-SAP
-+		##HAPROD-DOC
-+		open OUTFILE, ">$outfile";
-+		syslog("LOG_INFO", "gvi result: %s\n%s\n%s\n%s\n", $protocolVersion, $haProd, $haProdSAP, $haProdDoc);
-+		printf OUTFILE "%s\n%s\n%s\n%s\n", $protocolVersion, $haProd, $haProdSAP, $haProdDoc;
-+		close OUTFILE;
-+	} else {
-+		printf "%s\n%s\n%s\n%s\n", $protocolVersion, $haProd, $haProdSAP, $haProdDoc;
-+	}
-+	return $rc;
-+}
- 
- #
- # "main"
-@@ -492,7 +514,6 @@
- 	syslog("LOG_INFO", "lsr call (out=%s,sid=%s,ino=%s)", $out, $sid, $ino);
- 	$return_code=list_sap_resources($out, $sid, $ino);
- 	
--
-  } elsif ( $cmd eq "fra" ) {
- 	open($DEBUG, ">>$logident" . ".log");
- 	*STDOUT=*$DEBUG;
-@@ -518,6 +539,14 @@
- 	syslog("LOG_INFO", "lsn call (out=%s,res=%s)", $out, $res);
- 	$return_code=list_sap_nodes($out, $res);
- 
-+ } elsif ( $cmd eq "gvi" ) {
-+	open($DEBUG, ">>$logident" . ".log");
-+	*STDOUT=*$DEBUG;
-+	$result = GetOptions ("out=s" => \$out,
-+		) &&
-+	checkavail(($out)) || paramproblem();
-+	syslog("LOG_INFO", "gvi call (out=%s)", $out);
-+	$return_code=get_version_info($out);
- 
-  } else  {
- 	open($DEBUG, ">>$logident" . ".log");
-@@ -530,7 +559,6 @@
-  	paramproblem()
-  }
- 	
-- syslog("LOG_INFO", "TEST END");
- closelog();
- exit $return_code;
- #
diff --git a/SOURCES/bz1543366-redis-add-support-for-tunneling-replication-traffic.patch b/SOURCES/bz1543366-redis-add-support-for-tunneling-replication-traffic.patch
new file mode 100644
index 0000000..fa11895
--- /dev/null
+++ b/SOURCES/bz1543366-redis-add-support-for-tunneling-replication-traffic.patch
@@ -0,0 +1,160 @@
+From 273963331bd303f595e820ca6da17cd63f5514db Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini <dciabrin@redhat.com>
+Date: Sat, 2 Dec 2017 11:53:56 +0100
+Subject: [PATCH] redis: add support for tunneling replication traffic
+
+Add parameters in the resource agent to assign specific redis port to
+each pacemaker node. When redis slave wants to connect to a redis
+master, it will instead connect to a tunnel host, on the port assigned
+to the targeted redis master.
+
+This makes it possible for redis replication traffic to go through
+pre-existing tunnels. This can be used to encrypt such traffic.
+---
+ heartbeat/redis | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 86 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/redis b/heartbeat/redis
+index fcd8c234..d9e29e2c 100755
+--- a/heartbeat/redis
++++ b/heartbeat/redis
+@@ -38,6 +38,7 @@
+ : ${OCF_RESKEY_pidfile_name:=redis-server.pid}
+ : ${OCF_RESKEY_socket_name:=redis.sock}
+ : ${OCF_RESKEY_port:=6379}
++: ${OCF_RESKEY_tunnel_host:=127.0.0.1}
+ 
+ if [ -z "$OCF_RESKEY_config" ]; then
+ 	if [ -f "/etc/redis.conf" ]; then
+@@ -156,6 +157,39 @@ Port for replication client to connect to on remote server
+ <content type="string" default="${OCF_RESKEY_port}"/>
+ </parameter>
+ 
++<parameter name="tunnel_host" unique="0" required="0">
++<longdesc lang="en">
++When replication traffic is tunnelled, this is the host to target
++to forward outgoing traffic to the redis master. The resource
++agent configures the redis slave to target the master via
++tunnel_host:tunnel_port.
++
++Note that in order to enable replication traffic tunneling,
++parameter {tunnel_port_map} must be populated.
++</longdesc>
++<shortdesc lang="en">Tunnel host for replication traffic</shortdesc>
++<content type="string" default="${OCF_RESKEY_tunnel_host}"/>
++</parameter>
++
++<parameter name="tunnel_port_map" unique="0" required="0">
++<longdesc lang="en">
++A mapping of pacemaker node names to redis port number.
++
++To be used when redis servers need to tunnel replication traffic.
++On every node where the redis resource is running, the redis server
++listens to a different port. Each redis server can access its peers
++for replication traffic via a tunnel accessible at {tunnel_host}:port.
++
++The mapping the form of:
++pcmk1-name:port-for-redis1;pcmk2-name:port-for-redis2;pcmk3-name:port-for-redis3
++
++where the redis resource started on node pcmk1-name would listen on
++port port-for-redis1
++</longdesc>
++<shortdesc lang="en">Mapping of Redis server name to redis port</shortdesc>
++<content type="string" default=""/>
++</parameter>
++
+ <parameter name="wait_last_known_master" unique="0" required="0">
+ <longdesc lang="en">
+ During redis cluster bootstrap, wait for the last known master to be
+@@ -291,6 +325,8 @@ simple_status() {
+ 
+ function monitor() {
+ 	local res
++	local master_name
++	local last_known_master_port
+ 
+ 	simple_status
+ 	res=$?
+@@ -334,14 +370,48 @@ redis_monitor() {
+ 				return $OCF_ERR_GENERIC
+ 			fi
+ 			if [[ "${info[master_host]}" != "$(last_known_master)" ]]; then
+-				ocf_log err "monitor: Slave mode current master does not match running master. current=${info[master_host]}, running=$(last_known_master)"
+-				return $OCF_ERR_GENERIC
++				if [ -n "${OCF_RESKEY_tunnel_port_map}" ]; then
++					master_name=$(port_to_redis_node ${info[master_port]})
++					last_known_master_port=$(redis_node_to_port $(last_known_master))
++					if [[ "${info[master_host]}" != "${OCF_RESKEY_tunnel_host}" ]] ||
++					   [[  "${info[master_port]}" != "${last_known_master_port}" ]]; then
++						ocf_log err "monitor: Slave mode current tunnelled connection to redis server does not match running master. tunnelled='${info[master_host]}:${info[master_port]} (${master_name})', running='$(last_known_master)'"
++						return $OCF_ERR_GENERIC
++					fi
++				else
++					ocf_log err "monitor: Slave mode current master does not match running master. current=${info[master_host]}, running=$(last_known_master)"
++					return $OCF_ERR_GENERIC
++				fi
+ 			fi
+ 		fi
+ 	fi
+ 	return $OCF_SUCCESS
+ }
+ 
++redis_node_to_port()
++{
++	local node=$1
++	echo "$OCF_RESKEY_tunnel_port_map" | tr ';' '\n' | tr -d ' ' | sed 's/:/ /' | awk -F' ' '$1=="'"$node"'" {print $2;exit}'
++}
++
++port_to_redis_node()
++{
++	local port=$1
++	echo "$OCF_RESKEY_tunnel_port_map" | tr ';' '\n' | tr -d ' ' | sed 's/:/ /' | awk -F' ' '$2=="'"$port"'" {print $1;exit}'
++}
++
++get_tunnel_port_from_master()
++{
++	local master_name=$1
++	crm_attribute --node "$master_name" -l forever --name ${INSTANCE_ATTR_NAME}-tunnel-port --query -q 2>/dev/null
++}
++
++get_master_from_tunnel_port()
++{
++	local master_name=$1
++	crm_attribute --node "$master_name" -l forever --name ${INSTANCE_ATTR_NAME}-tunnel-port --query -q 2>/dev/null
++}
++
+ function check_dump_file()
+ {
+ 	if ! have_binary "$REDIS_CHECK_DUMP"; then
+@@ -479,6 +549,7 @@ redis_promote() {
+ function demote() {
+ 	local master_host
+ 	local master_port
++	local tunnel_port
+ 
+ 	# client kill is only supported in Redis 2.8.12 or greater
+ 	version=$(redis_client -v | awk '{print $NF}')
+@@ -512,7 +583,19 @@ redis_demote() {
+ 		master_host="no-such-master"
+ 	fi
+ 
+-	ocf_log info "demote: Setting master to '$master_host'"
++	if [ -n "${OCF_RESKEY_tunnel_port_map}" ]; then
++		# master_host can be the special marker "no-such-master"
++		# while a master is being selected. In this case, no
++		# tunnel port is returned, but this is not fatal.
++		tunnel_port=$(redis_node_to_port "$master_host")
++		if [ -n "$tunnel_port" ]; then
++			ocf_log info "demote: Setting master to '$master_host' via local tunnel '${OCF_RESKEY_tunnel_host}' on port '$tunnel_port'"
++			master_host="${OCF_RESKEY_tunnel_host}"
++			master_port="$tunnel_port"
++		fi
++	else
++		ocf_log info "demote: Setting master to '$master_host'"
++	fi
+ 
+ 	redis_client slaveof "$master_host" "$master_port"
+ 
+-- 
+2.14.3
+
diff --git a/SOURCES/bz1544483-redis-add-support-for-tunneling-replication-traffic.patch b/SOURCES/bz1544483-redis-add-support-for-tunneling-replication-traffic.patch
deleted file mode 100644
index fa11895..0000000
--- a/SOURCES/bz1544483-redis-add-support-for-tunneling-replication-traffic.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-From 273963331bd303f595e820ca6da17cd63f5514db Mon Sep 17 00:00:00 2001
-From: Damien Ciabrini <dciabrin@redhat.com>
-Date: Sat, 2 Dec 2017 11:53:56 +0100
-Subject: [PATCH] redis: add support for tunneling replication traffic
-
-Add parameters in the resource agent to assign specific redis port to
-each pacemaker node. When redis slave wants to connect to a redis
-master, it will instead connect to a tunnel host, on the port assigned
-to the targeted redis master.
-
-This makes it possible for redis replication traffic to go through
-pre-existing tunnels. This can be used to encrypt such traffic.
----
- heartbeat/redis | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 86 insertions(+), 3 deletions(-)
-
-diff --git a/heartbeat/redis b/heartbeat/redis
-index fcd8c234..d9e29e2c 100755
---- a/heartbeat/redis
-+++ b/heartbeat/redis
-@@ -38,6 +38,7 @@
- : ${OCF_RESKEY_pidfile_name:=redis-server.pid}
- : ${OCF_RESKEY_socket_name:=redis.sock}
- : ${OCF_RESKEY_port:=6379}
-+: ${OCF_RESKEY_tunnel_host:=127.0.0.1}
- 
- if [ -z "$OCF_RESKEY_config" ]; then
- 	if [ -f "/etc/redis.conf" ]; then
-@@ -156,6 +157,39 @@ Port for replication client to connect to on remote server
- <content type="string" default="${OCF_RESKEY_port}"/>
- </parameter>
- 
-+<parameter name="tunnel_host" unique="0" required="0">
-+<longdesc lang="en">
-+When replication traffic is tunnelled, this is the host to target
-+to forward outgoing traffic to the redis master. The resource
-+agent configures the redis slave to target the master via
-+tunnel_host:tunnel_port.
-+
-+Note that in order to enable replication traffic tunneling,
-+parameter {tunnel_port_map} must be populated.
-+</longdesc>
-+<shortdesc lang="en">Tunnel host for replication traffic</shortdesc>
-+<content type="string" default="${OCF_RESKEY_tunnel_host}"/>
-+</parameter>
-+
-+<parameter name="tunnel_port_map" unique="0" required="0">
-+<longdesc lang="en">
-+A mapping of pacemaker node names to redis port number.
-+
-+To be used when redis servers need to tunnel replication traffic.
-+On every node where the redis resource is running, the redis server
-+listens to a different port. Each redis server can access its peers
-+for replication traffic via a tunnel accessible at {tunnel_host}:port.
-+
-+The mapping the form of:
-+pcmk1-name:port-for-redis1;pcmk2-name:port-for-redis2;pcmk3-name:port-for-redis3
-+
-+where the redis resource started on node pcmk1-name would listen on
-+port port-for-redis1
-+</longdesc>
-+<shortdesc lang="en">Mapping of Redis server name to redis port</shortdesc>
-+<content type="string" default=""/>
-+</parameter>
-+
- <parameter name="wait_last_known_master" unique="0" required="0">
- <longdesc lang="en">
- During redis cluster bootstrap, wait for the last known master to be
-@@ -291,6 +325,8 @@ simple_status() {
- 
- function monitor() {
- 	local res
-+	local master_name
-+	local last_known_master_port
- 
- 	simple_status
- 	res=$?
-@@ -334,14 +370,48 @@ redis_monitor() {
- 				return $OCF_ERR_GENERIC
- 			fi
- 			if [[ "${info[master_host]}" != "$(last_known_master)" ]]; then
--				ocf_log err "monitor: Slave mode current master does not match running master. current=${info[master_host]}, running=$(last_known_master)"
--				return $OCF_ERR_GENERIC
-+				if [ -n "${OCF_RESKEY_tunnel_port_map}" ]; then
-+					master_name=$(port_to_redis_node ${info[master_port]})
-+					last_known_master_port=$(redis_node_to_port $(last_known_master))
-+					if [[ "${info[master_host]}" != "${OCF_RESKEY_tunnel_host}" ]] ||
-+					   [[  "${info[master_port]}" != "${last_known_master_port}" ]]; then
-+						ocf_log err "monitor: Slave mode current tunnelled connection to redis server does not match running master. tunnelled='${info[master_host]}:${info[master_port]} (${master_name})', running='$(last_known_master)'"
-+						return $OCF_ERR_GENERIC
-+					fi
-+				else
-+					ocf_log err "monitor: Slave mode current master does not match running master. current=${info[master_host]}, running=$(last_known_master)"
-+					return $OCF_ERR_GENERIC
-+				fi
- 			fi
- 		fi
- 	fi
- 	return $OCF_SUCCESS
- }
- 
-+redis_node_to_port()
-+{
-+	local node=$1
-+	echo "$OCF_RESKEY_tunnel_port_map" | tr ';' '\n' | tr -d ' ' | sed 's/:/ /' | awk -F' ' '$1=="'"$node"'" {print $2;exit}'
-+}
-+
-+port_to_redis_node()
-+{
-+	local port=$1
-+	echo "$OCF_RESKEY_tunnel_port_map" | tr ';' '\n' | tr -d ' ' | sed 's/:/ /' | awk -F' ' '$2=="'"$port"'" {print $1;exit}'
-+}
-+
-+get_tunnel_port_from_master()
-+{
-+	local master_name=$1
-+	crm_attribute --node "$master_name" -l forever --name ${INSTANCE_ATTR_NAME}-tunnel-port --query -q 2>/dev/null
-+}
-+
-+get_master_from_tunnel_port()
-+{
-+	local master_name=$1
-+	crm_attribute --node "$master_name" -l forever --name ${INSTANCE_ATTR_NAME}-tunnel-port --query -q 2>/dev/null
-+}
-+
- function check_dump_file()
- {
- 	if ! have_binary "$REDIS_CHECK_DUMP"; then
-@@ -479,6 +549,7 @@ redis_promote() {
- function demote() {
- 	local master_host
- 	local master_port
-+	local tunnel_port
- 
- 	# client kill is only supported in Redis 2.8.12 or greater
- 	version=$(redis_client -v | awk '{print $NF}')
-@@ -512,7 +583,19 @@ redis_demote() {
- 		master_host="no-such-master"
- 	fi
- 
--	ocf_log info "demote: Setting master to '$master_host'"
-+	if [ -n "${OCF_RESKEY_tunnel_port_map}" ]; then
-+		# master_host can be the special marker "no-such-master"
-+		# while a master is being selected. In this case, no
-+		# tunnel port is returned, but this is not fatal.
-+		tunnel_port=$(redis_node_to_port "$master_host")
-+		if [ -n "$tunnel_port" ]; then
-+			ocf_log info "demote: Setting master to '$master_host' via local tunnel '${OCF_RESKEY_tunnel_host}' on port '$tunnel_port'"
-+			master_host="${OCF_RESKEY_tunnel_host}"
-+			master_port="$tunnel_port"
-+		fi
-+	else
-+		ocf_log info "demote: Setting master to '$master_host'"
-+	fi
- 
- 	redis_client slaveof "$master_host" "$master_port"
- 
--- 
-2.14.3
-
diff --git a/SOURCES/bz1546083-galera-fix-temp-logfile-rights.patch b/SOURCES/bz1546083-galera-fix-temp-logfile-rights.patch
new file mode 100644
index 0000000..dd5090e
--- /dev/null
+++ b/SOURCES/bz1546083-galera-fix-temp-logfile-rights.patch
@@ -0,0 +1,28 @@
+From 2754db9d03995e944a53e364f304bc7b0b24d75d Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini <dciabrin@redhat.com>
+Date: Thu, 2 Mar 2017 18:41:50 +0100
+Subject: [PATCH] galera: fix permission of temporary log file for mariadb
+ 10.1.21+
+
+Since MariaDB/server@8fcdd6b0ecbb966f4479856efe93a963a7a422f7,
+mysqld_safe relies on a helper subprocess to write into log files.
+This new logging mechanism expects log file to be writable by the
+user configured to run mysqld.
+
+Fix the generation of temporary log file accordingly.
+---
+ heartbeat/galera | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/heartbeat/galera b/heartbeat/galera
+index 0cab9a464..decbaa257 100755
+--- a/heartbeat/galera
++++ b/heartbeat/galera
+@@ -520,6 +520,7 @@ detect_last_commit()
+     last_commit="$(cat ${OCF_RESKEY_datadir}/grastate.dat | sed -n 's/^seqno.\s*\(.*\)\s*$/\1/p')"
+     if [ -z "$last_commit" ] || [ "$last_commit" = "-1" ]; then
+         local tmp=$(mktemp)
++        chown $OCF_RESKEY_user:$OCF_RESKEY_group $tmp
+ 
+         # if we pass here because grastate.dat doesn't exist,
+         # try not to bootstrap from this node if possible
diff --git a/SOURCES/bz1547142-galera-fix-temp-logfile-rights.patch b/SOURCES/bz1547142-galera-fix-temp-logfile-rights.patch
deleted file mode 100644
index dd5090e..0000000
--- a/SOURCES/bz1547142-galera-fix-temp-logfile-rights.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 2754db9d03995e944a53e364f304bc7b0b24d75d Mon Sep 17 00:00:00 2001
-From: Damien Ciabrini <dciabrin@redhat.com>
-Date: Thu, 2 Mar 2017 18:41:50 +0100
-Subject: [PATCH] galera: fix permission of temporary log file for mariadb
- 10.1.21+
-
-Since MariaDB/server@8fcdd6b0ecbb966f4479856efe93a963a7a422f7,
-mysqld_safe relies on a helper subprocess to write into log files.
-This new logging mechanism expects log file to be writable by the
-user configured to run mysqld.
-
-Fix the generation of temporary log file accordingly.
----
- heartbeat/galera | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/heartbeat/galera b/heartbeat/galera
-index 0cab9a464..decbaa257 100755
---- a/heartbeat/galera
-+++ b/heartbeat/galera
-@@ -520,6 +520,7 @@ detect_last_commit()
-     last_commit="$(cat ${OCF_RESKEY_datadir}/grastate.dat | sed -n 's/^seqno.\s*\(.*\)\s*$/\1/p')"
-     if [ -z "$last_commit" ] || [ "$last_commit" = "-1" ]; then
-         local tmp=$(mktemp)
-+        chown $OCF_RESKEY_user:$OCF_RESKEY_group $tmp
- 
-         # if we pass here because grastate.dat doesn't exist,
-         # try not to bootstrap from this node if possible
diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec
index 871b89d..a16660a 100644
--- a/SPECS/resource-agents.spec
+++ b/SPECS/resource-agents.spec
@@ -48,7 +48,7 @@
 Name:		resource-agents
 Summary:	Open Source HA Reusable Cluster Resource Scripts
 Version:	3.9.5
-Release:	105%{?dist}.11
+Release:	124%{?dist}
 License:	GPLv2+, LGPLv2+ and ASL 2.0
 URL:		https://github.com/ClusterLabs/resource-agents
 %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
@@ -229,22 +229,39 @@ Patch168:	bz1451097-1-galera-fix-bootstrap-when-cluster-has-no-data.patch
 Patch169:	bz1451097-2-galera-fix-bootstrap-when-cluster-has-no-data.patch
 Patch170:	bz1451097-3-galera-fix-bootstrap-when-cluster-has-no-data.patch
 Patch171:	bz1452049-docker-create-directories.patch
-Patch172:	bz1454699-LVM-status-check-for-missing-VG.patch
-Patch173:	bz1451933-LVM-update-metadata-on-start-relocate.patch
+#Patch172:	bz1454699-LVM-status-check-for-missing-VG.patch
+#Patch173:	bz1451933-LVM-update-metadata-on-start-relocate.patch
 Patch174:	bz1451933-LVM-warn-when-cache-mode-not-writethrough.patch
 Patch175:	bz1449681-2-saphana-saphanatopology-update-0.152.21.patch
 Patch176:	bz1342376-2-rabbitmq-cluster-backup-and-restore-users-policies.patch
 Patch177:	bz1342376-3-rabbitmq-cluster-backup-and-restore-users-policies.patch
-Patch178:	bz1493915-1-support-per-host-per-bundle-attribs.patch
-Patch179:	bz1493915-2-support-per-host-per-bundle-attribs.patch
-Patch180:	bz1497076-NovaEvacuate-Instance-HA-OSP12.patch
-Patch181:	bz1512586-galera-recover-from-empty-gvwstate.dat.patch
-Patch182:	bz1521019-db2-fix-hadr-promote-when-master-failed.patch
-Patch183:	bz1524454-ocf_attribute_target-fallback-fix.patch
-Patch184:	bz1535394-NovaEvacuate-add-support-for-keystone-v3-authentication.patch
-Patch185:	bz1537444-sap_redhat_cluster_connector-fix-unknown-gvi-function.patch
-Patch186:	bz1544483-redis-add-support-for-tunneling-replication-traffic.patch
-Patch187:	bz1547142-galera-fix-temp-logfile-rights.patch
+Patch178:	bz1436189-sybase.patch
+Patch179:	bz1465822-OCF-improve-locking.patch
+Patch180:	bz1466187-SAPInstance-IS_ERS-parameter-for-ASCS-ERS-Netweaver.patch
+Patch181:	bz1455305-VirtualDomain-fix-sed-migrate_options.patch
+Patch182:	bz1462802-systemd-tmpfiles.patch
+Patch183:	bz1445628-findif-improve-IPv6-NIC-detection.patch
+Patch184:	bz1489734-1-support-per-host-per-bundle-attribs.patch
+Patch185:	bz1489734-2-support-per-host-per-bundle-attribs.patch
+Patch186:	bz1496393-NovaEvacuate-Instance-HA-OSP12.patch
+Patch187:	bz1500352-amazon-aws-agents.patch
+Patch188:	bz1465827-mysql-fix-master-score-maintenance.patch
+Patch189:	bz1508366-docker-dont-ignore-stopped-containers.patch
+Patch190:	bz1508362-docker-improve-exit-reasons.patch
+Patch191:	bz1484473-ethmonitor-vlan-fix.patch
+Patch192:	bz1504112-nfsserver-allow-stop-to-timeout.patch
+Patch193:	bz1457382-portblock-suppress-dd-output.patch
+Patch194:	bz1364242-ethmonitor-add-intel-omnipath-support.patch
+Patch195:	bz1499677-galera-recover-from-empty-gvwstate.dat.patch
+Patch196:	bz1516180-db2-fix-hadr-promote-when-master-failed.patch
+Patch197:	bz1516435-azure-lb.patch
+Patch198:	bz1512580-CTDB-fix-probe.patch
+Patch199:	bz1520574-ocf_attribute_target-fallback-fix.patch
+Patch200:	bz1523953-CTDB-detect-new-config-path.patch
+Patch201:	bz1533168-NovaEvacuate-add-support-for-keystone-v3-authentication.patch
+Patch202:	bz1536548-sap_redhat_cluster_connector-fix-unknown-gvi-function.patch
+Patch203:	bz1543366-redis-add-support-for-tunneling-replication-traffic.patch
+Patch204:	bz1546083-galera-fix-temp-logfile-rights.patch
 
 Obsoletes:	heartbeat-resources <= %{version}
 Provides:	heartbeat-resources = %{version}
@@ -531,28 +548,49 @@ exit 1
 %patch169 -p1
 %patch170 -p1
 %patch171 -p1
-%patch172 -p1
-%patch173 -p1
+#%patch172 -p1
+#%patch173 -p1
 %patch174 -p1
 %patch175 -p1
 %patch176 -p1
 %patch177 -p1
 %patch178 -p1
-%patch179 -p1 -F2
+%patch179 -p1
 %patch180 -p1
 %patch181 -p1
 %patch182 -p1
 %patch183 -p1
 %patch184 -p1
-%patch185 -p1
-#%patch186 -p1
+%patch185 -p1 -F2
+%patch186 -p1
 %patch187 -p1
+%patch188 -p1
+%patch189 -p1
+%patch190 -p1
+%patch191 -p1
+%patch192 -p1
+%patch193 -p1
+%patch194 -p1
+%patch195 -p1
+%patch196 -p1
+%patch197 -p1
+%patch198 -p1 -F2
+%patch199 -p1
+%patch200 -p1 -F2
+%patch201 -p1
+%patch202 -p1
+%patch203 -p1
+%patch204 -p1
 
 %build
 if [ ! -f configure ]; then
 	./autogen.sh
 fi
 
+chmod 755 heartbeat/awseip
+chmod 755 heartbeat/awsvip
+chmod 755 heartbeat/aws-vpc-move-ip
+chmod 755 heartbeat/azure-lb
 chmod 755 heartbeat/galera
 chmod 755 heartbeat/garbd
 chmod 755 heartbeat/mysql-common.sh
@@ -568,6 +606,7 @@ chmod 755 heartbeat/NovaEvacuate
 chmod 755 heartbeat/NodeUtilization
 chmod 755 heartbeat/SAPHana
 chmod 755 heartbeat/SAPHanaTopology
+chmod 755 heartbeat/sybaseASE
 
 %if 0%{?fedora} >= 11 || 0%{?centos_version} > 5 || 0%{?rhel} > 5
 CFLAGS="$(echo '%{optflags}')"
@@ -596,6 +635,9 @@ chmod 755 heartbeat/clvm
 %if %{defined _unitdir}
     --with-systemdsystemunitdir=%{_unitdir} \
 %endif
+%if %{defined _tmpfilesdir}
+    --with-systemdtmpfilesdir=%{_tmpfilesdir} \
+%endif
 	--with-pkg-name=%{name} \
 	--with-ras-set=%{rasset} \
 	--with-ocft-cases=fedora
@@ -657,6 +699,9 @@ rm -rf %{buildroot}
 %if %{defined _unitdir}
 %{_unitdir}/resource-agents-deps.target
 %endif
+%if %{defined _tmpfilesdir}
+%{_tmpfilesdir}/%{name}.conf
+%endif
 
 %dir %{_datadir}/%{name}
 %dir %{_datadir}/%{name}/ocft
@@ -810,46 +855,119 @@ ccs_update_schema > /dev/null 2>&1 ||:
 %endif
 
 %changelog
-* Tue Feb 27 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105.11
+* Thu Feb 22 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-124
+- awseip/awsvip: increase default "api_delay" to 3s to avoid failures
+
+  Resolves: rhbz#1500352
+
+* Wed Feb 21 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-123
+- awseip: fix for multi-NICs
+
+  Resolves: rhbz#1547218
+
+* Mon Feb 19 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-122
 - galera: fix temp logfile rights to support MySQL 10.1.21+
 
-  Resolves: rhbz#1547142
+  Resolves: rhbz#1546083
 
-* Tue Jan 23 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105.8
+* Mon Feb 12 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-121
+- redis: support tunneling replication traffic
+
+  Resolves: rhbz#1543366
+
+* Tue Jan 23 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-120
 - sap_redhat_cluster_connector: fix unknown gvi function
 
-  Resolves: rhbz#1537444
+  Resolves: rhbz#1536548
 
-* Wed Jan 17 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105.7
+* Thu Jan 11 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-119
 - NovaEvacuate: add support for keystone v3 authentication
 
-  Resolves: rhbz#1535394
+  Resolves: rhbz#1533168
+
+* Mon Dec 11 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-118
+- CTDB: detect new config path
 
-* Mon Dec 11 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105.6
+  Resolves: rhbz#1523953
+
+* Thu Dec  7 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-117
 - ocf_attribute_target: add fallback for Pacemaker versions without
   bundle support
 
-  Resolves: rhbz#1524454
+  Resolves: rhbz#1520574
+
+* Fri Dec  1 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-116
+- azure-lb: new resource agent
+- CTDB: fix initial probe
+
+  Resolves: rhbz#1516435
+  Resolves: rhbz#1512580
 
-* Wed Dec  6 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105.5
+* Wed Nov 22 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-115
 - db2: fix HADR promote when master failed
 
-  Resolves: rhbz#1521019
+  Resolves: rhbz#1516180
 
-* Mon Nov 13 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105.3
+* Thu Nov  9 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-114
+- ethmonitor: add intel omnipath support
+
+  Resolves: rhbz#1364242
+
+* Thu Nov  9 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-113
 - galera: recover from empty gvwstate.dat
 
-  Resolves: rhbz#1512586
+  Resolves: rhbz#1499677
+
+* Thu Nov  2 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-112
+- ethmonitor: VLAN fix
+- nfsserver: allow stop to timeout
+- portblock: suppress dd output
+- LVM: dont use "vgscan --cache"
+
+  Resolves: rhbz#1484473
+  Resolves: rhbz#1504112
+  Resolves: rhbz#1457382
+  Resolves: rhbz#1486888
+
+* Wed Nov  1 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-111
+- docker: dont ignore stopped containers
+- docker: improve exit reasons
+
+  Resolves: rhbz#1508366
+  Resolves: rhbz#1508362
 
-* Fri Sep 29 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105.2
+* Thu Oct 26 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-110
+- mysql: fix master score after maintenance mode
+
+  Resolves: rhbz#1465827
+
+* Fri Oct 20 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-109
+- awseip/awsvip/aws-vpc-move-ip: new resource agents for Amazon AWS
+
+  Resolves: rhbz#1500352
+
+* Thu Sep 28 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-107
 - NovaEvacuate: changes to support Instance HA on OSP12
 
-  Resolves: rhbz#1497076
+  Resolves: rhbz#1496393
 
-* Thu Sep 21 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105.1
+* Wed Sep 20 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-106
+- sybaseASE: new resource agent
+- OCF: improve locking
+- SAPInstance: add "IS_ERS" parameter for ASCS ERS Netweaver
+- VirtualDomain: fix "migrate_options" parsing
+- systemd: use tmpfiles.d to create temp directory on boot
+- findif: improve IPv6 NIC detection
 - support per-host and per-bundle attributes
 
-  Resolves: rhbz#1493915
+  Resolves: rhbz#1436189
+  Resolves: rhbz#1465822
+  Resolves: rhbz#1466187
+  Resolves: rhbz#1455305
+  Resolves: rhbz#1462802
+  Resolves: rhbz#1445628
+  Resolves: rhbz#1489734
+
 
 * Fri Jun 23 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.5-105
 - rabbitmq-cluster: fix to keep expiration policy