diff --git a/SOURCES/bz1734062-podman-1-avoid-double-inspect-call.patch b/SOURCES/bz1734062-podman-1-avoid-double-inspect-call.patch
new file mode 100644
index 0000000..5aeada6
--- /dev/null
+++ b/SOURCES/bz1734062-podman-1-avoid-double-inspect-call.patch
@@ -0,0 +1,46 @@
+From d8400a30604229d349f36855c30a6a438204023b Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Wed, 12 Jun 2019 11:29:17 +0200
+Subject: [PATCH] Avoid double call to podman inspect in podman_simple_status()
+
+Right now podman_simple_status() does the following:
+- It calls container_exists() which then calls "podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1"
+- Then it calls "podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null"
+
+This duplication is unnecessary and we can rely on the second podman inspect
+call alone.  Avoiding the extra call matters because podman inspect calls
+become very expensive as soon as moderate I/O kicks in.
+
+Tested as follows:
+1) Injected the change on an existing bundle-based cluster
+2) Observed that monitoring operations kept working okay
+3) Verified by adding set -x that only a single podman inspect per monitor
+   operation was called (as opposed to two before)
+4) Restarted a bundle with an OCF resource inside correctly
+5) Did a podman stop of a bundle and correctly observed that:
+5.a) It was detected as non running:
+* haproxy-bundle-podman-1_monitor_60000 on controller-0 'not running' (7): call=192, status=complete, exitreason='',
+    last-rc-change='Wed Jun 12 09:22:18 2019', queued=0ms, exec=0ms
+5.b) It was correctly started afterwards
+
+Signed-off-by: Michele Baldessari <michele@acksyn.org>
+---
+ heartbeat/podman | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index 34e11da6b..b2b3081f9 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -238,11 +238,6 @@ podman_simple_status()
+ {
+ 	local val
+ 
+-	container_exists
+-	if [ $? -ne 0 ]; then
+-		return $OCF_NOT_RUNNING
+-	fi
+-
+ 	# retrieve the 'Running' attribute for the container
+ 	val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null)
+ 	if [ $? -ne 0 ]; then
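
For reference, once the container_exists() guard is gone the whole probe collapses to the single inspect call left in the trailing context above; a minimal sketch of the resulting function (shape inferred from the surrounding context lines, not part of the patch itself):

    podman_simple_status()
    {
        local val

        # retrieve the 'Running' attribute for the container; a failed
        # inspect already tells us the container does not exist
        val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null)
        if [ $? -ne 0 ]; then
            return $OCF_NOT_RUNNING
        fi
        ocf_is_true "$val" && return $OCF_SUCCESS
        return $OCF_NOT_RUNNING
    }
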
diff --git a/SOURCES/bz1734062-podman-2-improve-monitor-action.patch b/SOURCES/bz1734062-podman-2-improve-monitor-action.patch
new file mode 100644
index 0000000..1537139
--- /dev/null
+++ b/SOURCES/bz1734062-podman-2-improve-monitor-action.patch
@@ -0,0 +1,63 @@
+From 9685e8e6bf2896377a9cf0e07a85de5dd5fcf2df Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Wed, 12 Jun 2019 12:00:31 +0200
+Subject: [PATCH] Simplify podman_monitor()
+
+Before this change podman_monitor() does two things:
+\-> podman_simple_status()
+    \-> podman inspect {{.State.Running}}
+\-> if podman_simple_status == 0 then monitor_cmd_exec()
+    \-> if [ -z "$OCF_RESKEY_monitor_cmd" ]; then # so if OCF_RESKEY_monitor_cmd is empty we just return SUCCESS
+          return $rc
+        fi
+        # if OCF_RESKEY_monitor_cmd is set to something we execute it
+        podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd
+
+Let's only rely on podman exec as invoked inside monitor_cmd_exec
+when $OCF_RESKEY_monitor_cmd is non-empty (which is the default, as it is set to "/bin/true").
+Only when no monitor_cmd command is defined does it make sense to fall back to
+the podman inspect call in podman_simple_status().
+
+Tested as follows:
+1) Injected the change on an existing bundle-based cluster
+2) Observed that monitoring operations kept working okay
+3) Restarted rabbitmq-bundle and galera-bundle successfully
+4) Killed a container and we correctly detected the monitor failure
+Jun 12 09:52:12 controller-0 pacemaker-controld[25747]: notice: controller-0-haproxy-bundle-podman-1_monitor_60000:230 [ ocf-exit-reason:monitor cmd failed (rc=125), output: cannot exec into container that is not running\n ]
+5) Container correctly got restarted after the monitor failure:
+   haproxy-bundle-podman-1      (ocf::heartbeat:podman):        Started controller-0
+6) Stopped and removed a container and pcmk detected it correctly:
+Jun 12 09:55:15 controller-0 podman(haproxy-bundle-podman-1)[841411]: ERROR: monitor cmd failed (rc=125), output: unable to exec into haproxy-bundle-podman-1: no container with name or ID haproxy-bundle-podman-1 found: no such container
+Jun 12 09:55:15 controller-0 pacemaker-execd[25744]: notice: haproxy-bundle-podman-1_monitor_60000:841411:stderr [ ocf-exit-reason:monitor cmd failed (rc=125), output: unable to exec into haproxy-bundle-podman-1: no container with name or ID haproxy-bundle-podman-1 found: no such container ]
+7) pcmk was able to start the container that was stopped and removed:
+Jun 12 09:55:16 controller-0 pacemaker-controld[25747]: notice: Result of start operation for haproxy-bundle-podman-1 on controller-0: 0 (ok)
+8) Added 'set -x' to the RA and correctly observed that no 'podman inspect' has been invoked during monitoring operations
+
+Signed-off-by: Michele Baldessari <michele@acksyn.org>
+---
+ heartbeat/podman | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index b2b3081f9..a9bd57dea 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -255,15 +255,10 @@ podman_simple_status()
+ 
+ podman_monitor()
+ {
+-	local rc=0
+-
+-	podman_simple_status
+-	rc=$?
+-
+-	if [ $rc -ne 0 ]; then
+-		return $rc
++	if [ -z "$OCF_RESKEY_monitor_cmd" ]; then
++		podman_simple_status
++		return $?
+ 	fi
+-
+ 	monitor_cmd_exec
+ }
+ 
diff --git a/SOURCES/bz1734062-podman-3-remove-docker-remnant.patch b/SOURCES/bz1734062-podman-3-remove-docker-remnant.patch
new file mode 100644
index 0000000..56f7302
--- /dev/null
+++ b/SOURCES/bz1734062-podman-3-remove-docker-remnant.patch
@@ -0,0 +1,34 @@
+From 69c5d35a7a5421d4728db824558007bbb91a9d4a Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Wed, 12 Jun 2019 12:02:06 +0200
+Subject: [PATCH] Remove unneeded podman exec --help call
+
+There are no podman releases that do not have the exec argument, so
+let's just drop this remnant that came from the docker RA.
+
+Signed-off-by: Michele Baldessari <michele@acksyn.org>
+---
+ heartbeat/podman | 10 ++--------
+ 1 file changed, 2 insertions(+), 8 deletions(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index a9bd57dea..858023555 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -190,14 +190,8 @@ monitor_cmd_exec()
+ 		return $rc
+ 	fi
+ 
+-	if podman exec --help >/dev/null 2>&1; then
+-		out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
+-		rc=$?
+-	else
+-		out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(podman inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1)
+-		rc=$?
+-	fi
+-
++	out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
++	rc=$?
+ 	if [ $rc -eq 127 ]; then
+ 		ocf_log err "monitor cmd failed (rc=$rc), output: $out"
+ 		ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container."
diff --git a/SOURCES/bz1734062-podman-4-use-exec-to-avoid-performance-issues.patch b/SOURCES/bz1734062-podman-4-use-exec-to-avoid-performance-issues.patch
new file mode 100644
index 0000000..351207f
--- /dev/null
+++ b/SOURCES/bz1734062-podman-4-use-exec-to-avoid-performance-issues.patch
@@ -0,0 +1,161 @@
+From 6016283dfdcb45bf750f96715fc653a4c0904bca Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini <dciabrin@redhat.com>
+Date: Fri, 28 Jun 2019 13:34:40 +0200
+Subject: [PATCH] podman: only use exec to manage container's lifecycle
+
+Under heavy IO load, podman may be impacted and take a long time
+to execute some actions. If that takes more than the default
+20s container monitoring timeout, containers will restart unexpectedly.
+
+Replace all IO-sensitive podman calls (inspect, exists...) with
+equivalent "podman exec" calls, because the latter command seems
+less prone to performance degradation under IO load.
+
+With this commit, the resource agent now requires podman 1.0.2+,
+because it relies on two different patches [1,2] that improve
+IO performance and make it possible to distinguish "container stopped"
+from "container doesn't exist" error codes.
+
+Tested on an OpenStack environment with podman 1.0.2, with the
+following scenario:
+  . regular start/stop/monitor operations
+  . probe operations (pcs resource cleanup/refresh)
+  . unmanage/manage operations
+  . reboot
+
+[1] https://github.com/containers/libpod/commit/90b835db69d589de559462d988cb3fae5cf1ef49
+[2] https://github.com/containers/libpod/commit/a19975f96d2ee7efe186d9aa0be42285cfafa3f4
+---
+ heartbeat/podman | 75 ++++++++++++++++++++++++------------------------
+ 1 file changed, 37 insertions(+), 38 deletions(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index 51f6ba883..8fc2c4695 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -129,9 +129,6 @@ the health of the container. This command must return 0 to indicate that
+ the container is healthy. A non-zero return code will indicate that the
+ container has failed and should be recovered.
+ 
+-If 'podman exec' is supported, it is used to execute the command. If not,
+-nsenter is used.
+-
+ Note: Using this method for monitoring processes inside a container
+ is not recommended, as containerd tries to track processes running
+ inside the container and does not deal well with many short-lived
+@@ -192,17 +189,13 @@ monitor_cmd_exec()
+ 	local rc=$OCF_SUCCESS
+ 	local out
+ 
+-	if [ -z "$OCF_RESKEY_monitor_cmd" ]; then
+-		return $rc
+-	fi
+-
+ 	out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
+ 	rc=$?
+-	if [ $rc -eq 127 ]; then
+-		ocf_log err "monitor cmd failed (rc=$rc), output: $out"
+-		ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container."
+-		# there is no recovering from this, exit immediately
+-		exit $OCF_ERR_ARGS
++	# 125: no container with name or ID ${CONTAINER} found
++	# 126: container state improper (not running)
++	# 127: any other error
++	if [ $rc -eq 125 ] || [ $rc -eq 126 ]; then
++		rc=$OCF_NOT_RUNNING
+ 	elif [ $rc -ne 0 ]; then
+ 		ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out"
+ 		rc=$OCF_ERR_GENERIC
+@@ -215,7 +208,16 @@ monitor_cmd_exec()
+ 
+ container_exists()
+ {
+-	podman inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1
++	local rc
++	local out
++
++	out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
++	rc=$?
++	# 125: no container with name or ID ${CONTAINER} found
++	if [ $rc -ne 125 ]; then
++		return 0
++	fi
++	return 1
+ }
+ 
+ remove_container()
+@@ -236,30 +238,30 @@ remove_container()
+ 
+ podman_simple_status()
+ {
+-	local val
+-
+-	# retrieve the 'Running' attribute for the container
+-	val=$(podman inspect --format {{.State.Running}} $CONTAINER 2>/dev/null)
+-	if [ $? -ne 0 ]; then
+-		#not running as a result of container not being found
+-		return $OCF_NOT_RUNNING
+-	fi
++	local rc
+ 
+-	if ocf_is_true "$val"; then
+-		# container exists and is running
+-		return $OCF_SUCCESS
++	# simple status is implemented via podman exec
++	# everything besides success is considered "not running"
++	monitor_cmd_exec
++	rc=$?
++	if [ $rc -ne $OCF_SUCCESS ]; then
++		rc=$OCF_NOT_RUNNING;
+ 	fi
+-
+-	return $OCF_NOT_RUNNING
++	return $rc
+ }
+ 
+ podman_monitor()
+ {
+-	if [ -z "$OCF_RESKEY_monitor_cmd" ]; then
+-		podman_simple_status
+-		return $?
+-	fi
++	# We rely on running podman exec to monitor the container
++	# state because that command seems to be less prone to
++	# performance issue under IO load.
++	#
++	# For probes to work, we expect cmd_exec to be able to report
++	# when a container is not running. Here, we're not interested
++	# in distinguishing whether it's stopped or non existing
++	# (there's function container_exists for that)
+ 	monitor_cmd_exec
++	return $?
+ }
+ 
+ podman_create_mounts() {
+@@ -416,14 +418,6 @@ podman_validate()
+ 		exit $OCF_ERR_CONFIGURED
+ 	fi
+ 
+-	if [ -n "$OCF_RESKEY_monitor_cmd" ]; then
+-		podman exec --help >/dev/null 2>&1
+-		if [ ! $? ]; then
+-			ocf_log info "checking for nsenter, which is required when 'monitor_cmd' is specified"
+-			check_binary nsenter
+-		fi
+-	fi
+-
+ 	image_exists
+ 	if [ $? -ne 0 ]; then
+ 		ocf_exit_reason "base image, ${OCF_RESKEY_image}, could not be found."
+@@ -457,6 +451,11 @@ fi
+ 
+ CONTAINER=$OCF_RESKEY_name
+ 
++# Note: we currently monitor podman containers by with the "podman exec"
++# command, so make sure that invocation is always valid by enforcing the
++# exec command to be non-empty
++: ${OCF_RESKEY_monitor_cmd:=/bin/true}
++
+ case $__OCF_ACTION in
+ meta-data) meta_data
+ 		exit $OCF_SUCCESS;;
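
The exit-code mapping introduced in monitor_cmd_exec() is easy to verify by hand; a sketch (container names are placeholders and exact error messages vary between podman releases):

    podman exec no-such-container /bin/true;    echo "rc=$?"  # 125: no container with that name or ID
    podman exec stopped-container /bin/true;    echo "rc=$?"  # 126: container exists but is not running
    podman exec running-container /no/such/cmd; echo "rc=$?"  # 127: any other error, e.g. command not found

The agent treats 125 and 126 as $OCF_NOT_RUNNING and any other non-zero code as $OCF_ERR_GENERIC, which is what lets probes work without a single podman inspect call.
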
diff --git a/SOURCES/bz1734067-CTDB-1-explicitly-use-bash-shell.patch b/SOURCES/bz1734067-CTDB-1-explicitly-use-bash-shell.patch
new file mode 100644
index 0000000..cb13c0a
--- /dev/null
+++ b/SOURCES/bz1734067-CTDB-1-explicitly-use-bash-shell.patch
@@ -0,0 +1,39 @@
+From 1ff4ce7cbe58b5309f00ac1bbe124c562b6dcaf6 Mon Sep 17 00:00:00 2001
+From: David Disseldorp <ddiss@suse.de>
+Date: Fri, 27 Jul 2018 16:02:26 +0200
+Subject: [PATCH] CTDB: explicitly use bash shell
+
+Upcoming recovery lock substring processing is bash specific.
+
+Signed-off-by: David Disseldorp <ddiss@suse.de>
+---
+ configure.ac                | 1 +
+ heartbeat/{CTDB => CTDB.in} | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+ rename heartbeat/{CTDB => CTDB.in} (99%)
+
+diff --git a/configure.ac b/configure.ac
+index 039b4942c..10f5314da 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -978,6 +978,7 @@ AC_CONFIG_FILES([heartbeat/slapd], [chmod +x heartbeat/slapd])
+ AC_CONFIG_FILES([heartbeat/sybaseASE], [chmod +x heartbeat/sybaseASE])
+ AC_CONFIG_FILES([heartbeat/syslog-ng], [chmod +x heartbeat/syslog-ng])
+ AC_CONFIG_FILES([heartbeat/vsftpd], [chmod +x heartbeat/vsftpd])
++AC_CONFIG_FILES([heartbeat/CTDB], [chmod +x heartbeat/CTDB])
+ AC_CONFIG_FILES([rgmanager/src/resources/ASEHAagent.sh], [chmod +x rgmanager/src/resources/ASEHAagent.sh])
+ AC_CONFIG_FILES([rgmanager/src/resources/apache.sh], [chmod +x rgmanager/src/resources/apache.sh])
+ AC_CONFIG_FILES([rgmanager/src/resources/bind-mount.sh], [chmod +x rgmanager/src/resources/bind-mount.sh])
+diff --git a/heartbeat/CTDB b/heartbeat/CTDB.in
+similarity index 99%
+rename from heartbeat/CTDB
+rename to heartbeat/CTDB.in
+index 28e58cea0..7d87a4ef7 100755
+--- a/heartbeat/CTDB
++++ b/heartbeat/CTDB.in
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!@BASH_SHELL@
+ #
+ #             OCF Resource Agent for managing CTDB
+ #
diff --git a/SOURCES/bz1734067-CTDB-2-add-ctdb_max_open_files-parameter.patch b/SOURCES/bz1734067-CTDB-2-add-ctdb_max_open_files-parameter.patch
new file mode 100644
index 0000000..c30bfee
--- /dev/null
+++ b/SOURCES/bz1734067-CTDB-2-add-ctdb_max_open_files-parameter.patch
@@ -0,0 +1,40 @@
+From 61f7cb5954d1727f58fab6d642a124ef342c8641 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Wed, 20 Feb 2019 11:24:28 +0100
+Subject: [PATCH] CTDB: add ctdb_max_open_files parameter
+
+---
+ heartbeat/CTDB.in | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
+index 0d58c850a..bbf8ef627 100755
+--- a/heartbeat/CTDB.in
++++ b/heartbeat/CTDB.in
+@@ -288,6 +288,14 @@ What debug level to run at (0-10). Higher means more verbose.
+ <content type="integer" default="2" />
+ </parameter>
+ 
++<parameter name="ctdb_max_open_files" required="0">
++<longdesc lang="en">
++Maximum number of open files (for ulimit -n)
++</longdesc>
++<shortdesc lang="en">Max open files</shortdesc>
++<content type="integer" default="" />
++</parameter>
++
+ <parameter name="smb_conf" unique="0" required="0">
+ <longdesc lang="en">
+ Path to default samba config file.  Only necessary if CTDB
+@@ -611,6 +619,11 @@ ctdb_start() {
+ 	start_as_disabled="--start-as-disabled"
+ 	ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" || start_as_disabled=""
+ 
++	# set nofile ulimit for ctdbd process
++	if [ -n "$OCF_RESKEY_ctdb_max_open_files" ]; then
++		ulimit -n "$OCF_RESKEY_ctdb_max_open_files"
++	fi
++
+ 	# Start her up
+ 	"$OCF_RESKEY_ctdbd_binary" \
+ 		--reclock="$OCF_RESKEY_ctdb_recovery_lock" \
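
A hypothetical way to use the new parameter (resource name and value are only examples); the agent raises its own nofile limit right before launching ctdbd, so the daemon inherits it:

    pcs resource update ctdb ctdb_max_open_files=65536
    # effect inside ctdb_start() when the parameter is set:
    ulimit -n "$OCF_RESKEY_ctdb_max_open_files"
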
diff --git a/SOURCES/bz1734067-CTDB-3-fixes.patch b/SOURCES/bz1734067-CTDB-3-fixes.patch
new file mode 100644
index 0000000..813bf81
--- /dev/null
+++ b/SOURCES/bz1734067-CTDB-3-fixes.patch
@@ -0,0 +1,131 @@
+From 8c61f2019d11781b737251b5cf839437b25fc53f Mon Sep 17 00:00:00 2001
+From: David Disseldorp <ddiss@suse.de>
+Date: Wed, 25 Jul 2018 23:15:10 +0200
+Subject: [PATCH 1/3] CTDB: fix incorrect db corruption reports (bsc#1101668)
+
+If a database was disconnected during an active transaction, then
+tdbdump may fail with e.g.:
+> /usr/bin/tdbdump /var/lib/ctdb/persistent/secrets.tdb.1
+Failed to open /var/lib/ctdb/persistent/secrets.tdb.1
+tdb(/var/lib/ctdb/persistent/secrets.tdb.1): FATAL:
+tdb_transaction_recover: attempt to recover read only database
+
+This does *not* indicate corruption, only that tdbdump, which opens the
+database readonly, isn't able to perform recovery.
+
+Using tdbtool check, instead of tdbdump, passes:
+> tdbtool /var/lib/ctdb/persistent/secrets.tdb.1 check
+tdb_transaction_recover: recovered 2146304 byte database
+Database integrity is OK and has 2 records.
+
+Drop the tdbdump checks, and instead rely on the core ctdb event script,
+which performs the same checks with tdbtool.
+
+Signed-off-by: David Disseldorp <ddiss@suse.de>
+---
+ heartbeat/CTDB.in | 18 ++++--------------
+ 1 file changed, 4 insertions(+), 14 deletions(-)
+
+diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
+index 1456ea32b..28e58cea0 100755
+--- a/heartbeat/CTDB.in
++++ b/heartbeat/CTDB.in
+@@ -392,6 +392,8 @@ enable_event_scripts() {
+ 	local event_dir
+ 	event_dir=$OCF_RESKEY_ctdb_config_dir/events.d
+ 
++	chmod u+x "$event_dir/00.ctdb"	# core database health check
++
+ 	if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then
+ 		chmod u+x "$event_dir/10.interface"
+ 	else
+@@ -563,17 +565,6 @@ ctdb_start() {
+ 	rv=$?
+ 	[ $rv -ne 0 ] && return $rv
+ 
+-	# Die if databases are corrupted
+-	persistent_db_dir="${OCF_RESKEY_ctdb_dbdir}/persistent"
+-	mkdir -p $persistent_db_dir 2>/dev/null
+-	for pdbase in $persistent_db_dir/*.tdb.[0-9]; do
+-		[ -f "$pdbase" ] || break
+-		/usr/bin/tdbdump "$pdbase" >/dev/null 2>/dev/null || {
+-			ocf_exit_reason "Persistent database $pdbase is corrupted!  CTDB will not start."
+-			return $OCF_ERR_GENERIC
+-		}
+-	done
+-
+ 	# Add necessary configuration to smb.conf
+ 	init_smb_conf
+ 	if [ $? -ne 0 ]; then
+@@ -737,9 +728,8 @@ ctdb_monitor() {
+ 
+ 
+ ctdb_validate() {
+-	# Required binaries (full path to tdbdump is intentional, as that's
+-	# what's used in ctdb_start, which was lifted from the init script)
+-	for binary in pkill /usr/bin/tdbdump; do
++	# Required binaries
++	for binary in pkill; do
+ 		check_binary $binary
+ 	done
+ 
+
+From 1ff4ce7cbe58b5309f00ac1bbe124c562b6dcaf6 Mon Sep 17 00:00:00 2001
+From: David Disseldorp <ddiss@suse.de>
+Date: Fri, 27 Jul 2018 16:02:26 +0200
+Subject: [PATCH 2/3] CTDB: explicitly use bash shell
+
+Upcoming recovery lock substring processing is bash specific.
+
+Signed-off-by: David Disseldorp <ddiss@suse.de>
+---
+ configure.ac                | 1 +
+ heartbeat/CTDB.in           | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
+index 7d87a4ef7..f9b5c564f 100755
+--- a/heartbeat/CTDB.in
++++ b/heartbeat/CTDB.in
+@@ -134,8 +134,8 @@ For more information see http://linux-ha.org/wiki/CTDB_(resource_agent)
+ 
+ <parameter name="ctdb_recovery_lock" unique="1" required="1">
+ <longdesc lang="en">
+-The location of a shared lock file, common across all nodes.
+-This must be on shared storage, e.g.: /shared-fs/samba/ctdb.lock
++The location of a shared lock file or helper binary, common across all nodes.
++See CTDB documentation for details.
+ </longdesc>
+ <shortdesc lang="en">CTDB shared lock file</shortdesc>
+ <content type="string" default="" />
+@@ -757,13 +757,24 @@ ctdb_validate() {
+ 		return $OCF_ERR_CONFIGURED
+ 	fi
+ 
+-	lock_dir=$(dirname "$OCF_RESKEY_ctdb_recovery_lock")
+-	touch "$lock_dir/$$" 2>/dev/null
+-	if [ $? != 0 ]; then
+-		ocf_exit_reason "Directory for lock file '$OCF_RESKEY_ctdb_recovery_lock' does not exist, or is not writable."
+-		return $OCF_ERR_ARGS
++	if [ "${OCF_RESKEY_ctdb_recovery_lock:0:1}" == '!' ]; then
++		# '!' prefix means recovery lock is handled via a helper binary
++		binary="${OCF_RESKEY_ctdb_recovery_lock:1}"
++		binary="${binary%% *}"	# trim any parameters
++		if [ -z "$binary" ]; then
++			ocf_exit_reason "ctdb_recovery_lock invalid helper"
++			return $OCF_ERR_CONFIGURED
++		fi
++		check_binary "${binary}"
++	else
++		lock_dir=$(dirname "$OCF_RESKEY_ctdb_recovery_lock")
++		touch "$lock_dir/$$" 2>/dev/null
++		if [ $? != 0 ]; then
++			ocf_exit_reason "Directory for lock file '$OCF_RESKEY_ctdb_recovery_lock' does not exist, or is not writable."
++			return $OCF_ERR_ARGS
++		fi
++		rm "$lock_dir/$$"
+ 	fi
+-	rm "$lock_dir/$$"
+ 
+ 	return $OCF_SUCCESS
+ }
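
The '!' branch above is the bash-specific substring processing that motivated the earlier switch to @BASH_SHELL@; a standalone sketch of the same parsing, with an example helper path:

    OCF_RESKEY_ctdb_recovery_lock='!/usr/libexec/ctdb/ctdb_mutex_fcntl_helper /shared/ctdb.lock'
    if [ "${OCF_RESKEY_ctdb_recovery_lock:0:1}" = '!' ]; then
        binary="${OCF_RESKEY_ctdb_recovery_lock:1}"   # drop the leading '!'
        binary="${binary%% *}"                        # trim any helper arguments
        echo "recovery lock helper: $binary"          # -> /usr/libexec/ctdb/ctdb_mutex_fcntl_helper
    fi
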
diff --git a/SOURCES/bz1734067-CTDB-4-add-v4.9-support.patch b/SOURCES/bz1734067-CTDB-4-add-v4.9-support.patch
new file mode 100644
index 0000000..a3332ef
--- /dev/null
+++ b/SOURCES/bz1734067-CTDB-4-add-v4.9-support.patch
@@ -0,0 +1,452 @@
+From 30b9f55325d2acfba27aa6859c7360e10b7201d7 Mon Sep 17 00:00:00 2001
+From: David Disseldorp <ddiss@suse.de>
+Date: Wed, 5 Jun 2019 00:41:13 +0200
+Subject: [PATCH 1/3] CTDB: support Samba 4.9+
+
+With Samba 4.9+, all ctdbd parameters have moved to config files.
+Generate a new /etc/ctdb/ctdb.conf file during ctdb startup, based on RA
+configuration.
+
+Event scripts in Samba 4.9+ are also no longer enabled/disabled based on
+file mode. Use the "ctdb event script enable/disable" helpers, which now
+work without a running ctdbd.
+
+Fixes: https://github.com/ClusterLabs/resource-agents/issues/1196
+Signed-off-by: David Disseldorp <ddiss@suse.de>
+Signed-off-by: Noel Power <noel.power@suse.com>
+Signed-off-by: Amitay Isaacs <amitay@samba.org>
+---
+ heartbeat/CTDB.in | 214 ++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 167 insertions(+), 47 deletions(-)
+
+diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
+index 4dd646896..79a2f97e7 100755
+--- a/heartbeat/CTDB.in
++++ b/heartbeat/CTDB.in
+@@ -143,6 +143,10 @@ OCF_RESKEY_smb_fileid_algorithm_default=""
+ 
+ #######################################################################
+ 
++ctdb_version() {
++	$OCF_RESKEY_ctdb_binary version | awk '{print $NF}' | sed "s/[-\.]\?[[:alpha:]].*//"
++}
++
+ meta_data() {
+ 	cat <<END
+ <?xml version="1.0"?>
+@@ -256,7 +260,7 @@ host any public ip addresses.
+ <longdesc lang="en">
+ The directory containing various CTDB configuration files.
+ The "nodes" and "notify.sh" scripts are expected to be
+-in this directory, as is the "events.d" subdirectory.
++in this directory.
+ </longdesc>
+ <shortdesc lang="en">CTDB config file directory</shortdesc>
+ <content type="string" default="/etc/ctdb" />
+@@ -282,8 +286,10 @@ Full path to the CTDB cluster daemon binary.
+ <longdesc lang="en">
+ Full path to the domain socket that ctdbd will create, used for
+ local clients to attach and communicate with the ctdb daemon.
++With CTDB 4.9.0 and later the socket path is hardcoded at build
++time, so this parameter is ignored.
+ </longdesc>
+-<shortdesc lang="en">CTDB socket location</shortdesc>
++<shortdesc lang="en">CTDB socket location (ignored with CTDB 4.9+)</shortdesc>
+ <content type="string" default="${OCF_RESKEY_ctdb_socket}" />
+ </parameter>
+ 
+@@ -421,16 +427,28 @@ invoke_ctdb() {
+ 		timeout=$((OCF_RESKEY_CRM_meta_timeout/1000))
+ 		timelimit=$((OCF_RESKEY_CRM_meta_timeout/1000))
+ 	fi
+-	$OCF_RESKEY_ctdb_binary --socket="$OCF_RESKEY_ctdb_socket" \
+-		-t $timeout -T $timelimit \
+-		"$@"
++
++	local vers=$(ctdb_version)
++	ocf_version_cmp "$vers" "4.9.0"
++
++	# if version < 4.9.0 specify '--socket' otherwise it's
++	# a compiled option
++	if [ "$?" -eq "0" ]; then
++		$OCF_RESKEY_ctdb_binary --socket="$OCF_RESKEY_ctdb_socket" \
++			-t $timeout -T $timelimit \
++			"$@"
++	else
++		$OCF_RESKEY_ctdb_binary \
++			-t $timeout -T $timelimit \
++			"$@"
++	fi
+ }
+ 
+ # Enable any event scripts that are explicitly required.
+ # Any others will ultimately be invoked or not based on how they ship
+ # with CTDB, but will generally have no effect, beacuase the relevant
+ # CTDB_MANAGES_* options won't be set in /etc/sysconfig/ctdb.
+-enable_event_scripts() {
++enable_event_scripts_chmod() {
+ 	local event_dir
+ 	event_dir=$OCF_RESKEY_ctdb_config_dir/events.d
+ 
+@@ -454,6 +472,36 @@ enable_event_scripts() {
+ 	fi
+ }
+ 
++enable_event_scripts_symlink() {
++	# event scripts are symlinked once enabled, with the link source in...
++	mkdir -p "$OCF_RESKEY_ctdb_config_dir/events/legacy" 2>/dev/null
++
++	invoke_ctdb event script enable legacy 00.ctdb
++
++	if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then
++		invoke_ctdb event script enable legacy 10.interface
++	else
++		invoke_ctdb event script disable legacy 10.interface
++	fi
++	if [ -f "${OCF_RESKEY_ctdb_config_dir}/static-routes" ]; then
++		invoke_ctdb event script enable legacy 11.routing
++	else
++		invoke_ctdb event script disable legacy 11.routing
++	fi
++
++	if ocf_is_true "$OCF_RESKEY_ctdb_manages_winbind"; then
++		invoke_ctdb event script enable legacy 49.winbind
++	else
++		invoke_ctdb event script disable legacy 49.winbind
++	fi
++
++	if ocf_is_true "$OCF_RESKEY_ctdb_manages_samba"; then
++		invoke_ctdb event script enable legacy 50.samba
++	else
++		invoke_ctdb event script disable legacy 50.samba
++	fi
++}
++
+ # This function has no effect (currently no way to set CTDB_SET_*)
+ # but remains here in case we need it in future.
+ set_ctdb_variables() {
+@@ -556,6 +604,46 @@ append_ctdb_sysconfig() {
+ 	[ -n "$2" ] && echo "$1=$2" >> "$CTDB_SYSCONFIG"
+ }
+ 
++generate_ctdb_config() {
++	local ctdb_config="$OCF_RESKEY_ctdb_config_dir/ctdb.conf"
++
++	# Backup existing config if we're not already using an auto-generated one
++	grep -qa '# CTDB-RA: Auto-generated' $ctdb_config || cp -p $ctdb_config ${ctdb_config}.ctdb-ra-orig
++	if [ $? -ne 0 ]; then
++		ocf_log warn "Unable to backup $ctdb_config to ${ctdb_config}.ctdb-ra-orig"
++	fi
++
++	local log_option="file:$OCF_RESKEY_ctdb_logfile"
++	if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
++		log_option="syslog"
++	fi
++
++	local start_as_disabled="false"
++	ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" && start_as_disabled="true"
++
++	local dbdir_volatile="$OCF_RESKEY_ctdb_dbdir/volatile"
++	[ -d "$dbdir_volatile" ] || mkdir -p "$dbdir_volatile" 2>/dev/null
++	local dbdir_persistent="$OCF_RESKEY_ctdb_dbdir/persistent"
++	[ -d "$dbdir_persistent" ] || mkdir -p "$dbdir_persistent" 2>/dev/null
++	local dbdir_state="$OCF_RESKEY_ctdb_dbdir/state"
++	[ -d "$dbdir_state" ] || mkdir -p "$dbdir_state" 2>/dev/null
++
++cat >$ctdb_config <<EOF
++# CTDB-RA: Auto-generated
++[logging]
++	location = $log_option
++	log level = $OCF_RESKEY_ctdb_debuglevel
++[cluster]
++	recovery lock = $OCF_RESKEY_ctdb_recovery_lock
++[database]
++	volatile database directory = $dbdir_volatile
++	persistent database directory = $dbdir_persistent
++	state database directory = $dbdir_state
++[legacy]
++	start as disabled = $start_as_disabled
++EOF
++}
++
+ # Generate a new, minimal CTDB config file that's just enough
+ # to get CTDB running as configured by the RA parameters.
+ generate_ctdb_sysconfig() {
+@@ -589,6 +677,58 @@ EOF
+ }
+ 
+ 
++invoke_ctdbd() {
++	local vers="$1"
++
++	ocf_version_cmp "$vers" "4.9.0"
++	if [ "$?" -ne "0" ]; then
++		# With 4.9+, all ctdbd binary parameters are provided as
++		# config settings
++		$OCF_RESKEY_ctdbd_binary
++		return
++	fi
++
++	# Use logfile by default, or syslog if asked for
++	local log_option
++	# --logging supported from v4.3.0 and --logfile / --syslog support
++	# has been removed from newer versions
++	ocf_version_cmp "$vers" "4.2.14"
++	if [ "$?" -eq "2" ]; then
++		log_option="--logging=file:$OCF_RESKEY_ctdb_logfile"
++		if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
++			log_option="--logging=syslog"
++		fi
++	else
++		log_option="--logfile=$OCF_RESKEY_ctdb_logfile"
++		if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
++			log_option="--syslog"
++		fi
++	fi
++
++	# public addresses file (should not be present, but need to set for correctness if it is)
++	local pub_addr_option
++	pub_addr_option=""
++	[ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ] && \
++		pub_addr_option="--public-addresses=${OCF_RESKEY_ctdb_config_dir}/public_addresses"
++	# start as disabled
++	local start_as_disabled
++	start_as_disabled="--start-as-disabled"
++	ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" || start_as_disabled=""
++
++	$OCF_RESKEY_ctdbd_binary \
++		--reclock="$OCF_RESKEY_ctdb_recovery_lock" \
++		--nlist="$OCF_RESKEY_ctdb_config_dir/nodes" \
++		--socket="$OCF_RESKEY_ctdb_socket" \
++		--dbdir="$OCF_RESKEY_ctdb_dbdir" \
++		--dbdir-persistent="$OCF_RESKEY_ctdb_dbdir/persistent" \
++		--event-script-dir="$OCF_RESKEY_ctdb_config_dir/events.d" \
++		--notification-script="$OCF_RESKEY_ctdb_config_dir/notify.sh" \
++		--transport=tcp \
++		$start_as_disabled $log_option $pub_addr_option \
++		-d "$OCF_RESKEY_ctdb_debuglevel"
++}
++
++
+ ctdb_usage() {
+ 	cat <<END
+ usage: $0 {start|stop|monitor|validate-all|meta-data}
+@@ -614,27 +754,26 @@ ctdb_start() {
+ 		return $OCF_ERR_GENERIC
+ 	fi
+ 
+-	# Generate new CTDB sysconfig
+-	generate_ctdb_sysconfig
+-	enable_event_scripts
++	local version=$(ctdb_version)
+ 
+-	# Use logfile by default, or syslog if asked for
+-	local log_option
+-	# --logging supported from v4.3.0 and --logfile / --syslog support 
+-	# has been removed from newer versions
+-	version=$(ctdb version | awk '{print $NF}')
+-	ocf_version_cmp "$version" "4.2.14"
+-	if [ "$?" -eq "2" ]; then
+-		log_option="--logging=file:$OCF_RESKEY_ctdb_logfile"
+-		if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
+-			log_option="--logging=syslog"
+-		fi
++	ocf_version_cmp "$version" "4.9.0"
++	if [ "$?" -eq "0" ]; then
++		# prior to 4.9, ctdbd parameters are in sysconfig or passed as
++		# binary arguments
++		generate_ctdb_sysconfig
++
++		# prior to 4.9, event script enablement without a running
++		# ctdbd is done by chmoding the scripts directly
++		enable_event_scripts_chmod
+ 	else
+-		log_option="--logfile=$OCF_RESKEY_ctdb_logfile"
+-		if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
+-			log_option="--syslog"
+-		fi
++		# 4.9+ moves all ctdbd parameters to ctdb.conf
++		generate_ctdb_config
++
++		# 4.9+ event scripts can be enabled with ctdb directly, which
++		# performs a symlink
++		enable_event_scripts_symlink
+ 	fi
++
+ 	if [ ! -d "$(dirname $OCF_RESKEY_ctdb_logfile)" ]; then
+ 		# ensure the logfile's directory exists, otherwise ctdb will fail to start
+ 		mkdir -p $(dirname $OCF_RESKEY_ctdb_logfile)
+@@ -643,33 +782,14 @@ ctdb_start() {
+ 	# ensure ctdb's rundir exists, otherwise it will fail to start
+ 	mkdir -p $OCF_RESKEY_ctdb_rundir 2>/dev/null
+ 
+-	# public addresses file (should not be present, but need to set for correctness if it is)
+-	local pub_addr_option
+-	pub_addr_option=""
+-	[ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ] && \
+-		pub_addr_option="--public-addresses=${OCF_RESKEY_ctdb_config_dir}/public_addresses"
+-	# start as disabled
+-	local start_as_disabled
+-	start_as_disabled="--start-as-disabled"
+-	ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" || start_as_disabled=""
+-
+ 	# set nofile ulimit for ctdbd process
+ 	if [ -n "$OCF_RESKEY_ctdb_max_open_files" ]; then
+ 		ulimit -n "$OCF_RESKEY_ctdb_max_open_files"
+ 	fi
+ 
+ 	# Start her up
+-	"$OCF_RESKEY_ctdbd_binary" \
+-		--reclock="$OCF_RESKEY_ctdb_recovery_lock" \
+-		--nlist="$OCF_RESKEY_ctdb_config_dir/nodes" \
+-		--socket="$OCF_RESKEY_ctdb_socket" \
+-		--dbdir="$OCF_RESKEY_ctdb_dbdir" \
+-		--dbdir-persistent="$OCF_RESKEY_ctdb_dbdir/persistent" \
+-		--event-script-dir="$OCF_RESKEY_ctdb_config_dir/events.d" \
+-		--notification-script="$OCF_RESKEY_ctdb_config_dir/notify.sh" \
+-		--transport=tcp \
+-		$start_as_disabled $log_option $pub_addr_option \
+-		-d "$OCF_RESKEY_ctdb_debuglevel"
++	invoke_ctdbd "$version"
++
+ 	if [ $? -ne 0 ]; then
+ 		# cleanup smb.conf
+ 		cleanup_smb_conf
+@@ -688,7 +808,7 @@ ctdb_start() {
+ 			if [ $? -ne 0 ]; then
+ 				# CTDB will be running, kill it before returning
+ 				ctdb_stop
+-				ocf_exit_reason "Can't invoke $OCF_RESKEY_ctdb_binary --socket=$OCF_RESKEY_ctdb_socket status"
++				ocf_exit_reason "Can't invoke $OCF_RESKEY_ctdb_binary status"
+ 				return $OCF_ERR_GENERIC
+ 			fi
+ 			if ! echo "$status" | grep -qs 'UNHEALTHY (THIS'; then
+@@ -725,7 +845,7 @@ ctdb_stop() {
+ 		[ $count -gt 10 ] && {
+ 			ocf_log info "killing ctdbd "
+ 			pkill -9 -f "$OCF_RESKEY_ctdbd_binary"
+-			pkill -9 -f "${OCF_RESKEY_ctdb_config_dir}/events.d/"
++			pkill -9 -f "${OCF_RESKEY_ctdb_config_dir}/events"
+ 		}
+ 	done
+ 
+
+From b4753b7cb46045bb9e7ed5e3a0a20f6104264b12 Mon Sep 17 00:00:00 2001
+From: David Disseldorp <ddiss@suse.de>
+Date: Wed, 10 Jul 2019 17:11:50 +0200
+Subject: [PATCH 2/3] CTDB: generate script.options file for 4.9+
+
+Event scripts in CTDB 4.9+ ignore sysconfig configuration and instead
+parse parameters in ctdb_config_dir/script.options.
+
+Signed-off-by: David Disseldorp <ddiss@suse.de>
+---
+ heartbeat/CTDB.in | 35 ++++++++++++++++++++++++++++++-----
+ 1 file changed, 30 insertions(+), 5 deletions(-)
+
+diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
+index 79a2f97e7..0906f3da9 100755
+--- a/heartbeat/CTDB.in
++++ b/heartbeat/CTDB.in
+@@ -242,6 +242,7 @@ If the amount of free memory drops below this value the node will
+ become unhealthy and ctdb and all managed services will be shutdown.
+ Once this occurs, the administrator needs to find the reason for the
+ OOM situation, rectify it and restart ctdb with "service ctdb start".
++With CTDB 4.4.0 and later this parameter is ignored.
+ </longdesc>
+ <shortdesc lang="en">Minimum amount of free memory (MB)</shortdesc>
+ <content type="integer" default="${OCF_RESKEY_ctdb_monitor_free_memory_default}" />
+@@ -600,8 +601,10 @@ cleanup_smb_conf() {
+ 	mv "$OCF_RESKEY_smb_conf.$$" "$OCF_RESKEY_smb_conf"
+ }
+ 
+-append_ctdb_sysconfig() {
+-	[ -n "$2" ] && echo "$1=$2" >> "$CTDB_SYSCONFIG"
++append_conf() {
++	local file_path="$1"
++	shift
++	[ -n "$2" ] && echo "$1=$2" >> "$file_path"
+ }
+ 
+ generate_ctdb_config() {
+@@ -644,6 +647,25 @@ cat >$ctdb_config <<EOF
+ EOF
+ }
+ 
++generate_event_script_options() {
++	local script_options="$OCF_RESKEY_ctdb_config_dir/script.options"
++
++	# Backup existing config if we're not already using an auto-generated one
++	grep -qa '# CTDB-RA: Auto-generated' $script_options || cp -p $script_options ${script_options}.ctdb-ra-orig
++	if [ $? -ne 0 ]; then
++		ocf_log warn "Unable to backup $script_options to ${script_options}.ctdb-ra-orig"
++	fi
++
++cat >$script_options <<EOF
++# CTDB-RA: Auto-generated
++CTDB_SAMBA_SKIP_SHARE_CHECK=$(ocf_is_true "$OCF_RESKEY_ctdb_samba_skip_share_check" && echo 'yes' || echo 'no')
++EOF
++
++	append_conf "$script_options" CTDB_SERVICE_SMB $OCF_RESKEY_ctdb_service_smb
++	append_conf "$script_options" CTDB_SERVICE_NMB $OCF_RESKEY_ctdb_service_nmb
++	append_conf "$script_options" CTDB_SERVICE_WINBIND $OCF_RESKEY_ctdb_service_winbind
++}
++
+ # Generate a new, minimal CTDB config file that's just enough
+ # to get CTDB running as configured by the RA parameters.
+ generate_ctdb_sysconfig() {
+@@ -671,9 +693,9 @@ CTDB_SAMBA_SKIP_SHARE_CHECK=$(ocf_is_true "$OCF_RESKEY_ctdb_samba_skip_share_che
+ CTDB_MANAGES_SAMBA=$(ocf_is_true "$OCF_RESKEY_ctdb_manages_samba" && echo 'yes' || echo 'no')
+ CTDB_MANAGES_WINBIND=$(ocf_is_true "$OCF_RESKEY_ctdb_manages_winbind" && echo 'yes' || echo 'no')
+ EOF
+-	append_ctdb_sysconfig CTDB_SERVICE_SMB $OCF_RESKEY_ctdb_service_smb
+-	append_ctdb_sysconfig CTDB_SERVICE_NMB $OCF_RESKEY_ctdb_service_nmb
+-	append_ctdb_sysconfig CTDB_SERVICE_WINBIND $OCF_RESKEY_ctdb_service_winbind
++	append_conf "$CTDB_SYSCONFIG" CTDB_SERVICE_SMB $OCF_RESKEY_ctdb_service_smb
++	append_conf "$CTDB_SYSCONFIG" CTDB_SERVICE_NMB $OCF_RESKEY_ctdb_service_nmb
++	append_conf "$CTDB_SYSCONFIG" CTDB_SERVICE_WINBIND $OCF_RESKEY_ctdb_service_winbind
+ }
+ 
+ 
+@@ -769,6 +791,9 @@ ctdb_start() {
+ 		# 4.9+ moves all ctdbd parameters to ctdb.conf
+ 		generate_ctdb_config
+ 
++		# 4.9+ event script options are in script.options
++		generate_event_script_options
++
+ 		# 4.9+ event scripts can be enabled with ctdb directly, which
+ 		# performs a symlink
+ 		enable_event_scripts_symlink
+
+From 0a8610711f90c4cc7a2b380a4795f463532d9520 Mon Sep 17 00:00:00 2001
+From: David Disseldorp <ddiss@suse.de>
+Date: Wed, 10 Jul 2019 17:54:01 +0200
+Subject: [PATCH 3/3] CTDB: drop sysconfig presence check during validate
+
+There are two reasons to avoid this check:
+- for ctdb versions prior to 4.9.0, the sysconfig file is generated by
+  the resource agent start hook *after* ctdb_validate() is called.
+- post 4.9.0 versions don't use the sysconfig file.
+
+Signed-off-by: David Disseldorp <ddiss@suse.de>
+---
+ heartbeat/CTDB.in | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/heartbeat/CTDB.in b/heartbeat/CTDB.in
+index 0906f3da9..15d78902e 100755
+--- a/heartbeat/CTDB.in
++++ b/heartbeat/CTDB.in
+@@ -925,11 +925,6 @@ ctdb_validate() {
+ 		check_binary $binary
+ 	done
+ 
+-	if [ -z "$CTDB_SYSCONFIG" ]; then
+-		ocf_exit_reason "Can't find CTDB config file (expecting /etc/sysconfig/ctdb, /etc/default/ctdb or similar)"
+-		return $OCF_ERR_INSTALLED
+-	fi
+-
+ 	if ocf_is_true "$OCF_RESKEY_ctdb_manages_samba" && [ ! -f "$OCF_RESKEY_smb_conf" ]; then
+ 		ocf_exit_reason "Samba config file '$OCF_RESKEY_smb_conf' does not exist."
+ 		return $OCF_ERR_INSTALLED
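
All three patches hinge on the same version gate: ctdb_version() strips the packaging suffix from the reported version and ocf_version_cmp returns 0 when its first argument is the lower version. A sketch of the resulting branching (the echo strings only summarise the behaviour above):

    vers=$($OCF_RESKEY_ctdb_binary version | awk '{print $NF}' | sed "s/[-\.]\?[[:alpha:]].*//")
    ocf_version_cmp "$vers" "4.9.0"
    if [ $? -eq 0 ]; then
        echo "pre-4.9: write sysconfig, chmod events.d scripts, pass ctdbd options on the command line"
    else
        echo "4.9+: write ctdb.conf and script.options, enable scripts via 'ctdb event script enable legacy ...'"
    fi
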
diff --git a/SOURCES/bz1738303-podman-drop-in-support.patch b/SOURCES/bz1738303-podman-drop-in-support.patch
new file mode 100644
index 0000000..8c4be1a
--- /dev/null
+++ b/SOURCES/bz1738303-podman-drop-in-support.patch
@@ -0,0 +1,193 @@
+From 462ada6164cb77c81f5291d88287d68506d38056 Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini <dciabrin@redhat.com>
+Date: Tue, 9 Jul 2019 23:14:21 +0200
+Subject: [PATCH] Generate additional drop-in dependencies for podman containers
+
+When podman creates a container, it creates two additional systemd
+scope files dynamically:
+
+  - libpod-conmon-<CONTAINERID>.scope - runs a conmon process that
+    tracks a container's pid1 into a dedicated pidfile.
+  - libpod-<CONTAINERID>.scope - created dynamically by runc,
+    for cgroups accounting
+
+On shutdown, it can happen that systemd stops those scopes early,
+which in turn sends a SIGTERM to pacemaker-managed containers
+before pacemaker has scheduled any stop operation. That
+confuses the cluster and may break shutdown.
+
+Add a new option in the resource-agent to inject additional
+dependencies into the dynamically created scope files, so that
+systemd is not allowed to stop scopes before the pacemaker
+service itself is stopped.
+
+When that option is enabled, the scopes look like:
+
+    # podman ps | grep galera
+    c329819a1227  192.168.122.8:8787/rhosp15/openstack-mariadb:latest                     dumb-init -- /bin...  About an hour ago  Up About an hour ago         galera-bundle-podman-0
+
+    # systemctl cat libpod*c329819a1227*
+    # /run/systemd/transient/libpod-conmon-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope
+    # This is a transient unit file, created programmatically via the systemd API. Do not edit.
+    [Scope]
+    Slice=machine.slice
+    Delegate=yes
+
+    [Unit]
+    DefaultDependencies=no
+
+    # /run/systemd/transient/libpod-conmon-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope.d/dep.conf
+    [Unit]
+    Before=pacemaker.service
+
+    # /run/systemd/transient/libpod-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope
+    # This is a transient unit file, created programmatically via the systemd API. Do not edit.
+    [Unit]
+    Description=libcontainer container c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b
+
+    [Scope]
+    Slice=machine.slice
+    Delegate=yes
+    MemoryAccounting=yes
+    CPUAccounting=yes
+    BlockIOAccounting=yes
+
+    [Unit]
+    DefaultDependencies=no
+
+    # /run/systemd/transient/libpod-c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b.scope.d/dep.conf
+    [Unit]
+    Before=pacemaker.service
+
+Effectively, this prevents systemd from managing the shutdown of any
+pacemaker-managed podman container.
+
+Related: rhbz#1726442
+---
+ heartbeat/podman | 82 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 81 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index 8fc2c4695..8a916eb8c 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -158,6 +158,16 @@ to have the particular one persist when this happens.
+ <shortdesc lang="en">reuse container</shortdesc>
+ <content type="boolean" default="${OCF_RESKEY_reuse_default}"/>
+ </parameter>
++
++<parameter name="drop_in_dependency" required="0" unique="0">
++<longdesc lang="en">
++Use transient drop-in files to add extra dependencies to the systemd
++scopes associated to the container. During reboot, this prevents systemd
++to stop the container before pacemaker.
++</longdesc>
++<shortdesc lang="en">drop-in dependency</shortdesc>
++<content type="boolean"/>
++</parameter>
+ </parameters>
+ 
+ <actions>
+@@ -273,8 +283,57 @@ podman_create_mounts() {
+ 	IFS="$oldIFS"
+ }
+ 
++podman_container_id()
++{
++	# Retrieve the container ID by doing a "podman ps" rather than
++	# a "podman inspect", because the latter has performance issues
++	# under IO load.
++	# We could have run "podman start $CONTAINER" to get the ID back
++	# but if the container is stopped, the command will return a
++	# name instead of a container ID. This would break us.
++	podman ps --no-trunc --format '{{.ID}} {{.Names}}' | grep -F -w -m1 "$CONTAINER" | cut -d' ' -f1
++}
++
++
++create_transient_drop_in_dependency()
++{
++	local cid=$1
++	local rc=$OCF_SUCCESS
++
++	if [ -z "$cid" ]; then
++		ocf_log error "Container ID not found for \"$CONTAINER\". Not creating drop-in dependency"
++		return $OCF_ERR_GENERIC
++	fi
++
++	ocf_log info "Creating drop-in dependency for \"$CONTAINER\" ($cid)"
++	for scope in "libpod-$cid.scope.d" "libpod-conmon-$cid.scope.d"; do
++		if [ $rc -eq $OCF_SUCCESS ] && [ ! -d /run/systemd/transient/"$scope" ]; then
++			mkdir -p /run/systemd/transient/"$scope" && \
++			echo -e "[Unit]\nBefore=pacemaker.service" > /run/systemd/transient/"$scope"/dep.conf && \
++			chmod ago+r /run/systemd/transient/"$scope" /run/systemd/transient/"$scope"/dep.conf
++			rc=$?
++		fi
++	done
++
++	if [ $rc -ne $OCF_SUCCESS ]; then
++		ocf_log error "Could not create drop-in dependency for \"$CONTAINER\" ($cid)"
++	else
++		systemctl daemon-reload
++		rc=$?
++		if [ $rc -ne $OCF_SUCCESS ]; then
++			ocf_log error "Could not refresh service definition after creating drop-in for \"$CONTAINER\""
++		fi
++	fi
++
++	return $rc
++}
++
++
+ podman_start()
+ {
++	local cid
++	local rc
++
+ 	podman_create_mounts
+ 	local run_opts="-d --name=${CONTAINER}"
+ 	# check to see if the container has already started
+@@ -306,8 +365,17 @@ podman_start()
+ 		ocf_log info "running container $CONTAINER for the first time"
+ 		ocf_run podman run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd
+ 	fi
++	rc=$?
+ 
+-	if [ $? -ne 0 ]; then
++	# if the container was stopped or didn't exist before, systemd
++	# removed the libpod* scopes. So always try to recreate the drop-ins
++	if [ $rc -eq 0 ] && ocf_is_true "$OCF_RESKEY_drop_in_dependency"; then
++		cid=$(podman_container_id)
++		create_transient_drop_in_dependency "$cid"
++		rc=$?
++	fi
++
++	if [ $rc -ne 0 ]; then
+ 		ocf_exit_reason "podman failed to launch container"
+ 		return $OCF_ERR_GENERIC
+ 	fi
+@@ -353,6 +421,8 @@ podman_stop()
+ 	else
+ 		ocf_log debug "waiting $timeout second[s] before killing container"
+ 		ocf_run podman stop -t=$timeout $CONTAINER
++		# on stop, systemd will automatically delete any transient
++		# drop-in conf that has been created earlier
+ 	fi
+ 
+ 	if [ $? -ne 0 ]; then
+@@ -456,6 +526,16 @@ CONTAINER=$OCF_RESKEY_name
+ # exec command to be non-empty
+ : ${OCF_RESKEY_monitor_cmd:=/bin/true}
+ 
++# When OCF_RESKEY_drop_in_dependency is not populated, we
++# look at another file-based way of enabling the option.
++# Otherwise, consider it disabled.
++if [ -z "$OCF_RESKEY_drop_in_dependency" ]; then
++	if [ -f "/etc/sysconfig/podman_drop_in" ] || \
++	   [ -f "/etc/default/podman_drop_in" ]; then
++		OCF_RESKEY_drop_in_dependency=yes
++	fi
++fi
++
+ case $__OCF_ACTION in
+ meta-data) meta_data
+ 		exit $OCF_SUCCESS;;
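
What the agent writes boils down to two tiny transient drop-ins plus a daemon-reload; the same effect by hand (the container ID is the one from the commit message, purely illustrative):

    cid=c329819a1227ec548d678861994ef755b1fde9a244e1e4d966d17674df88ce7b
    for scope in "libpod-$cid.scope.d" "libpod-conmon-$cid.scope.d"; do
        mkdir -p "/run/systemd/transient/$scope"
        printf '[Unit]\nBefore=pacemaker.service\n' > "/run/systemd/transient/$scope/dep.conf"
    done
    systemctl daemon-reload

The behaviour can also be enabled host-wide, without touching the resource definition, by creating /etc/sysconfig/podman_drop_in (or /etc/default/podman_drop_in).
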
diff --git a/SOURCES/bz1746148-rabbitmq-cluster-1-monitor-mnesia-status.patch b/SOURCES/bz1746148-rabbitmq-cluster-1-monitor-mnesia-status.patch
new file mode 100644
index 0000000..fab8bfd
--- /dev/null
+++ b/SOURCES/bz1746148-rabbitmq-cluster-1-monitor-mnesia-status.patch
@@ -0,0 +1,57 @@
+From fcaa52bb98a8686d993550c6f4ab7867625c8059 Mon Sep 17 00:00:00 2001
+From: John Eckersberg <jeckersb@redhat.com>
+Date: Wed, 29 Aug 2018 16:18:55 -0400
+Subject: [PATCH] rabbitmq-cluster: get cluster status from mnesia during
+ monitor
+
+If mnesia is not running (for example if `rabbitmqctl stop_app` has
+been called, or the service has paused during partition due to the
+pause_minority strategy) then the cluster_status command to
+rabbitmqctl will read the cached cluster status from disk and the
+command returns 0 even though the service isn't really running at all.
+
+Instead, force the cluster status to be read from mnesia.  If mnesia
+is not running due to the above or similar circumstances, the command
+will catch that and properly fail the monitor action.
+
+Resolves: RHBZ#1595753
+---
+ heartbeat/rabbitmq-cluster | 20 +++++---------------
+ 1 file changed, 5 insertions(+), 15 deletions(-)
+
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index a7d2db614..204917475 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -181,26 +181,16 @@ remove_pid () {
+ rmq_monitor() {
+ 	local rc
+ 
+-	$RMQ_CTL cluster_status > /dev/null 2>&1
+-	rc=$?
+-	case "$rc" in
+-	0)
++	if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' | grep -q '^{ok'; then
+ 		ocf_log debug "RabbitMQ server is running normally"
+ 		rmq_write_nodename
+-		
++
+ 		return $OCF_SUCCESS
+-	;;
+-	2|68|69|70|75|78)
+-		ocf_log info "RabbitMQ server is not running"
++	else
++		ocf_log info "RabbitMQ server could not get cluster status from mnesia"
+ 		rmq_delete_nodename
+ 		return $OCF_NOT_RUNNING
+-	;;
+-	*)
+-		ocf_log err "Unexpected return code from '$RMQ_CTL cluster_status' exit code: $rc"
+-		rmq_delete_nodename
+-		return $OCF_ERR_GENERIC
+-	;;
+-	esac
++	fi
+ }
+ 
+ rmq_init_and_wait()
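
The new check is easy to reproduce from a shell; a sketch (the output shown is indicative only, the agent merely greps for a leading '{ok'):

    rabbitmqctl eval 'rabbit_mnesia:cluster_status_from_mnesia().'
    # mnesia running:  {ok,{...cluster status terms...}}
    # after 'rabbitmqctl stop_app': an {error,...} term, so the '^{ok' grep fails
    # and the monitor returns $OCF_NOT_RUNNING
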
diff --git a/SOURCES/bz1746148-rabbitmq-cluster-2-fail-when-in-minority-partition.patch b/SOURCES/bz1746148-rabbitmq-cluster-2-fail-when-in-minority-partition.patch
new file mode 100644
index 0000000..72f5ff6
--- /dev/null
+++ b/SOURCES/bz1746148-rabbitmq-cluster-2-fail-when-in-minority-partition.patch
@@ -0,0 +1,96 @@
+From cc23c5523a0185fa557a5ab9056d50a60300d12a Mon Sep 17 00:00:00 2001
+From: John Eckersberg <jeckersb@redhat.com>
+Date: Tue, 16 Oct 2018 16:21:25 -0400
+Subject: [PATCH] rabbitmq-cluster: fail monitor when node is in minority
+ partition
+
+It's possible for mnesia to still be running, but for mnesia to be
+partitioned.  And it's also possible to get into this state without
+pacemaker seeing the node go down so no corrective action is taken.
+
+When monitoring, check the number of nodes that pacemaker thinks is
+running, and compare to the number of nodes that mnesia thinks is
+running.  If mnesia only sees a minority of the total nodes, fail it
+so corrective action can be taken to rejoin the cluster.
+
+This also adds a new function, rmq_app_running, which simply checks
+whether the app is running or not and does not care about the
+partition status.  This is now used instead of the full monitor in a
+few places where we don't care about partition state.
+
+Resolves: RHBZ#1639826
+---
+ heartbeat/rabbitmq-cluster | 28 +++++++++++++++++++++++++---
+ 1 file changed, 25 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index 204917475..78b2bbadf 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -178,10 +178,31 @@ remove_pid () {
+ 	rm -f ${RMQ_PID_FILE} > /dev/null 2>&1
+ }
+ 
++rmq_app_running() {
++	if $RMQ_CTL eval 'application:which_applications().' | grep -q '{rabbit,'; then
++		ocf_log debug "RabbitMQ application is running"
++		return $OCF_SUCCESS
++	else
++		ocf_log debug "RabbitMQ application is stopped"
++		return $OCF_NOT_RUNNING
++	fi
++}
++
+ rmq_monitor() {
+ 	local rc
+ 
+ 	if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' | grep -q '^{ok'; then
++		pcs_running=$(rmq_join_list | wc -w)
++		ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running"
++		rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).')
++		ocf_log debug "RabbitMQ thinks ${rmq_running} RabbitMQ nodes are running"
++
++		if [ $(( $rmq_running * 2 )) -lt $pcs_running ]; then
++			ocf_log info "RabbitMQ is a minority partition, failing monitor"
++			rmq_delete_nodename
++			return $OCF_ERR_GENERIC
++		fi
++
+ 		ocf_log debug "RabbitMQ server is running normally"
+ 		rmq_write_nodename
+ 
+@@ -215,7 +236,7 @@ rmq_init_and_wait()
+ 		return $OCF_ERR_GENERIC
+ 	fi
+ 
+-	rmq_monitor
++	rmq_app_running
+ 	return $?
+ }
+ 
+@@ -236,6 +257,7 @@ rmq_start_first()
+ 	if [ $rc -eq 0 ]; then
+ 		rc=$OCF_SUCCESS
+ 		ocf_log info "cluster bootstrapped"
++		rmq_write_nodename
+ 
+ 		if [ -n "$OCF_RESKEY_set_policy" ]; then
+ 			# do not quote set_policy, we are passing in arguments
+@@ -492,7 +514,7 @@ rmq_stop() {
+ 		end.
+ 	"
+ 
+-	rmq_monitor
++	rmq_app_running
+ 	if [ $? -eq $OCF_NOT_RUNNING ]; then
+ 		return $OCF_SUCCESS
+ 	fi
+@@ -508,7 +530,7 @@ rmq_stop() {
+ 	#TODO add kill logic
+ 	stop_wait=1
+ 	while [ $stop_wait = 1 ]; do
+-		rmq_monitor
++		rmq_app_running
+ 		rc=$?
+ 		if [ "$rc" -eq $OCF_NOT_RUNNING ]; then
+ 			stop_wait=0
diff --git a/SOURCES/bz1746148-rabbitmq-cluster-3-fix-stop-regression.patch b/SOURCES/bz1746148-rabbitmq-cluster-3-fix-stop-regression.patch
new file mode 100644
index 0000000..8b422eb
--- /dev/null
+++ b/SOURCES/bz1746148-rabbitmq-cluster-3-fix-stop-regression.patch
@@ -0,0 +1,63 @@
+From 19ee29342f8bb573722991b8cbe4503309ad0bf9 Mon Sep 17 00:00:00 2001
+From: John Eckersberg <jeckersb@redhat.com>
+Date: Fri, 2 Nov 2018 13:12:53 -0400
+Subject: [PATCH] rabbitmq-cluster: fix regression in rmq_stop
+
+This regression was introduced in PR#1249 (cc23c55).  The stop action
+was modified to use rmq_app_running in order to check the service
+status, which allows for the following sequence of events:
+
+- service is started, unclustered
+- stop_app is called
+- cluster_join is attempted and fails
+- stop is called
+
+Because stop_app was called, rmq_app_running returns $OCF_NOT_RUNNING
+and the stop action is a no-op.  This means the erlang VM continues
+running.
+
+When the start action is attempted again, a new erlang VM is launched,
+but this VM fails to boot because the old one is still running and is
+registered with the same name (rabbit@nodename).
+
+This adds a new function, rmq_node_alive, which does a simple eval to
+test whether the erlang VM is up, independent of the rabbit app.  The
+stop action now uses rmq_node_alive to check the service status, so
+even if stop_app was previously called, the erlang VM will be stopped
+properly.
+
+Resolves: RHBZ#1639826
+---
+ heartbeat/rabbitmq-cluster | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index 78b2bbadf..a2de9dc20 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -188,6 +188,16 @@ rmq_app_running() {
+ 	fi
+ }
+ 
++rmq_node_alive() {
++	if $RMQ_CTL eval 'ok.'; then
++		ocf_log debug "RabbitMQ node is alive"
++		return $OCF_SUCCESS
++	else
++		ocf_log debug "RabbitMQ node is down"
++		return $OCF_NOT_RUNNING
++	fi
++}
++
+ rmq_monitor() {
+ 	local rc
+ 
+@@ -514,7 +524,7 @@ rmq_stop() {
+ 		end.
+ 	"
+ 
+-	rmq_app_running
++	rmq_node_alive
+ 	if [ $? -eq $OCF_NOT_RUNNING ]; then
+ 		return $OCF_SUCCESS
+ 	fi
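
After this fix the agent distinguishes three increasingly strict probes, all built on rabbitmqctl eval (commands as used in the RA, trimmed to one line each):

    rabbitmqctl eval 'ok.'                                                           # rmq_node_alive: erlang VM answers at all
    rabbitmqctl eval 'application:which_applications().' | grep -q '{rabbit,'        # rmq_app_running: rabbit app started
    rabbitmqctl eval 'rabbit_mnesia:cluster_status_from_mnesia().' | grep -q '^{ok'  # rmq_monitor: mnesia clustered and readable
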
diff --git a/SOURCES/bz1746148-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch b/SOURCES/bz1746148-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch
new file mode 100644
index 0000000..80fe18b
--- /dev/null
+++ b/SOURCES/bz1746148-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch
@@ -0,0 +1,83 @@
+From 63c9449bfa9a7fecbc0f00394699a475a384671d Mon Sep 17 00:00:00 2001
+From: Damien Ciabrini <dciabrin@redhat.com>
+Date: Thu, 9 Aug 2018 16:33:26 +0200
+Subject: [PATCH] rabbitmq-cluster: retry start when cluster join fails
+
+When a node tries to join an existing cluster, it fetches a node
+list and tries to connect to any of those running nodes.
+
+If the nodes from this list become unavailable while we're joining
+the cluster, the rabbitmq server will fail to get clustered and
+make the start operation fail.
+
+Give the resource a chance to start anyway by retrying the entire
+start sequence until it succeeds or until the start timeout is
+reached and pacemaker stops the start operation.
+
+Co-Authored-by: <michele@acksyn.org>
+Suggested-by: <abeekhof@redhat.com>
+---
+ heartbeat/rabbitmq-cluster | 29 ++++++++++++++++++++++++++---
+ 1 file changed, 26 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index 9ff49e075..84f383460 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -31,6 +31,12 @@
+ 
+ #######################################################################
+ 
++# This arbitrary value here is used by the rmq_start action to
++# signify that the resource agent must retry the start process
++# It might potentially conflict with OCF assigned error code
++# in the future.
++RMQ_TRY_RESTART_ERROR_CODE=126
++
+ RMQ_SERVER=/usr/sbin/rabbitmq-server
+ RMQ_CTL=/usr/sbin/rabbitmqctl
+ RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia"
+@@ -354,7 +360,7 @@ rmq_notify() {
+ 	return $OCF_SUCCESS
+ }
+ 
+-rmq_start() {
++rmq_try_start() {
+ 	local join_list=""
+ 	local rc
+ 
+@@ -384,8 +390,16 @@ rmq_start() {
+ 	rc=$?
+ 
+ 	if [ $rc -ne 0 ]; then
+-		ocf_log info "node failed to join even after reseting local data. Check SELINUX policy"
+-		return $OCF_ERR_GENERIC
++		# we could not join the rabbitmq cluster from any of the running nodes
++		# this might be due to a unexpected reset of those nodes. Give ourself
++		# a chance to start by retrying the entire start sequence.
++
++		ocf_log warn "Failed to join the RabbitMQ cluster from nodes ${join_list}. Stopping local unclustered rabbitmq"
++		rmq_stop
++
++		ocf_log warn "Re-detect available rabbitmq nodes and try to start again"
++		# return an unused OCF value to signify a "retry" condition
++		return $RMQ_TRY_RESTART_ERROR_CODE
+ 	fi
+ 
+ 	# Restore users, user permissions, and policies (if any)
+@@ -443,6 +457,15 @@ rmq_start() {
+ 	return $OCF_SUCCESS
+ }
+ 
++rmq_start() {
++	local rc=$RMQ_TRY_RESTART_ERROR_CODE
++	while [ $rc -eq $RMQ_TRY_RESTART_ERROR_CODE ]; do
++		rmq_try_start
++		rc=$?
++	done
++	return $rc
++}
++
+ rmq_stop() {
+ 	# Backup users, user permissions, and policies
+ 	BaseDataDir=`dirname $RMQ_DATA_DIR`
diff --git a/SOURCES/bz1746148-rabbitmq-cluster-5-ensure-node-attribures-removed.patch b/SOURCES/bz1746148-rabbitmq-cluster-5-ensure-node-attribures-removed.patch
new file mode 100644
index 0000000..0a25333
--- /dev/null
+++ b/SOURCES/bz1746148-rabbitmq-cluster-5-ensure-node-attribures-removed.patch
@@ -0,0 +1,42 @@
+From 8ed87936e9ad06318cc49ea767885a405dfde11e Mon Sep 17 00:00:00 2001
+From: John Eckersberg <jeckersb@redhat.com>
+Date: Wed, 5 Dec 2018 11:45:43 -0500
+Subject: [PATCH] rabbitmq-cluster: better ensure node attributes are removed
+
+Ensure that the attribute is removed at the end of the stop action.
+Also if rmq_app_running or rmq_node_alive shows the service as down,
+ensure the attribute is deleted as well.
+
+Resolves: RHBZ#1656368
+---
+ heartbeat/rabbitmq-cluster | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index 1643dd1e7..2dca3e216 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -184,6 +184,7 @@ rmq_app_running() {
+ 		return $OCF_SUCCESS
+ 	else
+ 		ocf_log debug "RabbitMQ application is stopped"
++		rmq_delete_nodename
+ 		return $OCF_NOT_RUNNING
+ 	fi
+ }
+@@ -194,6 +195,7 @@ rmq_node_alive() {
+ 		return $OCF_SUCCESS
+ 	else
+ 		ocf_log debug "RabbitMQ node is down"
++		rmq_delete_nodename
+ 		return $OCF_NOT_RUNNING
+ 	fi
+ }
+@@ -554,6 +556,7 @@ rmq_stop() {
+ 		sleep 1
+ 	done
+ 
++	rmq_delete_nodename
+ 	remove_pid
+ 	return $OCF_SUCCESS
+ }
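
The rmq_delete_nodename calls added above make sure the transient node
attribute the agent keeps in the CIB disappears whenever the application or
node is seen as down, not only on a clean stop. A hedged sketch of what such
a helper boils down to (the attribute name, lifetime and node-name lookup
below are illustrative; the agent's own definitions may differ):

    # Illustrative only: drop a per-node attribute from the cluster
    # attribute store so peers no longer treat this node as a member.
    NODENAME=$(crm_node -n)
    ATTR_COOKIE="rmq-node-attr-${OCF_RESOURCE_INSTANCE}"

    rmq_delete_nodename() {
        # -D removes the attribute; harmless if it was already gone
        crm_attribute -N "$NODENAME" -l reboot --name "$ATTR_COOKIE" -D
    }
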
diff --git a/SOURCES/bz1746148-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch b/SOURCES/bz1746148-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch
new file mode 100644
index 0000000..b39150a
--- /dev/null
+++ b/SOURCES/bz1746148-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch
@@ -0,0 +1,32 @@
+From 2b6e4a94c847129dd014a1efa733cd1b4a2448e6 Mon Sep 17 00:00:00 2001
+From: John Eckersberg <jeckersb@redhat.com>
+Date: Fri, 2 Nov 2018 10:11:41 -0400
+Subject: [PATCH] rabbitmq-cluster: debug log detailed output when mnesia query
+ fails
+
+---
+ heartbeat/rabbitmq-cluster | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index 78b2bbadf..fabfeedfb 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -191,7 +191,8 @@ rmq_app_running() {
+ rmq_monitor() {
+ 	local rc
+ 
+-	if $RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' | grep -q '^{ok'; then
++	status=$($RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1)
++	if echo "${status}" | grep -q '^{ok'; then
+ 		pcs_running=$(rmq_join_list | wc -w)
+ 		ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running"
+ 		rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).')
+@@ -209,6 +210,7 @@ rmq_monitor() {
+ 		return $OCF_SUCCESS
+ 	else
+ 		ocf_log info "RabbitMQ server could not get cluster status from mnesia"
++		ocf_log debug "${status}"
+ 		rmq_delete_nodename
+ 		return $OCF_NOT_RUNNING
+ 	fi
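
The change above follows a simple capture-then-log pattern: run the query
once, keep its combined stdout/stderr, branch on the success marker, and only
surface the full output at debug level when the check fails. Condensed sketch
of the same shape as the patched rmq_monitor(), with the surrounding cluster
logic omitted (the debug message texts here are illustrative):

    status=$($RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1)
    if echo "${status}" | grep -q '^{ok'; then
        ocf_log debug "mnesia reports a usable cluster status"
    else
        ocf_log info "RabbitMQ server could not get cluster status from mnesia"
        ocf_log debug "${status}"   # full rabbitmqctl output, debug level only
    fi
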
diff --git a/SOURCES/bz1746148-rabbitmq-cluster-7-suppress-additional-output.patch b/SOURCES/bz1746148-rabbitmq-cluster-7-suppress-additional-output.patch
new file mode 100644
index 0000000..8b58191
--- /dev/null
+++ b/SOURCES/bz1746148-rabbitmq-cluster-7-suppress-additional-output.patch
@@ -0,0 +1,87 @@
+From 5a33171b2c40e2e1587e82aad0cb7e39abcf615d Mon Sep 17 00:00:00 2001
+From: John Eckersberg <jeckersb@redhat.com>
+Date: Thu, 13 Dec 2018 12:58:43 -0500
+Subject: [PATCH] rabbitmq-cluster: always use quiet flag for eval calls
+
+On older rabbitmq versions, rabbitmqctl appends "...done." at the end
+of the output.  However, we expect the output of eval without this extra
+text so it can be used for further processing.  The -q option to
+rabbitmqctl suppresses the extra output, so ensure we always pass that
+option when calling eval.
+
+Resolves: RHBZ#1659072
+---
+ heartbeat/rabbitmq-cluster | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
+index 2dca3e216..e82ac2399 100755
+--- a/heartbeat/rabbitmq-cluster
++++ b/heartbeat/rabbitmq-cluster
+@@ -39,6 +39,7 @@ RMQ_TRY_RESTART_ERROR_CODE=126
+ 
+ RMQ_SERVER=/usr/sbin/rabbitmq-server
+ RMQ_CTL=/usr/sbin/rabbitmqctl
++RMQ_EVAL="${RMQ_CTL} eval -q"
+ RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia"
+ RMQ_PID_DIR="/var/run/rabbitmq"
+ RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid"
+@@ -179,7 +180,7 @@ remove_pid () {
+ }
+ 
+ rmq_app_running() {
+-	if $RMQ_CTL eval 'application:which_applications().' | grep -q '{rabbit,'; then
++	if $RMQ_EVAL 'application:which_applications().' | grep -q '{rabbit,'; then
+ 		ocf_log debug "RabbitMQ application is running"
+ 		return $OCF_SUCCESS
+ 	else
+@@ -190,7 +191,7 @@ rmq_app_running() {
+ }
+ 
+ rmq_node_alive() {
+-	if $RMQ_CTL eval 'ok.'; then
++	if $RMQ_EVAL 'ok.'; then
+ 		ocf_log debug "RabbitMQ node is alive"
+ 		return $OCF_SUCCESS
+ 	else
+@@ -203,11 +204,11 @@ rmq_node_alive() {
+ rmq_monitor() {
+ 	local rc
+ 
+-	status=$($RMQ_CTL eval 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1)
++	status=$($RMQ_EVAL 'rabbit_mnesia:cluster_status_from_mnesia().' 2>&1)
+ 	if echo "${status}" | grep -q '^{ok'; then
+ 		pcs_running=$(rmq_join_list | wc -w)
+ 		ocf_log debug "Pacemaker thinks ${pcs_running} RabbitMQ nodes are running"
+-		rmq_running=$($RMQ_CTL eval 'length(mnesia:system_info(running_db_nodes)).')
++		rmq_running=$($RMQ_EVAL 'length(mnesia:system_info(running_db_nodes)).')
+ 		ocf_log debug "RabbitMQ thinks ${rmq_running} RabbitMQ nodes are running"
+ 
+ 		if [ $(( $rmq_running * 2 )) -lt $pcs_running ]; then
+@@ -294,7 +295,7 @@ rmq_start_first()
+ 
+ rmq_is_clustered()
+ {
+-    $RMQ_CTL eval 'rabbit_mnesia:is_clustered().' | grep -q true
++    $RMQ_EVAL 'rabbit_mnesia:is_clustered().' | grep -q true
+ }
+ 
+ rmq_join_existing()
+@@ -432,7 +433,7 @@ rmq_try_start() {
+ 
+ 	# Restore users, user permissions, and policies (if any)
+ 	BaseDataDir=`dirname $RMQ_DATA_DIR`
+-	$RMQ_CTL eval "
++	$RMQ_EVAL "
+ 		%% Run only if Mnesia is ready.
+ 		lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso
+ 		begin
+@@ -497,7 +498,7 @@ rmq_start() {
+ rmq_stop() {
+ 	# Backup users, user permissions, and policies
+ 	BaseDataDir=`dirname $RMQ_DATA_DIR`
+-	$RMQ_CTL eval "
++	$RMQ_EVAL "
+ 		%% Run only if Mnesia is still available.
+ 		lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso
+ 		begin
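
The point of the RMQ_EVAL wrapper introduced above is that every caller gets
the -q (quiet) behaviour for free, so parsers such as grep never see the
"...done." trailer that older rabbitmqctl versions print. Roughly (the exact
extra output varies between releases, so the comments are illustrative):

    RMQ_CTL=/usr/sbin/rabbitmqctl
    RMQ_EVAL="${RMQ_CTL} eval -q"

    $RMQ_CTL eval 'rabbit_mnesia:is_clustered().'   # may print the result plus "...done."
    $RMQ_EVAL 'rabbit_mnesia:is_clustered().'       # prints only the result, safe for: | grep -q true
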
diff --git a/SOURCES/bz1746148-redis-mute-password-warning.patch b/SOURCES/bz1746148-redis-mute-password-warning.patch
new file mode 100644
index 0000000..b3b89e0
--- /dev/null
+++ b/SOURCES/bz1746148-redis-mute-password-warning.patch
@@ -0,0 +1,62 @@
+From 6303448af77d2ed64c7436a84b30cf7fa4941e19 Mon Sep 17 00:00:00 2001
+From: Michele Baldessari <michele@acksyn.org>
+Date: Wed, 30 Jan 2019 21:36:17 +0100
+Subject: [PATCH] redis: Filter warning from stderr when calling 'redis-cli -a'
+
+In some versions of redis (starting with 4.0.10) we have commits [1] and
+[2], which add a warning on stderr that is printed out every single
+time a monitor operation takes place:
+
+  foo pacemaker-remoted[57563]:  notice: redis_monitor_20000:1930:stderr
+  [ Warning: Using a password with '-a' option on the command line interface may not be safe. ]
+
+Later on, commit [3] (merged in 5.0rc4) added the option
+'--no-auth-warning' to disable said warning, since it broke a bunch of
+scripts [4]. I tried running the command twice (first with
+--no-auth-warning and then without it in case of errors), but it is
+impossible to distinguish between an error due to the missing parameter
+and other errors.
+
+So instead of inspecting the version of the redis-cli tool and doing the following:
+- >= 5.0.0: use --no-auth-warning all the time
+- >= 4.0.10 & < 5.0.0: filter the problematic line from stderr only
+- else: do it like before
+
+we simply filter the 'Using a password' message out of stderr
+unconditionally, while making sure we keep stdout just the same.
+
+Tested on a redis 4.0.10 cluster and confirmed that it is working as
+intended.
+
+All this horror and pain is due to the fact that redis does not support
+any other means to pass a password (we could in theory first connect to
+the server and then issue an AUTH command, but that seems even more
+complex and error prone). See [5] for more info (or [6] for extra fun)
+
+[1] https://github.com/antirez/redis/commit/c082221aefbb2a472c7193dbdbb90900256ce1a2
+[2] https://github.com/antirez/redis/commit/ef931ef93e909b4f504e8c6fbed350ed70c1c67c
+[3] https://github.com/antirez/redis/commit/a4ef94d2f71a32f73ce4ebf154580307a144b48f
+[4] https://github.com/antirez/redis/issues/5073
+[5] https://github.com/antirez/redis/issues/3483
+[6] https://github.com/antirez/redis/pull/2413
+
+Signed-off-by: Michele Baldessari <michele@acksyn.org>
+---
+ heartbeat/redis.in | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/redis.in b/heartbeat/redis.in
+index 1dff067e9..e257bcc5e 100644
+--- a/heartbeat/redis.in
++++ b/heartbeat/redis.in
+@@ -302,7 +302,9 @@ set_score()
+ redis_client() {
+ 	ocf_log debug "redis_client: '$REDIS_CLIENT' -s '$REDIS_SOCKET' $*"
+ 	if [ -n "$clientpasswd" ]; then
+-		"$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" | sed 's/\r//'
++		# Starting with 4.0.10 there is a warning on stderr when using a password
++		# Once we stop supporting versions < 5.0.0 we can add --no-auth-warning here
++		("$REDIS_CLIENT" -s "$REDIS_SOCKET" -a "$clientpasswd" "$@" 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&-) 3>&1 | sed 's/\r//'
+ 	else
+ 		"$REDIS_CLIENT" -s "$REDIS_SOCKET" "$@" | sed 's/\r//'
+ 	fi
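
The redirection dance in the patched redis_client() is worth unpacking:
inside the subshell, 2>&1 >&3 swaps the roles of stdout and stderr, so the
pipe feeds stderr through grep -v (which sends the surviving lines back to
fd 2), while the real stdout travels over fd 3 and is turned back into stdout
by the outer 3>&1 for the usual sed post-processing. A standalone sketch with
a dummy command in place of redis-cli:

    #!/bin/sh
    # noisy stands in for "redis-cli -a <password>": answer on stdout,
    # unwanted warning on stderr.
    noisy() {
        printf 'PONG\r\n'
        echo "Warning: Using a password with '-a' option ..." >&2
    }

    # stderr is filtered by grep -v and returned to fd 2; stdout detours via
    # fd 3 and is post-processed by sed exactly like the unfiltered branch.
    (noisy 2>&1 >&3 3>&- | grep -v "Using a password" >&2 3>&-) 3>&1 | sed 's/\r//'
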
diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec
index e8ed8c7..396a054 100644
--- a/SPECS/resource-agents.spec
+++ b/SPECS/resource-agents.spec
@@ -72,7 +72,7 @@
 Name:		resource-agents
 Summary:	Open Source HA Reusable Cluster Resource Scripts
 Version:	4.1.1
-Release:	17%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.3
+Release:	17%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.6
 License:	GPLv2+ and LGPLv2+
 URL:		https://github.com/ClusterLabs/resource-agents
 %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
@@ -133,6 +133,23 @@ Patch38:	bz1710061-aws-vpc-move-ip-avoid-possible-race-condition.patch
 Patch39:	bz1714104-aws-vpc-move-ip-1-multi-route-table-support.patch
 Patch40:	bz1714104-aws-vpc-move-ip-2-fix-route-update-multi-NICs.patch
 Patch41:	bz1710058-Squid-2-dont-run-pgrep-without-PID.patch
+Patch42:	bz1734062-podman-1-avoid-double-inspect-call.patch
+Patch43:	bz1734062-podman-2-improve-monitor-action.patch
+Patch44:	bz1734062-podman-3-remove-docker-remnant.patch
+Patch45:	bz1734062-podman-4-use-exec-to-avoid-performance-issues.patch
+Patch46:	bz1734067-CTDB-1-explicitly-use-bash-shell.patch
+Patch47:	bz1734067-CTDB-2-add-ctdb_max_open_files-parameter.patch
+Patch48:	bz1734067-CTDB-3-fixes.patch
+Patch49:	bz1734067-CTDB-4-add-v4.9-support.patch
+Patch50:	bz1738303-podman-drop-in-support.patch
+Patch51:	bz1746148-redis-mute-password-warning.patch
+Patch52:	bz1746148-rabbitmq-cluster-1-monitor-mnesia-status.patch
+Patch53:	bz1746148-rabbitmq-cluster-2-fail-when-in-minority-partition.patch
+Patch54:	bz1746148-rabbitmq-cluster-3-fix-stop-regression.patch
+Patch55:	bz1746148-rabbitmq-cluster-4-retry-start-cluster-join-fails.patch
+Patch56:	bz1746148-rabbitmq-cluster-5-ensure-node-attribures-removed.patch
+Patch57:	bz1746148-rabbitmq-cluster-6-debug-log-mnesia-query-fails.patch
+Patch58:	bz1746148-rabbitmq-cluster-7-suppress-additional-output.patch
 # bundle patches
 Patch1000:	7-gcp-bundled.patch
 Patch1001:	8-google-cloud-sdk-fixes.patch
@@ -358,6 +375,23 @@ exit 1
 %patch39 -p1
 %patch40 -p1
 %patch41 -p1
+%patch42 -p1
+%patch43 -p1
+%patch44 -p1
+%patch45 -p1
+%patch46 -p1
+%patch47 -p1
+%patch48 -p1
+%patch49 -p1 -F1
+%patch50 -p1 -F2
+%patch51 -p1
+%patch52 -p1
+%patch53 -p1
+%patch54 -p1
+%patch55 -p1
+%patch56 -p1
+%patch57 -p1
+%patch58 -p1
 
 chmod 755 heartbeat/nova-compute-wait
 chmod 755 heartbeat/NovaEvacuate
@@ -925,6 +959,26 @@ ccs_update_schema > /dev/null 2>&1 ||:
 %endif
 
 %changelog
+* Wed Aug 28 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-17.6
+- rabbitmq-cluster: fail monitor when node is in minority partition,
+  fix stop regression, retry start when cluster join fails, ensure
+  node attributes are removed
+
+  Resolves: rhbz#1746148
+
+* Wed Aug  7 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-17.5
+- podman: add drop-in dependency support
+
+  Resolves: rhbz#1738303
+
+* Tue Jul 30 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-17.4
+- podman: fixes to avoid bundle resources restarting when probing
+  takes too long
+- CTDB: add support for v4.9+
+
+  Resolves: rhbz#1734062
+  Resolves: rhbz#1734067
+
 * Wed May 29 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-17.3
 - Squid: fix PID file issue