diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5b2683b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+SOURCES/ClusterLabs-resource-agents-e76b7d3a.tar.gz
diff --git a/.resource-agents.metadata b/.resource-agents.metadata
new file mode 100644
index 0000000..802a19c
--- /dev/null
+++ b/.resource-agents.metadata
@@ -0,0 +1 @@
+b1c1e65d690d94e080319217486f8dcd248b2a2a SOURCES/ClusterLabs-resource-agents-e76b7d3a.tar.gz
diff --git a/SOURCES/bz1952005-pgsqlms-new-ra.patch b/SOURCES/bz1952005-pgsqlms-new-ra.patch
new file mode 100644
index 0000000..b3b314e
--- /dev/null
+++ b/SOURCES/bz1952005-pgsqlms-new-ra.patch
@@ -0,0 +1,3338 @@
+diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
+--- a/doc/man/Makefile.am	2021-04-12 12:51:56.831835953 +0200
++++ b/doc/man/Makefile.am	2021-04-13 13:38:14.198361848 +0200
+@@ -154,6 +154,7 @@
+                           ocf_heartbeat_ovsmonitor.7 \
+                           ocf_heartbeat_pgagent.7 \
+                           ocf_heartbeat_pgsql.7 \
++                          ocf_heartbeat_pgsqlms.7 \
+                           ocf_heartbeat_pingd.7 \
+                           ocf_heartbeat_podman.7 \
+                           ocf_heartbeat_portblock.7 \
+diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+--- a/heartbeat/Makefile.am	2021-04-12 12:51:56.831835953 +0200
++++ b/heartbeat/Makefile.am	2021-04-13 13:37:45.741292178 +0200
+@@ -149,6 +149,7 @@
+ 			ovsmonitor		\
+ 			pgagent			\
+ 			pgsql			\
++			pgsqlms			\
+ 			pingd			\
+ 			podman			\
+ 			portblock		\
+@@ -209,7 +210,10 @@
+ 			  mysql-common.sh	\
+ 			  nfsserver-redhat.sh	\
+ 			  findif.sh		\
+-			  ocf.py
++			  ocf.py		\
++			  OCF_Directories.pm	\
++			  OCF_Functions.pm	\
++			  OCF_ReturnCodes.pm
+ 
+ # Legacy locations
+ hbdir			= $(sysconfdir)/ha.d
+diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm
+--- a/heartbeat/OCF_Directories.pm	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/OCF_Directories.pm	2021-04-13 13:37:35.621267404 +0200
+@@ -0,0 +1,139 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++OCF_Directories - Binaries and binary options for use in Resource Agents
++
++=head1 SYNOPSIS
++
++  use FindBin;
++  use lib "$FindBin::RealBin/../../lib/heartbeat/";
++  
++  use OCF_Directories;
++
++=head1 DESCRIPTION
++
++This module has been ported from the ocf-directories shell script of the
++resource-agents project. See L<https://github.com/ClusterLabs/resource-agents/>.
++
++=head1 VARIABLES
++
++Here are the variables exported by this module:
++
++=over
++
++=item $INITDIR
++
++=item $HA_DIR
++
++=item $HA_RCDIR
++
++=item $HA_CONFDIR
++
++=item $HA_CF
++
++=item $HA_VARLIB
++
++=item $HA_RSCTMP
++
++=item $HA_RSCTMP_OLD
++
++=item $HA_FIFO
++
++=item $HA_BIN
++
++=item $HA_SBIN_DIR
++
++=item $HA_DATEFMT
++
++=item $HA_DEBUGLOG
++
++=item $HA_RESOURCEDIR
++
++=item $HA_DOCDIR
++
++=item $__SCRIPT_NAME
++
++=item $HA_VARRUN
++
++=item $HA_VARLOCK
++
++=item $ocf_prefix
++
++=item $ocf_exec_prefix
++
++=back
++
++=cut
++
++package OCF_Directories;
++
++use strict;
++use warnings;
++use 5.008;
++use File::Basename;
++
++BEGIN {
++    use Exporter;
++
++
++    our $VERSION   = 'v2.3.0';
++    our @ISA       = ('Exporter');
++    our @EXPORT    = qw(
++        $INITDIR
++        $HA_DIR
++        $HA_RCDIR
++        $HA_CONFDIR
++        $HA_CF
++        $HA_VARLIB
++        $HA_RSCTMP
++        $HA_RSCTMP_OLD
++        $HA_FIFO
++        $HA_BIN
++        $HA_SBIN_DIR
++        $HA_DATEFMT
++        $HA_DEBUGLOG
++        $HA_RESOURCEDIR
++        $HA_DOCDIR
++        $__SCRIPT_NAME
++        $HA_VARRUN
++        $HA_VARLOCK
++        $ocf_prefix
++        $ocf_exec_prefix
++    );
++    our @EXPORT_OK = ( @EXPORT );
++}
++
++our $INITDIR         = ( $ENV{'INITDIR'}       || '/etc/init.d' );
++our $HA_DIR          = ( $ENV{'HA_DIR'}        || '/etc/ha.d' );
++our $HA_RCDIR        = ( $ENV{'HA_RCDIR'}      || '/etc/ha.d/rc.d' );
++our $HA_CONFDIR      = ( $ENV{'HA_CONFDIR'}    || '/etc/ha.d/conf' );
++our $HA_CF           = ( $ENV{'HA_CF'}         || '/etc/ha.d/ha.cf' );
++our $HA_VARLIB       = ( $ENV{'HA_VARLIB'}     || '/var/lib/heartbeat' );
++our $HA_RSCTMP       = ( $ENV{'HA_RSCTMP'}     || '/run/resource-agents' );
++our $HA_RSCTMP_OLD   = ( $ENV{'HA_RSCTMP_OLD'} || '/var/run/heartbeat/rsctmp' );
++our $HA_FIFO         = ( $ENV{'HA_FIFO'}       || '/var/lib/heartbeat/fifo' );
++our $HA_BIN          = ( $ENV{'HA_BIN'}        || '/usr/libexec/heartbeat' );
++our $HA_SBIN_DIR     = ( $ENV{'HA_SBIN_DIR'}   || '/usr/sbin' );
++our $HA_DATEFMT      = ( $ENV{'HA_DATEFMT'}    || '%b %d %T ' );
++our $HA_DEBUGLOG     = ( $ENV{'HA_DEBUGLOG'}   || '/dev/null' );
++our $HA_RESOURCEDIR  = ( $ENV{'HA_RESOURCEDIR'}|| '/etc/ha.d/resource.d' );
++our $HA_DOCDIR       = ( $ENV{'HA_DOCDIR'}     || '/usr/share/doc/heartbeat' );
++our $__SCRIPT_NAME   = ( $ENV{'__SCRIPT_NAME'} || fileparse($0) );
++our $HA_VARRUN       = ( $ENV{'HA_VARRUN'}     || '/var/run' );
++our $HA_VARLOCK      = ( $ENV{'HA_VARLOCK'}    || '/var/lock/subsys' );
++our $ocf_prefix      = '/usr';
++our $ocf_exec_prefix = '/usr';
++
++1;
++
++=head1 COPYRIGHT AND LICENSE
++
++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++Licensed under the PostgreSQL License.
++
+diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm
+--- a/heartbeat/OCF_Functions.pm	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/OCF_Functions.pm	2021-04-13 13:37:35.621267404 +0200
+@@ -0,0 +1,631 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++OCF_Functions - helper subroutines for OCF agent
++
++=head1 SYNOPSIS
++
++  use FindBin;
++  use lib "$FindBin::RealBin/../../lib/heartbeat/";
++  
++  use OCF_Functions;
++
++=head1 DESCRIPTION
++
++This module has been ported from the ocf-shellfuncs shell script of the
++resource-agents project. See L<https://github.com/ClusterLabs/resource-agents/>.
++
++=head1 VARIABLE
++
++The only variable exported by this module is C<__OCF_ACTION>.
++
++=head1 SUBROUTINES
++
++Here are the subroutines ported from ocf-shellfuncs and exported by this module:
++
++=over
++
++=item ha_debug
++
++=item ha_log
++
++=item hadate
++
++=item ocf_is_clone
++
++=item ocf_is_ms
++
++=item ocf_is_probe
++
++=item ocf_is_root
++
++=item ocf_is_true
++
++=item ocf_is_ver
++
++=item ocf_local_nodename
++
++=item ocf_log
++
++=item ocf_exit_reason
++
++=item ocf_maybe_random
++
++=item ocf_ver2num
++
++=item ocf_ver_complete_level
++
++=item ocf_ver_level
++
++=item ocf_version_cmp
++
++=item set_logtag
++
++=back
++
++Here are the subroutines only existing in the perl module but not in the
++ocf-shellfuncs script:
++
++=over
++
++=item ocf_notify_env
++
++=back
++
++=cut
++
++package OCF_Functions;
++
++use strict;
++use warnings;
++use 5.008;
++use POSIX qw( strftime setlocale LC_ALL );
++use English;
++
++use FindBin;
++use lib "$FindBin::RealBin/../../lib/heartbeat/";
++
++use OCF_ReturnCodes;
++use OCF_Directories;
++
++BEGIN {
++    use Exporter;
++
++    our $VERSION   = 'v2.3.0';
++    our @ISA       = ('Exporter');
++    our @EXPORT    = qw(
++        $__OCF_ACTION
++        ocf_is_root
++        ocf_maybe_random
++        ocf_is_true
++        hadate
++        set_logtag
++        ha_log
++        ha_debug
++        ocf_log
++        ocf_exit_reason
++        ocf_is_probe
++        ocf_is_clone
++        ocf_is_ms
++        ocf_is_ver
++        ocf_ver2num
++        ocf_ver_level
++        ocf_ver_complete_level
++        ocf_version_cmp
++        ocf_local_nodename
++        ocf_notify_env
++    );
++    our @EXPORT_OK = ( @EXPORT );
++}
++
++our $__OCF_ACTION;
++
++sub ocf_is_root {
++    return $EUID == 0;
++}
++
++sub ocf_maybe_random {
++    return int( rand( 32767 ) );
++}
++
++sub ocf_is_true {
++    my $v = shift;
++    return ( defined $v and $v =~ /^(?:yes|true|1|YES|TRUE|ja|on|ON)$/ );
++}
++
++sub hadate {
++  return strftime( $HA_DATEFMT, localtime );
++}
++
++sub set_logtag {
++
++    return if defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '';
++
++    if ( defined $ENV{'OCF_RESOURCE_INSTANCE'} and $ENV{'OCF_RESOURCE_INSTANCE'} ne '' ) {
++        $ENV{'HA_LOGTAG'} = "$__SCRIPT_NAME($ENV{'OCF_RESOURCE_INSTANCE'})[$PID]";
++    }
++    else {
++        $ENV{'HA_LOGTAG'}="${__SCRIPT_NAME}[$PID]";
++    }
++}
++
++sub __ha_log {
++    my $ignore_stderr = 0;
++    my $loglevel      = '';
++
++    if ( $_[0] eq '--ignore-stderr' ) {
++        $ignore_stderr = 1;
++        shift;
++    }
++
++    $ENV{'HA_LOGFACILITY'} = '' if not defined $ENV{'HA_LOGFACILITY'}
++        or $ENV{'HA_LOGFACILITY'} eq 'none';
++
++    # if we're connected to a tty, then output to stderr
++    if ( -t STDERR ) {
++        # FIXME
++        # T.N.: this was ported with the bug on $loglevel being empty
++        # and never set before the test here...
++        if ( defined $ENV{'HA_debug'}
++             and $ENV{'HA_debug'} == 0
++             and $loglevel eq 'debug'
++        ) {
++            return 0;
++        }
++        elsif ( $ignore_stderr ) {
++            # something already printed this error to stderr, so ignore
++            return 0;
++        }
++        if ( defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '' ) {
++            printf STDERR "%s: %s\n", $ENV{'HA_LOGTAG'}, join ' ', @ARG;
++        }
++        else {
++            printf STDERR "%s\n", join ' ', @ARG;
++        }
++        return 0;
++    }
++
++    set_logtag();
++
++    if ( defined $ENV{'HA_LOGD'} and $ENV{'HA_LOGD'} eq 'yes' ) {
++        system 'ha_logger', '-t', $ENV{'HA_LOGTAG'}, @ARG;
++        return 0 if ( $? >> 8 ) == 0;
++    }
++
++    unless ( $ENV{'HA_LOGFACILITY'} eq '' ) {
++        # logging through syslog
++        # loglevel is unknown, use 'notice' for now
++        $loglevel = 'notice';
++        for ( "@ARG" ) {
++            if ( /ERROR/ ) {
++                $loglevel = 'err';
++            }
++            elsif ( /WARN/ ) {
++                $loglevel = 'warning';
++            }
++            elsif (/INFO|info/ ) {
++                $loglevel = 'info';
++            }
++        }
++
++        system 'logger', '-t', $ENV{'HA_LOGTAG'}, '-p',
++            "$ENV{'HA_LOGFACILITY'}.$loglevel", @ARG;
++    }
++
++    if ( defined $ENV{'HA_LOGFILE'} and $ENV{'HA_LOGFILE'} ne '' ) {
++        # appending to $HA_LOGFILE
++        open my $logfile, '>>', $ENV{'HA_LOGFILE'};
++        printf $logfile "%s:	%s %s\n", $ENV{'HA_LOGTAG'}, hadate(),
++            join (' ', @ARG);
++        close $logfile;
++    }
++
++    # appending to stderr
++    printf STDERR "%s %s\n", hadate(), join ' ', @ARG
++        if (not defined $ENV{'HA_LOGFACILITY'} or $ENV{'HA_LOGFACILITY'} eq '')
++            and (not defined $ENV{'HA_LOGFILE'} or $ENV{'HA_LOGFILE'} eq '' )
++            and not $ignore_stderr;
++
++    if ( defined $ENV{'HA_DEBUGLOG'} and $ENV{'HA_DEBUGLOG'} ne ''
++        and $ENV{'HA_LOGFILE'} ne $ENV{'HA_DEBUGLOG'}
++    ) {
++        # appending to $HA_DEBUGLOG
++        open my $logfile, '>>', $ENV{'HA_DEBUGLOG'};
++        printf $logfile "%s:	%s %s\n", $ENV{'HA_LOGTAG'}, hadate(),
++            join (' ', @ARG);
++        close $logfile;
++    }
++}
++
++sub ha_log {
++    return __ha_log( @ARG );
++}
++
++sub ha_debug {
++
++    return 0 if defined $ENV{'HA_debug'} and $ENV{'HA_debug'} == 0;
++
++    if ( -t STDERR ) {
++        if ( defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '' ) {
++            printf STDERR "%s: %s\n", $ENV{'HA_LOGTAG'}, join ' ', @ARG;
++        }
++        else {
++            printf STDERR "%s\n", join ' ', @ARG;
++        }
++        
++        return 0;
++    }
++
++    set_logtag();
++
++    if ( defined $ENV{'HA_LOGD'} and $ENV{'HA_LOGD'} eq 'yes' ) {
++        system 'ha_logger', '-t', $ENV{'HA_LOGTAG'}, '-D', 'ha-debug', @ARG;
++        return 0 if ( $? >> 8 ) == 0;
++    }
++
++    $ENV{'HA_LOGFACILITY'} = '' if not defined $ENV{'HA_LOGFACILITY'}
++        or $ENV{'HA_LOGFACILITY'} eq 'none';
++
++    unless ( $ENV{'HA_LOGFACILITY'} eq '' ) {
++        # logging through syslog
++
++        system 'logger', '-t', $ENV{'HA_LOGTAG'}, '-p',
++            "$ENV{'HA_LOGFACILITY'}.debug", @ARG;
++    }
++
++    if ( defined $ENV{'HA_DEBUGLOG'} and -f $ENV{'HA_DEBUGLOG'} ) {
++        my $logfile;
++        # appending to $HA_DEBUGLOG
++        open $logfile, '>>', $ENV{'HA_DEBUGLOG'};
++        printf $logfile "%s:	%s %s\n", $ENV{'HA_LOGTAG'}, hadate(),
++            join (' ', @ARG);
++        close $logfile;
++    }
++
++    # appending to stderr
++    printf STDERR "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), join ' ', @ARG
++        if (not defined $ENV{'HA_LOGFACILITY'} or $ENV{'HA_LOGFACILITY'} eq '')
++            and (not defined $ENV{'HA_DEBUGLOG'} or $ENV{'HA_DEBUGLOG'} eq '' );
++}
++
++#
++# ocf_log: log messages from the resource agent
++# This function is slightly different from its equivalent in ocf-shellfuncs.in
++# as it behaves like printf.
++# Arguments:
++#   * __OCF_PRIO: log level
++#   * __OCF_MSG:  printf-like format string
++#   * all other arguments are values for the printf-like format string
++#
++sub ocf_log {
++    my $__OCF_PRIO;
++    my $__OCF_MSG;
++
++    # TODO: Revisit and implement internally.
++    if ( scalar @ARG < 2 ) {
++        ocf_log ( 'err', "Not enough arguments [%d] to ocf_log", scalar @ARG );
++    }
++
++    $__OCF_PRIO = shift;
++    $__OCF_MSG  = shift;
++    $__OCF_MSG  = sprintf $__OCF_MSG, @ARG;
++
++    for ( $__OCF_PRIO ) {
++        if    ( /crit/  ) { $__OCF_PRIO = 'CRIT'    }
++        elsif ( /err/   ) { $__OCF_PRIO = 'ERROR'   }
++        elsif ( /warn/  ) { $__OCF_PRIO = 'WARNING' }
++        elsif ( /info/  ) { $__OCF_PRIO = 'INFO'    }
++        elsif ( /debug/ ) { $__OCF_PRIO = 'DEBUG'   }
++        else  { $__OCF_PRIO =~ tr/[a-z]/[A-Z]/ }
++    }
++
++    if ( $__OCF_PRIO eq 'DEBUG' ) {
++        ha_debug( "$__OCF_PRIO: $__OCF_MSG");
++    }
++    else {
++        ha_log( "$__OCF_PRIO: $__OCF_MSG");
++    }
++}
++
++
++#
++# ocf_exit_reason: print exit error string to stderr and log
++# Usage:           Allows the OCF script to provide a string
++#                  describing why the exit code was returned.
++# Arguments:       reason - required, The string that represents
+#                  why the error occurred.
++#
++sub ocf_exit_reason {
++    my $cookie = $ENV{'OCF_EXIT_REASON_PREFIX'} || 'ocf-exit-reason:';
++    my $fmt;
++    my $msg;
++
++    # No argument is likely not intentional.
++    # Just one argument implies a printf format string of just "%s".
++    # "Least surprise" in case some interpolated string from variable
++    # expansion or other contains a percent sign.
++    # More than one argument: first argument is going to be the format string.
++    ocf_log ( 'err', 'Not enough arguments [%d] to ocf_exit_reason',
++        scalar @ARG ) if scalar @ARG < 1;
++
++    $fmt = shift;
++    $msg = sprintf $fmt, @ARG;
++
++    print STDERR "$cookie$msg\n";
++    __ha_log( '--ignore-stderr', "ERROR: $msg" );
++}
++
++# returns true if the CRM is currently running a probe. A probe is
++# defined as a monitor operation with a monitoring interval of zero.
++sub ocf_is_probe {
++    return ( $__OCF_ACTION eq 'monitor'
++        and $ENV{'OCF_RESKEY_CRM_meta_interval'} == 0 );
++}
++
++# returns true if the resource is configured as a clone. This is
++# defined as a resource where the clone-max meta attribute is present,
++# and set to greater than zero.
++sub ocf_is_clone {
++    return ( defined $ENV{'OCF_RESKEY_CRM_meta_clone_max'}
++        and $ENV{'OCF_RESKEY_CRM_meta_clone_max'} > 0 );
++}
++
++# returns true if the resource is configured as a multistate
++# (master/slave) resource. This is defined as a resource where the
++# master-max meta attribute is present, and set to greater than zero.
++sub ocf_is_ms {
++    return ( defined $ENV{'OCF_RESKEY_CRM_meta_master_max'}
++        and  $ENV{'OCF_RESKEY_CRM_meta_master_max'} > 0 );
++}
++
++# version check functions
++# allow . and - to delimit version numbers
++# max version number is 999
++# letters and such are effectively ignored
++#
++sub ocf_is_ver {
++    return $ARG[0] =~ /^[0-9][0-9.-]*[0-9]$/;
++}
++
++sub ocf_ver2num {
++    my $v = 0;
++    
++    $v = $v * 1000 + $1 while $ARG[0] =~ /(\d+)/g;
++
++    return $v;
++}
++
++sub ocf_ver_level {
++    my $v = () = $ARG[0] =~ /(\d+)/g;
++    return $v;
++}
++
++sub ocf_ver_complete_level {
++    my $ver   = shift;
++    my $level = shift;
++    my $i     = 0;
++
++    for ( my $i = 0; $i < $level; $i++ ) {
++        $ver .= '.0';
++    }
++
++    return $ver;
++}
++
++# usage: ocf_version_cmp VER1 VER2
++#     version strings can contain digits, dots, and dashes
++#     must start and end with a digit
++# returns:
++#     0: VER1 smaller (older) than VER2
++#     1: versions equal
++#     2: VER1 greater (newer) than VER2
++#     3: bad format
++sub ocf_version_cmp {
++    my $v1 = shift;
++    my $v2 = shift;
++    my $v1_level;
++    my $v2_level;
++    my $level_diff;
++    
++    return 3 unless ocf_is_ver( $v1 );
++    return 3 unless ocf_is_ver( $v2 );
++
++    $v1_level = ocf_ver_level( $v1 );
++    $v2_level = ocf_ver_level( $v2 );
++
++    if ( $v1_level < $v2_level ) {
++        $level_diff = $v2_level - $v1_level;
++        $v1 = ocf_ver_complete_level( $v1, $level_diff );
++    }
++    elsif ( $v1_level > $v2_level ) {
++        $level_diff = $v1_level - $v2_level;
++        $v2 = ocf_ver_complete_level( $v2, $level_diff );
++    }
++
++    $v1 = ocf_ver2num( $v1 );
++    $v2 = ocf_ver2num( $v2 );
++
++    if    ( $v1 == $v2 ) { return 1; }
++    elsif ( $v1 < $v2  ) { return 0; }
++
++    return 2; # -1 would look funny in shell ;-) ( T.N. not in perl ;) )
++}
++
++sub ocf_local_nodename {
++    # use crm_node -n for pacemaker > 1.1.8
++    my $nodename;
++
++    qx{ which pacemakerd > /dev/null 2>&1 };
++    if ( $? == 0 ) {
++        my $version;
++        my $ret = qx{ pacemakerd -\$ };
++
++        $ret =~ /Pacemaker ([\d.]+)/;
++        $version = $1;
++
++        if ( ocf_version_cmp( $version, '1.1.8' ) == 2 ) {
++            qx{ which crm_node > /dev/null 2>&1 };
++            $nodename = qx{ crm_node -n } if $? == 0;
++        }
++    }
++    else {
++        # otherwise use uname -n
++        $nodename = qx { uname -n };
++    }
++
++    chomp $nodename;
++    return $nodename;
++}
++
++# Parse and returns the notify environment variables in a convenient structure
++# Returns undef if the action is not a notify
++# Returns undef if the resource is neither a clone or a multistate one
++sub ocf_notify_env {
++    my $i;
++    my %notify_env;
++
++    return undef unless $__OCF_ACTION eq 'notify';
++
++    return undef unless ocf_is_clone() or ocf_is_ms();
++
++    %notify_env = (
++        'type'       => $ENV{'OCF_RESKEY_CRM_meta_notify_type'}      || '',
++        'operation'  => $ENV{'OCF_RESKEY_CRM_meta_notify_operation'} || '',
++        'active'     => [ ],
++        'inactive'   => [ ],
++        'start'      => [ ],
++        'stop'       => [ ],
++    );
++
++    for my $action ( qw{ active start stop } ) {
++        next unless
++                defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"}
++            and defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++
++        $i = 0;
++        $notify_env{ $action }[$i++]{'rsc'} = $_ foreach split /\s+/ =>
++            $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"};
++
++        $i = 0;
++        $notify_env{ $action }[$i++]{'uname'} = $_ foreach split /\s+/ =>
++            $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++    }
++
++    # notify_inactive_uname doesn't exist. See:
++    # http://lists.clusterlabs.org/pipermail/developers/2017-January/000406.html
++    if ( defined $ENV{"OCF_RESKEY_CRM_meta_notify_inactive_resource"} ) {
++        $i = 0;
++        $notify_env{'inactive'}[$i++]{'rsc'} = $_ foreach split /\s+/ =>
++            $ENV{"OCF_RESKEY_CRM_meta_notify_inactive_resource"};
++    }
++
++    # exit if the resource is not a multistate one
++    return %notify_env unless ocf_is_ms();
++
++    for my $action ( qw{ master slave promote demote } ) {
++        $notify_env{ $action } = [ ];
++
++        next unless
++                defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"}
++            and defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++
++        $i = 0;
++        $notify_env{ $action }[$i++]{'rsc'} = $_ foreach split /\s+/ =>
++            $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"};
++
++        $i = 0;
++        $notify_env{ $action }[$i++]{'uname'} = $_ foreach split /\s+/ =>
++            $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++    }
++
++    # Fix active and inactive fields for Pacemaker version < 1.1.16
++    # ie. crm_feature_set < 3.0.11
++    # See http://lists.clusterlabs.org/pipermail/developers/2016-August/000265.html
++    # and git commit a6713c5d40327eff8549e7f596501ab1785b8765
++    if (
++        ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.11' ) == 0
++    ) {
++        $notify_env{ 'active' } = [
++            @{ $notify_env{ 'master' } },
++            @{ $notify_env{ 'slave' } }
++        ];
++    }
++
++    return %notify_env;
++}
++
++$__OCF_ACTION = $ARGV[0];
++
++# Return to sanity for the agents...
++
++undef $ENV{'LC_ALL'};
++$ENV{'LC_ALL'} = 'C';
++setlocale( LC_ALL, 'C' );
++undef $ENV{'LANG'};
++undef $ENV{'LANGUAGE'};
++
++$ENV{'OCF_ROOT'} = '/usr/lib/ocf'
++    unless defined $ENV{'OCF_ROOT'} and $ENV{'OCF_ROOT'} ne '';
++
++# old
++undef $ENV{'OCF_FUNCTIONS_DIR'}
++    if defined $ENV{'OCF_FUNCTIONS_DIR'}
++    and $ENV{'OCF_FUNCTIONS_DIR'} eq "$ENV{'OCF_ROOT'}/resource.d/heartbeat";
++
++# Define OCF_RESKEY_CRM_meta_interval in case it isn't already set,
++# to make sure that ocf_is_probe() always works
++$ENV{'OCF_RESKEY_CRM_meta_interval'} = 0
++    unless defined $ENV{'OCF_RESKEY_CRM_meta_interval'};
++
++# Strip the OCF_RESKEY_ prefix from this particular parameter
++if ( defined $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'}
++    and $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'} ne ''
++) {
++    $ENV{'OCF_CHECK_LEVEL'} = $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'};
++}
++else {
++    $ENV{'OCF_CHECK_LEVEL'} = 0;
++}
++
++unless ( -d $ENV{'OCF_ROOT'} ) {
++    ha_log( "ERROR: OCF_ROOT points to non-directory $ENV{'OCF_ROOT'}." );
++    $! = $OCF_ERR_GENERIC;
++    die;
++}
++
++$ENV{'OCF_RESOURCE_TYPE'} = $__SCRIPT_NAME
++    unless defined $ENV{'OCF_RESOURCE_TYPE'}
++    and $ENV{'OCF_RESOURCE_TYPE'} ne '';
++
++unless ( defined $ENV{'OCF_RA_VERSION_MAJOR'}
++    and $ENV{'OCF_RA_VERSION_MAJOR'} ne ''
++) {
++    # We are being invoked as an init script.
++    # Fill in some things with reasonable values.
++    $ENV{'OCF_RESOURCE_INSTANCE'} = 'default';
++    return 1;
++}
++
++$ENV{'OCF_RESOURCE_INSTANCE'} = "undef" if $__OCF_ACTION eq 'meta-data';
++
++unless ( defined $ENV{'OCF_RESOURCE_INSTANCE'}
++    and $ENV{'OCF_RESOURCE_INSTANCE'} ne ''
++) {
++    ha_log( "ERROR: Need to tell us our resource instance name." );
++    $! = $OCF_ERR_ARGS;
++    die;
++}
++
++1;
++
++
++=head1 COPYRIGHT AND LICENSE
++
++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++Licensed under the PostgreSQL License.
+diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm
+--- a/heartbeat/OCF_ReturnCodes.pm	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/OCF_ReturnCodes.pm	2021-04-13 13:37:35.621267404 +0200
+@@ -0,0 +1,97 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++OCF_ReturnCodes - Common variables for the OCF Resource Agents supplied by
++heartbeat.
++
++=head1 SYNOPSIS
++
++  use FindBin;
++  use lib "$FindBin::RealBin/../../lib/heartbeat/";
++  
++  use OCF_ReturnCodes;
++
++=head1 DESCRIPTION
++
++This module has been ported from the ocf-returncodes shell script of the
++resource-agents project. See L<https://github.com/ClusterLabs/resource-agents/>.
++
++=head1 VARIABLES
++
++Here are the variables exported by this module:
++
++=over
++
++=item $OCF_SUCCESS
++
++=item $OCF_ERR_GENERIC
++
++=item $OCF_ERR_ARGS
++
++=item $OCF_ERR_UNIMPLEMENTED
++
++=item $OCF_ERR_PERM
++
++=item $OCF_ERR_INSTALLED
++
++=item $OCF_ERR_CONFIGURED
++
++=item $OCF_NOT_RUNNING
++
++=item $OCF_RUNNING_MASTER
++
++=item $OCF_FAILED_MASTER
++
++=back
++
++=cut
++
++package OCF_ReturnCodes;
++
++use strict;
++use warnings;
++use 5.008;
++
++BEGIN {
++    use Exporter;
++
++    our $VERSION   = 'v2.3.0';
++    our @ISA       = ('Exporter');
++    our @EXPORT    = qw(
++        $OCF_SUCCESS
++        $OCF_ERR_GENERIC
++        $OCF_ERR_ARGS
++        $OCF_ERR_UNIMPLEMENTED
++        $OCF_ERR_PERM
++        $OCF_ERR_INSTALLED
++        $OCF_ERR_CONFIGURED
++        $OCF_NOT_RUNNING
++        $OCF_RUNNING_MASTER
++        $OCF_FAILED_MASTER
++    );
++    our @EXPORT_OK = ( @EXPORT );
++}
++
++our $OCF_SUCCESS           = 0;
++our $OCF_ERR_GENERIC       = 1;
++our $OCF_ERR_ARGS          = 2;
++our $OCF_ERR_UNIMPLEMENTED = 3;
++our $OCF_ERR_PERM          = 4;
++our $OCF_ERR_INSTALLED     = 5;
++our $OCF_ERR_CONFIGURED    = 6;
++our $OCF_NOT_RUNNING       = 7;
++our $OCF_RUNNING_MASTER    = 8;
++our $OCF_FAILED_MASTER     = 9;
++
++1;
++
++=head1 COPYRIGHT AND LICENSE
++
++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++Licensed under the PostgreSQL License.
+diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
+--- a/heartbeat/pgsqlms	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/pgsqlms	2021-04-13 13:37:40.934280411 +0200
+@@ -0,0 +1,2308 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++ocf_heartbeat_pgsqlms - A PostgreSQL multi-state resource agent for Pacemaker
++
++=head1 SYNOPSIS
++
++B<pgsqlms> [start | stop | monitor | promote | demote | notify | reload | methods | meta-data | validate-all]
++
++=head1 DESCRIPTION
++
++Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource.
++
++=cut
++
++use strict;
++use warnings;
++use 5.008;
++
++use POSIX qw(locale_h);
++use Scalar::Util qw(looks_like_number);
++use File::Spec;
++use File::Temp;
++use Data::Dumper;
++
++my $OCF_FUNCTIONS_DIR;
++BEGIN {
++	$OCF_FUNCTIONS_DIR = defined $ENV{'OCF_FUNCTIONS_DIR'} ? "$ENV{'OCF_FUNCTIONS_DIR'}" : "$ENV{'OCF_ROOT'}/lib/heartbeat";
++}
++use lib "$OCF_FUNCTIONS_DIR";
++
++use OCF_ReturnCodes;
++use OCF_Directories;
++use OCF_Functions;
++
++our $VERSION = 'v2.3.0';
++our $PROGRAM = 'pgsqlms';
++
++# OCF environment
++my $OCF_RESOURCE_INSTANCE = $ENV{'OCF_RESOURCE_INSTANCE'};
++my $OCF_RUNNING_SLAVE     = $OCF_SUCCESS;
++my %OCF_NOTIFY_ENV        = $__OCF_ACTION eq 'notify' ? ocf_notify_env() : ();
++
++# Default parameters values
++my $system_user_default = "postgres";
++my $bindir_default      = "/usr/bin";
++my $pgdata_default      = "/var/lib/pgsql/data";
++my $pghost_default      = "/tmp";
++my $pgport_default      = 5432;
++my $start_opts_default  = "";
++my $maxlag_default      = "0";
++
++# Set default values if not found in environment
++my $system_user  = $ENV{'OCF_RESKEY_system_user'} || $system_user_default;
++my $bindir       = $ENV{'OCF_RESKEY_bindir'} || $bindir_default;
++my $pgdata       = $ENV{'OCF_RESKEY_pgdata'} || $pgdata_default;
++my $datadir      = $ENV{'OCF_RESKEY_datadir'} || $pgdata;
++my $pghost       = $ENV{'OCF_RESKEY_pghost'} || $pghost_default;
++my $pgport       = $ENV{'OCF_RESKEY_pgport'} || $pgport_default;
++my $start_opts   = $ENV{'OCF_RESKEY_start_opts'} || $start_opts_default;
++my $maxlag       = $ENV{'OCF_RESKEY_maxlag'} || $maxlag_default;
++my $recovery_tpl = $ENV{'OCF_RESKEY_recovery_template'}
++    || "$pgdata/recovery.conf.pcmk";
++
++
++# PostgreSQL commands path
++my $POSTGRES   = "$bindir/postgres";
++my $PGCTL      = "$bindir/pg_ctl";
++my $PGPSQL     = "$bindir/psql";
++my $PGCTRLDATA = "$bindir/pg_controldata";
++my $PGISREADY  = "$bindir/pg_isready";
++my $PGWALDUMP  = "$bindir/pg_waldump";
++
++# pacemaker commands path
++my $CRM_MASTER    = "$HA_SBIN_DIR/crm_master --lifetime forever";
++my $CRM_NODE      = "$HA_SBIN_DIR/crm_node";
++my $CRM_RESOURCE  = "$HA_SBIN_DIR/crm_resource";
++my $ATTRD_PRIV    = "$HA_SBIN_DIR/attrd_updater --private --lifetime reboot";
++
++# Global vars
++my $nodename;
++my $exit_code = 0;
++# numeric pgsql versions
++my $PGVERNUM;
++my $PGVER_93 = 90300;
++my $PGVER_10 = 100000;
++my $PGVER_12 = 120000;
++
++# Run a query using psql.
++#
++# This function returns an array with psql return code as first element and
++# the result as second one.
++#
++sub _query {
++    my $query        = shift;
++    my $res          = shift;
++    my $connstr      = "dbname=postgres";
++    my $RS           = chr(30); # ASCII RS  (record separator)
++    my $FS           = chr(3);  # ASCII ETX (end of text)
++    my $postgres_uid = getpwnam( $system_user );
++    my $oldeuid      = $>;
++    my $tmpfile;
++    my @res;
++    my $ans;
++    my $pid;
++    my $rc;
++
++    unless ( defined $res and defined $query and $query ne '' ) {
++        ocf_log( 'debug', '_query: wrong parameters!' );
++        return -1;
++    }
++
++    unless ( $tmpfile = File::Temp->new(
++            TEMPLATE => 'pgsqlms-XXXXXXXX',
++            DIR      => $HA_RSCTMP
++        ) )
++    {
++        ocf_exit_reason( 'Could not create or write in a temp file' );
++        exit $OCF_ERR_INSTALLED;
++    }
++
++    print $tmpfile $query;
++    chmod 0644, $tmpfile;
++
++    ocf_log( 'debug', '_query: %s', $query );
++
++    # Change the effective user to the given system_user so after forking
++    # the given uid to the process should allow psql to connect w/o password
++    $> = $postgres_uid;
++
++    # Forking + piping
++    $pid = open(my $KID, "-|");
++
++    if ( $pid == 0 ) { # child
++        exec $PGPSQL, '--set', 'ON_ERROR_STOP=1', '-qXAtf', $tmpfile,
++            '-R', $RS, '-F', $FS, '--port', $pgport, '--host', $pghost,
++            $connstr;
++    }
++
++    # parent
++    $> = $oldeuid;
++
++    {
++        local $/;
++        $ans = <$KID>;
++    }
++
++    close $KID;
++    $rc = $? >> 8;
++
++    ocf_log( 'debug', '_query: psql return code: %d', $rc );
++
++    if ( defined $ans ) {
++        chop $ans;
++
++        push @{ $res }, [ split(chr(3) => $_, -1) ]
++            foreach split (chr(30) => $ans, -1);
++
++        ocf_log( 'debug', '_query: @res: %s',
++            Data::Dumper->new( [ $res ] )->Terse(1)->Dump );
++    }
++
++    # Possible return codes:
++    #  -1: wrong parameters
++    #   0: OK
++    #   1: failed to get resources (memory, missing file, ...)
++    #   2: unable to connect
++    #   3: query failed
++    return $rc;
++}
++
++# Get the last received location on a standby
++# if the first argument is true, returns the value as decimal
++# if the first argument is false, returns the value as LSN
++# Returns undef if query failed
++sub _get_last_received_lsn {
++    my ( $dec ) = @_;
++    my $pg_last_wal_receive_lsn = 'pg_last_wal_receive_lsn()';
++    my $pg_wal_lsn_diff         = 'pg_wal_lsn_diff';
++    my $query;
++    my $rc;
++    my @rs;
++
++    if ( $PGVERNUM < $PGVER_10  ) {
++        $pg_last_wal_receive_lsn = 'pg_last_xlog_receive_location()';
++        $pg_wal_lsn_diff         = 'pg_xlog_location_diff';
++    }
++
++    if ( $dec ) {
++        $query = "SELECT $pg_wal_lsn_diff( $pg_last_wal_receive_lsn, '0/0' )";
++    }
++    else {
++        $query = "SELECT $pg_last_wal_receive_lsn";
++    }
++
++    $rc = _query( $query, \@rs );
++
++    return $rs[0][0] if $rc == 0 and $rs[0][0];
++
++    ocf_log( 'err', 'Could not query last received LSN (%s)', $rc ) if $rc != 0;
++    ocf_log( 'err', 'No values for last received LSN' )
++        if $rc == 0 and not $rs[0][0];
++
++    return undef;
++}
++
++# Get the master score for each connected standby
++# Returns directly the result set of the query or exit with an error.
++# Exits with OCF_ERR_GENERIC if the query failed
++sub _get_lag_scores {
++    my $pg_current_wal_lsn = 'pg_current_wal_lsn()';
++    my $pg_wal_lsn_diff    = 'pg_wal_lsn_diff';
++    my $write_lsn          = 'write_lsn';
++    my $query;
++    my $rc;
++    my @rs;
++
++    if ( $PGVERNUM < $PGVER_10  ) {
++        $pg_current_wal_lsn = 'pg_current_xlog_location()';
++        $pg_wal_lsn_diff    = 'pg_xlog_location_diff';
++        $write_lsn          = 'write_location';
++    }
++
++    # We check locations of connected standbies by querying the
++    # "pg_stat_replication" view.
++    # The row_number applies on the result set ordered on write_location ASC so
++    # the highest row_number should be given to the closest node from the
++    # master, then the lowest node name (alphanumeric sort) in case of equality.
++    # The result set itself is ordered by priority DESC to process the best known
++    # candidate first.
++    $query = qq{
++      SELECT application_name, priority, location, state, current_lag
++      FROM (
++        SELECT application_name,
++          (1000 - (
++            row_number() OVER (
++              PARTITION BY state IN ('startup', 'backup')
++              ORDER BY location ASC, application_name ASC
++            ) - 1
++           ) * 10
++          ) * CASE WHEN ( $maxlag > 0
++                     AND current_lag > $maxlag)
++                        THEN -1
++                   ELSE 1
++              END AS priority,
++          location, state, current_lag
++        FROM (
++          SELECT application_name, $write_lsn AS location, state,
++            $pg_wal_lsn_diff($pg_current_wal_lsn, $write_lsn) AS current_lag
++          FROM pg_stat_replication
++        ) AS s2
++      ) AS s1
++      ORDER BY priority DESC
++    };
++
++    $rc = _query( $query, \@rs );
++
++    if ( $rc != 0 ) {
++        ocf_exit_reason( 'Query to get standby locations failed (%d)', $rc );
++        exit $OCF_ERR_GENERIC;
++    }
++
++    return \@rs;
++}
++
++# get the timeout for the current action given from environment var
++# Returns   timeout as integer
++#           undef if unknown
++sub _get_action_timeout {
++    my $timeout = $ENV{'OCF_RESKEY_CRM_meta_timeout'} / 1000;
++
++    ocf_log( 'debug', '_get_action_timeout: known timeout: %s',
++        defined $timeout ? $timeout : 'undef' );
++
++    return $timeout if defined $timeout and $timeout =~ /^\d+$/;
++
++    return undef;
++}
++
++# Get, parse and return the value of the given private attribute name
++# Returns an empty string if not found.
++sub _get_priv_attr {
++    my ( $name, $node ) = @_;
++    my $val             = '';
++    my $node_arg        = '';
++    my $ans;
++
++    $node = '' unless defined $node;
++    $name = "$name-$OCF_RESOURCE_INSTANCE";
++
++    $node_arg= "--node $node" if $node ne '';
++
++    $ans = qx{ $ATTRD_PRIV --name "$name" --query $node_arg };
++
++    $ans =~ m/^name=".*" host=".*" value="(.*)"$/;
++
++    $val = $1 if defined $1;
++
++    ocf_log( 'debug', '_get_priv_attr: value of "%s"%s is "%s"', $name,
++        ( $node ? " on \"$node\"": ""),
++        $val );
++
++    return $val;
++}
++
++# Set the given private attribute name to the given value
++# As setting an attribute is asynchronous, this will return as soon as the
++# attribute is really set by attrd and available.
++sub _set_priv_attr {
++    my ( $name, $val ) = @_;
++    my $name_instance  = "$name-$OCF_RESOURCE_INSTANCE";
++
++    ocf_log( 'debug', '_set_priv_attr: set "%s=%s"...', $name_instance, $val );
++
++    qx{ $ATTRD_PRIV --name "$name_instance" --update "$val" };
++
++    # give attr name without the resource instance name as _get_priv_attr adds
++    # it as well
++    while ( _get_priv_attr( $name ) ne $val ) {
++        ocf_log( 'debug', '_set_priv_attr: waiting attrd ack for "%s"...', $name_instance );
++        select( undef, undef, undef, 0.1 );
++    }
++
++    return;
++}
++
++# Delete the given private attribute.
++# As setting an attribute is asynchronous, this will return as soon as the
++# attribute is really deleted by attrd.
++sub _delete_priv_attr {
++    my ( $name ) = @_;
++    my $name_instance  = "$name-$OCF_RESOURCE_INSTANCE";
++
++    ocf_log( 'debug', '_delete_priv_attr: delete "%s"...', $name_instance );
++
++    qx{ $ATTRD_PRIV --name "$name_instance" --delete };
++
++    # give attr name without the resource instance name as _get_priv_attr adds
++    # it as well
++    while ( _get_priv_attr( $name ) ne '' ) {
++        ocf_log( 'debug', '_delete_priv_attr: waiting attrd ack for "%s"...',
++            $name_instance );
++        select( undef, undef, undef, 0.1 );
++    }
++
++    return;
++}
++
++# Get, parse and return the resource master score on given node.
++# Returns an empty string if not found.
++# Returns undef on crm_master call on error
++sub _get_master_score {
++    my ( $node ) = @_;
++    my $node_arg = '';
++    my $score;
++
++    $node_arg = sprintf '--node "%s"', $node if defined $node and $node ne '';
++
++    $score = qx{ $CRM_MASTER --quiet --get-value $node_arg 2> /dev/null };
++
++    return '' unless $? == 0 and defined $score;
++
++    chomp $score;
++
++    return $score;
++}
++
++# Set the master score of the local node or the optionally given node.
++# As setting an attribute is asynchronous, this will return as soon as the
++# attribute is really set by attrd and available everywhere.
++sub _set_master_score {
++    my ( $score, $node ) = @_;
++    my $node_arg = '';
++    my $tmp;
++
++    $node_arg = sprintf '--node "%s"', $node if defined $node and $node ne '';
++
++    qx{ $CRM_MASTER $node_arg --quiet --update "$score" };
++
++    while ( ( $tmp = _get_master_score( $node ) ) ne $score ) {
++        ocf_log( 'debug',
++            '_set_master_score: waiting to set score to "%s" (currently "%s")...',
++            $score, $tmp );
++        select(undef, undef, undef, 0.1);
++    }
++
++    return;
++}
++
++# _master_score_exists
++# This subroutine checks if a master score is set for one of the relative clones
++# in the cluster and the score is greater than or equal to 0.
++# Returns 1 if at least one master score >= 0 is found.
++# Returns 0 otherwise
++sub _master_score_exists {
++    my @partition_nodes = split /\s+/ => qx{ $CRM_NODE --partition };
++
++    foreach my $node ( @partition_nodes ) {
++        my $score = _get_master_score( $node );
++
++        return 1 if defined $score and $score ne '' and $score > -1;
++    }
++
++    return 0;
++}
++
++# Check if the current transition is a recovery of a master clone on the given node.
++sub _is_master_recover {
++    my ( $n ) = @_;
++
++    return (
++            scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'master'} }
++        and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'promote'} }
++    );
++}
++
++# Check if the current transition is a recovery of a slave clone on the given node.
++sub _is_slave_recover {
++    my ( $n ) = @_;
++
++    return (
++            scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'slave'} }
++        and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'start'} }
++    );
++}
++
++# Check if the current transition is a switchover to the given node.
++sub _is_switchover {
++    my ( $n ) = @_;
++    my $old = $OCF_NOTIFY_ENV{'master'}[0]{'uname'};
++
++    return 0 if scalar @{ $OCF_NOTIFY_ENV{'master'} }  != 1
++             or scalar @{ $OCF_NOTIFY_ENV{'demote'} }  != 1
++             or scalar @{ $OCF_NOTIFY_ENV{'promote'} } != 1;
++
++    return (
++           scalar grep { $_->{'uname'} eq $old } @{ $OCF_NOTIFY_ENV{'demote'} }
++       and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'slave'} }
++       and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'promote'} }
++       and not scalar grep { $_->{'uname'} eq $old } @{ $OCF_NOTIFY_ENV{'stop'} }
++    );
++}
++
++# Run the given command as the "system_user" given as parameter.
++# It basically forks and seteuid/setuid away from root.
++#
++sub _runas {
++    my $rc;
++    my $pid;
++    my @cmd = @_;
++    my (undef, undef, $postgres_uid, $postgres_gid ) = getpwnam( $system_user );
++
++    $pid = fork;
++
++    if ( $pid == 0 ) { # in child
++        $) = "$postgres_gid $postgres_gid";
++        while ( my ( undef, undef, $gid, $members ) = getgrent ) {
++            $) .= " $gid" if grep { $system_user eq $_ } split /\s+/, $members
++        }
++        $( = $postgres_gid;
++
++        $< = $> = $postgres_uid;
++
++        exec @cmd;
++    }
++
++    ocf_log( 'debug', '_runas: launching as "%s" command "%s"', $system_user,
++        join(' ', @cmd) );
++
++    waitpid $pid, 0;
++    $rc = $? >> 8;
++
++    return $rc;
++}
++
++# Check if instance is listening on the given host/port.
++#
++sub _pg_isready {
++    # Add 60s to the timeout or use a 24h timeout fallback to make sure
++    # Pacemaker will give up before us and take decisions
++    my $timeout = ( _get_action_timeout() || 60*60*24 )  + 60;
++    my $rc = _runas( $PGISREADY, '-h', $pghost, '-p', $pgport, '-d', 'postgres', '-t', $timeout );
++
++    # Possible error codes:
++    #   1: ping rejected (usually when instance is in startup, in crash
++    #      recovery, in warm standby, or when a shutdown is in progress)
++    #   2: no response, usually means the instance is down
++    #   3: no attempt, probably a syntax error, should not happen
++    return $rc;
++}
++
++# Check the postmaster.pid file and the postmaster process.
++# WARNING: we do not distinguish the scenario where postmaster.pid does not
++# exist from the scenario where the process is still alive. It should be ok
++# though, as this is considered a hard error from monitor.
++#
++sub _pg_ctl_status {
++    my $rc = _runas( $PGCTL, '--pgdata', $pgdata, 'status' );
++
++    # Possible error codes:
++    #   3: postmaster.pid file does not exist OR it does but the process
++    #      with the PID found in the file is not alive
++    return $rc;
++}
++
++# Start the local instance using pg_ctl
++#
++sub _pg_ctl_start {
++    # Add 60s to the timeout or use a 24h timeout fallback to make sure
++    # Pacemaker will give up before us and take decisions
++    my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60;
++
++    my @cmd = ( $PGCTL, '--pgdata', $pgdata, '-w', '--timeout', $timeout, 'start' );
++
++    push @cmd => ( '-o', $start_opts ) if $start_opts ne '';
++
++    return _runas( @cmd );
++}
++
++# Enable the Standby mode.
++#
++# Up to v11, creates the recovery.conf file based on the given template.
++# Since v12, creates standby.signal.
++sub _enable_recovery {
++    my $fh;
++    my $content      = '';
++    my $standby_file = "$datadir/standby.signal";
++    my (undef, undef, $uid, $gid) = getpwnam($system_user);
++
++    if ( $PGVERNUM < $PGVER_12 ) {
++        $standby_file = "$datadir/recovery.conf";
++
++        ocf_log( 'debug',
++            '_enable_recovery: get replication configuration from the template file "%s"',
++            $recovery_tpl );
++
++        # Create the recovery.conf file to start the instance as a secondary.
++        # NOTE: the recovery.conf is supposed to be set up so the secondary can
++        # connect to the primary instance, eg. using a virtual IP address.
++        # As there is no primary instance available at startup, secondaries will
++        # complain about failing to connect.
++        # As we can not reload a recovery.conf file on a standby without restarting
++        # it, we will leave with this.
++        # FIXME how would the reload help us in this case ?
++        unless ( defined open( $fh, '<', $recovery_tpl ) ) {
++            ocf_exit_reason( 'Could not open file "%s": %s', $recovery_tpl, $! );
++            exit $OCF_ERR_CONFIGURED;
++        }
++
++        # Copy all parameters from the template file
++        while (my $line = <$fh>) {
++            chomp $line;
++            $content .= "$line\n";
++        }
++        close $fh;
++    }
++
++    ocf_log( 'debug', '_enable_recovery: write the standby file "%s"', $standby_file );
++
++    unless ( open( $fh, '>', $standby_file ) ) {
++        ocf_exit_reason( 'Could not open file "%s": %s', $standby_file, $! );
++        exit $OCF_ERR_CONFIGURED;
++    }
++
++    # Write the recovery.conf file using configuration from the template file
++    print $fh $content;
++
++    close $fh;
++
++    unless ( chown $uid, $gid, $standby_file ) {
++        ocf_exit_reason( 'Could not set owner of "%s"', $standby_file );
++        exit $OCF_ERR_CONFIGURED;
++    };
++}
++
++# Parse and return various information about the local PostgreSQL instance as
++# reported by its controldata file.
++#
++# WARNING: the status is NOT updated in case of crash.
++#
++# This sub exits the script with an error on failure
++sub _get_controldata {
++    my %controldata;
++    my $ans;
++
++    $ans = qx{ $PGCTRLDATA "$datadir" 2>/dev/null };
++
++    # Parse the output of pg_controldata.
++    # This output is quite stable between pg versions, but we might need to sort
++    # it at some point if things are moving in there...
++    $ans =~ m{
++        # get the current state
++        ^\QDatabase cluster state\E:\s+(.*?)\s*$
++        .*
++        # Get the latest known REDO location
++        ^\QLatest checkpoint's REDO location\E:\s+([/0-9A-F]+)\s*$
++        .*
++        # Get the latest known TL
++        ^\QLatest checkpoint's TimeLineID\E:\s+(\d+)\s*$
++        .*
++        # Get the wal level
++        # NOTE: pg_controldata output changed with PostgreSQL 9.5, so we need to
++        # account for both syntaxes
++        ^(?:\QCurrent \E)?\Qwal_level setting\E:\s+(.*?)\s*$
++    }smx;
++
++    $controldata{'state'}     = $1 if defined $1;
++    $controldata{'redo'}      = $2 if defined $2;
++    $controldata{'tl'}        = $3 if defined $3;
++    $controldata{'wal_level'} = $4 if defined $4;
++
++    ocf_log( 'debug',
++        "_get_controldata: found: %s",
++        Data::Dumper->new( [ \%controldata ] )->Terse(1)->Dump );
++
++    return %controldata if defined $controldata{'state'}
++                        and defined $controldata{'tl'}
++                        and defined $controldata{'redo'}
++                        and defined $controldata{'wal_level'};
++
++    ocf_exit_reason( 'Could not read all datas from controldata file for "%s"',
++        $datadir );
++
++    ocf_log( 'debug',
++        "_get_controldata: controldata file: %s",
++        Data::Dumper->new( [ \%controldata ] )->Terse(1)->Dump, $ans );
++
++    exit $OCF_ERR_ARGS;
++}
++
++# Read the major version from datadir/PG_VERSION and return it as a numeric version
++sub _get_pg_version {
++    my $fh;
++    my $PGVERSION;
++    my $PGVERNUM;
++
++    # check PG_VERSION
++    if ( ! -s "$datadir/PG_VERSION" ) {
++        ocf_exit_reason( 'PG_VERSION does not exist in "%s"', $datadir );
++        exit $OCF_ERR_ARGS;
++    }
++
++    unless ( open( $fh, '<', "$datadir/PG_VERSION" ) ) {
++        ocf_exit_reason( "Could not open file \"$datadir/PG_VERSION\": $!" );
++        exit $OCF_ERR_ARGS;
++    }
++
++    read( $fh, $PGVERSION, 32 );
++    close $fh;
++
++    chomp $PGVERSION;
++
++    $PGVERSION =~ /^(\d+)(?:\.(\d+))?$/;
++    $PGVERNUM  = $1 * 10000;
++    $PGVERNUM += $2 * 100 if $1 < 10; # no 2nd num in the major version from v10
++
++    return $PGVERNUM;
++}
++
++# Use pg_controldata to check the state of the PostgreSQL server. This
++# function returns codes depending on this state, so we can find whether the
++# instance is a primary or a secondary, or use it to detect any inconsistency
++# that could indicate the instance has crashed.
++#
++sub _controldata_to_ocf {
++    my %cdata = _get_controldata();
++
++    while ( 1 ) {
++        ocf_log( 'debug', '_controldata: instance "%s" state is "%s"',
++            $OCF_RESOURCE_INSTANCE, $cdata{'state'} );
++
++        # Instance should be running as a primary.
++        return $OCF_RUNNING_MASTER if $cdata{'state'} eq "in production";
++
++        # Instance should be running as a secondary.
++        # This state includes warm standby (rejects connections attempts,
++        # including pg_isready)
++        return $OCF_SUCCESS if $cdata{'state'} eq "in archive recovery";
++
++
++        # The instance should be stopped.
++        # We don't care if it was a primary or secondary before, because we
++        # always start instances as secondaries, and then promote if necessary.
++        return $OCF_NOT_RUNNING if $cdata{'state'} eq "shut down"
++            or $cdata{'state'} eq "shut down in recovery";
++
++        # The state is "in crash recovery", "starting up" or "shutting down".
++        # This state should be transitional, so we wait and loop to check if
++        # it changes.
++        # If it does not, pacemaker will eventually abort with a timeout.
++        ocf_log( 'debug',
++            '_controldata: waiting for transitionnal state "%s" to finish',
++            $cdata{'state'} );
++        sleep 1;
++        %cdata = _get_controldata();
++    }
++
++    # If we reach this point, something went really wrong with this code or
++    # pg_controldata.
++    ocf_exit_reason( 'Unable get instance "%s" state using pg_controldata',
++        $OCF_RESOURCE_INSTANCE );
++
++    return $OCF_ERR_INSTALLED ;
++}
++
++# Check the write_location of all secondaries, and adapt their master score so
++# that the instance closest to the master will be the selected candidate should
++# a promotion be triggered.
++# NOTE: This is only a hint to pacemaker! The candidate selected for promotion
++# actually re-checks that it is the best candidate and forces a re-election by
++# failing if a better one exists. This avoids a race condition between the call
++# of the monitor action and the promotion where another slave might have caught
++# up faster with the master.
++# NOTE: we cannot directly use the write_location, neither a lsn_diff value as
++# promotion score as Pacemaker considers any value greater than 1,000,000 as
++# INFINITY.
++#
++# This sub must be executed from a master monitor action.
++#
++sub _check_locations {
++    my $partition_nodes;
++    my $node_score;
++    my $row_num;
++    my $row;
++    my @rs;
++
++    # Set the master score if not already done
++    $node_score = _get_master_score();
++    _set_master_score( '1001' ) unless $node_score eq '1001';
++
++    # Ask crm_node what nodes are present in our current cluster partition
++    $partition_nodes = qx{ $CRM_NODE --partition };
++
++    @rs = @{ _get_lag_scores() };
++
++    $row_num = scalar @rs;
++
++    # If no lag is reported at this point, it means that there is no
++    # secondary instance connected.
++    ocf_log( 'warning', 'No secondary connected to the master' )
++        if $row_num == 0;
++
++    # For each standby connected, set their master score based on the following
++    # rule: the first known node/application, with the highest priority and
++    # an acceptable state.
++    while ( $row = shift @rs ) {
++
++        if ( $partition_nodes !~ /$row->[0]/ ) {
++            ocf_log( 'info', 'Ignoring unknown application_name/node "%s"',
++                $row->[0] );
++            next;
++        }
++
++        if ( $row->[0] eq $nodename ) {
++            ocf_log( 'warning', 'Streaming replication with myself!' );
++            next;
++        }
++
++        $node_score = _get_master_score( $row->[0] );
++
++        if ( $row->[3] =~ /^\s*(?:startup|backup)\s*$/ ) {
++            # We exclude any standby being in state backup (pg_basebackup) or
++            # startup (new standby or failing standby)
++            ocf_log( 'info', 'Forbidding promotion on "%s" in state "%s"',
++                $row->[0], $row->[3] );
++
++            _set_master_score( '-1', $row->[0] ) unless $node_score eq '-1';
++        }
++        else {
++            ocf_log( 'debug',
++                '_check_locations: checking "%s" promotion ability (current_score: %s, priority: %s, location: %s, lag: %s)',
++                $row->[0], $node_score, $row->[1], $row->[2], $row->[4] );
++
++            if ( $node_score ne $row->[1] ) {
++                if ( $row->[1] < -1 ) {
++                    ocf_log( 'info', 'Update score of "%s" from %s to %s because replication lag (%s) is higher than given maxlag (%s).',
++                        $row->[0], $node_score, $row->[1], $row->[4], $maxlag );
++                }
++                else {
++                    ocf_log( 'info', 'Update score of "%s" from %s to %s because of a change in the replication lag (%s).',
++                        $row->[0], $node_score, $row->[1], $row->[4] );
++                }
++                _set_master_score( $row->[1], $row->[0] );
++            }
++            else {
++                ocf_log( 'debug',
++                    '_check_locations: "%s" keeps its current score of %s',
++                    $row->[0], $row->[1] );
++            }
++        }
++
++        # Remove this node from the known nodes list.
++        $partition_nodes =~ s/(?:^|\s)$row->[0](?:\s|$)/ /g;
++    }
++
++    $partition_nodes =~ s/(?:^\s+)|(?:\s+$)//g;
++
++    # If there are still nodes in "partition_nodes", it means there is no
++    # corresponding line in "pg_stat_replication".
++    # Exclude these nodes that are not part of the cluster at this
++    # point.
++    foreach my $node (split /\s+/ => $partition_nodes) {
++        # Exclude the current node.
++        next if $node eq $nodename;
++
++        # do not warn if the master score is already set to -1000.
++        # this avoid log flooding (gh #138)
++        $node_score = _get_master_score( $node );
++        next if $node_score eq '-1000';
++
++        ocf_log( 'warning', '"%s" is not connected to the primary', $node );
++        _set_master_score( '-1000', $node );
++    }
++
++    return $OCF_SUCCESS;
++}
++
++# _check_switchover
++# check if the pgsql switchover to the localnode is safe.
++# This is supposed to be called **after** the master has been stopped or demoted.
++# This sub checks if the local standby received the shutdown checkpoint from the
++# old master to make sure it can take over the master role and the old master
++# will be able to catchup as a standby after.
++#
++# Returns 0 if switchover is safe
++# Returns 1 if switchover is not safe
++# Returns 2 for internal error
++sub _check_switchover {
++    my $has_sht_chk = 0;
++    my $last_redo;
++    my $last_lsn;
++    my $ans;
++    my $rc;
++    my $tl;
++    my %cdata;
++
++    $PGWALDUMP = "$bindir/pg_xlogdump" if $PGVERNUM < $PGVER_10;
++
++    ocf_log( 'info', 'Switchover in progress from "%s" to "%s".'
++        .' Need to check the last record in WAL',
++        $OCF_NOTIFY_ENV{'demote'}[0]{'uname'}, $nodename );
++
++    # check if we received the shutdown checkpoint of the master during its
++    # demote process.
++    # We need the last local checkpoint LSN and the last received LSN from
++# master to check in the WAL between these addresses if we have a
++    # "checkpoint shutdown" using pg_xlogdump/pg_waldump.
++    #
++    # Force a checkpoint to make sure the controldata shows the very last TL
++    # and the master's shutdown checkpoint
++    _query( q{ CHECKPOINT }, {} );
++    %cdata     = _get_controldata();
++    $tl        = $cdata{'tl'};
++    $last_redo = $cdata{'redo'};
++
++    # Get the last received LSN from master
++    $last_lsn = _get_last_received_lsn();
++
++    unless ( defined $last_lsn ) {
++        ocf_exit_reason( 'Could not fetch last received LSN!' );
++
++        return 2;
++    }
++
++    $ans = qx{ $PGWALDUMP --path "$datadir" --timeline "$tl" \\
++               --start "$last_redo" --end "$last_lsn" 2>&1 };
++    $rc = $?;
++
++    ocf_log( 'debug',
++        '_check_switchover: %s rc: "%s", tl: "%s", last_chk: %s, last_lsn: %s, output: "%s"',
++        $PGWALDUMP, $rc, $tl, $last_redo, $last_lsn, $ans
++    );
++
++    if ( $rc == 0 and
++         $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m
++    ) {
++        ocf_log( 'info', 'Slave received the shutdown checkpoint' );
++        return 0;
++    }
++
++    ocf_exit_reason(
++        'Did not receive the shutdown checkpoint from the old master!' );
++
++    return 1;
++}
++
++# Check to confirm if the instance is really started as _pg_isready stated and
++# check if the instance is primary or secondary.
++#
++sub _confirm_role {
++    my $is_in_recovery;
++    my $rc;
++    my @rs;
++
++    $rc = _query( "SELECT pg_is_in_recovery()", \@rs );
++
++    $is_in_recovery = $rs[0][0];
++
++    if ( $rc == 0 ) {
++        # The query was executed, check the result.
++        if ( $is_in_recovery eq 't' ) {
++            # The instance is a secondary.
++            ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a secondary");
++            return $OCF_SUCCESS;
++        }
++        elsif ( $is_in_recovery eq 'f' ) {
++            # The instance is a primary.
++            ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a primary");
++            # Check lsn diff with current slaves if any
++            _check_locations() if $__OCF_ACTION eq 'monitor';
++            return $OCF_RUNNING_MASTER;
++        }
++
++        # This should not happen, raise a hard configuration error.
++        ocf_exit_reason(
++            'Unexpected result from query to check if "%s" is a primary or a secondary: "%s"',
++            $OCF_RESOURCE_INSTANCE, $is_in_recovery );
++
++        return $OCF_ERR_CONFIGURED;
++    }
++    elsif ( $rc == 1 or $rc == 2 ) {
++        # psql could not connect to the instance.
++        # As pg_isready reported the instance was listening, this error
++        # could be a max_connection saturation. Just report a soft error.
++        ocf_exit_reason( 'psql could not connect to instance "%s"',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_ERR_GENERIC;
++    }
++
++    # The query failed (rc: 3) or bad parameters (rc: -1).
++    # This should not happen, raise a hard configuration error.
++    ocf_exit_reason(
++        'The query to check if instance "%s" is a primary or a secondary failed (rc: %d)',
++        $OCF_RESOURCE_INSTANCE, $rc );
++
++    return $OCF_ERR_CONFIGURED;
++}
++
++
++# Check to confirm if the instance is really stopped as _pg_isready stated
++# and if it was properly shut down.
++#
++sub _confirm_stopped {
++    my $pgctlstatus_rc;
++    my $controldata_rc;
++
++    # Check the postmaster process status.
++    $pgctlstatus_rc = _pg_ctl_status();
++
++    if ( $pgctlstatus_rc == 0 ) {
++        # The PID file exists and the process is available.
++        # That should not be the case, return an error.
++        ocf_exit_reason(
++            'Instance "%s" is not listening, but the process referenced in postmaster.pid exists',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_ERR_GENERIC;
++    }
++
++    # The PID file does not exist or the process is not available.
++    ocf_log( 'debug',
++        '_confirm_stopped: no postmaster process found for instance "%s"',
++        $OCF_RESOURCE_INSTANCE );
++
++    if ( -f "$datadir/backup_label" ) {
++        # We are probably on a freshly built secondary that was not started yet.
++        ocf_log( 'debug',
++            '_confirm_stopped: backup_label file exists: probably on a never started secondary',
++        );
++        return $OCF_NOT_RUNNING;
++    }
++
++    # Continue the check with pg_controldata.
++    $controldata_rc = _controldata_to_ocf();
++    if ( $controldata_rc == $OCF_RUNNING_MASTER ) {
++        # The controldata has not been updated to "shutdown".
++        # It should mean we had a crash on a primary instance.
++        ocf_exit_reason(
++            'Instance "%s" controldata indicates a running primary instance, the instance has probably crashed',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_FAILED_MASTER;
++    }
++    elsif ( $controldata_rc == $OCF_SUCCESS ) {
++        # The controldata has not been updated to "shutdown in recovery".
++        # It should mean we had a crash on a secondary instance.
++        # There is no "FAILED_SLAVE" return code, so we return a generic error.
++        ocf_exit_reason(
++            'Instance "%s" controldata indicates a running secondary instance, the instance has probably crashed',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_ERR_GENERIC;
++    }
++    elsif ( $controldata_rc == $OCF_NOT_RUNNING ) {
++        # The controldata state is consistent, the instance was probably
++        # properly shut down.
++        ocf_log( 'debug',
++            '_confirm_stopped: instance "%s" controldata indicates that the instance was propertly shut down',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_NOT_RUNNING;
++    }
++
++    # Something went wrong with the controldata check.
++    ocf_exit_reason(
++        'Could not get instance "%s" status from controldata (returned: %d)',
++        $OCF_RESOURCE_INSTANCE, $controldata_rc );
++
++    return $OCF_ERR_GENERIC;
++}
++
++############################################################
++#### OCF FUNCS
++
++
++
++=head1 SUPPORTED PARAMETERS
++
++=over
++
++=item B<pgdata>
++
++Location of the PGDATA of your instance
++
++(optional, string, default "/var/lib/pgsql/data")
++
++=item B<pghost>
++
++The socket directory or IP address to use to connect to the local instance
++
++(optional, string, default "/tmp")
++
++=item B<pgport>
++
++The port to connect to the local instance
++
++(optional, integer, default "5432")
++
++=item B<bindir>
++
++Location of the PostgreSQL binaries.
++
++(optional, string, default "/usr/bin")
++
++=item B<system_user>
++
++The system owner of your instance's process
++
++(optional, string, default "postgres")
++
++=item B<recovery_template>
++
++B<ONLY> for PostgreSQL 11 and below.
++
++The local template that will be copied as the C<PGDATA/recovery.conf> file.
++This template file must exist on all nodes.
++
++With PostgreSQL 12 and higher, the cluster will refuse to start if this
++parameter is set or a template file is found.
++
++(optional, string, default "$PGDATA/recovery.conf.pcmk")
++
++=item B<maxlag>
++
++Maximum lag allowed on a standby before we set a negative master score on it.
++The calculation is based on the difference between the current xlog location on
++the master and the write location on the standby.
++
++(optional, integer, default "0" disables this feature)
++
++=item B<datadir>
++
++Path to the directory set in C<data_directory> from your postgresql.conf file.
++This parameter has the same default as PostgreSQL itself: the C<pgdata> parameter
++value.
++
++Unless you have a special PostgreSQL setup and you understand this parameter,
++B<ignore it>
++
++(optional, string, default to the value of C<pgdata>)
++
++=item B<start_opts>
++
++Additional arguments given to the postgres process on startup. See
++"postgres --help" for available options. Useful when the postgresql.conf file
++is not in the data directory (PGDATA), eg.:
++
++  -c config_file=/etc/postgresql/9.3/main/postgresql.conf
++
++(optional, string, default "")
++
++=back
++
++=cut
++
++# ocf_meta_data: implements the "meta-data" OCF action.
++# Prints the resource agent meta-data XML (parameters and actions) on
++# stdout and returns $OCF_SUCCESS. The XML interpolates the defaults
++# ($system_user_default, $bindir_default, $pgdata_default, $pghost_default,
++# $pgport_default, $maxlag_default, $start_opts_default) from globals
++# defined earlier in this file.
++# NOTE(review): the "datadir" and "recovery_template" defaults are the
++# literal strings "PGDATA" / "PGDATA/recovery.conf.pcmk" while every other
++# parameter interpolates its *_default global — confirm this is intended.
++sub ocf_meta_data {
++    print qq{<?xml version="1.0"?>
++        <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++        <resource-agent name="pgsqlms">
++          <version>1.0</version>
++
++          <longdesc lang="en">
++            Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource.
++          </longdesc>
++          <shortdesc lang="en">Manages PostgreSQL servers in replication</shortdesc>
++          <parameters>
++            <parameter name="system_user" unique="0" required="0">
++              <longdesc lang="en">
++                System user account used to run the PostgreSQL server
++              </longdesc>
++              <shortdesc lang="en">PostgreSQL system User</shortdesc>
++              <content type="string" default="$system_user_default" />
++            </parameter>
++
++            <parameter name="bindir" unique="0" required="0">
++              <longdesc lang="en">
++                Path to the directory storing the PostgreSQL binaries. The agent uses psql, pg_isready, pg_controldata and pg_ctl.
++              </longdesc>
++              <shortdesc lang="en">Path to the PostgreSQL binaries</shortdesc>
++              <content type="string" default="$bindir_default" />
++            </parameter>
++
++            <parameter name="pgdata" unique="1" required="0">
++              <longdesc lang="en">
++                Path to the data directory, e.g. PGDATA
++              </longdesc>
++              <shortdesc lang="en">Path to the data directory</shortdesc>
++              <content type="string" default="$pgdata_default" />
++            </parameter>
++
++            <parameter name="datadir" unique="1" required="0">
++              <longdesc lang="en">
++                Path to the directory set in data_directory from your postgresql.conf file. This parameter
++                has the same default than PostgreSQL itself: the pgdata parameter value. Unless you have a
++                special PostgreSQL setup and you understand this parameter, ignore it.
++              </longdesc>
++              <shortdesc lang="en">Path to the directory set in data_directory from your postgresql.conf file</shortdesc>
++              <content type="string" default="PGDATA" />
++            </parameter>
++
++            <parameter name="pghost" unique="0" required="0">
++              <longdesc lang="en">
++                Host IP address or unix socket folder the instance is listening on.
++              </longdesc>
++              <shortdesc lang="en">Instance IP or unix socket folder</shortdesc>
++              <content type="string" default="$pghost_default" />
++            </parameter>
++
++            <parameter name="pgport" unique="0" required="0">
++              <longdesc lang="en">
++                Port the instance is listening on.
++              </longdesc>
++              <shortdesc lang="en">Instance port</shortdesc>
++              <content type="integer" default="$pgport_default" />
++            </parameter>
++
++           <parameter name="maxlag" unique="0" required="0">
++              <longdesc lang="en">
++                Maximum lag allowed on a standby before we set a negative master score on it. The calculation
++                is based on the difference between the current LSN on the master and the LSN
++                written on the standby.
++                This parameter must be a valid positive number as described in PostgreSQL documentation.
++                See: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC
++              </longdesc>
++              <shortdesc lang="en">Maximum write lag before we mark a standby as inappropriate to promote</shortdesc>
++              <content type="integer" default="$maxlag_default" />
++            </parameter>
++
++            <parameter name="recovery_template" unique="1" required="0">
++              <longdesc lang="en">
++                Path to the recovery.conf template. This file is simply copied to \$PGDATA
++                before starting the instance as slave.
++                ONLY for PostgreSQL 11 and bellow. This parameter is IGNORED for
++                PostgreSQL 12 and higher. The cluster will refuse to start if a template
++                file is found.
++              </longdesc>
++              <shortdesc lang="en">Path to the recovery.conf template for PostgreSQL 11 and older.</shortdesc>
++              <content type="string" default="PGDATA/recovery.conf.pcmk" />
++            </parameter>
++
++            <parameter name="start_opts" unique="0" required="0">
++              <longdesc lang="en">
++                Additionnal arguments given to the postgres process on startup.
++                See "postgres --help" for available options. Usefull when the
++                postgresql.conf file is not in the data directory (PGDATA), eg.:
++                "-c config_file=/etc/postgresql/9.3/main/postgresql.conf".
++              </longdesc>
++              <shortdesc lang="en">Additionnal arguments given to the postgres process on startup.</shortdesc>
++              <content type="string" default="$start_opts_default" />
++            </parameter>
++
++          </parameters>
++          <actions>
++            <action name="start" timeout="60" />
++            <action name="stop" timeout="60" />
++            <action name="reload" timeout="20" />
++            <action name="promote" timeout="30" />
++            <action name="demote" timeout="120" />
++            <action name="monitor" depth="0" timeout="10" interval="15"/>
++            <action name="monitor" depth="0" timeout="10" interval="15" role="Master"/>
++            <action name="monitor" depth="0" timeout="10" interval="16" role="Slave"/>
++            <action name="notify" timeout="60" />
++            <action name="meta-data" timeout="5" />
++            <action name="validate-all" timeout="5" />
++            <action name="methods" timeout="5" />
++          </actions>
++        </resource-agent>
++    };
++    return $OCF_SUCCESS;
++}
++
++
++=head1 SUPPORTED ACTIONS
++
++This resource agent supports the following actions (operations):
++
++=over
++
++=item B<start>
++
++Starts the resource. Suggested minimum timeout: 60.
++
++=item B<stop>
++
++Stops the resource. Suggested minimum timeout: 60.
++
++=item B<reload>
++
++Suggested minimum timeout: 20.
++
++=item B<promote>
++
++Promotes the resource to the Master role. Suggested minimum timeout: 30.
++
++=item B<demote>
++
++Demotes the resource to the Slave role. Suggested minimum timeout: 120.
++
++=item B<monitor (Master role)>
++
++Performs a detailed status check. Suggested minimum timeout: 10.
++Suggested interval: 15.
++
++=item B<monitor (Slave role)>
++
++Performs a detailed status check. Suggested minimum timeout: 10.
++Suggested interval: 16.
++
++=item B<notify>
++
++Suggested minimum timeout: 60
++
++=item B<meta-data>
++
++Retrieves resource agent metadata (internal use only).
++Suggested minimum timeout: 5.
++
++=item B<methods>
++
++Suggested minimum timeout: 5.
++
++=item B<validate-all>
++
++Performs a validation of the resource configuration.
++Suggested minimum timeout: 5.
++
++=back
++
++=cut
++
++# ocf_methods: implements the "methods" OCF action.
++# Prints the list of actions supported by this resource agent, one per
++# line, and returns $OCF_SUCCESS.
++sub ocf_methods {
++    print q{
++        start
++        stop
++        reload
++        promote
++        demote
++        monitor
++        notify
++        methods
++        meta-data
++        validate-all
++    };
++
++    return $OCF_SUCCESS;
++}
++
++############################################################
++#### RA FUNCS
++
++# pgsql_validate_all: implements the "validate-all" OCF action.
++# Checks that the environment and the resource configuration are sane:
++#   * Pacemaker is recent enough (crm_feature_set > 3.0.9),
++#   * the master resource has meta parameters notify=true and master-max=1,
++#   * recovery settings match the PostgreSQL major version (recovery.conf
++#     template before v12; recovery_target_timeline and primary_conninfo
++#     GUCs, with no template file, from v12 on),
++#   * maxlag is numeric, the system user exists, PostgreSQL is >= 9.3,
++#     the required binaries are executable and wal_level supports
++#     replication.
++# Returns $OCF_SUCCESS, or $OCF_ERR_INSTALLED / $OCF_ERR_ARGS on failure.
++sub pgsql_validate_all {
++    my $fh;
++    my $ans = '';
++    my %cdata;
++
++    # NOTE(review): relies on ocf_version_cmp() returning 2 when the first
++    # version is strictly greater than the second — confirm against
++    # OCF_Functions.pm.
++    unless (
++        ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.9' ) == 2
++    ) {
++        ocf_exit_reason(
++            'PAF %s is compatible with Pacemaker 1.1.13 and greater',
++            $VERSION
++        );
++        return $OCF_ERR_INSTALLED;
++    }
++
++    # check notify=true
++    $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\
++                 --meta --get-parameter notify 2>/dev/null };
++    chomp $ans;
++    unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) {
++        ocf_exit_reason(
++            'You must set meta parameter notify=true for your master resource'
++        );
++        return $OCF_ERR_INSTALLED;
++    }
++
++    # check master-max=1
++    unless (
++        defined $ENV{'OCF_RESKEY_CRM_meta_master_max'}
++            and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1'
++    ) {
++        ocf_exit_reason(
++            'You must set meta parameter master-max=1 for your master resource'
++        );
++        return $OCF_ERR_INSTALLED;
++    }
++
++    if ( $PGVERNUM >= $PGVER_12 ) {
++        # check PostgreSQL setup: checks related to v12 and after
++        my $guc;
++
++        # recovery.conf template must not exists
++        if ( -f $recovery_tpl ) {
++            ocf_exit_reason(
++                'Recovery template file "%s" is forbidden for PostgreSQL 12 and above',
++                $recovery_tpl );
++            # NOTE(review): this exits directly while the sibling checks use
++            # "return $OCF_ERR_ARGS" — confirm the exit is intentional.
++            exit $OCF_ERR_ARGS;
++        }
++
++        # WARNING: you MUST put -C as first argument to bypass the root check
++        $guc = qx{ $POSTGRES -C recovery_target_timeline -D "$pgdata" $start_opts};
++        chomp $guc;
++        unless ( $guc eq 'latest' ) {
++            ocf_exit_reason(
++                q{Parameter "recovery_target_timeline" MUST be set to 'latest'. } .
++                q{It is currently set to '%s'}, $guc );
++            return $OCF_ERR_ARGS;
++        }
++
++        $guc = qx{ $POSTGRES -C primary_conninfo -D "$pgdata" $start_opts};
++        unless ($guc =~ /\bapplication_name='?$nodename'?\b/) {
++            ocf_exit_reason(
++                q{Parameter "primary_conninfo" MUST contain 'application_name=%s'. }.
++                q{It is currently set to '%s'}, $nodename, $guc );
++            return $OCF_ERR_ARGS;
++        }
++    }
++    else {
++        my @content;
++
++        # check recovery template
++        if ( ! -f $recovery_tpl ) {
++            ocf_exit_reason( 'Recovery template file "%s" does not exist',
++                $recovery_tpl );
++            return $OCF_ERR_ARGS;
++        }
++
++        # check content of the recovery template file
++        unless ( open( $fh, '<', $recovery_tpl ) ) {
++            ocf_exit_reason( 'Could not open file "%s": %s', $recovery_tpl, $! );
++            return $OCF_ERR_ARGS;
++        }
++        @content = <$fh>;
++        close $fh;
++
++
++        unless ( grep /^\s*standby_mode\s*=\s*'?on'?\s*$/, @content ) {
++            ocf_exit_reason(
++                'Recovery template file must contain "standby_mode = on"' );
++            return $OCF_ERR_ARGS;
++        }
++
++        unless ( grep /^\s*recovery_target_timeline\s*=\s*'?latest'?\s*$/, @content ) {
++            ocf_exit_reason(
++                "Recovery template file must contain \"recovery_target_timeline = 'latest'\""
++            );
++            return $OCF_ERR_ARGS;
++        }
++
++        unless (
++            grep /^\s*primary_conninfo\s*=.*['\s]application_name=$nodename['\s]/,
++            @content
++        ) {
++            ocf_exit_reason(
++                'Recovery template file must contain in primary_conninfo parameter "application_name=%s"',
++                $nodename );
++            return $OCF_ERR_ARGS;
++        }
++    }
++
++    unless ( looks_like_number($maxlag) ) {
++        ocf_exit_reason( 'maxlag is not a number: "%s"', $maxlag );
++        return $OCF_ERR_INSTALLED;
++    }
++
++    # check system user
++    unless ( defined getpwnam $system_user ) {
++        ocf_exit_reason( 'System user "%s" does not exist', $system_user );
++        return $OCF_ERR_ARGS;
++    }
++
++    # require 9.3 minimum
++    if ( $PGVERNUM < $PGVER_93 ) {
++        ocf_exit_reason( "Require 9.3 and more" );
++        return $OCF_ERR_INSTALLED;
++    }
++
++    # check binaries
++    # pg_waldump (v10+) or pg_xlogdump (v9.x) — either name is accepted.
++    unless ( -x $PGCTL and -x $PGPSQL and -x $PGCTRLDATA and -x $PGISREADY
++         and ( -x $PGWALDUMP or -x "$bindir/pg_xlogdump")
++     ) {
++        ocf_exit_reason(
++            "Missing one or more binary. Check following path: %s, %s, %s, %s, %s or %s",
++            $PGCTL, $PGPSQL, $PGCTRLDATA, $PGISREADY, $PGWALDUMP, "$bindir/pg_xlogdump" );
++        return $OCF_ERR_ARGS;
++    }
++
++    # require wal_level >= hot_standby
++    %cdata = _get_controldata();
++    unless ( $cdata{'wal_level'} =~ m{hot_standby|logical|replica} ) {
++        ocf_exit_reason(
++            'wal_level must be one of "hot_standby", "logical" or "replica"' );
++        return $OCF_ERR_ARGS;
++    }
++
++    return $OCF_SUCCESS;
++}
++
++
++# Start the PostgreSQL instance as a *secondary*
++#
++# The instance is always started in standby mode (see _enable_recovery);
++# promotion to primary is handled by the separate "promote" action. When
++# the instance was a cleanly shut down master and no master score exists
++# anywhere in the cluster, this node sets its own master score to 1 so
++# Pacemaker can elect a master on the very first cluster start.
++#
++# Returns: $OCF_SUCCESS once the instance runs as a secondary,
++#          $OCF_ERR_GENERIC on failure or unexpected initial state.
++sub pgsql_start {
++    my $rc         = pgsql_monitor();
++    my %cdata      = _get_controldata();
++    my $prev_state = $cdata{'state'};
++
++    # Instance must be running as secondary or being stopped.
++    # Anything else is an error.
++    if ( $rc == $OCF_SUCCESS ) {
++        ocf_log( 'info', 'Instance "%s" already started',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_SUCCESS;
++    }
++    elsif ( $rc != $OCF_NOT_RUNNING ) {
++        ocf_exit_reason( 'Unexpected state for instance "%s" (returned %d)',
++            $OCF_RESOURCE_INSTANCE, $rc );
++        return $OCF_ERR_GENERIC;
++    }
++
++    #
++    # From here, the instance is NOT running for sure.
++    #
++
++    ocf_log( 'debug',
++        'pgsql_start: instance "%s" is not running, starting it as a secondary',
++        $OCF_RESOURCE_INSTANCE );
++
++    # Must start as a standby, so enable recovery.
++    _enable_recovery();
++
++    # Start the instance as a secondary.
++    $rc = _pg_ctl_start();
++
++    if ( $rc == 0 ) {
++
++        # Wait for the start to finish.
++        # NOTE(review): this polls with no bound of its own; it relies on
++        # Pacemaker's action timeout to interrupt a start that hangs.
++        sleep 1 while ( $rc = pgsql_monitor() ) == $OCF_NOT_RUNNING;
++
++        if ( $rc == $OCF_SUCCESS ) {
++            ocf_log( 'info', 'Instance "%s" started', $OCF_RESOURCE_INSTANCE );
++
++            # Check if a master score exists in the cluster.
++            # During the very first start of the cluster, no master score will
++            # exists on any of the existing slaves, unless an admin designated
++            # one of them using crm_master. If no master exists the cluster will
++            # not promote a master among the slaves.
++            # To solve this situation, we check if there is at least one master
++            # score existing on one node in the cluster. Do nothing if at least
++            # one master score is found among the clones of the resource. If no
++            # master score exists, set a score of 1 only if the resource was a
++            # shut downed master before the start.
++            if ( $prev_state eq "shut down" and not _master_score_exists() ) {
++                ocf_log( 'info', 'No master score around. Set mine to 1' );
++
++                _set_master_score( '1' );
++            }
++
++            return $OCF_SUCCESS;
++        }
++
++        ocf_exit_reason(
++            'Instance "%s" is not running as a slave (returned %d)',
++             $OCF_RESOURCE_INSTANCE, $rc );
++
++        return $OCF_ERR_GENERIC;
++    }
++
++    ocf_exit_reason( 'Instance "%s" failed to start (rc: %d)',
++        $OCF_RESOURCE_INSTANCE, $rc );
++
++    return $OCF_ERR_GENERIC;
++}
++
++# Stop the PostgreSQL instance
++#
++# Performs a graceful "fast" shutdown of the local instance using pg_ctl
++# ("-m fast" disconnects clients and aborts running transactions, "-w"
++# waits for completion up to $timeout seconds), then polls pgsql_monitor()
++# until the instance reports as stopped.
++#
++# Returns: $OCF_SUCCESS when the instance is (or becomes) cleanly stopped,
++#          $OCF_ERR_GENERIC on an unexpected state or a shutdown failure.
++sub pgsql_stop {
++    my $rc;
++    # Add 60s to the timeout or use a 24h timeout fallback to make sure
++    # Pacemaker will give up before us and take decisions
++    my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60;
++
++    # Instance must be running as secondary or primary or being stopped.
++    # Anything else is an error.
++    $rc = pgsql_monitor();
++    if ( $rc == $OCF_NOT_RUNNING ) {
++        ocf_log( 'info', 'Instance "%s" already stopped',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_SUCCESS;
++    }
++    elsif ( $rc != $OCF_SUCCESS and $rc != $OCF_RUNNING_MASTER ) {
++        ocf_exit_reason( 'Unexpected state for instance "%s" (returned %d)',
++            $OCF_RESOURCE_INSTANCE, $rc );
++        return $OCF_ERR_GENERIC;
++    }
++
++    #
++    # From here, the instance is running for sure.
++    #
++
++    ocf_log( 'debug', 'pgsql_stop: instance "%s" is running, stopping it',
++        $OCF_RESOURCE_INSTANCE );
++
++    # Try to quit with proper shutdown.
++    $rc = _runas( $PGCTL, '--pgdata', $pgdata, '-w', '--timeout', $timeout,
++        '-m', 'fast', 'stop' );
++
++    if ( $rc == 0 ) {
++        # Wait for the stop to finish.
++        sleep 1 while ( $rc = pgsql_monitor() ) != $OCF_NOT_RUNNING ;
++
++        ocf_log( 'info', 'Instance "%s" stopped', $OCF_RESOURCE_INSTANCE );
++
++        return $OCF_SUCCESS;
++    }
++
++    ocf_exit_reason( 'Instance "%s" failed to stop', $OCF_RESOURCE_INSTANCE );
++    return $OCF_ERR_GENERIC;
++}
++
++# Monitor the PostgreSQL instance
++#
++# Probes the instance with pg_isready and, when the answer is ambiguous,
++# cross-checks it against pg_controldata. pg_isready exit codes:
++#   0 = accepting connections, 1 = rejecting connections (startup,
++#   shutdown, crash recovery, warm standby), 2 = no response,
++#   3 = check not attempted (bad parameters).
++#
++# Returns: the _confirm_role() result ($OCF_RUNNING_MASTER or
++#          $OCF_SUCCESS) when the instance is listening, the
++#          _confirm_stopped() result when it is consistently not
++#          listening, or an $OCF_ERR_* code when the two checks disagree.
++sub pgsql_monitor {
++    my $pgisready_rc;
++    my $controldata_rc;
++
++    ocf_log( 'debug', 'pgsql_monitor: monitor is a probe' ) if ocf_is_probe();
++
++    # First check, verify if the instance is listening.
++    $pgisready_rc = _pg_isready();
++
++    if ( $pgisready_rc == 0 ) {
++        # The instance is listening.
++        # We confirm that the instance is up and return if it is a primary or a
++        # secondary
++        ocf_log( 'debug', 'pgsql_monitor: instance "%s" is listening',
++            $OCF_RESOURCE_INSTANCE );
++        return _confirm_role();
++    }
++
++    if ( $pgisready_rc == 1 ) {
++        # The attempt was rejected.
++        # This could happen in several cases:
++        #   - at startup
++        #   - during shutdown
++        #   - during crash recovery
++        #   - if instance is a warm standby
++        # Except for the warm standby case, this should be a transitional state.
++        # We try to confirm using pg_controldata.
++        ocf_log( 'debug',
++            'pgsql_monitor: instance "%s" rejects connections - checking again...',
++            $OCF_RESOURCE_INSTANCE );
++        $controldata_rc = _controldata_to_ocf();
++
++        if ( $controldata_rc == $OCF_RUNNING_MASTER
++            or $controldata_rc == $OCF_SUCCESS
++        ) {
++            # This state indicates that pg_isready check should succeed.
++            # We check again.
++            ocf_log( 'debug',
++                'pgsql_monitor: instance "%s" controldata shows a running status',
++                $OCF_RESOURCE_INSTANCE );
++
++            $pgisready_rc = _pg_isready();
++            if ( $pgisready_rc == 0 ) {
++                # Consistent with pg_controldata output.
++                # We can check if the instance is primary or secondary
++                ocf_log( 'debug', 'pgsql_monitor: instance "%s" is listening',
++                    $OCF_RESOURCE_INSTANCE );
++                return _confirm_role();
++            }
++
++            # Still not consistent, raise an error.
++            # NOTE: if the instance is a warm standby, we end here.
++            # TODO raise a hard error here ?
++            ocf_exit_reason(
++                'Instance "%s" controldata is not consistent with pg_isready (returned: %d)',
++                $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++            # NOTE(review): the format string below has no placeholders but
++            # two extra arguments are passed — harmless with sprintf, but
++            # confirm it is intentional.
++            ocf_log( 'info',
++                'If this instance is in warm standby, this resource agent only supports hot standby',
++                $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++
++            return $OCF_ERR_GENERIC;
++        }
++
++        if ( $controldata_rc == $OCF_NOT_RUNNING ) {
++            # This state indicates that pg_isready check should fail with rc 2.
++            # We check again.
++            $pgisready_rc = _pg_isready();
++            if ( $pgisready_rc == 2 ) {
++                # Consistent with pg_controldata output.
++                # We check the process status using pg_ctl status and check
++                # if it was properly shut down using pg_controldata.
++                ocf_log( 'debug',
++                    'pgsql_monitor: instance "%s" is not listening',
++                    $OCF_RESOURCE_INSTANCE );
++                return _confirm_stopped();
++            }
++            # Still not consistent, raise an error.
++            # TODO raise a hard error here ?
++            ocf_exit_reason(
++                'Instance "%s" controldata is not consistent with pg_isready (returned: %d)',
++                $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++
++            return $OCF_ERR_GENERIC;
++        }
++
++        # Something went wrong with the controldata check, hard fail.
++        ocf_exit_reason(
++            'Could not get instance "%s" status from controldata (returned: %d)',
++            $OCF_RESOURCE_INSTANCE, $controldata_rc );
++
++        return $OCF_ERR_INSTALLED;
++    }
++
++    elsif ( $pgisready_rc == 2 ) {
++        # The instance is not listening.
++        # We check the process status using pg_ctl status and check
++        # if it was properly shut down using pg_controldata.
++        ocf_log( 'debug', 'pgsql_monitor: instance "%s" is not listening',
++            $OCF_RESOURCE_INSTANCE );
++        return _confirm_stopped();
++    }
++
++    elsif ( $pgisready_rc == 3 ) {
++        # No attempt was done, probably a syntax error.
++        # Hard configuration error, we don't want to retry or failover here.
++        ocf_exit_reason(
++            'Unknown error while checking if instance "%s" is listening (returned %d)',
++            $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++
++        return $OCF_ERR_CONFIGURED;
++    }
++
++    ocf_exit_reason( 'Unexpected result when checking instance "%s" status',
++        $OCF_RESOURCE_INSTANCE );
++
++    return $OCF_ERR_GENERIC;
++}
++
++
++# Demote the PostgreSQL instance from primary to secondary
++# To demote a PostgreSQL instance, we must:
++#   * stop it gracefully
++#   * create recovery.conf with standby_mode = on (done by pgsql_start)
++#   * start it
++#
++# Returns: $OCF_SUCCESS once running as a secondary, $OCF_ERR_CONFIGURED
++#          on hard configuration errors, $OCF_ERR_GENERIC otherwise.
++sub pgsql_demote {
++    my $rc;
++
++    $rc = pgsql_monitor();
++
++    # Running as primary. Normal, expected behavior.
++    if ( $rc == $OCF_RUNNING_MASTER ) {
++        ocf_log( 'debug', 'pgsql_demote: "%s" currently running as a primary',
++            $OCF_RESOURCE_INSTANCE )  ;
++    }
++    elsif ( $rc == $OCF_SUCCESS ) {
++        # Already running as secondary. Nothing to do.
++        ocf_log( 'debug',
++            'pgsql_demote: "%s" currently running as a secondary',
++            $OCF_RESOURCE_INSTANCE );
++            return $OCF_SUCCESS;
++    }
++    elsif ( $rc == $OCF_NOT_RUNNING ) {
++        # Instance is stopped. Nothing to do.
++        ocf_log( 'debug', 'pgsql_demote: "%s" currently shut down',
++            $OCF_RESOURCE_INSTANCE );
++    }
++    elsif ( $rc == $OCF_ERR_CONFIGURED ) {
++        # We actually prefer raising a hard or fatal error instead of leaving
++        # the CRM aborting its transition for a new one because of a soft error.
++        # The hard error will force the CRM to move the resource immediately.
++        return $OCF_ERR_CONFIGURED;
++    }
++    else {
++        return $OCF_ERR_GENERIC;
++    }
++
++    # TODO we need to make sure at least one slave is connected!!
++
++    # WARNING if the resource state is stopped instead of master, the ocf ra dev
++    # rsc advises to return OCF_ERR_GENERIC, misleading the CRM in a loop where
++    # it computes transitions of demote(failing)->stop->start->promote actions
++    # until failcount == migration-threshold.
++    # This is a really ugly trick to keep going with the demote action if the
++    # rsc is already stopped gracefully.
++    # See discussion "CRM trying to demote a stopped resource" on
++    # developers@clusterlabs.org
++    unless ( $rc == $OCF_NOT_RUNNING ) {
++        # Add 60s to the timeout or use a 24h timeout fallback to make sure
++        # Pacemaker will give up before us and take decisions
++        my $timeout = ( _get_action_timeout() || 60*60*24 )  + 60;
++
++        # WARNING the instance **MUST** be stopped gracefully.
++        # Do **not** use pg_stop() or service or systemctl here as these
++        # commands might force-stop the PostgreSQL instance using immediate
++        # after some timeout and return success, which is misleading.
++
++        $rc = _runas( $PGCTL, '--pgdata', $pgdata, '--mode', 'fast', '-w',
++            '--timeout', $timeout , 'stop' );
++
++        # No need to wait for stop to complete, this is handled in pg_ctl
++        # using -w option.
++        unless ( $rc == 0 ) {
++            ocf_exit_reason( 'Failed to stop "%s" using pg_ctl (returned %d)',
++                $OCF_RESOURCE_INSTANCE, $rc );
++            return $OCF_ERR_GENERIC;
++        }
++
++        # Double check that the instance is stopped correctly.
++        $rc = pgsql_monitor();
++        unless ( $rc == $OCF_NOT_RUNNING ) {
++            ocf_exit_reason(
++                'Unexpected "%s" state: monitor status (%d) disagree with pg_ctl return code',
++                $OCF_RESOURCE_INSTANCE, $rc );
++            return $OCF_ERR_GENERIC;
++        }
++    }
++
++    #
++    # At this point, the instance **MUST** be stopped gracefully.
++    #
++
++    # Note: We do not need to handle the recovery.conf file here as pgsql_start
++    # deal with that itself. Equally, no need to wait for the start to complete
++    # here, handled in pgsql_start.
++    $rc = pgsql_start();
++    if ( $rc == $OCF_SUCCESS ) {
++        ocf_log( 'info', 'pgsql_demote: "%s" started as a secondary',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_SUCCESS;
++    }
++
++    # NOTE: No need to double check the instance state as pgsql_start already use
++    # pgsql_monitor to check the state before returning.
++
++    ocf_exit_reason( 'Starting "%s" as a standby failed (returned %d)',
++        $OCF_RESOURCE_INSTANCE, $rc );
++    return $OCF_ERR_GENERIC;
++}
++
++
++# Promote the secondary instance to primary
++#
++# Steps:
++#   * make sure the instance is running as a standby (starting it first if
++#     needed),
++#   * honor a switchover cancellation flag set by the pre-promote action,
++#   * unless this is a master recovery, compare this node's TL#LSN (stored
++#     as the "lsn_location" private attribute during pre-promote) against
++#     every other active node's; if a better candidate exists, shift the
++#     master scores and fail this promotion so Pacemaker retries there,
++#   * run "pg_ctl promote" and wait until monitor reports a running master.
++#
++# Returns: $OCF_SUCCESS, or $OCF_ERR_GENERIC / $OCF_ERR_ARGS on failure.
++sub pgsql_promote {
++    my $rc;
++    my $cancel_switchover;
++
++    $rc = pgsql_monitor();
++
++    if ( $rc == $OCF_SUCCESS ) {
++        # Running as slave. Normal, expected behavior.
++        ocf_log( 'debug', 'pgsql_promote: "%s" currently running as a standby',
++            $OCF_RESOURCE_INSTANCE );
++    }
++    elsif ( $rc == $OCF_RUNNING_MASTER ) {
++        # Already a master. Unexpected, but not a problem.
++        ocf_log( 'info', '"%s" already running as a primary',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_SUCCESS;
++    }
++    elsif ( $rc == $OCF_NOT_RUNNING ) { # INFO this is not supposed to happen.
++        # Currently not running. Need to start before promoting.
++        ocf_log( 'info', '"%s" currently not running, starting it',
++            $OCF_RESOURCE_INSTANCE );
++
++        $rc = pgsql_start();
++        if ( $rc != $OCF_SUCCESS ) {
++            ocf_exit_reason( 'Failed to start the instance "%s"',
++                $OCF_RESOURCE_INSTANCE );
++            return $OCF_ERR_GENERIC;
++        }
++    }
++    else {
++        ocf_exit_reason( 'Unexpected error, cannot promote "%s"',
++            $OCF_RESOURCE_INSTANCE );
++        return $OCF_ERR_GENERIC;
++    }
++
++    #
++    # At this point, the instance **MUST** be started as a secondary.
++    #
++
++    # Cancel the switchover if it has been considered not safe during the
++    # pre-promote action
++    $cancel_switchover = _get_priv_attr('cancel_switchover');
++    if ( $cancel_switchover ) { # if not empty or not 0
++        ocf_exit_reason( 'Switchover has been canceled from pre-promote action' );
++
++        _delete_priv_attr( 'cancel_switchover' );
++
++        return $OCF_ERR_GENERIC if $cancel_switchover eq '1';
++        return $OCF_ERR_ARGS; # ban the resource from the node if we have an
++                              # internal error during _check_switchover
++    }
++
++    # Do not check for a better candidate if we try to recover the master
++    # Recovery of a master is detected during the pre-promote action. It sets
++    # the private attribute 'recover_master' to '1' if this is a master recover.
++    if ( _get_priv_attr( 'recover_master' ) eq '1' ) {
++        ocf_log( 'info', 'Recovering old master, no election needed');
++    }
++    else {
++
++        # The promotion is occurring on the best known candidate (highest
++        # master score), as chosen by pacemaker during the last working monitor
++        # on previous master (see pgsql_monitor/_check_locations subs).
++        # To avoid any race condition between the last monitor action on the
++        # previous master and the **real** most up-to-date standby, we
++        # set each standby location during the "pre-promote" action, and stored
++        # them using the "lsn_location" resource attribute.
++        #
++        # The best standby to promote would have the highest known LSN. If the
++        # current resource is not the best one, we need to modify the master
++        # scores accordingly, and abort the current promotion.
++        ocf_log( 'debug',
++            'pgsql_promote: checking if current node is the best candidate for promotion' );
++
++        # Exclude nodes that are known to be unavailable (not in the current
++        # partition) using the "crm_node" command
++        my @active_nodes    = split /\s+/ => _get_priv_attr( 'nodes' );
++        my $node_to_promote = '';
++        my $ans;
++        my $max_tl;
++        my $max_lsn;
++        my $node_tl;
++        my $node_lsn;
++        my $wal_num;
++        my $wal_off;
++
++        # Get the "lsn_location" attribute value for the current node, as set
++        # during the "pre-promote" action.
++        # It should be the greatest among the secondary instances.
++        $ans = _get_priv_attr( 'lsn_location' );
++
++        if ( $ans eq '' ) {
++            # This should not happen as the "lsn_location" attribute should have
++            # been updated during the "pre-promote" action.
++            ocf_exit_reason( 'Can not get current node LSN location' );
++            return $OCF_ERR_GENERIC;
++        }
++
++        chomp $ans;
++        # The attribute is stored as "<timeline>#<lsn>".
++        ( $max_tl, $max_lsn ) = split /#/, $ans;
++
++        ocf_log( 'debug', 'pgsql_promote: current node TL#LSN location: %s#%s',
++            $max_tl, $max_lsn );
++
++        # Now we compare with the other available nodes.
++        foreach my $node ( @active_nodes ) {
++            # We exclude the current node from the check.
++            next if $node eq $nodename;
++
++            # Get the "lsn_location" attribute value for the node, as set during
++            # the "pre-promote" action.
++            # This is implemented as a loop as private attributes are asynchronously
++            # available from other nodes.
++            # see: https://github.com/ClusterLabs/PAF/issues/131
++            # NOTE: if a node did not set its lsn_location for some reason, this will end
++            # with a timeout and the whole promotion will start again.
++            WAIT_FOR_LSN: {
++                $ans = _get_priv_attr( 'lsn_location', $node );
++                if ( $ans eq '' ) {
++                    ocf_log( 'info', 'pgsql_promote: waiting for LSN from %s', $node );
++                    select( undef, undef, undef, 0.1 );
++                    redo WAIT_FOR_LSN;
++                }
++            }
++
++            chomp $ans;
++            ( $node_tl, $node_lsn ) = split /#/, $ans;
++
++            ocf_log( 'debug',
++                'pgsql_promote: comparing with "%s": TL#LSN is %s#%s',
++                $node, $node_tl, $node_lsn );
++
++            # If the node has a higher LSN, select it as a best candidate to
++            # promotion and keep looping to check the TL/LSN of other nodes.
++            if ( $node_tl > $max_tl
++                or ( $node_tl == $max_tl and $node_lsn > $max_lsn )
++            ) {
++                ocf_log( 'debug',
++                    'pgsql_promote: "%s" is a better candidate to promote (%s#%s > %s#%s)',
++                    $node, $node_tl, $node_lsn, $max_tl, $max_lsn );
++                $node_to_promote = $node;
++                $max_tl          = $node_tl;
++                $max_lsn         = $node_lsn;
++            }
++        }
++
++        # If any node has been selected, we adapt the master scores accordingly
++        # and break the current promotion.
++        if ( $node_to_promote ne '' ) {
++            ocf_exit_reason(
++                '%s is the best candidate to promote, aborting current promotion',
++                $node_to_promote );
++
++            # Reset current node master score.
++            _set_master_score( '1' );
++
++            # Set promotion candidate master score.
++            _set_master_score( '1000', $node_to_promote );
++
++            # We fail the promotion to trigger another promotion transition
++            # with the new scores.
++            return $OCF_ERR_GENERIC;
++        }
++
++        # Else, we will keep on promoting the current node.
++    }
++
++    unless (
++        # Promote the instance on the current node.
++        _runas( $PGCTL, '--pgdata', $pgdata, '-w', 'promote' ) == 0 )
++    {
++        ocf_exit_reason( 'Error during promotion command' );
++        return $OCF_ERR_GENERIC;
++    }
++
++    # The instance promotion is asynchronous, so we need to wait for this
++    # process to complete.
++    # NOTE(review): unbounded poll — relies on Pacemaker's action timeout.
++    while ( pgsql_monitor() != $OCF_RUNNING_MASTER ) {
++        ocf_log( 'info', 'Waiting for the promote to complete' );
++        sleep 1;
++    }
++
++    ocf_log( 'info', 'Promote complete' );
++
++    return $OCF_SUCCESS;
++}
++
++# This action is called **before** the actual promotion when a failing master is
++# considered unreclaimable, recoverable or a new master must be promoted
++# (switchover or first start).
++# As every "notify" action, it is executed almost simultaneously on all
++# available nodes.
++sub pgsql_notify_pre_promote {
++    my $rc;
++    my $node_tl;
++    my $node_lsn;
++    my %cdata;
++    my %active_nodes;
++    my $attr_nodes;
++
++    ocf_log( 'info', 'Promoting instance on node "%s"',
++        $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} );
++
++    # No need to do an election between slaves if this is recovery of the master
++    if ( _is_master_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) {
++        ocf_log( 'warning', 'This is a master recovery!' );
++
++        _set_priv_attr( 'recover_master', '1' )
++            if $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename;
++
++        return $OCF_SUCCESS;
++    }
++
++    # Environment cleanup!
++    _delete_priv_attr( 'lsn_location'      );
++    _delete_priv_attr( 'recover_master'    );
++    _delete_priv_attr( 'nodes'             );
++    _delete_priv_attr( 'cancel_switchover' );
++
++    # check for the last received entry of WAL from the master if we are
++    # the designated slave to promote
++    if ( _is_switchover( $nodename ) and scalar
++         grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'promote'} }
++    ) {
++        $rc = _check_switchover();
++
++        unless ( $rc == 0 ) {
++            # Shortcut the election process as the switchover will be
++            # canceled
++            _set_priv_attr( 'cancel_switchover', $rc );
++            return $OCF_SUCCESS; # return code is ignored during notify
++        }
++
++        # If the sub keeps going, that means the switchover is safe.
++        # Keep going with the election process in case the switchover was
++        # instructed to the wrong node.
++        # FIXME: should we allow a switchover to a lagging slave?
++    }
++
++    # We need to trigger an election between existing slaves to promote the best
++    # one based on its current LSN location. Each node set a private attribute
++    # "lsn_location" with its TL and LSN location.
++    #
++    # During the following promote action, the designated standby for
++    # promotion uses these attributes to check if the instance to be promoted
++    # is the best one, so we can avoid a race condition between the last
++    # successful monitor on the previous master and the current promotion.
++
++    # As we can not break the transition from a notification action, we check
++    # during the promotion if each node TL and LSN are valid.
++
++    # Force a checkpoint to make sure the controldata shows the very last TL
++    _query( q{ CHECKPOINT }, {} );
++    %cdata    = _get_controldata();
++    $node_lsn = _get_last_received_lsn( 'in decimal' );
++
++    unless ( defined $node_lsn ) {
++        ocf_log( 'warning', 'Unknown current node LSN' );
++        # Return code are ignored during notifications...
++        return $OCF_SUCCESS;
++    }
++
++    $node_lsn = "$cdata{'tl'}#$node_lsn";
++
++    ocf_log( 'info', 'Current node TL#LSN: %s', $node_lsn );
++
++    # Set the "lsn_location" attribute value for this node so we can use it
++    # during the following "promote" action.
++    _set_priv_attr( 'lsn_location', $node_lsn );
++
++    ocf_log( 'warning', 'Could not set the current node LSN' )
++        if $? != 0 ;
++
++    # If this node is the future master, keep track of the slaves that
++    # received the same notification to compare our LSN with them during
++    # promotion
++    if ( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename ) {
++        # Build the list of active nodes:
++        #   master + slave + start - stop
++        # FIXME: Deal with rsc started during the same transaction but **after**
++        #        the promotion ?
++        $active_nodes{ $_->{'uname'} }++ foreach @{ $OCF_NOTIFY_ENV{'active'} },
++                                                 @{ $OCF_NOTIFY_ENV{'start'} };
++        $active_nodes{ $_->{'uname'} }-- foreach @{ $OCF_NOTIFY_ENV{'stop'} };
++
++        $attr_nodes = join " "
++            => grep { $active_nodes{$_} > 0 } keys %active_nodes;
++
++        _set_priv_attr( 'nodes', $attr_nodes );
++    }
++
++    return $OCF_SUCCESS;
++}
++
++# This action is called after a promote action.
++sub pgsql_notify_post_promote {
++
++    # We have a new master (or the previous one recovered).
++    # Environment cleanup!
++    _delete_priv_attr( 'lsn_location'      );
++    _delete_priv_attr( 'recover_master'    );
++    _delete_priv_attr( 'nodes'             );
++    _delete_priv_attr( 'cancel_switchover' );
++
++    return $OCF_SUCCESS;
++}
++
++# This is called before a demote occurs.
++sub pgsql_notify_pre_demote {
++    my $rc;
++    my %cdata;
++
++    # do nothing if the local node will not be demoted
++    return $OCF_SUCCESS unless scalar
++        grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'demote'} };
++
++    $rc = pgsql_monitor();
++
++    # do nothing if this is not a master recovery
++    return $OCF_SUCCESS unless _is_master_recover( $nodename )
++                           and $rc == $OCF_FAILED_MASTER;
++
++    # in case of master crash, we need to detect if the CRM tries to recover
++    # the master clone. The usual transition is to do:
++    #   demote->stop->start->promote
++    #
++    # There are multiple flaws with this transition:
++    #  * the 1st and 2nd actions will fail because the instance is in
++    #    OCF_FAILED_MASTER step
++    #  * the usual start action is dangerous as the instance will start with
++    #    a recovery.conf instead of entering a normal recovery process
++    #
++    # To avoid this, we try to start the instance in recovery from here.
++    # If it succeeds, at least it will be demoted correctly with a normal
++    # status. If it fails, it will be caught up in the next steps.
++
++    ocf_log( 'info', 'Trying to start failing master "%s"...',
++        $OCF_RESOURCE_INSTANCE );
++
++    # Either the instance managed to start or it couldn't.
++    # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't
++    # start, this error will be caught later during the various checks
++    _pg_ctl_start();
++
++    %cdata = _get_controldata();
++
++    ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
++
++    return $OCF_SUCCESS;
++}
++
++# This is called before a stop occurs.
++sub pgsql_notify_pre_stop {
++    my $rc;
++    my %cdata;
++
++    # do nothing if the local node will not be stopped
++    return $OCF_SUCCESS unless scalar
++        grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'stop'} };
++
++    $rc = _controldata_to_ocf();
++
++    # do nothing if this is not a slave recovery
++    return $OCF_SUCCESS unless _is_slave_recover( $nodename )
++                           and $rc == $OCF_RUNNING_SLAVE;
++
++    # in case of slave crash, we need to detect if the CRM tries to recover
++    # the slave clone. The usual transition is to do: stop->start
++    #
++    # This transition cannot work because the instance is in
++    # OCF_ERR_GENERIC step. So the stop action will fail, leading most
++    # probably to fencing action.
++    #
++    # To avoid this, we try to start the instance in recovery from here.
++    # If it succeeds, at least it will be stopped correctly with a normal
++    # status. If it fails, it will be caught up in the next steps.
++
++    ocf_log( 'info', 'Trying to start failing slave "%s"...',
++        $OCF_RESOURCE_INSTANCE );
++
++    # Either the instance managed to start or it couldn't.
++    # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't
++    # start, this error will be caught later during the various checks
++    _pg_ctl_start();
++
++    %cdata = _get_controldata();
++
++    ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
++
++    return $OCF_SUCCESS;
++}
++
++# Notify type actions, called on all available nodes before (pre) and after
++# (post) other actions, like promote, start, ...
++#
++sub pgsql_notify {
++    my $type_op;
++
++    ocf_log( 'debug', "pgsql_notify: environment variables: %s",
++        Data::Dumper->new( [ \%OCF_NOTIFY_ENV ] )->Sortkeys(1)->Terse(1)->Dump );
++
++    return unless %OCF_NOTIFY_ENV;
++
++    $type_op = "$OCF_NOTIFY_ENV{'type'}-$OCF_NOTIFY_ENV{'operation'}";
++
++    for ( $type_op ) {
++        if    ( /^pre-promote$/  ) { return pgsql_notify_pre_promote()  }
++        elsif ( /^post-promote$/ ) { return pgsql_notify_post_promote() }
++        elsif ( /^pre-demote$/   ) { return pgsql_notify_pre_demote()   }
++        elsif ( /^pre-stop$/     ) { return pgsql_notify_pre_stop()     }
++    }
++
++    return $OCF_SUCCESS;
++}
++
++# Action used to allow for online modification of resource parameters value.
++#
++sub pgsql_reload {
++
++    # No action necessary, the action declaration is enough to inform pacemaker
++    # that the modification of any non-unique parameter can be applied without
++    # having to restart the resource.
++    ocf_log( 'info', 'Instance "%s" reloaded', $OCF_RESOURCE_INSTANCE );
++    return $OCF_SUCCESS;
++
++}
++
++############################################################
++#### MAIN
++
++exit ocf_meta_data() if $__OCF_ACTION eq 'meta-data';
++exit ocf_methods()   if $__OCF_ACTION eq 'methods';
++
++# Avoid "could not change directory" when executing commands as "system-user".
++chdir File::Spec->tmpdir();
++
++# mandatory sanity checks
++# check pgdata
++if ( ! -d $pgdata ) {
++    ocf_exit_reason( 'PGDATA "%s" does not exist', $pgdata );
++    exit $OCF_ERR_ARGS;
++}
++
++# check datadir
++if ( ! -d $datadir ) {
++    ocf_exit_reason( 'data_directory "%s" does not exist', $datadir );
++    exit $OCF_ERR_ARGS;
++}
++
++# Set PostgreSQL version
++$PGVERNUM = _get_pg_version();
++
++# Set current node name.
++$nodename = ocf_local_nodename();
++
++$exit_code = pgsql_validate_all();
++
++exit $exit_code if $exit_code != $OCF_SUCCESS or $__OCF_ACTION eq 'validate-all';
++
++# Run action
++for ( $__OCF_ACTION ) {
++    if    ( /^start$/     ) { $exit_code = pgsql_start()   }
++    elsif ( /^stop$/      ) { $exit_code = pgsql_stop()    }
++    elsif ( /^monitor$/   ) { $exit_code = pgsql_monitor() }
++    elsif ( /^promote$/   ) { $exit_code = pgsql_promote() }
++    elsif ( /^demote$/    ) { $exit_code = pgsql_demote()  }
++    elsif ( /^notify$/    ) { $exit_code = pgsql_notify()  }
++    elsif ( /^reload$/    ) { $exit_code = pgsql_reload()  }
++    else  { $exit_code = $OCF_ERR_UNIMPLEMENTED }
++}
++
++exit $exit_code;
++
++
++=head1 EXAMPLE CRM SHELL
++
++The following is an example configuration for a pgsqlms resource using the
++crm(8) shell:
++
++  primitive pgsqld pgsqlms                                                 \
++    params pgdata="/var/lib/postgresql/9.6/main"                           \
++      bindir="/usr/lib/postgresql/9.6/bin"                                 \
++      pghost="/var/run/postgresql"                                         \
++      recovery_template="/etc/postgresql/9.6/main/recovery.conf.pcmk"      \
++      start_opts="-c config_file=/etc/postgresql/9.6/main/postgresql.conf" \
++    op start timeout=60s                                                   \
++    op stop timeout=60s                                                    \
++    op promote timeout=30s                                                 \
++    op demote timeout=120s                                                 \
++    op monitor interval=15s timeout=10s role="Master"                      \
++    op monitor interval=16s timeout=10s role="Slave"                       \
++    op notify timeout=60s
++
++  ms pgsql-ha pgsqld meta notify=true
++
++
++=head1 EXAMPLE PCS
++
++The following is an example configuration for a pgsqlms resource using pcs(8):
++
++  pcs resource create pgsqld ocf:heartbeat:pgsqlms            \
++    bindir=/usr/pgsql-9.6/bin pgdata=/var/lib/pgsql/9.6/data  \
++    op start timeout=60s                                      \
++    op stop timeout=60s                                       \
++    op promote timeout=30s                                    \
++    op demote timeout=120s                                    \
++    op monitor interval=15s timeout=10s role="Master"         \
++    op monitor interval=16s timeout=10s role="Slave"          \
++    op notify timeout=60s --master notify=true
++
++=head1 SEE ALSO
++
++http://clusterlabs.org/
++
++=head1 AUTHOR
++
++Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++=cut
+diff --color -uNr a/paf_LICENSE b/paf_LICENSE
+--- a/paf_LICENSE	1970-01-01 01:00:00.000000000 +0100
++++ b/paf_LICENSE	2021-04-14 09:16:39.083555835 +0200
+@@ -0,0 +1,19 @@
++Copyright (c) 2016-2020, Jehan-Guillaume de Rorthais, Mael Rimbault.
++
++Permission to use, copy, modify, and distribute this software and its
++documentation for any purpose, without fee, and without a written agreement
++is hereby granted, provided that the above copyright notice and this
++paragraph and the following two paragraphs appear in all copies.
++
++IN NO EVENT SHALL THE AUTHOR OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR
++DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
++LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
++DOCUMENTATION, EVEN IF THE AUTHOR OR DISTRIBUTORS HAVE BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGE.
++
++THE AUTHOR AND DISTRIBUTORS SPECIFICALLY DISCLAIMS ANY WARRANTIES,
++INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
++AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
++ON AN "AS IS" BASIS, AND THE AUTHOR AND DISTRIBUTORS HAS NO OBLIGATIONS TO
++PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
++
+diff --color -uNr a/paf_README.md b/paf_README.md
+--- a/paf_README.md	1970-01-01 01:00:00.000000000 +0100
++++ b/paf_README.md	2021-04-14 09:18:57.450968048 +0200
+@@ -0,0 +1,86 @@
++# PostgreSQL Automatic Failover
++
++High-Availability for Postgres, based on industry references Pacemaker and
++Corosync.
++
++## Description
++
++Pacemaker is nowadays the industry reference for High Availability. In the same
++fashion as Systemd, all Linux distributions moved (or are moving) to this
++unique Pacemaker+Corosync stack, removing all other existing high availability
++stacks (CMAN, RGManager, OpenAIS, ...). It is able to detect failure on various
++services and automatically decide to failover the failing resource to another
++node when possible.
++
++To be able to manage a specific service resource, Pacemaker interacts with it
++through a so-called "Resource Agent". Resource agents must comply with the OCF
++specification, which defines what they must implement (start, stop, promote,
++etc), how they should behave and inform Pacemaker of their results.
++
++PostgreSQL Automatic Failover is a new OCF resource Agent dedicated to
++PostgreSQL. Its original wish is to keep a clear limit between the Pacemaker
++administration and the PostgreSQL one, to keep things simple, documented and
++yet powerful.
++
++Once your PostgreSQL cluster built using internal streaming replication, PAF is
++able to expose to Pacemaker what is the current status of the PostgreSQL
++instance on each node: master, slave, stopped, catching up, etc. Should a
++failure occur on the master, Pacemaker will try to recover it by default.
++Should the failure be non-recoverable, PAF allows the slaves to be able to
++elect the best of them (the closest one to the old master) and promote it as
++the new master. All of this thanks to the robust, feature-full and most
++importantly experienced project: Pacemaker.
++
++For information about how to install this agent, see `INSTALL.md`.
++
++## Setup and requirements
++
++PAF supports PostgreSQL 9.3 and higher. It has been extensively tested under
++CentOS 6 and 7 in various scenarios.
++
++PAF has been written to give to the administrator the maximum control
++over their PostgreSQL configuration and architecture. Thus, you are 100%
++responsible for the master/slave creations and their setup. The agent
++will NOT edit your setup. It only requires you to follow these pre-requisites:
++
++  * slave __must__ be in hot_standby (accept read-only connections) ;
++  * the following parameters __must__ be configured in the appropriate place :
++    * `standby_mode = on` (for PostgreSQL 11 and before)
++    * `recovery_target_timeline = 'latest'`
++    * `primary_conninfo` with `application_name` set to the node name as seen
++      in Pacemaker.
++  * these last parameters have been merged inside the instance configuration
++    file with PostgreSQL 12. For PostgreSQL 11 and before, you __must__
++    provide a `recovery.conf` template file.
++
++When setting up the resource in Pacemaker, here are the available parameters you
++can set:
++
++  * `bindir`: location of the PostgreSQL binaries (default: `/usr/bin`)
++  * `pgdata`: location of the PGDATA of your instance (default:
++    `/var/lib/pgsql/data`)
++  * `datadir`: path to the directory set in `data_directory` from your
++    postgresql.conf file. This parameter has the same default as PostgreSQL
++    itself: the `pgdata` parameter value. Unless you have a special PostgreSQL
++    setup and you understand this parameter, __ignore it__
++  * `pghost`: the socket directory or IP address to use to connect to the
++    local instance (default: `/tmp` or `/var/run/postgresql` for DEBIAN)
++  * `pgport`:  the port to connect to the local instance (default: `5432`)
++  * `recovery_template`: __only__ for PostgreSQL 11 and before. The local 
++    template that will be copied as the `PGDATA/recovery.conf` file. This
++    file must not exist on any node for PostgreSQL 12 and after.
++    (default: `$PGDATA/recovery.conf.pcmk`)
++  * `start_opts`: Additional arguments given to the postgres process on startup.
++    See "postgres --help" for available options. Useful when the postgresql.conf
++    file is not in the data directory (PGDATA), eg.:
++    `-c config_file=/etc/postgresql/9.3/main/postgresql.conf`
++  * `system_user`: the system owner of your instance's process (default:
++    `postgres`)
++  * `maxlag`: maximum lag allowed on a standby before we set a negative master
++    score on it. The calculation is based on the difference between the current
++    xlog location on the master and the write location on the standby.
++    (default: 0, which disables this feature)
++
++For a demonstration about how to setup a cluster, see
++[http://clusterlabs.github.io/PAF/documentation.html](http://clusterlabs.github.io/PAF/documentation.html).
++
diff --git a/SOURCES/bz1991855-nfsserver-add-nfsconvert.patch b/SOURCES/bz1991855-nfsserver-add-nfsconvert.patch
new file mode 100644
index 0000000..e955c1a
--- /dev/null
+++ b/SOURCES/bz1991855-nfsserver-add-nfsconvert.patch
@@ -0,0 +1,429 @@
+From 6a1e619d46d7ff04b610eb9f6e20ed41ac23b0ab Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Thu, 19 Aug 2021 09:37:57 +0200
+Subject: [PATCH] tools: add nfsconvert for RHEL-based distros
+
+---
+ .gitignore                    |   1 +
+ configure.ac                  |   5 +
+ heartbeat/nfsserver-redhat.sh |   6 +
+ tools/Makefile.am             |   5 +
+ tools/nfsconvert.in           | 324 ++++++++++++++++++++++++++++++++++
+ 5 files changed, 341 insertions(+)
+ create mode 100644 tools/nfsconvert.in
+
+diff --git a/.gitignore b/.gitignore
+index f7277bf04e..ec30a3bb00 100644
+--- a/.gitignore
++++ b/.gitignore
+@@ -99,6 +99,7 @@ ldirectord/init.d/ldirectord.debian.default
+ ldirectord/systemd/ldirectord.service
+ systemd/resource-agents.conf
+ tools/findif
++tools/nfsconvert
+ tools/ocf-tester
+ tools/send_arp
+ tools/tickle_tcp
+diff --git a/configure.ac b/configure.ac
+index c125df98f6..058c0f1da7 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -787,11 +787,15 @@ case $host_os in
+      *Linux*|*linux*) sendarp_linux=1;;
+ esac
+ 
++redhat_based=0
++AC_CHECK_FILE(/etc/redhat-release, [redhat_based=1])
++
+ AC_SUBST(LIBNETLIBS)
+ AC_SUBST(LIBNETDEFINES)
+ 
+ AM_CONDITIONAL(SENDARP_LINUX, test $sendarp_linux = 1 )
+ AM_CONDITIONAL(USE_LIBNET, test "x$libnet_version" != "xnone" )
++AM_CONDITIONAL(NFSCONVERT, test $redhat_based = 1 )
+ 
+ dnl ************************************************************************
+ dnl * Check for netinet/icmp6.h to enable the IPv6addr resource agent
+@@ -932,6 +936,7 @@ heartbeat/Makefile						\
+ systemd/Makefile						\
+    systemd/resource-agents.conf					\
+ tools/Makefile							\
++   tools/nfsconvert						\
+    tools/ocf-tester						\
+    tools/ocft/Makefile						\
+    tools/ocft/ocft						\
+diff --git a/heartbeat/nfsserver-redhat.sh b/heartbeat/nfsserver-redhat.sh
+index 73a70c186c..aec0966050 100644
+--- a/heartbeat/nfsserver-redhat.sh
++++ b/heartbeat/nfsserver-redhat.sh
+@@ -1,6 +1,7 @@
+ NFS_SYSCONFIG="/etc/sysconfig/nfs"
+ NFS_SYSCONFIG_LOCAL_BACKUP="/etc/sysconfig/nfs.ha.bu"
+ NFS_SYSCONFIG_AUTOGEN_TAG="AUTOGENERATED by $0 high availability resource-agent"
++NFSCONVERT="$HA_BIN/nfsconvert"
+ 
+ nfsserver_redhat_meta_data() {
+ cat<<EOF
+@@ -168,4 +169,9 @@ set_env_args()
+ 		fi
+ 	fi
+ 	rm -f $tmpconfig
++
++	if [ -e "$NFSCONVERT" ]; then
++		ocf_log debug "Running $NFSCONVERT"
++		$NFSCONVERT
++	fi
+ }
+diff --git a/tools/Makefile.am b/tools/Makefile.am
+index 83ff43651d..1309223b48 100644
+--- a/tools/Makefile.am
++++ b/tools/Makefile.am
+@@ -31,6 +31,7 @@ sbin_PROGRAMS		=
+ sbin_PROGRAMS		=
+ sbin_SCRIPTS		= ocf-tester
+ halib_PROGRAMS		= findif
++halib_SCRIPTS		=
+ 
+ man8_MANS		= ocf-tester.8
+ 
+@@ -52,6 +53,10 @@ halib_PROGRAMS		+= send_arp
+ send_arp_SOURCES	= send_arp.linux.c
+ endif
+ 
++if NFSCONVERT
++halib_SCRIPTS		+= nfsconvert
++endif
++
+ endif
+ 
+ sfex_daemon_SOURCES	= sfex_daemon.c sfex.h sfex_lib.c sfex_lib.h
+diff --git a/tools/nfsconvert.in b/tools/nfsconvert.in
+new file mode 100644
+index 0000000000..c58a16a4e6
+--- /dev/null
++++ b/tools/nfsconvert.in
+@@ -0,0 +1,324 @@
++#!@PYTHON@ -tt
++"""
++Read in the deprecated /etc/sysconfig/nfs file and
++set the corresponding values in nfs.conf
++"""
++
++from __future__ import print_function
++import os
++import sys
++import getopt
++import subprocess
++import configparser
++
++CONF_NFS = '/etc/nfs.conf'
++CONF_IDMAP = '/etc/idmapd.conf'
++SYSCONF_NFS = '/etc/sysconfig/nfs'
++SYSCONF_BACKUP = ".rpmsave"
++CONF_TOOL = '/usr/sbin/nfsconf'
++
++# options for nfsd found in RPCNFSDARGS
++OPTS_NFSD = 'dH:p:rR:N:V:stTuUG:L:'
++LONG_NFSD = ['debug', 'host=', 'port=', 'rdma=', 'nfs-version=', 'no-nfs-version=',
++             'tcp', 'no-tcp', 'udp', 'no-udp', 'grace-time=', 'lease-time=']
++CONV_NFSD = {'-d': (CONF_NFS, 'nfsd', 'debug', 'all'),
++             '-H': (CONF_NFS, 'nfsd', 'host', ','),
++             '-p': (CONF_NFS, 'nfsd', 'port', '$1'),
++             '-r': (CONF_NFS, 'nfsd', 'rdma', 'nfsrdma'),
++             '-R': (CONF_NFS, 'nfsd', 'rdma', '$1'),
++             '-N': (CONF_NFS, 'nfsd', 'vers$1', 'n'),
++             '-V': (CONF_NFS, 'nfsd', 'vers$1', 'y'),
++             '-t': (CONF_NFS, 'nfsd', 'tcp', '1'),
++             '-T': (CONF_NFS, 'nfsd', 'tcp', '0'),
++             '-u': (CONF_NFS, 'nfsd', 'udp', '1'),
++             '-U': (CONF_NFS, 'nfsd', 'udp', '0'),
++             '-G': (CONF_NFS, 'nfsd', 'grace-time', '$1'),
++             '-L': (CONF_NFS, 'nfsd', 'lease-time', '$1'),
++             '$1': (CONF_NFS, 'nfsd', 'threads', '$1'),
++             '--debug': (CONF_NFS, 'nfsd', 'debug', 'all'),
++             '--host': (CONF_NFS, 'nfsd', 'host', ','),
++             '--port': (CONF_NFS, 'nfsd', 'port', '$1'),
++             '--rdma': (CONF_NFS, 'nfsd', 'rdma', '$1'),
++             '--no-nfs-version': (CONF_NFS, 'nfsd', 'vers$1', 'n'),
++             '--nfs-version': (CONF_NFS, 'nfsd', 'vers$1', 'y'),
++             '--tcp': (CONF_NFS, 'nfsd', 'tcp', '1'),
++             '--no-tcp': (CONF_NFS, 'nfsd', 'tcp', '0'),
++             '--udp': (CONF_NFS, 'nfsd', 'udp', '1'),
++             '--no-udp': (CONF_NFS, 'nfsd', 'udp', '0'),
++             '--grace-time': (CONF_NFS, 'nfsd', 'grace-time', '$1'),
++             '--lease-time': (CONF_NFS, 'nfsd', 'lease-time', '$1'),
++            }
++
++# options for mountd found in RPCMOUNTDOPTS
++OPTS_MOUNTD = 'go:d:H:p:N:nrs:t:V:'
++LONG_MOUNTD = ['descriptors=', 'debug=', 'nfs-version=', 'no-nfs-version=',
++               'port=', 'no-tcp', 'ha-callout=', 'state-directory-path=',
++               'num-threads=', 'reverse-lookup', 'manage-gids', 'no-udp']
++
++CONV_MOUNTD = {'-g': (CONF_NFS, 'mountd', 'manage-gids', '1'),
++               '-o': (CONF_NFS, 'mountd', 'descriptors', '$1'),
++               '-d': (CONF_NFS, 'mountd', 'debug', '$1'),
++               '-H': (CONF_NFS, 'mountd', 'ha-callout', '$1'),
++               '-p': (CONF_NFS, 'mountd', 'port', '$1'),
++               '-N': (CONF_NFS, 'nfsd', 'vers$1', 'n'),
++               '-V': (CONF_NFS, 'nfsd', 'vers$1', 'y'),
++               '-n': (CONF_NFS, 'nfsd', 'tcp', '0'),
++               '-s': (CONF_NFS, 'mountd', 'stat-directory-path', '$1'),
++               '-t': (CONF_NFS, 'mountd', 'threads', '$1'),
++               '-r': (CONF_NFS, 'mountd', 'reverse-lookup', '1'),
++               '-u': (CONF_NFS, 'nfsd', 'udp', '0'),
++               '--manage-gids': (CONF_NFS, 'mountd', 'manage-gids', '1'),
++               '--descriptors': (CONF_NFS, 'mountd', 'descriptors', '$1'),
++               '--debug': (CONF_NFS, 'mountd', 'debug', '$1'),
++               '--ha-callout': (CONF_NFS, 'mountd', 'ha-callout', '$1'),
++               '--port': (CONF_NFS, 'mountd', 'port', '$1'),
++               '--nfs-version': (CONF_NFS, 'nfsd', 'vers$1', 'y'),
++               '--no-nfs-version': (CONF_NFS, 'nfsd', 'vers$1', 'n'),
++               '--no-tcp': (CONF_NFS, 'nfsd', 'tcp', '0'),
++               '--state-directory-path': (CONF_NFS, 'mountd', 'state-directory-path', '$1'),
++               '--num-threads': (CONF_NFS, 'mountd', 'threads', '$1'),
++               '--reverse-lookup': (CONF_NFS, 'mountd', 'reverse-lookup', '1'),
++               '--no-udp': (CONF_NFS, 'nfsd', 'udp', '0'),
++              }
++
++# options for statd found in STATDARG
++OPTS_STATD = 'o:p:T:U:n:P:H:L'
++LONG_STATD = ['outgoing-port=', 'port=', 'name=', 'state-directory-path=',
++              'ha-callout=', 'nlm-port=', 'nlm-udp-port=', 'no-notify']
++CONV_STATD = {'-o': (CONF_NFS, 'statd', 'outgoing-port', '$1'),
++              '-p': (CONF_NFS, 'statd', 'port', '$1'),
++              '-T': (CONF_NFS, 'lockd', 'port', '$1'),
++              '-U': (CONF_NFS, 'lockd', 'udp-port', '$1'),
++              '-n': (CONF_NFS, 'statd', 'name', '$1'),
++              '-P': (CONF_NFS, 'statd', 'state-directory-path', '$1'),
++              '-H': (CONF_NFS, 'statd', 'ha-callout', '$1'),
++              '-L': (CONF_NFS, 'statd', 'no-notify', '1'),
++              '--outgoing-port': (CONF_NFS, 'statd', 'outgoing-port', '$1'),
++              '--port': (CONF_NFS, 'statd', 'port', '$1'),
++              '--name': (CONF_NFS, 'statd', 'name', '$1'),
++              '--state-directory-path': (CONF_NFS, 'statd', 'state-directory-path', '$1'),
++              '--ha-callout': (CONF_NFS, 'statd', 'ha-callout', '$1'),
++              '--nlm-port': (CONF_NFS, 'lockd', 'port', '$1'),
++              '--nlm-udp-port': (CONF_NFS, 'lockd', 'udp-port', '$1'),
++              '--no-notify': (CONF_NFS, 'statd', 'no-notify', '1'),
++             }
++
++# options for sm-notify found in SMNOTIFYARGS
++OPTS_SMNOTIFY = 'dm:np:v:P:f'
++CONV_SMNOTIFY = {'-d': (CONF_NFS, 'sm-notify', 'debug', 'all'),
++                 '-m': (CONF_NFS, 'sm-notify', 'retry-time', '$1'),
++                 '-n': (CONF_NFS, 'sm-notify', 'update-state', '1'),
++                 '-p': (CONF_NFS, 'sm-notify', 'outgoing-port', '$1'),
++                 '-v': (CONF_NFS, 'sm-notify', 'outgoing-addr', '$1'),
++                 '-f': (CONF_NFS, 'sm-notify', 'force', '1'),
++                 '-P': (CONF_NFS, 'statd', 'state-directory-path', '$1'),
++                }
++
++# options for idmapd found in RPCIDMAPDARGS
++OPTS_IDMAPD = 'vp:CS'
++CONV_IDMAPD = {'-v': (CONF_IDMAP, 'general', 'verbosity', '+'),
++               '-p': (CONF_NFS, 'general', 'pipefs-directory', '$1'),
++               '-C': (CONF_IDMAP, 'general', 'client-only', '1'),
++               '-S': (CONF_IDMAP, 'general', 'server-only', '1'),
++              }
++
++# options for gssd found in RPCGSSDARGS
++OPTS_GSSD = 'Mnvrp:k:d:t:T:R:lD'
++CONV_GSSD = {'-M': (CONF_NFS, 'gssd', 'use-memcache', '1'),
++             '-n': (CONF_NFS, 'gssd', 'root_uses_machine_creds', '0'),
++             '-v': (CONF_NFS, 'gssd', 'verbosity', '+'),
++             '-r': (CONF_NFS, 'gssd', 'rpc-verbosity', '+'),
++             '-p': (CONF_NFS, 'general', 'pipefs-directory', '$1'),
++             '-k': (CONF_NFS, 'gssd', 'keytab-file', '$1'),
++             '-d': (CONF_NFS, 'gssd', 'cred-cache-directory', '$1'),
++             '-t': (CONF_NFS, 'gssd', 'context-timeout', '$1'),
++             '-T': (CONF_NFS, 'gssd', 'rpc-timeout', '$1'),
++             '-R': (CONF_NFS, 'gssd', 'preferred-realm', '$1'),
++             '-l': (CONF_NFS, 'gssd', 'limit-to-legacy-enctypes', '0'),
++             '-D': (CONF_NFS, 'gssd', 'avoid-dns', '0'),
++            }
++
++# options for blkmapd found in BLKMAPDARGS
++OPTS_BLKMAPD = ''
++CONV_BLKMAPD = {}
++
++# meta list of all the getopt lists
++GETOPT_MAPS = [('RPCNFSDARGS', OPTS_NFSD, LONG_NFSD, CONV_NFSD),
++               ('RPCMOUNTDOPTS', OPTS_MOUNTD, LONG_MOUNTD, CONV_MOUNTD),
++               ('STATDARG', OPTS_STATD, LONG_STATD, CONV_STATD),
++               ('STATDARGS', OPTS_STATD, LONG_STATD, CONV_STATD),
++               ('SMNOTIFYARGS', OPTS_SMNOTIFY, [], CONV_SMNOTIFY),
++               ('RPCIDMAPDARGS', OPTS_IDMAPD, [], CONV_IDMAPD),
++               ('RPCGSSDARGS', OPTS_GSSD, [], CONV_GSSD),
++               ('BLKMAPDARGS', OPTS_BLKMAPD, [], CONV_BLKMAPD),
++              ]
++
++# any fixups we need to apply first
++GETOPT_FIXUP = {'RPCNFSDARGS': ('--rdma', '--rdma=nfsrdma'),
++               }
++
++# map for all of the single option values
++VALUE_MAPS = {'LOCKD_TCPPORT': (CONF_NFS, 'lockd', 'port', '$1'),
++              'LOCKD_UDPPORT': (CONF_NFS, 'lockd', 'udp-port', '$1'),
++              'RPCNFSDCOUNT': (CONF_NFS, 'nfsd', 'threads', '$1'),
++              'NFSD_V4_GRACE': (CONF_NFS, 'nfsd', 'grace-time', '$1'),
++              'NFSD_V4_LEASE': (CONF_NFS, 'nfsd', 'lease-time', '$1'),
++              'MOUNTD_PORT': (CONF_NFS, 'mountd', 'port', '$1'),
++              'STATD_PORT': (CONF_NFS, 'statd', 'port', '$1'),
++              'STATD_OUTGOING_PORT': (CONF_NFS, 'statd', 'outgoing-port', '$1'),
++              'STATD_HA_CALLOUT': (CONF_NFS, 'statd', 'ha-callout', '$1'),
++              'GSS_USE_PROXY': (CONF_NFS, 'gssd', 'use-gss-proxy', '$1')
++             }
++
++def eprint(*args, **kwargs):
++    """ Print error to stderr """
++    print(*args, file=sys.stderr, **kwargs)
++
++def makesub(param, value):
++    """ Variable substitution """
++    return param.replace('$1', value)
++
++def set_value(value, entry):
++    """ Set a configuration value by running nfsconf tool"""
++    cfile, section, tag, param = entry
++
++    tag = makesub(tag, value)
++    param = makesub(param, value)
++    if param == '+':
++        param = value
++    if param == ',':
++        param = value
++    args = [CONF_TOOL, "--file", cfile, "--set", section, tag, param]
++
++    try:
++        subprocess.check_output(args, stderr=subprocess.STDOUT)
++    except subprocess.CalledProcessError as e:
++        print("Error running nfs-conf tool:\n %s" % (e.output.decode()))
++        print("Args: %s\n" % args)
++        raise Exception
++
++def convert_getopt(optname, options, optstring, longopts, conversions):
++    """ Parse option string into separate config items
++
++        Take a getopt string and a table of conversions
++        parse it all and spit out the converted config
++
++        Keyword arguments:
++        options -- the argv string to convert
++        optstring --  getopt format option list
++        conversions -- table of translations
++    """
++    optcount = 0
++    try:
++        args = options.strip('\"').split()
++        if optname in GETOPT_FIXUP:
++            (k, v) = GETOPT_FIXUP[optname]
++            for i, opt in enumerate(args):
++                if opt == k:
++                    args[i] = v
++                elif opt == '--':
++                    break
++        optlist, optargs = getopt.gnu_getopt(args, optstring, longopts=longopts)
++    except getopt.GetoptError as err:
++        eprint(err)
++        raise Exception
++
++    setlist = {}
++    for (k, v) in optlist:
++        if k in conversions:
++            # it's already been set once
++            param = conversions[k][3]
++            tag = k + makesub(conversions[k][2], v)
++            if tag in setlist:
++                value = setlist[tag][0]
++                # is it a cumulative entry
++                if param == '+':
++                    value = str(int(value) + 1)
++                if param == ',':
++                    value += "," + v
++            else:
++                if param == '+':
++                    value = "1"
++                elif param == ',':
++                    value = v
++                else:
++                    value = v
++            setlist[tag] = (value, conversions[k])
++        else:
++            if v:
++                eprint("Ignoring unrecognised option %s=%s in %s" % (k, v, optname))
++            else:
++                eprint("Ignoring unrecognised option %s in %s" % (k, optname))
++
++
++    for v, c in setlist.values():
++        try:
++            set_value(v, c)
++            optcount += 1
++        except Exception:
++            raise
++
++    i = 1
++    for o in optargs:
++        opname = '$' + str(i)
++        if opname in conversions:
++            try:
++                set_value(o, conversions[opname])
++                optcount += 1
++            except Exception:
++                raise
++        else:
++            eprint("Unrecognised trailing arguments")
++            raise Exception
++        i += 1
++
++    return optcount
++
++def map_values():
++    """ Main function """
++    mapcount = 0
++
++    # Lets load the old config
++    with open(SYSCONF_NFS) as cfile:
++        file_content = '[sysconf]\n' + cfile.read()
++    sysconfig = configparser.RawConfigParser()
++    sysconfig.read_string(file_content)
++
++    # Map all the getopt option lists
++    for (name, opts, lopts, conv) in GETOPT_MAPS:
++        if name in sysconfig['sysconf']:
++            try:
++                mapcount += convert_getopt(name, sysconfig['sysconf'][name], opts,
++                                           lopts, conv)
++            except Exception:
++                eprint("Error whilst converting %s to nfsconf options." % (name))
++                raise
++
++    # Map the single value options
++    for name, opts in VALUE_MAPS.items():
++        if name in sysconfig['sysconf']:
++            try:
++                value = sysconfig['sysconf'][name]
++                set_value(value.strip('\"'), opts)
++                mapcount += 1
++            except Exception:
++                raise
++
++    # All went well, move aside the old file
++    # but don't bother if there were no changes and
++    # an old config file already exists
++    backupfile = SYSCONF_NFS + SYSCONF_BACKUP
++    if mapcount > 0 or not os.path.exists(backupfile):
++        try:
++            os.replace(SYSCONF_NFS, backupfile)
++        except OSError as err:
++            eprint("Error moving old config %s: %s" % (SYSCONF_NFS, err))
++            raise
++
++# Main routine
++try:
++    map_values()
++except Exception as e:
++    eprint(e)
++    eprint("Conversion failed. Please correct the error and try again.")
++    exit(1)
diff --git a/SOURCES/bz1998039-nfsnotify-fix-notify_args-default.patch b/SOURCES/bz1998039-nfsnotify-fix-notify_args-default.patch
new file mode 100644
index 0000000..75ca836
--- /dev/null
+++ b/SOURCES/bz1998039-nfsnotify-fix-notify_args-default.patch
@@ -0,0 +1,22 @@
+From 1c037b3ac0288509fb2b74fb4a661a504155da15 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen <oalbrigt@redhat.com>
+Date: Thu, 26 Aug 2021 12:27:50 +0200
+Subject: [PATCH] nfsnotify: fix default value for "notify_args"
+
+---
+ heartbeat/nfsnotify.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/nfsnotify.in b/heartbeat/nfsnotify.in
+index 851f6ad6b4..fe6d2793ba 100644
+--- a/heartbeat/nfsnotify.in
++++ b/heartbeat/nfsnotify.in
+@@ -33,7 +33,7 @@
+ # Parameter defaults
+ 
+ OCF_RESKEY_source_host_default=""
+-OCF_RESKEY_notify_args_default="false"
++OCF_RESKEY_notify_args_default=""
+ 
+ : ${OCF_RESKEY_source_host=${OCF_RESKEY_source_host_default}}
+ : ${OCF_RESKEY_notify_args=${OCF_RESKEY_notify_args_default}}
diff --git a/SOURCES/ha-cloud-support-aliyun.patch b/SOURCES/ha-cloud-support-aliyun.patch
new file mode 100644
index 0000000..93d78aa
--- /dev/null
+++ b/SOURCES/ha-cloud-support-aliyun.patch
@@ -0,0 +1,12 @@
+diff --color -uNr a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
+--- a/heartbeat/aliyun-vpc-move-ip	2021-08-19 09:37:57.000000000 +0200
++++ b/heartbeat/aliyun-vpc-move-ip	2021-08-25 13:38:26.786626079 +0200
+@@ -17,7 +17,7 @@
+ OCF_RESKEY_interface_default="eth0"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_endpoint_default="vpc.aliyuncs.com"
+-OCF_RESKEY_aliyuncli_default="detect"
++OCF_RESKEY_aliyuncli_default="/usr/lib/fence-agents/support/aliyun/bin/aliyuncli"
+ 
+ 
+ : ${OCF_RESKEY_address=${OCF_RESKEY_address_default}}
diff --git a/SOURCES/ha-cloud-support-aws.patch b/SOURCES/ha-cloud-support-aws.patch
new file mode 100644
index 0000000..d858d5a
--- /dev/null
+++ b/SOURCES/ha-cloud-support-aws.patch
@@ -0,0 +1,48 @@
+diff --color -uNr a/heartbeat/awseip b/heartbeat/awseip
+--- a/heartbeat/awseip	2020-12-03 14:31:17.000000000 +0100
++++ b/heartbeat/awseip	2021-02-15 16:47:36.624610378 +0100
+@@ -43,7 +43,7 @@
+ #
+ # Defaults
+ #
+-OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/aws/bin/aws"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_api_delay_default="3"
+ 
+diff --color -uNr a/heartbeat/awsvip b/heartbeat/awsvip
+--- a/heartbeat/awsvip	2020-12-03 14:31:17.000000000 +0100
++++ b/heartbeat/awsvip	2021-02-15 16:47:48.960632484 +0100
+@@ -42,7 +42,7 @@
+ #
+ # Defaults
+ #
+-OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/aws/bin/aws"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_api_delay_default="3"
+ 
+diff --color -uNr a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+--- a/heartbeat/aws-vpc-move-ip	2020-12-03 14:31:17.000000000 +0100
++++ b/heartbeat/aws-vpc-move-ip	2021-02-15 16:47:55.484644118 +0100
+@@ -35,7 +35,7 @@
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+ 
+ # Defaults
+-OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/aws/bin/aws"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_region_default=""
+ OCF_RESKEY_ip_default=""
+diff --color -uNr a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
+--- a/heartbeat/aws-vpc-route53.in	2020-12-03 14:31:17.000000000 +0100
++++ b/heartbeat/aws-vpc-route53.in	2021-02-15 16:47:59.808651828 +0100
+@@ -45,7 +45,7 @@
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+ 
+ # Defaults
+-OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/aws/bin/aws"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_hostedzoneid_default=""
+ OCF_RESKEY_fullname_default=""
diff --git a/SOURCES/ha-cloud-support-gcloud.patch b/SOURCES/ha-cloud-support-gcloud.patch
new file mode 100644
index 0000000..95b0d7a
--- /dev/null
+++ b/SOURCES/ha-cloud-support-gcloud.patch
@@ -0,0 +1,33 @@
+diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
+--- a/heartbeat/gcp-pd-move.in	2021-08-19 09:37:57.000000000 +0200
++++ b/heartbeat/gcp-pd-move.in	2021-08-25 13:50:54.461732967 +0200
+@@ -32,6 +32,7 @@
+ from ocf import logger
+ 
+ try:
++  sys.path.insert(0, '/usr/lib/fence-agents/support/google')
+   import googleapiclient.discovery
+ except ImportError:
+   pass
+diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+--- a/heartbeat/gcp-vpc-move-route.in	2021-08-19 09:37:57.000000000 +0200
++++ b/heartbeat/gcp-vpc-move-route.in	2021-08-25 13:51:17.489797999 +0200
+@@ -45,6 +45,7 @@
+ from ocf import *
+ 
+ try:
++  sys.path.insert(0, '/usr/lib/fence-agents/support/google')
+   import googleapiclient.discovery
+   import pyroute2
+   try:
+diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+--- a/heartbeat/gcp-vpc-move-vip.in	2021-08-19 09:37:57.000000000 +0200
++++ b/heartbeat/gcp-vpc-move-vip.in	2021-08-25 13:51:35.012847487 +0200
+@@ -29,6 +29,7 @@
+ from ocf import *
+ 
+ try:
++  sys.path.insert(0, '/usr/lib/fence-agents/support/google')
+   import googleapiclient.discovery
+   try:
+     from google.oauth2.service_account import Credentials as ServiceAccountCredentials
diff --git a/SOURCES/nova-compute-wait-NovaEvacuate.patch b/SOURCES/nova-compute-wait-NovaEvacuate.patch
new file mode 100644
index 0000000..0e7b605
--- /dev/null
+++ b/SOURCES/nova-compute-wait-NovaEvacuate.patch
@@ -0,0 +1,787 @@
+diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
+--- a/doc/man/Makefile.am	2021-08-25 09:51:53.037906134 +0200
++++ b/doc/man/Makefile.am	2021-08-25 09:48:44.578408475 +0200
+@@ -97,6 +97,8 @@
+                           ocf_heartbeat_ManageRAID.7 \
+                           ocf_heartbeat_ManageVE.7 \
+                           ocf_heartbeat_NodeUtilization.7 \
++                          ocf_heartbeat_nova-compute-wait.7 \
++                          ocf_heartbeat_NovaEvacuate.7 \
+                           ocf_heartbeat_Pure-FTPd.7 \
+                           ocf_heartbeat_Raid1.7 \
+                           ocf_heartbeat_Route.7 \
+diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+--- a/heartbeat/Makefile.am	2021-08-25 09:51:53.038906137 +0200
++++ b/heartbeat/Makefile.am	2021-08-25 09:48:44.588408501 +0200
+@@ -29,6 +29,8 @@
+ 
+ ocfdir		        = $(OCF_RA_DIR_PREFIX)/heartbeat
+ 
++ospdir			= $(OCF_RA_DIR_PREFIX)/openstack
++
+ dtddir			= $(datadir)/$(PACKAGE_NAME)
+ dtd_DATA		= ra-api-1.dtd metadata.rng
+ 
+@@ -50,6 +52,9 @@
+ send_ua_SOURCES         = send_ua.c IPv6addr_utils.c
+ send_ua_LDADD           = $(LIBNETLIBS)
+ 
++osp_SCRIPTS	     =  nova-compute-wait	\
++			NovaEvacuate
++
+ ocf_SCRIPTS	      = AoEtarget		\
+ 			AudibleAlarm		\
+ 			ClusterMon		\
+diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
+--- a/heartbeat/nova-compute-wait	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/nova-compute-wait	2021-08-25 09:50:14.626646141 +0200
+@@ -0,0 +1,345 @@
++#!/bin/sh
++#
++#
++# nova-compute-wait agent manages compute daemons.
++#
++# Copyright (c) 2015
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of version 2 of the GNU General Public License as
++# published by the Free Software Foundation.
++#
++# This program is distributed in the hope that it would be useful, but
++# WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
++#
++# Further, this software is distributed without any warranty that it is
++# free of the rightful claim of any third person regarding infringement
++# or the like.  Any license provided herein, whether implied or
++# otherwise, applies only to this software file.  Patent licenses, if
++# any, provided herein do not apply to combinations of this program with
++# other software, or any other product whatsoever.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write the Free Software Foundation,
++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++#
++
++#######################################################################
++# Initialization:
++
++
++###
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++###
++
++: ${__OCF_ACTION=$1}
++
++#######################################################################
++
++meta_data() {
++	cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="nova-compute-wait" version="1.0">
++<version>1.0</version>
++
++<longdesc lang="en">
++OpenStack Nova Compute Server.
++</longdesc>
++<shortdesc lang="en">OpenStack Nova Compute Server</shortdesc>
++
++<parameters>
++
++<parameter name="auth_url" unique="0" required="1">
++<longdesc lang="en">
++Deprecated option not in use
++</longdesc>
++<shortdesc lang="en">Deprecated</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="username" unique="0" required="1">
++<longdesc lang="en">
++Deprecated option not in use
++</longdesc>
++<shortdesc lang="en">Deprecated</shortdesc>
++</parameter>
++
++<parameter name="password" unique="0" required="1">
++<longdesc lang="en">
++Deprecated option not in use
++</longdesc>
++<shortdesc lang="en">Deprecated</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="tenant_name" unique="0" required="1">
++<longdesc lang="en">
++Deprecated option not in use
++</longdesc>
++<shortdesc lang="en">Deprecated</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="domain" unique="0" required="0">
++<longdesc lang="en">
++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
++</longdesc>
++<shortdesc lang="en">DNS domain</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="endpoint_type" unique="0" required="0">
++<longdesc lang="en">
++Deprecated option not in use
++</longdesc>
++<shortdesc lang="en">Deprecated</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="no_shared_storage" unique="0" required="0">
++<longdesc lang="en">
++Deprecated option not in use
++</longdesc>
++<shortdesc lang="en">Deprecated</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
++<parameter name="evacuation_delay" unique="0" required="0">
++<longdesc lang="en">
++How long to wait for nova to finish evacuating instances elsewhere
++before starting nova-compute.  Only used when the agent detects
++evacuations might be in progress.
++
++You may need to increase the start timeout when increasing this value.
++</longdesc>
++<shortdesc lang="en">Delay to allow evacuations time to complete</shortdesc>
++<content type="integer" default="120" />
++</parameter>
++
++</parameters>
++
++<actions>
++<action name="start"        timeout="600" />
++<action name="stop"         timeout="300" />
++<action name="monitor"      timeout="20" interval="10" depth="0"/>
++<action name="validate-all" timeout="20" />
++<action name="meta-data"    timeout="5" />
++</actions>
++</resource-agent>
++END
++}
++
++#######################################################################
++
++# don't exit on TERM, to test that lrmd makes sure that we do exit
++trap sigterm_handler TERM
++sigterm_handler() {
++	ocf_log info "They use TERM to bring us down. No such luck."
++	return
++}
++
++nova_usage() {
++	cat <<END
++usage: $0 {start|stop|monitor|validate-all|meta-data}
++
++Expects to have a fully populated OCF RA-compliant environment set.
++END
++}
++
++nova_start() {
++    build_unfence_overlay
++
++    state=$(attrd_updater -p -n evacuate -N ${NOVA_HOST} | sed -e 's/.*value=//' | tr -d '"' )
++    if [ "x$state" = x ]; then
++	: never been fenced
++
++    elif [ "x$state" = xno ]; then
++	: has been evacuated, however it could have been 1s ago
++	ocf_log info "Pausing to give evacuations from ${NOVA_HOST} time to complete"
++	sleep ${OCF_RESKEY_evacuation_delay}
++
++    else
++	while [ "x$state" != "xno" ]; do
++	    ocf_log info "Waiting for pending evacuations from ${NOVA_HOST}"
++	    state=$(attrd_updater -p -n evacuate -N ${NOVA_HOST} | sed -e 's/.*value=//' | tr -d '"' )
++	    sleep 5
++	done
++
++	ocf_log info "Pausing to give evacuations from ${NOVA_HOST} time to complete"
++	sleep ${OCF_RESKEY_evacuation_delay}
++    fi
++
++    touch "$statefile"
++
++    return $OCF_SUCCESS
++}
++
++nova_stop() {
++    rm -f "$statefile"
++    return $OCF_SUCCESS
++}
++
++nova_monitor() {
++    if [ ! -f "$statefile" ]; then
++        return $OCF_NOT_RUNNING
++    fi
++
++    return $OCF_SUCCESS
++}
++
++nova_notify() {
++    return $OCF_SUCCESS
++}
++
++build_unfence_overlay() {
++    fence_options=""
++
++    if [ -z "${OCF_RESKEY_auth_url}" ]; then
++	candidates=$(/usr/sbin/stonith_admin -l ${NOVA_HOST})
++	for candidate in ${candidates}; do
++	    pcs stonith show $candidate | grep -q fence_compute  # was "$d" (undefined var): matched wrong output
++	    if [ $? = 0 ]; then
++		ocf_log info "Unfencing nova based on: $candidate"
++		fence_auth=$(pcs stonith show $candidate | grep Attributes: | sed -e s/Attributes:// -e s/-/_/g -e 's/[^ ]\+=/OCF_RESKEY_\0/g' -e s/passwd/password/g)
++		eval "export $fence_auth"
++		break
++	    fi
++	done
++    fi    
++
++    # Copied from NovaEvacuate 
++    if [ -z "${OCF_RESKEY_auth_url}" ]; then
++        ocf_exit_reason "auth_url not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
++
++    if [ -z "${OCF_RESKEY_username}" ]; then
++        ocf_exit_reason "username not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -l ${OCF_RESKEY_username}"
++
++    if [ -z "${OCF_RESKEY_password}" ]; then
++        ocf_exit_reason "password not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -p ${OCF_RESKEY_password}"
++
++    if [ -z "${OCF_RESKEY_tenant_name}" ]; then
++        ocf_exit_reason "tenant_name not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
++
++    if [ -n "${OCF_RESKEY_domain}" ]; then
++        fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
++    fi
++
++    if [ -n "${OCF_RESKEY_region_name}" ]; then
++        fence_options="${fence_options} \
++            --region-name ${OCF_RESKEY_region_name}"
++    fi
++
++    if [ -n "${OCF_RESKEY_insecure}" ]; then
++        if ocf_is_true "${OCF_RESKEY_insecure}"; then
++            fence_options="${fence_options} --insecure"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
++        if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
++            fence_options="${fence_options} --no-shared-storage"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
++        case ${OCF_RESKEY_endpoint_type} in
++            adminURL|publicURL|internalURL)
++                ;;
++            *)
++                ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type}" \
++                    "not valid. Use adminURL or publicURL or internalURL"
++                exit $OCF_ERR_CONFIGURED
++                ;;
++        esac
++        fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
++    fi
++
++    mkdir -p /run/systemd/system/openstack-nova-compute.service.d
++    cat<<EOF>/run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf
++[Service]
++ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST}
++EOF
++}
++
++nova_validate() {
++    rc=$OCF_SUCCESS
++
++    check_binary crudini
++    check_binary nova-compute
++    check_binary fence_compute
++
++    if [ ! -f /etc/nova/nova.conf ]; then
++	   ocf_exit_reason "/etc/nova/nova.conf not found"
++	   exit $OCF_ERR_CONFIGURED
++    fi
++
++    # Is the state directory writable?
++    state_dir=$(dirname $statefile)
++    touch "$state_dir/$$"
++    if [ $? != 0 ]; then
++        ocf_exit_reason "Invalid state directory: $state_dir"
++        return $OCF_ERR_ARGS
++    fi
++    rm -f "$state_dir/$$"
++
++    NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null)
++    if [ $? = 1 ]; then
++        short_host=$(uname -n | awk -F. '{print $1}')
++        if [ "x${OCF_RESKEY_domain}" != x ]; then
++            NOVA_HOST=${short_host}.${OCF_RESKEY_domain}
++        else
++            NOVA_HOST=$(uname -n)
++        fi
++    fi
++
++    if [ $rc != $OCF_SUCCESS ]; then
++	exit $rc
++    fi
++    return $rc
++}
++
++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
++
++: ${OCF_RESKEY_evacuation_delay=120}
++case $__OCF_ACTION in
++meta-data)	meta_data
++		exit $OCF_SUCCESS
++		;;
++usage|help)	nova_usage
++		exit $OCF_SUCCESS
++		;;
++esac
++
++case $__OCF_ACTION in
++start)		nova_validate; nova_start;;
++stop)		nova_stop;;
++monitor)	nova_validate; nova_monitor;;
++notify)		nova_notify;;
++validate-all)	exit $OCF_SUCCESS;;
++*)		nova_usage
++		exit $OCF_ERR_UNIMPLEMENTED
++		;;
++esac
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
++
+diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+--- a/heartbeat/NovaEvacuate	1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/NovaEvacuate	2021-08-25 09:50:23.780670326 +0200
+@@ -0,0 +1,400 @@
++#!/bin/bash
++#
++# Copyright 2015 Red Hat, Inc.
++#
++# Description:  Manages evacuation of nodes running nova-compute
++#
++# Authors: Andrew Beekhof
++#
++# Support:      openstack@lists.openstack.org
++# License:      Apache Software License (ASL) 2.0
++#
++
++
++#######################################################################
++# Initialization:
++
++###
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++###
++
++: ${__OCF_ACTION=$1}
++
++#######################################################################
++
++meta_data() {
++    cat <<END
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="NovaEvacuate" version="1.0">
++<version>1.0</version>
++
++<longdesc lang="en">
++Facility for taking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged.
++</longdesc>
++<shortdesc lang="en">Evacuator for OpenStack Nova Compute Server</shortdesc>
++
++<parameters>
++
++<parameter name="auth_url" unique="0" required="1">
++<longdesc lang="en">
++Authorization URL for connecting to keystone in admin context
++</longdesc>
++<shortdesc lang="en">Authorization URL</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="username" unique="0" required="1">
++<longdesc lang="en">
++Username for connecting to keystone in admin context
++</longdesc>
++<shortdesc lang="en">Username</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="password" unique="0" required="1">
++<longdesc lang="en">
++Password for connecting to keystone in admin context
++</longdesc>
++<shortdesc lang="en">Password</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="tenant_name" unique="0" required="1">
++<longdesc lang="en">
++Tenant name for connecting to keystone in admin context.
++Note that with Keystone V3 tenant names are only unique within a domain.
++</longdesc>
++<shortdesc lang="en">Tenant name</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="domain" unique="0" required="0">
++<longdesc lang="en">
++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
++</longdesc>
++<shortdesc lang="en">DNS domain</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="endpoint_type" unique="0" required="0">
++<longdesc lang="en">
++Nova API location (internal, public or admin URL)
++</longdesc>
++<shortdesc lang="en">Nova API location (internal, public or admin URL)</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="region_name" unique="0" required="0">
++<longdesc lang="en">
++Region name for connecting to nova.
++</longdesc>
++<shortdesc lang="en">Region name</shortdesc>
++<content type="string" default="" />
++</parameter>
++
++<parameter name="insecure" unique="0" required="0">
++<longdesc lang="en">
++Explicitly allow client to perform "insecure" TLS (https) requests.
++The server's certificate will not be verified against any certificate authorities.
++This option should be used with caution.
++</longdesc>
++<shortdesc lang="en">Allow insecure TLS requests</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
++<parameter name="no_shared_storage" unique="0" required="0">
++<longdesc lang="en">
++Indicate that nova storage for instances is not shared across compute
++nodes. This must match the reality of how nova storage is configured!
++Otherwise VMs could end up in error state upon evacuation. When
++storage is non-shared, instances on dead hypervisors will be rebuilt
++from their original image or volume, so anything on ephemeral storage
++will be lost.
++</longdesc>
++<shortdesc lang="en">Disable shared storage recovery for instances</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
++<parameter name="verbose" unique="0" required="0">
++<longdesc lang="en">
++Enable extra logging from the evacuation process
++</longdesc>
++<shortdesc lang="en">Enable debug logging</shortdesc>
++<content type="boolean" default="0" />
++</parameter>
++
++<parameter name="evacuate_delay" unique="0" required="0">
++<longdesc lang="en">
++Allows delaying the nova evacuate API call, e.g. to give a storage array time to clean
++up eventual locks/leases.
++</longdesc>
++<shortdesc lang="en">Nova evacuate delay</shortdesc>
++<content type="integer" default="0" />
++</parameter>
++
++</parameters>
++
++<actions>
++<action name="start"        timeout="20" />
++<action name="stop"         timeout="20" />
++<action name="monitor"      timeout="600" interval="10" depth="0"/>
++<action name="validate-all" timeout="20" />
++<action name="meta-data"    timeout="5" />
++</actions>
++</resource-agent>
++END
++}
++
++#######################################################################
++
++# don't exit on TERM, to test that lrmd makes sure that we do exit
++trap sigterm_handler TERM
++sigterm_handler() {
++    ocf_log info "They use TERM to bring us down. No such luck."
++    return
++}
++
++evacuate_usage() {
++    cat <<END
++usage: $0 {start|stop|monitor|validate-all|meta-data}
++
++Expects to have a fully populated OCF RA-compliant environment set.
++END
++}
++
++evacuate_stop() {
++    rm -f "$statefile"
++    return $OCF_SUCCESS
++}
++
++evacuate_start() {
++    touch "$statefile"
++    # Do not invoke monitor here so that the start timeout can be low
++    return $?
++}
++
++update_evacuation() {
++    attrd_updater -p -n evacuate -Q -N ${1} -U ${2}
++    arc=$?
++    if [ ${arc} != 0 ]; then
++        ocf_log warn "Can not set evacuation state of ${1} to ${2}: ${arc}"
++    fi
++    return ${arc}
++}
++
++handle_evacuations() {
++    while [ $# -gt 0 ]; do
++        node=$1
++        state=$2
++        shift; shift;
++        need_evacuate=0
++
++        case $state in
++            "")
++                ;;
++            no)
++                ocf_log debug "$node is either fine or already handled"
++                ;;
++            yes) need_evacuate=1
++                ;;
++            *@*)
++                where=$(echo $state | awk -F@ '{print $1}')
++                when=$(echo $state | awk -F@ '{print $2}')
++                now=$(date +%s)
++
++                if [ $(($now - $when)) -gt 60 ]; then
++                    ocf_log info "Processing partial evacuation of $node by" \
++                        "$where at $when"
++                    need_evacuate=1
++                else
++                    # Give some time for any in-flight evacuations to either
++                    # complete or fail; Nova won't react well if there are two
++                    # overlapping requests
++                    ocf_log info "Deferring processing partial evacuation of" \
++                        "$node by $where at $when"
++                fi
++                ;;
++        esac
++
++        if [ $need_evacuate = 1 ]; then
++            fence_agent="fence_compute"
++
++            if have_binary fence_evacuate; then
++                fence_agent="fence_evacuate"
++            fi
++
++            if [ ${OCF_RESKEY_evacuate_delay} != 0 ]; then
++                ocf_log info "Delaying nova evacuate by $OCF_RESKEY_evacuate_delay seconds"
++                sleep ${OCF_RESKEY_evacuate_delay}
++            fi
++
++            ocf_log notice "Initiating evacuation of $node with $fence_agent"
++            $fence_agent ${fence_options} -o status -n ${node}
++            if [ $? = 1 ]; then
++                ocf_log info "Nova does not know about ${node}"
++                # Don't mark as no because perhaps nova is unavailable right now
++                continue
++            fi
++
++            update_evacuation ${node} "$(uname -n)@$(date +%s)"
++            if [ $? != 0 ]; then
++                return $OCF_SUCCESS
++            fi
++
++            $fence_agent ${fence_options} -o off -n $node
++            rc=$?
++
++            if [ $rc = 0 ]; then
++                update_evacuation ${node} no
++                ocf_log notice "Completed evacuation of $node"
++            else
++                ocf_log warn "Evacuation of $node failed: $rc"
++                update_evacuation ${node} yes
++            fi
++        fi
++    done
++
++    return $OCF_SUCCESS
++}
++
++evacuate_monitor() {
++    if [ ! -f "$statefile" ]; then
++        return $OCF_NOT_RUNNING
++    fi
++
++    handle_evacuations $(
++        attrd_updater -n evacuate -A \
++            2> >(grep -v "attribute does not exist" 1>&2) |
++            sed 's/ value=""/ value="no"/' |
++            tr '="' '  ' |
++            awk '{print $4" "$6}'
++    )
++    return $OCF_SUCCESS
++}
++
++evacuate_validate() {
++    rc=$OCF_SUCCESS
++    fence_options=""
++
++    if ! have_binary fence_evacuate; then
++       check_binary fence_compute
++    fi
++
++    # Is the state directory writable?
++    state_dir=$(dirname $statefile)
++    touch "$state_dir/$$"
++    if [ $? != 0 ]; then
++        ocf_exit_reason "Invalid state directory: $state_dir"
++        return $OCF_ERR_ARGS
++    fi
++    rm -f "$state_dir/$$"
++
++    if [ -z "${OCF_RESKEY_auth_url}" ]; then
++        ocf_exit_reason "auth_url not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
++
++    if [ -z "${OCF_RESKEY_username}" ]; then
++        ocf_exit_reason "username not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -l ${OCF_RESKEY_username}"
++
++    if [ -z "${OCF_RESKEY_password}" ]; then
++        ocf_exit_reason "password not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -p ${OCF_RESKEY_password}"
++
++    if [ -z "${OCF_RESKEY_tenant_name}" ]; then
++        ocf_exit_reason "tenant_name not configured"
++        exit $OCF_ERR_CONFIGURED
++    fi
++
++    fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
++
++    if [ -n "${OCF_RESKEY_domain}" ]; then
++        fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
++    fi
++
++    if [ -n "${OCF_RESKEY_region_name}" ]; then
++        fence_options="${fence_options} \
++            --region-name ${OCF_RESKEY_region_name}"
++    fi
++
++    if [ -n "${OCF_RESKEY_insecure}" ]; then
++        if ocf_is_true "${OCF_RESKEY_insecure}"; then
++            fence_options="${fence_options} --insecure"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
++        if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
++            fence_options="${fence_options} --no-shared-storage"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_verbose}" ]; then
++        if ocf_is_true "${OCF_RESKEY_verbose}"; then
++            fence_options="${fence_options} --verbose"
++        fi
++    fi
++
++    if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
++        case ${OCF_RESKEY_endpoint_type} in
++            adminURL|publicURL|internalURL)
++                ;;
++            *)
++                ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type}" \
++                    "not valid. Use adminURL or publicURL or internalURL"
++                exit $OCF_ERR_CONFIGURED
++                ;;
++        esac
++        fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
++    fi
++
++    if [ $rc != $OCF_SUCCESS ]; then
++        exit $rc
++    fi
++    return $rc
++}
++
++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
++
++case $__OCF_ACTION in
++    start)
++        evacuate_validate
++        evacuate_start
++        ;;
++    stop)
++        evacuate_stop
++        ;;
++    monitor)
++        evacuate_validate
++        evacuate_monitor
++        ;;
++    meta-data)
++        meta_data
++        exit $OCF_SUCCESS
++        ;;
++    usage|help)
++        evacuate_usage
++        exit $OCF_SUCCESS
++        ;;
++    validate-all)
++        exit $OCF_SUCCESS
++        ;;
++    *)
++        evacuate_usage
++        exit $OCF_ERR_UNIMPLEMENTED
++        ;;
++esac
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec
new file mode 100644
index 0000000..d7d81e5
--- /dev/null
+++ b/SPECS/resource-agents.spec
@@ -0,0 +1,1056 @@
+#
+# All modifications and additions to the file contributed by third parties
+# remain the property of their copyright owners, unless otherwise agreed
+# upon. The license for this file, and modifications and additions to the
+# file, is the same license as for the pristine package itself (unless the
+# license for the pristine package is not an Open Source License, in which
+# case the license is the MIT License). An "Open Source License" is a
+# license that conforms to the Open Source Definition (Version 1.9)
+# published by the Open Source Initiative.
+#
+
+# Below is the script used to generate a new source file
+# from the resource-agent upstream git repo.
+#
+# TAG=$(git log --pretty="format:%h" -n 1)
+# distdir="ClusterLabs-resource-agents-${TAG}"
+# TARFILE="${distdir}.tar.gz"
+# rm -rf $TARFILE $distdir
+# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE
+#
+
+%global upstream_prefix ClusterLabs-resource-agents
+%global upstream_version e76b7d3a
+
+# Whether this platform defaults to using systemd as an init system
+# (needs to be evaluated prior to BuildRequires being enumerated and
+# installed as it's intended to conditionally select some of these, and
+# for that there are only few indicators with varying reliability:
+# - presence of systemd-defined macros (when building in a full-fledged
+#   environment, which is not the case with ordinary mock-based builds)
+# - systemd-aware rpm as manifested with the presence of particular
+#   macro (rpm itself will trivially always be present when building)
+# - existence of /usr/lib/os-release file, which is something heavily
+#   propagated by systemd project
+# - when not good enough, there's always a possibility to check
+#   particular distro-specific macros (incl. version comparison)
+%define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \
+  } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \
+  } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?))
+
+# determine the ras-set to process based on configure invocation
+%bcond_with rgmanager
+%bcond_without linuxha
+
+Name:		resource-agents
+Summary:	Open Source HA Reusable Cluster Resource Scripts
+Version:	4.8.0
+Release:	13%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}
+License:	GPLv2+ and LGPLv2+
+URL:		https://github.com/ClusterLabs/resource-agents
+Source0:	%{upstream_prefix}-%{upstream_version}.tar.gz
+Patch0:		nova-compute-wait-NovaEvacuate.patch
+Patch1:		bz1952005-pgsqlms-new-ra.patch
+Patch2:		bz1991855-nfsserver-add-nfsconvert.patch
+Patch3:		bz1998039-nfsnotify-fix-notify_args-default.patch
+
+# bundled ha-cloud-support libs
+Patch500:	ha-cloud-support-aws.patch
+Patch501:	ha-cloud-support-aliyun.patch
+Patch502:	ha-cloud-support-gcloud.patch
+
+Obsoletes:	heartbeat-resources <= %{version}
+Provides:	heartbeat-resources = %{version}
+
+# Build dependencies
+BuildRequires: make
+BuildRequires: automake autoconf pkgconfig gcc
+BuildRequires: libxslt glib2-devel
+BuildRequires: systemd
+BuildRequires: which
+
+%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version}
+BuildRequires: python3-devel
+%else
+BuildRequires: python-devel
+%endif
+
+# for pgsqlms
+BuildRequires: perl-devel perl-English perl-FindBin
+
+%ifarch x86_64
+BuildRequires: ha-cloud-support
+%endif
+
+%if 0%{?fedora} || 0%{?centos} || 0%{?rhel}
+BuildRequires: docbook-style-xsl docbook-dtds
+%if 0%{?rhel} == 0
+BuildRequires: libnet-devel
+%endif
+%endif
+
+%if 0%{?suse_version}
+BuildRequires:  libnet-devel
+BuildRequires:  libglue-devel
+BuildRequires:  libxslt docbook_4 docbook-xsl-stylesheets
+%endif
+
+## Runtime deps
+# system tools shared by several agents
+Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk
+Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat
+Requires: /usr/sbin/fuser /bin/mount
+
+# Filesystem / fs.sh / netfs.sh
+Requires: /sbin/fsck
+Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4
+Requires: /usr/sbin/fsck.xfs
+Requires: /sbin/mount.nfs /sbin/mount.nfs4
+%if 0%{?fedora} < 33 || (0%{?rhel} && 0%{?rhel} < 9) || (0%{?centos} && 0%{?centos} < 9) || 0%{?suse_version}
+%if (0%{?rhel} && 0%{?rhel} < 8) || (0%{?centos} && 0%{?centos} < 8)
+Requires: /usr/sbin/mount.cifs
+%else
+Recommends: /usr/sbin/mount.cifs
+%endif
+%endif
+
+# IPaddr2
+Requires: /sbin/ip
+
+# LVM / lvm.sh
+Requires: /usr/sbin/lvm
+
+# nfsserver / netfs.sh
+Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd
+
+# rgmanager
+%if %{with rgmanager}
+# ip.sh
+Requires: /usr/sbin/ethtool
+Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6
+
+# nfsexport.sh
+Requires: /sbin/findfs
+Requires: /sbin/quotaon /sbin/quotacheck
+%endif
+
+%description
+A set of scripts to interface with several services to operate in a
+High Availability environment for both Pacemaker and rgmanager
+service managers.
+
+%ifarch x86_64
+%package cloud
+License:	GPLv2+ and LGPLv2+
+Summary:	Cloud resource agents
+Requires:	%{name} = %{version}-%{release}
+Requires:	ha-cloud-support
+Provides:	resource-agents-aliyun
+Obsoletes:	resource-agents-aliyun <= %{version}
+Provides:	resource-agents-gcp
+Obsoletes:	resource-agents-gcp <= %{version}
+
+%description cloud
+Cloud resource agents allow Cloud instances to be managed
+in a cluster environment.
+%endif
+
+%package paf
+License:	PostgreSQL
+Summary:	PostgreSQL Automatic Failover (PAF) resource agent
+Requires:	%{name} = %{version}-%{release}
+Requires:	perl-interpreter perl-English perl-FindBin
+
+%description paf
+PostgreSQL Automatic Failover (PAF) resource agents allow PostgreSQL
+databases to be managed in a cluster environment.
+
+%prep
+%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos} == 0 && 0%{?rhel} == 0
+%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
+exit 1
+%endif
+%setup -q -n %{upstream_prefix}-%{upstream_version}
+%patch0 -p1 -F1
+%patch1 -p1
+%patch2 -p1 -F1
+%patch3 -p1
+
+# bundled ha-cloud-support libs
+%patch500 -p1
+%patch501 -p1
+%patch502 -p1 -F2
+
+chmod 755 heartbeat/nova-compute-wait
+chmod 755 heartbeat/NovaEvacuate
+chmod 755 heartbeat/pgsqlms
+
+%build
+if [ ! -f configure ]; then
+	./autogen.sh
+fi
+
+%if 0%{?fedora} >= 11 || 0%{?centos} > 5 || 0%{?rhel} > 5
+CFLAGS="$(echo '%{optflags}')"
+%global conf_opt_fatal "--enable-fatal-warnings=no"
+%else
+CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}"
+%global conf_opt_fatal "--enable-fatal-warnings=yes"
+%endif
+
+%if %{with rgmanager}
+%global rasset rgmanager
+%endif
+%if %{with linuxha}
+%global rasset linux-ha
+%endif
+%if %{with rgmanager} && %{with linuxha}
+%global rasset all
+%endif
+
+export CFLAGS
+
+%configure \
+%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version}
+	PYTHON="%{__python3}" \
+%endif
+%ifarch x86_64
+	PYTHONPATH="%{_usr}/lib/fence-agents/support/google" \
+%endif
+	%{conf_opt_fatal} \
+%if %{defined _unitdir}
+    --with-systemdsystemunitdir=%{_unitdir} \
+%endif
+%if %{defined _tmpfilesdir}
+    --with-systemdtmpfilesdir=%{_tmpfilesdir} \
+    --with-rsctmpdir=/run/resource-agents \
+%endif
+	--with-pkg-name=%{name} \
+	--with-ras-set=%{rasset}
+
+%if %{defined jobs}
+JFLAGS="$(echo '-j%{jobs}')"
+%else
+JFLAGS="$(echo '%{_smp_mflags}')"
+%endif
+
+make $JFLAGS
+
+%install
+rm -rf %{buildroot}
+make install DESTDIR=%{buildroot}
+
+## tree fixup
+# remove docs (there is only one and they should come from doc sections in files)
+rm -rf %{buildroot}/usr/share/doc/resource-agents
+
+%files
+%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog
+%if %{with linuxha}
+%doc heartbeat/README.galera
+%doc doc/README.webapps
+%doc %{_datadir}/%{name}/ra-api-1.dtd
+%doc %{_datadir}/%{name}/metadata.rng
+%endif
+
+%if %{with rgmanager}
+%{_datadir}/cluster
+%{_sbindir}/rhev-check.sh
+%endif
+
+%if %{with linuxha}
+%dir %{_usr}/lib/ocf
+%dir %{_usr}/lib/ocf/resource.d
+%dir %{_usr}/lib/ocf/lib
+
+%{_usr}/lib/ocf/lib/heartbeat
+
+%{_usr}/lib/ocf/resource.d/heartbeat
+%{_usr}/lib/ocf/resource.d/openstack
+
+%{_datadir}/pkgconfig/%{name}.pc
+
+%if %{defined _unitdir}
+%{_unitdir}/resource-agents-deps.target
+%endif
+%if %{defined _tmpfilesdir}
+%{_tmpfilesdir}/%{name}.conf
+%endif
+
+%dir %{_datadir}/%{name}
+%dir %{_datadir}/%{name}/ocft
+%{_datadir}/%{name}/ocft/configs
+%{_datadir}/%{name}/ocft/caselib
+%{_datadir}/%{name}/ocft/README
+%{_datadir}/%{name}/ocft/README.zh_CN
+%{_datadir}/%{name}/ocft/helpers.sh
+%exclude %{_datadir}/%{name}/ocft/runocft
+%exclude %{_datadir}/%{name}/ocft/runocft.prereq
+
+%{_sbindir}/ocft
+
+%{_includedir}/heartbeat
+
+%if %{defined _tmpfilesdir}
+%dir %attr (1755, root, root)	/run/resource-agents
+%else
+%dir %attr (1755, root, root)	%{_var}/run/resource-agents
+%endif
+
+%{_mandir}/man7/*.7*
+
+###
+# Supported, but in another sub package
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip*
+%exclude /usr/lib/ocf/resource.d/heartbeat/aws*
+%exclude /usr/lib/ocf/resource.d/heartbeat/azure-*
+%exclude %{_mandir}/man7/*aliyun-vpc-move-ip*
+%exclude /usr/lib/ocf/resource.d/heartbeat/gcp*
+%exclude %{_mandir}/man7/*gcp*
+%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms
+%exclude %{_mandir}/man7/*pgsqlms*
+%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
+
+###
+# Moved to separate packages
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/SAP*
+%exclude /usr/lib/ocf/lib/heartbeat/sap*
+%exclude %{_mandir}/man7/*SAP*
+
+###
+# Unsupported
+###
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AoEtarget
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AudibleAlarm
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ClusterMon
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/EvmsSCC
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Evmsd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ICP
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/IPaddr
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LVM
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LinuxSCSI
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageRAID
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageVE
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Pure-FTPd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Raid1
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ServeRAID
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Stateful
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SysInfo
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/VIPArip
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS6
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WinPopup
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Xen
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ZFS
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/anything
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/asterisk
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/clvm
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dnsupdate
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/docker*
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/eDir88
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/fio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ids
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iface-bridge
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ipsec
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iscsi
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jboss
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jira
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/kamailio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ldirectord
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxc
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxd-info
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/machine-info
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mariadb
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/minio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mpathpersist
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mysql-proxy
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/openstack-cinder-volume
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/openstack-floating-ip
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/openstack-info
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ovsmonitor
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pgagent
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pingd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pound
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/proftpd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rkt
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rsyslog
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/scsi2reservation
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sfex
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sg_persist
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/syslog-ng
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/varnish
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vmware
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vsftpd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/zabbixserver
+%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_LVM.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_clvm.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_docker*.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-cinder-volume.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-floating-ip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_sfex.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz
+
+###
+# Other excluded files.
+###
+# This tool has to be updated for the new pacemaker lrmd.
+%exclude %{_sbindir}/ocf-tester
+%exclude %{_mandir}/man8/ocf-tester.8*
+# ldirectord is not supported
+%exclude /etc/ha.d/resource.d/ldirectord
+%exclude /etc/rc.d/init.d/ldirectord
+%exclude %{_unitdir}/ldirectord.service
+%exclude /etc/logrotate.d/ldirectord
+%exclude /usr/sbin/ldirectord
+%exclude %{_mandir}/man8/ldirectord.8.gz
+
+# For compatibility with pre-existing agents
+%dir %{_sysconfdir}/ha.d
+%{_sysconfdir}/ha.d/shellfuncs
+
+%{_libexecdir}/heartbeat
+%endif
+
+%ifarch x86_64
+%files cloud
+/usr/lib/ocf/resource.d/heartbeat/aliyun-*
+%{_mandir}/man7/*aliyun-*
+/usr/lib/ocf/resource.d/heartbeat/aws*
+%{_mandir}/man7/*aws*
+/usr/lib/ocf/resource.d/heartbeat/azure-*
+%{_mandir}/man7/*azure-*
+/usr/lib/ocf/resource.d/heartbeat/gcp-*
+%{_mandir}/man7/*gcp-*
+%exclude /usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-ip
+%exclude %{_mandir}/man7/*gcp-vpc-move-ip*
+%endif
+
+%files paf
+%doc paf_README.md
+%license paf_LICENSE
+%defattr(-,root,root)
+%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms
+%{_mandir}/man7/*pgsqlms*
+%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
+
+%changelog
+* Thu Aug 26 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.8.0-13
+- nfsnotify: fix default value for "notify_args"
+
+  Resolves: rhbz#1998039
+
+* Wed Aug 25 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.8.0-12
+- Create cloud subpackage
+- Add nova-compute-wait/NovaEvacuate
+- nfsserver: fix nfs-convert issue
+
+  Resolves: rhbz#1997548, rhbz#1997576, rhbz#1991855
+
+* Mon Aug 16 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.8.0-11
+- Filesystem: force_unmount: remove "Default value" text from metadata
+
+  Resolves: rhbz#1993900
+
+* Tue Aug 10 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.8.0-10
+- pgsqlms: new resource agent
+
+  Resolves: rhbz#1952005
+
+* Tue Aug 10 2021 Mohan Boddu <mboddu@redhat.com> - 4.8.0-8.1
+- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
+  Related: rhbz#1991688
+
+* Tue Jun 29 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.8.0-8
+- Exclude SAP agents that are in separate -sap subpackage
+
+  Resolves: rhbz#1977208
+
+* Mon May 17 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.8.0-3
+- Remove redhat-lsb-core dependency (lsb_release)
+
+  Resolves: rhbz#1961539
+
+* Wed Apr 21 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.8.0-2
+- Solve build issues
+
+  Resolves: rhbz#1951253
+
+* Fri Apr 16 2021 Mohan Boddu <mboddu@redhat.com> - 4.8.0-1.1
+- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937
+
+* Tue Mar 16 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.7.0-7
+- Filesystem: change force_unmount default to safe for RHEL9+ (1843578)
+
+* Wed Mar  3 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.7.0-5
+- Exclude unsupported agents
+
+* Wed Feb 24 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.7.0-4
+- remove ldirectord subpackage
+
+  Resolves: rhbz#1932218
+
+* Tue Feb 16 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.7.0-3
+- add BuildRequires for google lib
+- use HA cloud support supplied awscli
+
+* Wed Jan 27 2021 Fedora Release Engineering <releng@fedoraproject.org> - 4.7.0-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
+
+* Wed Dec  9 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.7.0-1
+- Rebase to resource-agents 4.7.0 upstream release.
+
+* Mon Aug 24 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.6.1-4
+- spec: improvements from upstream project
+
+* Mon Aug 24 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.6.1-3
+- ldirectord: add dependency for perl-IO-Socket-INET6
+
+  Resolves: rhbz#1868063
+
+* Wed Jul 29 2020 Fedora Release Engineering <releng@fedoraproject.org> - 4.6.1-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
+
+* Fri Jul 24 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.6.1-2
+- Make Samba/CIFS dependency weak for Fedora 32 and remove the
+  dependency from 33+
+
+* Thu Jun 18 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.6.1-1
+- Rebase to resource-agents 4.6.1 upstream release.
+
+* Thu Jun 18 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.6.0-1
+- Rebase to resource-agents 4.6.0 upstream release.
+
+* Mon Mar  9 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.5.0-1
+- Rebase to resource-agents 4.5.0 upstream release.
+
+* Thu Jan 30 2020 Fedora Release Engineering <releng@fedoraproject.org> - 4.4.0-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
+
+* Wed Oct 23 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.4.0-1
+- Rebase to resource-agents 4.4.0 upstream release.
+
+* Fri Jul 26 2019 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.0-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
+
+* Fri Jun 21 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.3.0-1
+- Rebase to resource-agents 4.3.0 upstream release.
+
+* Fri May 24 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.2.0-4
+- Fix build issues
+
+* Fri Mar 15 2019 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.2.0-3
+- systemd-tmpfiles: change path to /run/resource-agents
+
+  Resolves: rhbz#1688865
+
+* Sat Feb 02 2019 Fedora Release Engineering <releng@fedoraproject.org> - 4.2.0-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
+
+* Wed Oct 24 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.2.0-1
+- Rebase to resource-agents 4.2.0 upstream release.
+- spec: fix missing systemd config files
+
+* Sat Jul 14 2018 Fedora Release Engineering <releng@fedoraproject.org> - 4.1.1-1.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
+
+* Sun Mar 18 2018 Iryna Shcherbina <ishcherb@redhat.com> - 4.1.1-1.1
+- Update Python 2 dependency declarations to new packaging standards
+  (See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3)
+
+* Tue Mar 13 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-1
+- Rebase to resource-agents 4.1.1 upstream release.
+
+* Mon Feb 19 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.0-2
+- Add gcc to BuildRequires
+
+* Fri Feb 09 2018 Igor Gnatenko <ignatenkobrain@fedoraproject.org> - 4.1.0-1.1
+- Escape macros in %%changelog
+
+* Wed Jan 10 2018 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.0-1
+- Rebase to resource-agents 4.1.0 upstream release.
+
+* Thu Aug 03 2017 Fedora Release Engineering <releng@fedoraproject.org> - 4.0.1-1.3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
+
+* Thu Jul 27 2017 Fedora Release Engineering <releng@fedoraproject.org> - 4.0.1-1.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
+
+* Sat Feb 11 2017 Fedora Release Engineering <releng@fedoraproject.org> - 4.0.1-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild
+
+* Thu Feb  2 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.1-1
+- Rebase to resource-agents 4.0.1 upstream release.
+
+* Wed Feb  1 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.0-2
+- galera: remove "long SST monitoring" support due to corner-case issues
+
+* Tue Jan 31 2017 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.0.0-1
+- Rebase to resource-agents 4.0.0 upstream release.
+
+* Thu Dec 15 2016 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.7-6
+- Add netstat dependency
+
+* Tue Feb  9 2016 Oyvind Albrigtsen <oalbrigt@redhat.com> - 3.9.7-4
+- Rebase to resource-agents 3.9.7 upstream release.
+
+* Thu Feb 04 2016 Fedora Release Engineering <releng@fedoraproject.org> - 3.9.6-2.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Thu Jun 18 2015 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.6-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
+
+* Mon Apr 20 2015 David Vossel <dvossel@redhat.com> - 3.9.6-2
+- Rebase to latest upstream code in order to pull in rabbitmq-cluster agent
+
+* Fri Feb 13 2015 David Vossel <dvossel@redhat.com> - 3.9.6-1
+- Rebase to resource-agents 3.9.6 upstream release.
+
+* Sun Aug 17 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.5-12.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
+
+* Sun Jun 08 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.5-12.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
+
+* Wed Apr 30 2014 David Vossel <dvossel@redhat.com> - 3.9.5-12
+- Sync with latest upstream.
+
+* Thu Jan 2 2014 David Vossel <dvossel@redhat.com> - 3.9.5-11
+- Sync with latest upstream.
+
+* Sun Oct 20 2013 David Vossel <dvossel@redhat.com> - 3.9.5-10
+- Fix build system for rawhide.
+
+* Wed Oct 16 2013 David Vossel <dvossel@redhat.com> - 3.9.5-9
+- Remove rgmanager agents from build.
+
+* Sun Aug 04 2013 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.5-8
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild
+
+* Wed Jul 17 2013 Petr Pisar <ppisar@redhat.com> - 3.9.5-7
+- Perl 5.18 rebuild
+
+* Tue Jun 18 2013 David Vossel <dvossel@redhat.com> - 3.9.5-6
+- Restores rsctmp directory to upstream default.
+
+* Tue Jun 18 2013 David Vossel <dvossel@redhat.com> - 3.9.5-5
+- Merges redhat provider into heartbeat provider. Remove
+  rgmanager's redhat provider.
+
+  Resolves: rhbz#917681
+  Resolves: rhbz#928890
+  Resolves: rhbz#952716
+  Resolves: rhbz#960555
+
+* Tue Mar 12 2013 David Vossel <dvossel@redhat.com> - 3.9.5-3
+- Fixes build system error with conditional logic involving
+  IPv6addr and updates spec file to build against rhel 7 as
+  well as fedora 19.
+
+* Mon Mar 11 2013 David Vossel <dvossel@redhat.com> - 3.9.5-2
+- Resolves rhbz#915050
+
+* Mon Mar 11 2013 David Vossel <dvossel@redhat.com> - 3.9.5-1
+- New upstream release.
+
+* Fri Jan 25 2013 Kevin Fenzi <kevin@scrye.com> - 3.9.2-5
+- Fix cifs mount requires
+
+* Mon Nov 12 2012 Chris Feist <cfeist@redhat.com> - 3.9.2-4
+- Removed version number after dist
+
+* Mon Oct 29 2012 Chris Feist <cfeist@redhat.com> - 3.9.2-3.8
+- Remove cluster-glue-libs-devel
+- Disable IPv6addr & sfex to fix deps on libplumgpl & libplum (due to
+  disappearance of cluster-glue in F18)
+
+* Sat Jul 21 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.2-3.5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Thu Jul 05 2012 Chris Feist <cfeist@redhat.com> - 3.9.2-3.4
+- Fix location of lvm (change from /sbin to /usr/sbin)
+
+* Wed Apr 04 2012 Jon Ciesla <limburgher@gmail.com> - 3.9.2-3.3
+- Rebuilt to fix rawhide dependency issues (caused by move of fsck from
+  /sbin to /usr/sbin).
+
+* Fri Mar 30 2012 Jon Ciesla <limburgher@gmail.com> - 3.9.2-3.1
+- libnet rebuild.
+
+* Sat Jan 14 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.2-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild
+
+* Fri Jul  8 2011 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.9.2-2
+- add post call to resource-agents to integrate with cluster 3.1.4
+
+* Thu Jun 30 2011 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.9.2-1
+- new upstream release
+- fix 2 regressions from 3.9.1
+
+* Mon Jun 20 2011 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.9.1-1
+- new upstream release
+- import spec file from upstream
+
+* Tue Mar  1 2011 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.1.1-1
+- new upstream release 3.1.1 and 1.0.4
+
+* Wed Feb 09 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.1.0-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Thu Dec  2 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.1.0-1
+- new upstream release
+- spec file update:
+  Update upstream URL
+  Update source URL
+  use standard configure macro
+  use standard make invocation
+
+* Thu Oct  7 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.17-1
+- new upstream release
+  Resolves: rhbz#632595, rhbz#633856, rhbz#632385, rhbz#628013
+  Resolves: rhbz#621313, rhbz#595383, rhbz#580492, rhbz#605733
+  Resolves: rhbz#636243, rhbz#591003, rhbz#637913, rhbz#634718
+  Resolves: rhbz#617247, rhbz#617247, rhbz#617234, rhbz#631943
+  Resolves: rhbz#639018
+
+* Thu Oct  7 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.16-2
+- new upstream release of the Pacemaker agents: 71b1377f907c
+
+* Thu Sep  2 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.16-1
+- new upstream release
+  Resolves: rhbz#619096, rhbz#614046, rhbz#620679, rhbz#619680
+  Resolves: rhbz#621562, rhbz#621694, rhbz#608887, rhbz#622844
+  Resolves: rhbz#623810, rhbz#617306, rhbz#623816, rhbz#624691
+  Resolves: rhbz#622576
+
+* Thu Jul 29 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.14-1
+- new upstream release
+  Resolves: rhbz#553383, rhbz#557563, rhbz#578625, rhbz#591003
+  Resolves: rhbz#593721, rhbz#593726, rhbz#595455, rhbz#595547
+  Resolves: rhbz#596918, rhbz#601315, rhbz#604298, rhbz#606368
+  Resolves: rhbz#606470, rhbz#606480, rhbz#606754, rhbz#606989
+  Resolves: rhbz#607321, rhbz#608154, rhbz#608887, rhbz#609181
+  Resolves: rhbz#609866, rhbz#609978, rhbz#612097, rhbz#612110
+  Resolves: rhbz#612165, rhbz#612941, rhbz#614127, rhbz#614356
+  Resolves: rhbz#614421, rhbz#614457, rhbz#614961, rhbz#615202
+  Resolves: rhbz#615203, rhbz#615255, rhbz#617163, rhbz#617566
+  Resolves: rhbz#618534, rhbz#618703, rhbz#618806, rhbz#618814
+
+* Mon Jun  7 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.13-1
+- new upstream release
+  Resolves: rhbz#592103, rhbz#593108, rhbz#578617, rhbz#594626
+  Resolves: rhbz#594511, rhbz#596046, rhbz#594111, rhbz#597002
+  Resolves: rhbz#599643
+
+* Tue May 18 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.12-2
+- libnet is not available on RHEL
+- Do not package ldirectord on RHEL
+  Resolves: rhbz#577264
+
+* Mon May 10 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.12-1
+- new upstream release
+  Resolves: rhbz#585217, rhbz#586100, rhbz#581533, rhbz#582753
+  Resolves: rhbz#582754, rhbz#585083, rhbz#587079, rhbz#588890
+  Resolves: rhbz#588925, rhbz#583789, rhbz#589131, rhbz#588010
+  Resolves: rhbz#576871, rhbz#576871, rhbz#590000, rhbz#589823
+
+* Mon May 10 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.12-1
+- New pacemaker agents upstream release: a7c0f35916bf
+  + High: pgsql: properly implement pghost parameter
+  + High: RA: mysql: fix syntax error
+  + High: SAPInstance RA: do not rely on op target rc when monitoring clones (lf#2371)
+  + High: set the HA_RSCTMP directory to /var/run/resource-agents (lf#2378)
+  + Medium: IPaddr/IPaddr2: add a description of the assumption in meta-data
+  + Medium: IPaddr: return the correct code if interface delete failed
+  + Medium: nfsserver: rpc.statd as the notify cmd does not work with -v (thanks to Carl Lewis)
+  + Medium: oracle: reduce output from sqlplus to the last line for queries (bnc#567815)
+  + Medium: pgsql: implement "config" parameter
+  + Medium: RA: iSCSITarget: follow changed IET access policy
+
+* Wed Apr 21 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.11-1
+- new upstream release
+  Resolves: rhbz#583945, rhbz#581047, rhbz#576330, rhbz#583017
+  Resolves: rhbz#583019, rhbz#583948, rhbz#584003, rhbz#582017
+  Resolves: rhbz#555901, rhbz#582754, rhbz#582573, rhbz#581533
+- Switch to file based Requires.
+  Also address several other problems related to missing runtime
+  components in different agents.
+  With the current Requires: set, we guarantee all basic functionalities
+  out of the box for lvm/fs/clusterfs/netfs/networking.
+  Resolves: rhbz#570008
+
+* Sat Apr 17 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.10-2
+- New pacemaker agents upstream release
+  + High: RA: vmware: fix set_environment() invocation (LF 2342)
+  + High: RA: vmware: update to version 0.2
+  + Medium: Filesystem: prefer /proc/mounts to /etc/mtab for non-bind mounts (lf#2388)
+  + Medium: IPaddr2: don't bring the interface down on stop (thanks to Lars Ellenberg)
+  + Medium: IPsrcaddr: modify the interface route (lf#2367)
+  + Medium: ldirectord: Allow multiple email addresses (LF 2168)
+  + Medium: ldirectord: fix setting defaults for configfile and ldirectord (lf#2328)
+  + Medium: meta-data: improve timeouts in most resource agents
+  + Medium: nfsserver: use default values (lf#2321)
+  + Medium: ocf-shellfuncs: don't log but print to stderr if connected to a terminal
+  + Medium: ocf-shellfuncs: don't output to stderr if using syslog
+  + Medium: oracle/oralsnr: improve exit codes if the environment isn't valid
+  + Medium: RA: iSCSILogicalUnit: fix monitor for STGT
+  + Medium: RA: make sure that OCF_RESKEY_CRM_meta_interval is always defined (LF 2284)
+  + Medium: RA: ManageRAID: require bash
+  + Medium: RA: ManageRAID: require bash
+  + Medium: RA: VirtualDomain: bail out early if config file can't be read during probe (Novell 593988)
+  + Medium: RA: VirtualDomain: fix incorrect use of __OCF_ACTION
+  + Medium: RA: VirtualDomain: improve error messages
+  + Medium: RA: VirtualDomain: spin on define until we definitely have a domain name
+  + Medium: Route: add route table parameter (lf#2335)
+  + Medium: sfex: don't use pid file (lf#2363,bnc#585416)
+  + Medium: sfex: exit with success on stop if sfex has never been started (bnc#585416)
+
+* Fri Apr  9 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.10-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#519491, rhbz#570525, rhbz#571806, rhbz#574027
+  Resolves: rhbz#574215, rhbz#574886, rhbz#576322, rhbz#576335
+  Resolves: rhbz#575103, rhbz#577856, rhbz#577874, rhbz#578249
+  Resolves: rhbz#578625, rhbz#578626, rhbz#578628, rhbz#578626
+  Resolves: rhbz#579621, rhbz#579623, rhbz#579625, rhbz#579626
+  Resolves: rhbz#579059
+
+* Wed Mar 24 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.9-2
+- Resolves: rhbz#572993 - Patched build process to correctly generate ldirectord man page
+- Resolves: rhbz#574732 - Add libnet-devel as a dependency to ensure IPaddrv6 is built
+
+* Mon Mar  1 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.9-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#455300, rhbz#568446, rhbz#561862, rhbz#536902
+  Resolves: rhbz#512171, rhbz#519491
+
+* Mon Feb 22 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.8-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#548133, rhbz#565907, rhbz#545602, rhbz#555901
+  Resolves: rhbz#564471, rhbz#515717, rhbz#557128, rhbz#536157
+  Resolves: rhbz#455300, rhbz#561416, rhbz#562237, rhbz#537201
+  Resolves: rhbz#536962, rhbz#553383, rhbz#556961, rhbz#555363
+  Resolves: rhbz#557128, rhbz#455300, rhbz#557167, rhbz#459630
+  Resolves: rhbz#532808, rhbz#556603, rhbz#554968, rhbz#555047
+  Resolves: rhbz#554968, rhbz#555047
+- spec file update:
+  * update spec file copyright date
+  * use bz2 tarball
+
+* Fri Jan 15 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.7-2
+- Add python as BuildRequires
+
+* Mon Jan 11 2010 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.7-1
+- New rgmanager resource agents upstream release
+  Resolves: rhbz#526286, rhbz#533461
+
+* Mon Jan 11 2010 Andrew Beekhof <andrew@beekhof.net> - 3.0.6-2
+- Update Pacemaker agents to upstream version: c76b4a6eb576
+  + High: RA: VirtualDomain: fix forceful stop (LF 2283)
+  + High: apache: monitor operation of depth 10 for web applications (LF 2234)
+  + Medium: IPaddr2: CLUSTERIP/iptables rule not always inserted on failed monitor (LF 2281)
+  + Medium: RA: Route: improve validate (LF 2232)
+  + Medium: mark obsolete RAs as deprecated (LF 2244)
+  + Medium: mysql: escalate stop to KILL if regular shutdown doesn't work
+
+* Mon Dec 7 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.6-1
+- New rgmanager resource agents upstream release
+- spec file update:
+  * use global instead of define
+  * use new Source0 url
+  * use %%name macro more aggressively
+
+* Mon Dec 7 2009 Andrew Beekhof <andrew@beekhof.net> - 3.0.5-2
+- Update Pacemaker agents to upstream version: bc00c0b065d9
+  + High: RA: introduce OCF_FUNCTIONS_DIR, allow it to be overridden (LF2239)
+  + High: doc: add man pages for all RAs (LF2237)
+  + High: syslog-ng: new RA
+  + High: vmware: make meta-data work and several cleanups (LF 2212)
+  + Medium: .ocf-shellfuncs: add ocf_is_probe function
+  + Medium: Dev: make RAs executable (LF2239)
+  + Medium: IPv6addr: ifdef out the ip offset hack for libnet v1.1.4 (LF 2034)
+  + Medium: add mercurial repository version information to .ocf-shellfuncs
+  + Medium: build: add perl-MailTools runtime dependency to ldirectord package (LF 1469)
+  + Medium: iSCSITarget, iSCSILogicalUnit: support LIO
+  + Medium: nfsserver: use check_binary properly in validate (LF 2211)
+  + Medium: nfsserver: validate should not check if nfs_shared_infodir exists (thanks to eelco@procolix.com) (LF 2219)
+  + Medium: oracle/oralsnr: export variables properly
+  + Medium: pgsql: remove the previous backup_label if it exists
+  + Medium: postfix: fix double stop (thanks to Dinh N. Quoc)
+  + RA: LVM: Make monitor operation quiet in logs (bnc#546353)
+  + RA: Xen: Remove instance_attribute "allow_migrate" (bnc#539968)
+  + ldirectord: OCF agent: overhaul
+
+* Fri Nov 20 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.5-1
+- New rgmanager resource agents upstream release
+- Allow pacemaker to use rgmanager resource agents
+
+* Wed Oct 28 2009 Andrew Beekhof <andrew@beekhof.net> - 3.0.4-2
+- Update Pacemaker agents to upstream version: e2338892f59f
+  + High: send_arp - turn on unsolicited mode for compatibility with the libnet version's exit codes
+  + High: Trap sigterm for compatibility with the libnet version of send_arp
+  + Medium: Bug - lf#2147: IPaddr2: behave if the interface is down
+  + Medium: IPv6addr: recognize network masks properly
+  + Medium: RA: VirtualDomain: avoid needlessly invoking "virsh define"
+
+* Wed Oct 21 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.4-1
+- New rgmanager resource agents upstream release
+
+* Mon Oct 12 2009 Andrew Beekhof <andrew@beekhof.net> - 3.0.3-3
+- Update Pacemaker agents to upstream version: 099c0e5d80db
+  + Add the ha_parameter function back into .ocf-shellfuncs.
+  + Bug bnc#534803 - Provide a default for MAILCMD
+  + Fix use of undefined macro @HA_NOARCHDATAHBDIR@
+  + High (LF 2138): IPsrcaddr: replace 0/0 with proper ip prefix (thanks to Michael Ricordeau and Michael Schwartzkopff)
+  + Import shellfuncs from heartbeat as badly written RAs use it
+  + Medium (LF 2173): nfsserver: exit properly in nfsserver_validate
+  + Medium: RA: Filesystem: implement monitor operation
+  + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable
+  + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable (addendum)
+  + Medium: RA: iSCSILogicalUnit: use a 16-byte default SCSI ID
+  + Medium: RA: iSCSITarget: be more persistent deleting targets on stop
+  + Medium: RA: portblock: add per-IP filtering capability
+  + Medium: mysql-proxy: log_level and keepalive parameters
+  + Medium: oracle: drop spurious output from sqlplus
+  + RA: Filesystem: allow configuring smbfs mounts as clones
+
+* Wed Sep 23 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.3-1
+- New rgmanager resource agents upstream release
+
+* Thu Aug 20 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.1-1
+- New rgmanager resource agents upstream release
+
+* Tue Aug 18 2009 Andrew Beekhof <andrew@beekhof.net> - 3.0.0-16
+- Create an ldirectord package
+- Update Pacemaker agents to upstream version: 2198dc90bec4
+  + Build: Import ldirectord.
+  + Ensure HA_VARRUNDIR has a value to substitute
+  + High: Add findif tool (mandatory for IPaddr/IPaddr2)
+  + High: IPv6addr: new nic and cidr_netmask parameters
+  + High: postfix: new resource agent
+  + Include license information
+  + Low (LF 2159): Squid: make the regexp match more precisely output of netstat
+  + Low: configure: Fix package name.
+  + Low: ldirectord: add dependency on $remote_fs.
+  + Low: ldirectord: add mandatory required header to init script.
+  + Medium (LF 2165): IPaddr2: remove all colons from the mac address before passing it to send_arp
+  + Medium: VirtualDomain: destroy domain shortly before timeout expiry
+  + Medium: shellfuncs: Make the mktemp wrappers work.
+  + Remove references to Echo function
+  + Remove references to heartbeat shellfuncs.
+  + Remove useless path lookups
+  + findif: actually include the right header. Simplify configure.
+  + ldirectord: Remove superfluous configure artifact.
+  + ocf-tester: Fix package reference and path to DTD.
+
+* Tue Aug 11 2009 Ville Skyttä <ville.skytta@iki.fi> - 3.0.0-15
+- Use bzipped upstream hg tarball.
+
+* Wed Jul 29 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-14
+- Merge Pacemaker cluster resource agents:
+  * Add Source1.
+  * Drop noarch. We have real binaries now.
+  * Update BuildRequires.
+  * Update all relevant prep/build/install/files/description sections.
+
+* Sun Jul 26 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.0.0-13
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
+
+* Wed Jul  8 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-12
+- spec file updates:
+  * Update copyright header
+  * final release.. undefine alphatag
+
+* Thu Jul  2 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-11.rc4
+- New upstream release.
+
+* Sat Jun 20 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-10.rc3
+- New upstream release.
+
+* Wed Jun 10 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-9.rc2
+- New upstream release + git94df30ca63e49afb1e8aeede65df8a3e5bcd0970
+
+* Tue Mar 24 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-8.rc1
+- New upstream release.
+- Update BuildRoot usage to preferred versions/names
+
+* Mon Mar  9 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-7.beta1
+- New upstream release.
+
+* Fri Mar  6 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-6.alpha7
+- New upstream release.
+
+* Tue Mar  3 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-5.alpha6
+- New upstream release.
+
+* Tue Feb 24 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-4.alpha5
+- Drop Conflicts with rgmanager.
+
+* Mon Feb 23 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-3.alpha5
+- New upstream release.
+
+* Thu Feb 19 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-2.alpha4
+- Add comments on how to build this package.
+
+* Thu Feb  5 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-1.alpha4
+- New upstream release.
+- Fix datadir/cluster directory ownership.
+
+* Tue Jan 27 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 3.0.0-1.alpha3
+  - Initial packaging