diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..11e610e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+SOURCES/ClusterLabs-resource-agents-fd0720f7.tar.gz
diff --git a/.resource-agents.metadata b/.resource-agents.metadata
new file mode 100644
index 0000000..d893801
--- /dev/null
+++ b/.resource-agents.metadata
@@ -0,0 +1 @@
+3b517ecdbe2103df77813050e5c998e102c5de7e SOURCES/ClusterLabs-resource-agents-fd0720f7.tar.gz
diff --git a/SOURCES/bz1952005-pgsqlms-new-ra.patch b/SOURCES/bz1952005-pgsqlms-new-ra.patch
new file mode 100644
index 0000000..b3b314e
--- /dev/null
+++ b/SOURCES/bz1952005-pgsqlms-new-ra.patch
@@ -0,0 +1,3338 @@
+diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
+--- a/doc/man/Makefile.am 2021-04-12 12:51:56.831835953 +0200
++++ b/doc/man/Makefile.am 2021-04-13 13:38:14.198361848 +0200
+@@ -154,6 +154,7 @@
+ ocf_heartbeat_ovsmonitor.7 \
+ ocf_heartbeat_pgagent.7 \
+ ocf_heartbeat_pgsql.7 \
++ ocf_heartbeat_pgsqlms.7 \
+ ocf_heartbeat_pingd.7 \
+ ocf_heartbeat_podman.7 \
+ ocf_heartbeat_portblock.7 \
+diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+--- a/heartbeat/Makefile.am 2021-04-12 12:51:56.831835953 +0200
++++ b/heartbeat/Makefile.am 2021-04-13 13:37:45.741292178 +0200
+@@ -149,6 +149,7 @@
+ ovsmonitor \
+ pgagent \
+ pgsql \
++ pgsqlms \
+ pingd \
+ podman \
+ portblock \
+@@ -209,7 +210,10 @@
+ mysql-common.sh \
+ nfsserver-redhat.sh \
+ findif.sh \
+- ocf.py
++ ocf.py \
++ OCF_Directories.pm \
++ OCF_Functions.pm \
++ OCF_ReturnCodes.pm
+
+ # Legacy locations
+ hbdir = $(sysconfdir)/ha.d
+diff --color -uNr a/heartbeat/OCF_Directories.pm b/heartbeat/OCF_Directories.pm
+--- a/heartbeat/OCF_Directories.pm 1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/OCF_Directories.pm 2021-04-13 13:37:35.621267404 +0200
+@@ -0,0 +1,139 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++OCF_Directories - Binaries and binary options for use in Resource Agents
++
++=head1 SYNOPSIS
++
++ use FindBin;
++ use lib "$FindBin::RealBin/../../lib/heartbeat/";
++
++ use OCF_Directories;
++
++=head1 DESCRIPTION
++
++This module has been ported from the ocf-directories shell script of the
++resource-agents project. See L<https://github.com/ClusterLabs/resource-agents/>.
++
++=head1 VARIABLES
++
++Here are the variables exported by this module:
++
++=over
++
++=item $INITDIR
++
++=item $HA_DIR
++
++=item $HA_RCDIR
++
++=item $HA_CONFDIR
++
++=item $HA_CF
++
++=item $HA_VARLIB
++
++=item $HA_RSCTMP
++
++=item $HA_RSCTMP_OLD
++
++=item $HA_FIFO
++
++=item $HA_BIN
++
++=item $HA_SBIN_DIR
++
++=item $HA_DATEFMT
++
++=item $HA_DEBUGLOG
++
++=item $HA_RESOURCEDIR
++
++=item $HA_DOCDIR
++
++=item $__SCRIPT_NAME
++
++=item $HA_VARRUN
++
++=item $HA_VARLOCK
++
++=item $ocf_prefix
++
++=item $ocf_exec_prefix
++
++=back
++
++=cut
++
++package OCF_Directories;
++
++use strict;
++use warnings;
++use 5.008;
++use File::Basename;
++
++BEGIN {
++ use Exporter;
++
++
++ our $VERSION = 'v2.3.0';
++ our @ISA = ('Exporter');
++ our @EXPORT = qw(
++ $INITDIR
++ $HA_DIR
++ $HA_RCDIR
++ $HA_CONFDIR
++ $HA_CF
++ $HA_VARLIB
++ $HA_RSCTMP
++ $HA_RSCTMP_OLD
++ $HA_FIFO
++ $HA_BIN
++ $HA_SBIN_DIR
++ $HA_DATEFMT
++ $HA_DEBUGLOG
++ $HA_RESOURCEDIR
++ $HA_DOCDIR
++ $__SCRIPT_NAME
++ $HA_VARRUN
++ $HA_VARLOCK
++ $ocf_prefix
++ $ocf_exec_prefix
++ );
++ our @EXPORT_OK = ( @EXPORT );
++}
++
++our $INITDIR = ( $ENV{'INITDIR'} || '/etc/init.d' );
++our $HA_DIR = ( $ENV{'HA_DIR'} || '/etc/ha.d' );
++our $HA_RCDIR = ( $ENV{'HA_RCDIR'} || '/etc/ha.d/rc.d' );
++our $HA_CONFDIR = ( $ENV{'HA_CONFDIR'} || '/etc/ha.d/conf' );
++our $HA_CF = ( $ENV{'HA_CF'} || '/etc/ha.d/ha.cf' );
++our $HA_VARLIB = ( $ENV{'HA_VARLIB'} || '/var/lib/heartbeat' );
++our $HA_RSCTMP = ( $ENV{'HA_RSCTMP'} || '/run/resource-agents' );
++our $HA_RSCTMP_OLD = ( $ENV{'HA_RSCTMP_OLD'} || '/var/run/heartbeat/rsctmp' );
++our $HA_FIFO = ( $ENV{'HA_FIFO'} || '/var/lib/heartbeat/fifo' );
++our $HA_BIN = ( $ENV{'HA_BIN'} || '/usr/libexec/heartbeat' );
++our $HA_SBIN_DIR = ( $ENV{'HA_SBIN_DIR'} || '/usr/sbin' );
++our $HA_DATEFMT = ( $ENV{'HA_DATEFMT'} || '%b %d %T ' );
++our $HA_DEBUGLOG = ( $ENV{'HA_DEBUGLOG'} || '/dev/null' );
++our $HA_RESOURCEDIR = ( $ENV{'HA_RESOURCEDIR'}|| '/etc/ha.d/resource.d' );
++our $HA_DOCDIR = ( $ENV{'HA_DOCDIR'} || '/usr/share/doc/heartbeat' );
++our $__SCRIPT_NAME = ( $ENV{'__SCRIPT_NAME'} || fileparse($0) );
++our $HA_VARRUN = ( $ENV{'HA_VARRUN'} || '/var/run' );
++our $HA_VARLOCK = ( $ENV{'HA_VARLOCK'} || '/var/lock/subsys' );
++our $ocf_prefix = '/usr';
++our $ocf_exec_prefix = '/usr';
++
++1;
++
++=head1 COPYRIGHT AND LICENSE
++
++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++Licensed under the PostgreSQL License.
++
+diff --color -uNr a/heartbeat/OCF_Functions.pm b/heartbeat/OCF_Functions.pm
+--- a/heartbeat/OCF_Functions.pm 1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/OCF_Functions.pm 2021-04-13 13:37:35.621267404 +0200
+@@ -0,0 +1,631 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++OCF_Functions - helper subroutines for OCF agent
++
++=head1 SYNOPSIS
++
++ use FindBin;
++ use lib "$FindBin::RealBin/../../lib/heartbeat/";
++
++ use OCF_Functions;
++
++=head1 DESCRIPTION
++
++This module has been ported from the ocf-shellfuncs shell script of the
++resource-agents project. See L<https://github.com/ClusterLabs/resource-agents/>.
++
++=head1 VARIABLE
++
++The only variable exported by this module is C<__OCF_ACTION>.
++
++=head1 SUBROUTINES
++
++Here are the subroutines ported from ocf-shellfuncs and exported by this module:
++
++=over
++
++=item ha_debug
++
++=item ha_log
++
++=item hadate
++
++=item ocf_is_clone
++
++=item ocf_is_ms
++
++=item ocf_is_probe
++
++=item ocf_is_root
++
++=item ocf_is_true
++
++=item ocf_is_ver
++
++=item ocf_local_nodename
++
++=item ocf_log
++
++=item ocf_exit_reason
++
++=item ocf_maybe_random
++
++=item ocf_ver2num
++
++=item ocf_ver_complete_level
++
++=item ocf_ver_level
++
++=item ocf_version_cmp
++
++=item set_logtag
++
++=back
++
++Here are the subroutines only existing in the perl module but not in the
++ocf-shellfuncs script:
++
++=over
++
++=item ocf_notify_env
++
++=back
++
++=cut
++
++package OCF_Functions;
++
++use strict;
++use warnings;
++use 5.008;
++use POSIX qw( strftime setlocale LC_ALL );
++use English;
++
++use FindBin;
++use lib "$FindBin::RealBin/../../lib/heartbeat/";
++
++use OCF_ReturnCodes;
++use OCF_Directories;
++
++BEGIN {
++ use Exporter;
++
++ our $VERSION = 'v2.3.0';
++ our @ISA = ('Exporter');
++ our @EXPORT = qw(
++ $__OCF_ACTION
++ ocf_is_root
++ ocf_maybe_random
++ ocf_is_true
++ hadate
++ set_logtag
++ ha_log
++ ha_debug
++ ocf_log
++ ocf_exit_reason
++ ocf_is_probe
++ ocf_is_clone
++ ocf_is_ms
++ ocf_is_ver
++ ocf_ver2num
++ ocf_ver_level
++ ocf_ver_complete_level
++ ocf_version_cmp
++ ocf_local_nodename
++ ocf_notify_env
++ );
++ our @EXPORT_OK = ( @EXPORT );
++}
++
++our $__OCF_ACTION;
++
++sub ocf_is_root {
++ return $EUID == 0;
++}
++
++sub ocf_maybe_random {
++ return int( rand( 32767 ) );
++}
++
++sub ocf_is_true {
++ my $v = shift;
++ return ( defined $v and $v =~ /^(?:yes|true|1|YES|TRUE|ja|on|ON)$/ );
++}
++
++sub hadate {
++ return strftime( $HA_DATEFMT, localtime );
++}
++
++sub set_logtag {
++
++ return if defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '';
++
++ if ( defined $ENV{'OCF_RESOURCE_INSTANCE'} and $ENV{'OCF_RESOURCE_INSTANCE'} ne '' ) {
++ $ENV{'HA_LOGTAG'} = "$__SCRIPT_NAME($ENV{'OCF_RESOURCE_INSTANCE'})[$PID]";
++ }
++ else {
++ $ENV{'HA_LOGTAG'}="${__SCRIPT_NAME}[$PID]";
++ }
++}
++
++sub __ha_log {
++ my $ignore_stderr = 0;
++ my $loglevel = '';
++
++ if ( $_[0] eq '--ignore-stderr' ) {
++ $ignore_stderr = 1;
++ shift;
++ }
++
++ $ENV{'HA_LOGFACILITY'} = '' if not defined $ENV{'HA_LOGFACILITY'}
++ or $ENV{'HA_LOGFACILITY'} eq 'none';
++
++ # if we're connected to a tty, then output to stderr
++ if ( -t STDERR ) {
++ # FIXME
++ # T.N.: this was ported with the bug on $loglevel being empty
++ # and never set before the test here...
++ if ( defined $ENV{'HA_debug'}
++ and $ENV{'HA_debug'} == 0
++ and $loglevel eq 'debug'
++ ) {
++ return 0;
++ }
++ elsif ( $ignore_stderr ) {
++ # something already printed this error to stderr, so ignore
++ return 0;
++ }
++ if ( defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '' ) {
++ printf STDERR "%s: %s\n", $ENV{'HA_LOGTAG'}, join ' ', @ARG;
++ }
++ else {
++ printf STDERR "%s\n", join ' ', @ARG;
++ }
++ return 0;
++ }
++
++ set_logtag();
++
++ if ( defined $ENV{'HA_LOGD'} and $ENV{'HA_LOGD'} eq 'yes' ) {
++ system 'ha_logger', '-t', $ENV{'HA_LOGTAG'}, @ARG;
++ return 0 if ( $? >> 8 ) == 0;
++ }
++
++ unless ( $ENV{'HA_LOGFACILITY'} eq '' ) {
++ # logging through syslog
++ # loglevel is unknown, use 'notice' for now
++ $loglevel = 'notice';
++ for ( "@ARG" ) {
++ if ( /ERROR/ ) {
++ $loglevel = 'err';
++ }
++ elsif ( /WARN/ ) {
++ $loglevel = 'warning';
++ }
++ elsif (/INFO|info/ ) {
++ $loglevel = 'info';
++ }
++ }
++
++ system 'logger', '-t', $ENV{'HA_LOGTAG'}, '-p',
++ "$ENV{'HA_LOGFACILITY'}.$loglevel", @ARG;
++ }
++
++ if ( defined $ENV{'HA_LOGFILE'} and $ENV{'HA_LOGFILE'} ne '' ) {
++ # appending to $HA_LOGFILE
++ open my $logfile, '>>', $ENV{'HA_LOGFILE'};
++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(),
++ join (' ', @ARG);
++ close $logfile;
++ }
++
++ # appending to stderr
++ printf STDERR "%s %s\n", hadate(), join ' ', @ARG
++ if (not defined $ENV{'HA_LOGFACILITY'} or $ENV{'HA_LOGFACILITY'} eq '')
++ and (not defined $ENV{'HA_LOGFILE'} or $ENV{'HA_LOGFILE'} eq '' )
++ and not $ignore_stderr;
++
++ if ( defined $ENV{'HA_DEBUGLOG'} and $ENV{'HA_DEBUGLOG'} ne ''
++ and $ENV{'HA_LOGFILE'} ne $ENV{'HA_DEBUGLOG'}
++ ) {
++ # appending to $HA_DEBUGLOG
++ open my $logfile, '>>', $ENV{'HA_DEBUGLOG'};
++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(),
++ join (' ', @ARG);
++ close $logfile;
++ }
++}
++
++sub ha_log {
++ return __ha_log( @ARG );
++}
++
++sub ha_debug {
++
++ return 0 if defined $ENV{'HA_debug'} and $ENV{'HA_debug'} == 0;
++
++ if ( -t STDERR ) {
++ if ( defined $ENV{'HA_LOGTAG'} and $ENV{'HA_LOGTAG'} ne '' ) {
++ printf STDERR "%s: %s\n", $ENV{'HA_LOGTAG'}, join ' ', @ARG;
++ }
++ else {
++ printf STDERR "%s\n", join ' ', @ARG;
++ }
++
++ return 0;
++ }
++
++ set_logtag();
++
++ if ( defined $ENV{'HA_LOGD'} and $ENV{'HA_LOGD'} eq 'yes' ) {
++ system 'ha_logger', '-t', $ENV{'HA_LOGTAG'}, '-D', 'ha-debug', @ARG;
++ return 0 if ( $? >> 8 ) == 0;
++ }
++
++ $ENV{'HA_LOGFACILITY'} = '' if not defined $ENV{'HA_LOGFACILITY'}
++ or $ENV{'HA_LOGFACILITY'} eq 'none';
++
++ unless ( $ENV{'HA_LOGFACILITY'} eq '' ) {
++ # logging through syslog
++
++ system 'logger', '-t', $ENV{'HA_LOGTAG'}, '-p',
++ "$ENV{'HA_LOGFACILITY'}.debug", @ARG;
++ }
++
++ if ( defined $ENV{'HA_DEBUGLOG'} and -f $ENV{'HA_DEBUGLOG'} ) {
++ my $logfile;
++ # appending to $HA_DEBUGLOG
++ open $logfile, '>>', $ENV{'HA_DEBUGLOG'};
++ printf $logfile "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(),
++ join (' ', @ARG);
++ close $logfile;
++ }
++
++ # appending to stderr
++ printf STDERR "%s: %s %s\n", $ENV{'HA_LOGTAG'}, hadate(), join ' ', @ARG
++ if (not defined $ENV{'HA_LOGFACILITY'} or $ENV{'HA_LOGFACILITY'} eq '')
++ and (not defined $ENV{'HA_DEBUGLOG'} or $ENV{'HA_DEBUGLOG'} eq '' );
++}
++
++#
++# ocf_log: log messages from the resource agent
++# This function is slightly different from its equivalent in ocf-shellfuncs.in
++# as it behaves like printf.
++# Arguments:
++# * __OCF_PRIO: log level
++# * __OCF_MSG: printf-like format string
++# * all other arguments are values for the printf-like format string
++#
++sub ocf_log {
++ my $__OCF_PRIO;
++ my $__OCF_MSG;
++
++ # TODO: Revisit and implement internally.
++ if ( scalar @ARG < 2 ) {
++ ocf_log ( 'err', "Not enough arguments [%d] to ocf_log", scalar @ARG );
++ }
++
++ $__OCF_PRIO = shift;
++ $__OCF_MSG = shift;
++ $__OCF_MSG = sprintf $__OCF_MSG, @ARG;
++
++ for ( $__OCF_PRIO ) {
++ if ( /crit/ ) { $__OCF_PRIO = 'CRIT' }
++ elsif ( /err/ ) { $__OCF_PRIO = 'ERROR' }
++ elsif ( /warn/ ) { $__OCF_PRIO = 'WARNING' }
++ elsif ( /info/ ) { $__OCF_PRIO = 'INFO' }
++ elsif ( /debug/ ) { $__OCF_PRIO = 'DEBUG' }
++ else { $__OCF_PRIO =~ tr/[a-z]/[A-Z]/ }
++ }
++
++ if ( $__OCF_PRIO eq 'DEBUG' ) {
++ ha_debug( "$__OCF_PRIO: $__OCF_MSG");
++ }
++ else {
++ ha_log( "$__OCF_PRIO: $__OCF_MSG");
++ }
++}
++
++
++#
++# ocf_exit_reason: print exit error string to stderr and log
++# Usage: Allows the OCF script to provide a string
++# describing why the exit code was returned.
++# Arguments: reason - required, The string that represents
++# why the error occurred.
++#
++sub ocf_exit_reason {
++ my $cookie = $ENV{'OCF_EXIT_REASON_PREFIX'} || 'ocf-exit-reason:';
++ my $fmt;
++ my $msg;
++
++ # No argument is likely not intentional.
++ # Just one argument implies a printf format string of just "%s".
++ # "Least surprise" in case some interpolated string from variable
++ # expansion or other contains a percent sign.
++ # More than one argument: first argument is going to be the format string.
++ ocf_log ( 'err', 'Not enough arguments [%d] to ocf_exit_reason',
++ scalar @ARG ) if scalar @ARG < 1;
++
++ $fmt = shift;
++ $msg = sprintf $fmt, @ARG;
++
++ print STDERR "$cookie$msg\n";
++ __ha_log( '--ignore-stderr', "ERROR: $msg" );
++}
++
++# returns true if the CRM is currently running a probe. A probe is
++# defined as a monitor operation with a monitoring interval of zero.
++sub ocf_is_probe {
++ return ( $__OCF_ACTION eq 'monitor'
++ and $ENV{'OCF_RESKEY_CRM_meta_interval'} == 0 );
++}
++
++# returns true if the resource is configured as a clone. This is
++# defined as a resource where the clone-max meta attribute is present,
++# and set to greater than zero.
++sub ocf_is_clone {
++ return ( defined $ENV{'OCF_RESKEY_CRM_meta_clone_max'}
++ and $ENV{'OCF_RESKEY_CRM_meta_clone_max'} > 0 );
++}
++
++# returns true if the resource is configured as a multistate
++# (master/slave) resource. This is defined as a resource where the
++# master-max meta attribute is present, and set to greater than zero.
++sub ocf_is_ms {
++ return ( defined $ENV{'OCF_RESKEY_CRM_meta_master_max'}
++ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} > 0 );
++}
++
++# version check functions
++# allow . and - to delimit version numbers
++# max version number is 999
++# letters and such are effectively ignored
++#
++sub ocf_is_ver {
++ return $ARG[0] =~ /^[0-9][0-9.-]*[0-9]$/;
++}
++
++sub ocf_ver2num {
++ my $v = 0;
++
++ $v = $v * 1000 + $1 while $ARG[0] =~ /(\d+)/g;
++
++ return $v;
++}
++
++sub ocf_ver_level {
++ my $v = () = $ARG[0] =~ /(\d+)/g;
++ return $v;
++}
++
++sub ocf_ver_complete_level {
++ my $ver = shift;
++ my $level = shift;
++ my $i = 0;
++
++ for ( my $i = 0; $i < $level; $i++ ) {
++ $ver .= "$ver.0";
++ }
++
++ return $ver;
++}
++
++# usage: ocf_version_cmp VER1 VER2
++# version strings can contain digits, dots, and dashes
++# must start and end with a digit
++# returns:
++# 0: VER1 smaller (older) than VER2
++# 1: versions equal
++# 2: VER1 greater (newer) than VER2
++# 3: bad format
++sub ocf_version_cmp {
++ my $v1 = shift;
++ my $v2 = shift;
++ my $v1_level;
++ my $v2_level;
++ my $level_diff;
++
++ return 3 unless ocf_is_ver( $v1 );
++ return 3 unless ocf_is_ver( $v2 );
++
++ $v1_level = ocf_ver_level( $v1 );
++ $v2_level = ocf_ver_level( $v2 );
++
++ if ( $v1_level < $v2_level ) {
++ $level_diff = $v2_level - $v1_level;
++ $v1 = ocf_ver_complete_level( $v1, $level_diff );
++ }
++ elsif ( $v1_level > $v2_level ) {
++ $level_diff = $v1_level - $v2_level;
++ $v2 = ocf_ver_complete_level( $v2, $level_diff );
++ }
++
++ $v1 = ocf_ver2num( $v1 );
++ $v2 = ocf_ver2num( $v2 );
++
++ if ( $v1 == $v2 ) { return 1; }
++ elsif ( $v1 < $v2 ) { return 0; }
++
++ return 2; # -1 would look funny in shell ;-) ( T.N. not in perl ;) )
++}
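++
++# Illustrative only (not part of the original module): a worked example of the
++# version helpers above, following the numbering scheme used by ocf_ver2num:
++#   ocf_ver2num('1.1.8') == 1001008, ocf_ver2num('1.1.12') == 1001012
++#   ocf_version_cmp('1.1.8', '1.1.12') returns 0 (VER1 is older)
++#   ocf_version_cmp('2.0', '2.0')      returns 1 (equal)
++#   ocf_version_cmp('2.0', 'foo')      returns 3 (bad format)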
++
++sub ocf_local_nodename {
++ # use crm_node -n for pacemaker > 1.1.8
++ my $nodename;
++
++ qx{ which pacemakerd > /dev/null 2>&1 };
++ if ( $? == 0 ) {
++ my $version;
++ my $ret = qx{ pacemakerd -\$ };
++
++ $ret =~ /Pacemaker ([\d.]+)/;
++ $version = $1;
++
++ if ( ocf_version_cmp( $version, '1.1.8' ) == 2 ) {
++ qx{ which crm_node > /dev/null 2>&1 };
++ $nodename = qx{ crm_node -n } if $? == 0;
++ }
++ }
++ else {
++ # otherwise use uname -n
++ $nodename = qx { uname -n };
++ }
++
++ chomp $nodename;
++ return $nodename;
++}
++
++# Parse and return the notify environment variables in a convenient structure.
++# Returns undef if the action is not a notify.
++# Returns undef if the resource is neither a clone nor a multistate one.
++sub ocf_notify_env {
++ my $i;
++ my %notify_env;
++
++ return undef unless $__OCF_ACTION eq 'notify';
++
++ return undef unless ocf_is_clone() or ocf_is_ms();
++
++ %notify_env = (
++ 'type' => $ENV{'OCF_RESKEY_CRM_meta_notify_type'} || '',
++ 'operation' => $ENV{'OCF_RESKEY_CRM_meta_notify_operation'} || '',
++ 'active' => [ ],
++ 'inactive' => [ ],
++ 'start' => [ ],
++ 'stop' => [ ],
++ );
++
++ for my $action ( qw{ active start stop } ) {
++ next unless
++ defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"}
++ and defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++
++ $i = 0;
++ $notify_env{ $action }[$i++]{'rsc'} = $_ foreach split /\s+/ =>
++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"};
++
++ $i = 0;
++ $notify_env{ $action }[$i++]{'uname'} = $_ foreach split /\s+/ =>
++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++ }
++
++ # notify_inactive_uname doesn't exist. See:
++ # http://lists.clusterlabs.org/pipermail/developers/2017-January/000406.html
++ if ( defined $ENV{"OCF_RESKEY_CRM_meta_notify_inactive_resource"} ) {
++ $i = 0;
++ $notify_env{'inactive'}[$i++]{'rsc'} = $_ foreach split /\s+/ =>
++ $ENV{"OCF_RESKEY_CRM_meta_notify_inactive_resource"};
++ }
++
++ # exit if the resource is not a multistate one
++ return %notify_env unless ocf_is_ms();
++
++ for my $action ( qw{ master slave promote demote } ) {
++ $notify_env{ $action } = [ ];
++
++ next unless
++ defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"}
++ and defined $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++
++ $i = 0;
++ $notify_env{ $action }[$i++]{'rsc'} = $_ foreach split /\s+/ =>
++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_resource"};
++
++ $i = 0;
++ $notify_env{ $action }[$i++]{'uname'} = $_ foreach split /\s+/ =>
++ $ENV{"OCF_RESKEY_CRM_meta_notify_${action}_uname"};
++ }
++
++ # Fix active and inactive fields for Pacemaker version < 1.1.16
++ # ie. crm_feature_set < 3.0.11
++ # See http://lists.clusterlabs.org/pipermail/developers/2016-August/000265.html
++ # and git commit a6713c5d40327eff8549e7f596501ab1785b8765
++ if (
++ ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.11' ) == 0
++ ) {
++ $notify_env{ 'active' } = [
++ @{ $notify_env{ 'master' } },
++ @{ $notify_env{ 'slave' } }
++ ];
++ }
++
++ return %notify_env;
++}
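++
++# For illustration only (resource and node names below are hypothetical): for a
++# multistate resource, ocf_notify_env() returns a structure of the form:
++#   ( 'type'      => 'pre',
++#     'operation' => 'promote',
++#     'active'    => [ { 'rsc' => 'pgsqld:0', 'uname' => 'node1' }, ... ],
++#     'inactive'  => [ ... ], 'start'  => [ ... ], 'stop'   => [ ... ],
++#     'master'    => [ ... ], 'slave'  => [ ... ],
++#     'promote'   => [ ... ], 'demote' => [ ... ] )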
++
++$__OCF_ACTION = $ARGV[0];
++
++# Return to sanity for the agents...
++
++undef $ENV{'LC_ALL'};
++$ENV{'LC_ALL'} = 'C';
++setlocale( LC_ALL, 'C' );
++undef $ENV{'LANG'};
++undef $ENV{'LANGUAGE'};
++
++$ENV{'OCF_ROOT'} = '/usr/lib/ocf'
++ unless defined $ENV{'OCF_ROOT'} and $ENV{'OCF_ROOT'} ne '';
++
++# old
++undef $ENV{'OCF_FUNCTIONS_DIR'}
++ if defined $ENV{'OCF_FUNCTIONS_DIR'}
++ and $ENV{'OCF_FUNCTIONS_DIR'} eq "$ENV{'OCF_ROOT'}/resource.d/heartbeat";
++
++# Define OCF_RESKEY_CRM_meta_interval in case it isn't already set,
++# to make sure that ocf_is_probe() always works
++$ENV{'OCF_RESKEY_CRM_meta_interval'} = 0
++ unless defined $ENV{'OCF_RESKEY_CRM_meta_interval'};
++
++# Strip the OCF_RESKEY_ prefix from this particular parameter
++unless ( defined $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'}
++ and $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'} ne ''
++) {
++ $ENV{'OCF_CHECK_LEVEL'} = 0;
++}
++else {
++ $ENV{'OCF_CHECK_LEVEL'} = $ENV{'OCF_RESKEY_OCF_CHECK_LEVEL'};
++}
++
++unless ( -d $ENV{'OCF_ROOT'} ) {
++ ha_log( "ERROR: OCF_ROOT points to non-directory $ENV{'OCF_ROOT'}." );
++ $! = $OCF_ERR_GENERIC;
++ die;
++}
++
++$ENV{'OCF_RESOURCE_TYPE'} = $__SCRIPT_NAME
++ unless defined $ENV{'OCF_RESOURCE_TYPE'}
++ and $ENV{'OCF_RESOURCE_TYPE'} ne '';
++
++unless ( defined $ENV{'OCF_RA_VERSION_MAJOR'}
++ and $ENV{'OCF_RA_VERSION_MAJOR'} ne ''
++) {
++ # We are being invoked as an init script.
++ # Fill in some things with reasonable values.
++ $ENV{'OCF_RESOURCE_INSTANCE'} = 'default';
++ return 1;
++}
++
++$ENV{'OCF_RESOURCE_INSTANCE'} = "undef" if $__OCF_ACTION eq 'meta-data';
++
++unless ( defined $ENV{'OCF_RESOURCE_INSTANCE'}
++ and $ENV{'OCF_RESOURCE_INSTANCE'} ne ''
++) {
++ ha_log( "ERROR: Need to tell us our resource instance name." );
++ $! = $OCF_ERR_ARGS;
++ die;
++}
++
++1;
++
++
++=head1 COPYRIGHT AND LICENSE
++
++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++Licensed under the PostgreSQL License.
+diff --color -uNr a/heartbeat/OCF_ReturnCodes.pm b/heartbeat/OCF_ReturnCodes.pm
+--- a/heartbeat/OCF_ReturnCodes.pm 1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/OCF_ReturnCodes.pm 2021-04-13 13:37:35.621267404 +0200
+@@ -0,0 +1,97 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++OCF_ReturnCodes - Common variables for the OCF Resource Agents supplied by
++heartbeat.
++
++=head1 SYNOPSIS
++
++ use FindBin;
++ use lib "$FindBin::RealBin/../../lib/heartbeat/";
++
++ use OCF_ReturnCodes;
++
++=head1 DESCRIPTION
++
++This module has been ported from the ocf-returncodes shell script of the
++resource-agents project. See L<https://github.com/ClusterLabs/resource-agents/>.
++
++=head1 VARIABLES
++
++Here are the variables exported by this module:
++
++=over
++
++=item $OCF_SUCCESS
++
++=item $OCF_ERR_GENERIC
++
++=item $OCF_ERR_ARGS
++
++=item $OCF_ERR_UNIMPLEMENTED
++
++=item $OCF_ERR_PERM
++
++=item $OCF_ERR_INSTALLED
++
++=item $OCF_ERR_CONFIGURED
++
++=item $OCF_NOT_RUNNING
++
++=item $OCF_RUNNING_MASTER
++
++=item $OCF_FAILED_MASTER
++
++=back
++
++=cut
++
++package OCF_ReturnCodes;
++
++use strict;
++use warnings;
++use 5.008;
++
++BEGIN {
++ use Exporter;
++
++ our $VERSION = 'v2.3.0';
++ our @ISA = ('Exporter');
++ our @EXPORT = qw(
++ $OCF_SUCCESS
++ $OCF_ERR_GENERIC
++ $OCF_ERR_ARGS
++ $OCF_ERR_UNIMPLEMENTED
++ $OCF_ERR_PERM
++ $OCF_ERR_INSTALLED
++ $OCF_ERR_CONFIGURED
++ $OCF_NOT_RUNNING
++ $OCF_RUNNING_MASTER
++ $OCF_FAILED_MASTER
++ );
++ our @EXPORT_OK = ( @EXPORT );
++}
++
++our $OCF_SUCCESS = 0;
++our $OCF_ERR_GENERIC = 1;
++our $OCF_ERR_ARGS = 2;
++our $OCF_ERR_UNIMPLEMENTED = 3;
++our $OCF_ERR_PERM = 4;
++our $OCF_ERR_INSTALLED = 5;
++our $OCF_ERR_CONFIGURED = 6;
++our $OCF_NOT_RUNNING = 7;
++our $OCF_RUNNING_MASTER = 8;
++our $OCF_FAILED_MASTER = 9;
++
++1;
++
++=head1 COPYRIGHT AND LICENSE
++
++Copyright (C) 2016: Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++Licensed under the PostgreSQL License.
+diff --color -uNr a/heartbeat/pgsqlms b/heartbeat/pgsqlms
+--- a/heartbeat/pgsqlms 1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/pgsqlms 2021-04-13 13:37:40.934280411 +0200
+@@ -0,0 +1,2308 @@
++#!/usr/bin/perl
++# This program is open source, licensed under the PostgreSQL License.
++# For license terms, see the LICENSE file.
++#
++# Copyright (C) 2016-2020: Jehan-Guillaume de Rorthais and Mael Rimbault
++
++=head1 NAME
++
++ocf_heartbeat_pgsqlms - A PostgreSQL multi-state resource agent for Pacemaker
++
++=head1 SYNOPSIS
++
++B<pgsqlms> [start | stop | monitor | promote | demote | notify | reload | methods | meta-data | validate-all]
++
++=head1 DESCRIPTION
++
++Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource.
++
++=cut
++
++use strict;
++use warnings;
++use 5.008;
++
++use POSIX qw(locale_h);
++use Scalar::Util qw(looks_like_number);
++use File::Spec;
++use File::Temp;
++use Data::Dumper;
++
++my $OCF_FUNCTIONS_DIR;
++BEGIN {
++ $OCF_FUNCTIONS_DIR = defined $ENV{'OCF_FUNCTIONS_DIR'} ? "$ENV{'OCF_FUNCTIONS_DIR'}" : "$ENV{'OCF_ROOT'}/lib/heartbeat";
++}
++use lib "$OCF_FUNCTIONS_DIR";
++
++use OCF_ReturnCodes;
++use OCF_Directories;
++use OCF_Functions;
++
++our $VERSION = 'v2.3.0';
++our $PROGRAM = 'pgsqlms';
++
++# OCF environment
++my $OCF_RESOURCE_INSTANCE = $ENV{'OCF_RESOURCE_INSTANCE'};
++my $OCF_RUNNING_SLAVE = $OCF_SUCCESS;
++my %OCF_NOTIFY_ENV = ocf_notify_env() if $__OCF_ACTION eq 'notify';
++
++# Default parameters values
++my $system_user_default = "postgres";
++my $bindir_default = "/usr/bin";
++my $pgdata_default = "/var/lib/pgsql/data";
++my $pghost_default = "/tmp";
++my $pgport_default = 5432;
++my $start_opts_default = "";
++my $maxlag_default = "0";
++
++# Set default values if not found in environment
++my $system_user = $ENV{'OCF_RESKEY_system_user'} || $system_user_default;
++my $bindir = $ENV{'OCF_RESKEY_bindir'} || $bindir_default;
++my $pgdata = $ENV{'OCF_RESKEY_pgdata'} || $pgdata_default;
++my $datadir = $ENV{'OCF_RESKEY_datadir'} || $pgdata;
++my $pghost = $ENV{'OCF_RESKEY_pghost'} || $pghost_default;
++my $pgport = $ENV{'OCF_RESKEY_pgport'} || $pgport_default;
++my $start_opts = $ENV{'OCF_RESKEY_start_opts'} || $start_opts_default;
++my $maxlag = $ENV{'OCF_RESKEY_maxlag'} || $maxlag_default;
++my $recovery_tpl = $ENV{'OCF_RESKEY_recovery_template'}
++ || "$pgdata/recovery.conf.pcmk";
++
++
++# PostgreSQL commands path
++my $POSTGRES = "$bindir/postgres";
++my $PGCTL = "$bindir/pg_ctl";
++my $PGPSQL = "$bindir/psql";
++my $PGCTRLDATA = "$bindir/pg_controldata";
++my $PGISREADY = "$bindir/pg_isready";
++my $PGWALDUMP = "$bindir/pg_waldump";
++
++# pacemaker commands path
++my $CRM_MASTER = "$HA_SBIN_DIR/crm_master --lifetime forever";
++my $CRM_NODE = "$HA_SBIN_DIR/crm_node";
++my $CRM_RESOURCE = "$HA_SBIN_DIR/crm_resource";
++my $ATTRD_PRIV = "$HA_SBIN_DIR/attrd_updater --private --lifetime reboot";
++
++# Global vars
++my $nodename;
++my $exit_code = 0;
++# numeric pgsql versions
++my $PGVERNUM;
++my $PGVER_93 = 90300;
++my $PGVER_10 = 100000;
++my $PGVER_12 = 120000;
++
++# Run a query using psql.
++#
++# The psql return code is returned and the result rows are pushed into the
++# array reference given as second argument.
++#
++sub _query {
++ my $query = shift;
++ my $res = shift;
++ my $connstr = "dbname=postgres";
++ my $RS = chr(30); # ASCII RS (record separator)
++ my $FS = chr(3); # ASCII ETX (end of text)
++ my $postgres_uid = getpwnam( $system_user );
++ my $oldeuid = $>;
++ my $tmpfile;
++ my @res;
++ my $ans;
++ my $pid;
++ my $rc;
++
++ unless ( defined $res and defined $query and $query ne '' ) {
++ ocf_log( 'debug', '_query: wrong parameters!' );
++ return -1;
++ }
++
++ unless ( $tmpfile = File::Temp->new(
++ TEMPLATE => 'pgsqlms-XXXXXXXX',
++ DIR => $HA_RSCTMP
++ ) )
++ {
++ ocf_exit_reason( 'Could not create or write in a temp file' );
++ exit $OCF_ERR_INSTALLED;
++ }
++
++ print $tmpfile $query;
++ chmod 0644, $tmpfile;
++
++ ocf_log( 'debug', '_query: %s', $query );
++
++ # Change the effective user to the given system_user so that, after
++ # forking, psql runs with its uid and can connect without a password
++ $> = $postgres_uid;
++
++ # Forking + piping
++ $pid = open(my $KID, "-|");
++
++ if ( $pid == 0 ) { # child
++ exec $PGPSQL, '--set', 'ON_ERROR_STOP=1', '-qXAtf', $tmpfile,
++ '-R', $RS, '-F', $FS, '--port', $pgport, '--host', $pghost,
++ $connstr;
++ }
++
++ # parent
++ $> = $oldeuid;
++
++ {
++ local $/;
++ $ans = <$KID>;
++ }
++
++ close $KID;
++ $rc = $? >> 8;
++
++ ocf_log( 'debug', '_query: psql return code: %d', $rc );
++
++ if ( defined $ans ) {
++ chop $ans;
++
++ push @{ $res }, [ split(chr(3) => $_, -1) ]
++ foreach split (chr(30) => $ans, -1);
++
++ ocf_log( 'debug', '_query: @res: %s',
++ Data::Dumper->new( [ $res ] )->Terse(1)->Dump );
++ }
++
++ # Possible return codes:
++ # -1: wrong parameters
++ # 0: OK
++ # 1: failed to get resources (memory, missing file, ...)
++ # 2: unable to connect
++ # 3: query failed
++ return $rc;
++}
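++
++# Illustrative caller sketch (mirrors how _confirm_role uses this helper):
++#   my @rs;
++#   my $rc = _query( 'SELECT pg_is_in_recovery()', \@rs );
++#   # on success $rc is 0 and $rs[0][0] is 't' on a standby, 'f' on a primary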
++
++# Get the last received location on a standby
++# if the first argument is true, returns the value as decimal
++# if the first argument is false, returns the value as LSN
++# Returns undef if query failed
++sub _get_last_received_lsn {
++ my ( $dec ) = @_;
++ my $pg_last_wal_receive_lsn = 'pg_last_wal_receive_lsn()';
++ my $pg_wal_lsn_diff = 'pg_wal_lsn_diff';
++ my $query;
++ my $rc;
++ my @rs;
++
++ if ( $PGVERNUM < $PGVER_10 ) {
++ $pg_last_wal_receive_lsn = 'pg_last_xlog_receive_location()';
++ $pg_wal_lsn_diff = 'pg_xlog_location_diff';
++ }
++
++ if ( $dec ) {
++ $query = "SELECT $pg_wal_lsn_diff( $pg_last_wal_receive_lsn, '0/0' )";
++ }
++ else {
++ $query = "SELECT $pg_last_wal_receive_lsn";
++ }
++
++ $rc = _query( $query, \@rs );
++
++ return $rs[0][0] if $rc == 0 and $rs[0][0];
++
++ ocf_log( 'err', 'Could not query last received LSN (%s)', $rc ) if $rc != 0;
++ ocf_log( 'err', 'No values for last received LSN' )
++ if $rc == 0 and not $rs[0][0];
++
++ return undef;
++}
++
++# Get the master score for each connected standby
++# Directly returns the result set of the query or exits with an error.
++# Exits with OCF_ERR_GENERIC if the query failed
++sub _get_lag_scores {
++ my $pg_current_wal_lsn = 'pg_current_wal_lsn()';
++ my $pg_wal_lsn_diff = 'pg_wal_lsn_diff';
++ my $write_lsn = 'write_lsn';
++ my $query;
++ my $rc;
++ my @rs;
++
++ if ( $PGVERNUM < $PGVER_10 ) {
++ $pg_current_wal_lsn = 'pg_current_xlog_location()';
++ $pg_wal_lsn_diff = 'pg_xlog_location_diff';
++ $write_lsn = 'write_location';
++ }
++
++ # We check locations of connected standbies by querying the
++ # "pg_stat_replication" view.
++ # The row_number applies on the result set ordered on write_location ASC so
++ # the highest row_number should be given to the closest node from the
++ # master, then the lowest node name (alphanumeric sort) in case of equality.
++ # The result set itself is ordered by priority DESC to process the best known
++ # candidate first.
++ $query = qq{
++ SELECT application_name, priority, location, state, current_lag
++ FROM (
++ SELECT application_name,
++ (1000 - (
++ row_number() OVER (
++ PARTITION BY state IN ('startup', 'backup')
++ ORDER BY location ASC, application_name ASC
++ ) - 1
++ ) * 10
++ ) * CASE WHEN ( $maxlag > 0
++ AND current_lag > $maxlag)
++ THEN -1
++ ELSE 1
++ END AS priority,
++ location, state, current_lag
++ FROM (
++ SELECT application_name, $write_lsn AS location, state,
++ $pg_wal_lsn_diff($pg_current_wal_lsn, $write_lsn) AS current_lag
++ FROM pg_stat_replication
++ ) AS s2
++ ) AS s1
++ ORDER BY priority DESC
++ };
++
++ $rc = _query( $query, \@rs );
++
++ if ( $rc != 0 ) {
++ ocf_exit_reason( 'Query to get standby locations failed (%d)', $rc );
++ exit $OCF_ERR_GENERIC;
++ }
++
++ return \@rs;
++}
++
++# get the timeout for the current action given from environment var
++# Returns timeout as integer
++# undef if unknown
++sub _get_action_timeout {
++ my $timeout = $ENV{'OCF_RESKEY_CRM_meta_timeout'} / 1000;
++
++ ocf_log( 'debug', '_get_action_timeout: known timeout: %s',
++ defined $timeout ? $timeout : 'undef' );
++
++ return $timeout if defined $timeout and $timeout =~ /^\d+$/;
++
++ return undef;
++}
++
++# Get, parse and return the value of the given private attribute name
++# Returns an empty string if not found.
++sub _get_priv_attr {
++ my ( $name, $node ) = @_;
++ my $val = '';
++ my $node_arg = '';
++ my $ans;
++
++ $node = '' unless defined $node;
++ $name = "$name-$OCF_RESOURCE_INSTANCE";
++
++ $node_arg= "--node $node" if $node ne '';
++
++ $ans = qx{ $ATTRD_PRIV --name "$name" --query $node_arg };
++
++ $ans =~ m/^name=".*" host=".*" value="(.*)"$/;
++
++ $val = $1 if defined $1;
++
++ ocf_log( 'debug', '_get_priv_attr: value of "%s"%s is "%s"', $name,
++ ( $node ? " on \"$node\"": ""),
++ $val );
++
++ return $val;
++}
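++
++# For reference, the attrd_updater --query output parsed above looks like
++# (attribute name and value below are only an example):
++#   name="lsn_location-pgsqld" host="node1" value="0/4000060"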
++
++# Set the given private attribute name to the given value
++# As setting an attribute is asynchronous, this will return as soon as the
++# attribute is really set by attrd and available.
++sub _set_priv_attr {
++ my ( $name, $val ) = @_;
++ my $name_instance = "$name-$OCF_RESOURCE_INSTANCE";
++
++ ocf_log( 'debug', '_set_priv_attr: set "%s=%s"...', $name_instance, $val );
++
++ qx{ $ATTRD_PRIV --name "$name_instance" --update "$val" };
++
++ # give attr name without the resource instance name as _get_priv_attr adds
++ # it as well
++ while ( _get_priv_attr( $name ) ne $val ) {
++ ocf_log( 'debug', '_set_priv_attr: waiting attrd ack for "%s"...', $name_instance );
++ select( undef, undef, undef, 0.1 );
++ }
++
++ return;
++}
++
++# Delete the given private attribute.
++# As setting an attribute is asynchronous, this will return as soon as the
++# attribute is really deleted by attrd.
++sub _delete_priv_attr {
++ my ( $name ) = @_;
++ my $name_instance = "$name-$OCF_RESOURCE_INSTANCE";
++
++ ocf_log( 'debug', '_delete_priv_attr: delete "%s"...', $name_instance );
++
++ qx{ $ATTRD_PRIV --name "$name_instance" --delete };
++
++ # give attr name without the resource instance name as _get_priv_attr adds
++ # it as well
++ while ( _get_priv_attr( $name ) ne '' ) {
++ ocf_log( 'debug', '_delete_priv_attr: waiting attrd ack for "%s"...',
++ $name_instance );
++ select( undef, undef, undef, 0.1 );
++ }
++
++ return;
++}
++
++# Get, parse and return the resource master score on given node.
++# Returns an empty string if not found or if the crm_master call fails.
++sub _get_master_score {
++ my ( $node ) = @_;
++ my $node_arg = '';
++ my $score;
++
++ $node_arg = sprintf '--node "%s"', $node if defined $node and $node ne '';
++
++ $score = qx{ $CRM_MASTER --quiet --get-value $node_arg 2> /dev/null };
++
++ return '' unless $? == 0 and defined $score;
++
++ chomp $score;
++
++ return $score;
++}
++
++# Set the master score of the local node or the optionally given node.
++# As setting an attribute is asynchronous, this will return as soon as the
++# attribute is really set by attrd and available everywhere.
++sub _set_master_score {
++ my ( $score, $node ) = @_;
++ my $node_arg = '';
++ my $tmp;
++
++ $node_arg = sprintf '--node "%s"', $node if defined $node and $node ne '';
++
++ qx{ $CRM_MASTER $node_arg --quiet --update "$score" };
++
++ while ( ( $tmp = _get_master_score( $node ) ) ne $score ) {
++ ocf_log( 'debug',
++ '_set_master_score: waiting to set score to "%s" (currently "%s")...',
++ $score, $tmp );
++ select(undef, undef, undef, 0.1);
++ }
++
++ return;
++}
++
++# _master_score_exists
++# This subroutine checks if a master score is set for one of the relative clones
++# in the cluster and the score is greater than or equal to 0.
++# Returns 1 if at least one master score >= 0 is found.
++# Returns 0 otherwise
++sub _master_score_exists {
++ my @partition_nodes = split /\s+/ => qx{ $CRM_NODE --partition };
++
++ foreach my $node ( @partition_nodes ) {
++ my $score = _get_master_score( $node );
++
++ return 1 if defined $score and $score ne '' and $score > -1;
++ }
++
++ return 0;
++}
++
++# Check if the current transition is a recover of a master clone on given node.
++sub _is_master_recover {
++ my ( $n ) = @_;
++
++ return (
++ scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'master'} }
++ and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'promote'} }
++ );
++}
++
++# Check if the current transition is a recover of a slave clone on given node.
++sub _is_slave_recover {
++ my ( $n ) = @_;
++
++ return (
++ scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'slave'} }
++ and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'start'} }
++ );
++}
++
++# Check if the current transition is a switchover to the given node.
++sub _is_switchover {
++ my ( $n ) = @_;
++ my $old = $OCF_NOTIFY_ENV{'master'}[0]{'uname'};
++
++ return 0 if scalar @{ $OCF_NOTIFY_ENV{'master'} } != 1
++ or scalar @{ $OCF_NOTIFY_ENV{'demote'} } != 1
++ or scalar @{ $OCF_NOTIFY_ENV{'promote'} } != 1;
++
++ return (
++ scalar grep { $_->{'uname'} eq $old } @{ $OCF_NOTIFY_ENV{'demote'} }
++ and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'slave'} }
++ and scalar grep { $_->{'uname'} eq $n } @{ $OCF_NOTIFY_ENV{'promote'} }
++ and not scalar grep { $_->{'uname'} eq $old } @{ $OCF_NOTIFY_ENV{'stop'} }
++ );
++}
++
++# Run the given command as the "system_user" given as parameter.
++# It basically forks and seteuid/setuid away from root.
++#
++sub _runas {
++ my $rc;
++ my $pid;
++ my @cmd = @_;
++ my (undef, undef, $postgres_uid, $postgres_gid ) = getpwnam( $system_user );
++
++ $pid = fork;
++
++ if ( $pid == 0 ) { # in child
++ $) = "$postgres_gid $postgres_gid";
++ while ( my ( undef, undef, $gid, $members ) = getgrent ) {
++ $) .= " $gid" if grep { $system_user eq $_ } split /\s+/, $members
++ }
++ $( = $postgres_gid;
++
++ $< = $> = $postgres_uid;
++
++ exec @cmd;
++ }
++
++ ocf_log( 'debug', '_runas: launching as "%s" command "%s"', $system_user,
++ join(' ', @cmd) );
++
++ waitpid $pid, 0;
++ $rc = $? >> 8;
++
++ return $rc;
++}
++
++# Check if instance is listening on the given host/port.
++#
++sub _pg_isready {
++ # Add 60s to the timeout or use a 24h timeout fallback to make sure
++ # Pacemaker will give up before us and take decisions
++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60;
++ my $rc = _runas( $PGISREADY, '-h', $pghost, '-p', $pgport, '-d', 'postgres', '-t', $timeout );
++
++ # Possible error codes:
++ # 1: ping rejected (usually when instance is in startup, in crash
++ # recovery, in warm standby, or when a shutdown is in progress)
++ # 2: no response, usually means the instance is down
++ # 3: no attempt, probably a syntax error, should not happen
++ return $rc;
++}
++
++# Check the postmaster.pid file and the postmaster process.
++# WARNING: we do not distinguish the scenario where postmaster.pid does not
++# exist from the scenario where the process is still alive. It should be ok
++# though, as this is considered a hard error from monitor.
++#
++sub _pg_ctl_status {
++ my $rc = _runas( $PGCTL, '--pgdata', $pgdata, 'status' );
++
++ # Possible error codes:
++ # 3: postmaster.pid file does not exist OR it does but the process
++ # with the PID found in the file is not alive
++ return $rc;
++}
++
++# Start the local instance using pg_ctl
++#
++sub _pg_ctl_start {
++ # Add 60s to the timeout or use a 24h timeout fallback to make sure
++ # Pacemaker will give up before us and take decisions
++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60;
++
++ my @cmd = ( $PGCTL, '--pgdata', $pgdata, '-w', '--timeout', $timeout, 'start' );
++
++ push @cmd => ( '-o', $start_opts ) if $start_opts ne '';
++
++ return _runas( @cmd );
++}
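++
++# For illustration only: with the default parameters and a 120s action timeout,
++# the command built above boils down to running, as system_user:
++#   pg_ctl --pgdata /var/lib/pgsql/data -w --timeout 180 start [-o "<start_opts>"]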
++
++# Enable the Standby mode.
++#
++# Up to v11, creates the recovery.conf file based on the given template.
++# Since v12, creates standby.signal.
++sub _enable_recovery {
++ my $fh;
++ my $content = '';
++ my $standby_file = "$datadir/standby.signal";
++ my (undef, undef, $uid, $gid) = getpwnam($system_user);
++
++ if ( $PGVERNUM < $PGVER_12 ) {
++ $standby_file = "$datadir/recovery.conf";
++
++ ocf_log( 'debug',
++ '_enable_recovery: get replication configuration from the template file "%s"',
++ $recovery_tpl );
++
++ # Create the recovery.conf file to start the instance as a secondary.
++ # NOTE: the recovery.conf is supposed to be set up so the secondary can
++ # connect to the primary instance, eg. using a virtual IP address.
++ # As there is no primary instance available at startup, secondaries will
++ # complain about failing to connect.
++ # As we can not reload a recovery.conf file on a standby without restarting
++ # it, we will leave with this.
++ # FIXME how would the reload help us in this case ?
++ unless ( defined open( $fh, '<', $recovery_tpl ) ) {
++ ocf_exit_reason( 'Could not open file "%s": %s', $recovery_tpl, $! );
++ exit $OCF_ERR_CONFIGURED;
++ }
++
++ # Copy all parameters from the template file
++ while (my $line = <$fh>) {
++ chomp $line;
++ $content .= "$line\n";
++ }
++ close $fh;
++ }
++
++ ocf_log( 'debug', '_enable_recovery: write the standby file "%s"', $standby_file );
++
++ unless ( open( $fh, '>', $standby_file ) ) {
++ ocf_exit_reason( 'Could not open file "%s": %s', $standby_file, $! );
++ exit $OCF_ERR_CONFIGURED;
++ }
++
++ # Write the recovery.conf file using configuration from the template file
++ print $fh $content;
++
++ close $fh;
++
++ unless ( chown $uid, $gid, $standby_file ) {
++ ocf_exit_reason( 'Could not set owner of "%s"', $standby_file );
++ exit $OCF_ERR_CONFIGURED;
++ };
++}
++
++# Parse and return various information about the local PostgreSQL instance as
++# reported by its controldata file.
++#
++# WARNING: the status is NOT updated in case of crash.
++#
++# This sub exits the script with an error on failure
++sub _get_controldata {
++ my %controldata;
++ my $ans;
++
++ $ans = qx{ $PGCTRLDATA "$datadir" 2>/dev/null };
++
++ # Parse the output of pg_controldata.
++ # This output is quite stable between pg versions, but we might need to sort
++ # it at some point if things are moving in there...
++ $ans =~ m{
++ # get the current state
++ ^\QDatabase cluster state\E:\s+(.*?)\s*$
++ .*
++ # Get the latest known REDO location
++ ^\QLatest checkpoint's REDO location\E:\s+([/0-9A-F]+)\s*$
++ .*
++ # Get the latest known TL
++ ^\QLatest checkpoint's TimeLineID\E:\s+(\d+)\s*$
++ .*
++ # Get the wal level
++ # NOTE: pg_controldata output changed with PostgreSQL 9.5, so we need to
++ # account for both syntaxes
++ ^(?:\QCurrent \E)?\Qwal_level setting\E:\s+(.*?)\s*$
++ }smx;
++
++ $controldata{'state'} = $1 if defined $1;
++ $controldata{'redo'} = $2 if defined $2;
++ $controldata{'tl'} = $3 if defined $3;
++ $controldata{'wal_level'} = $4 if defined $4;
++
++ ocf_log( 'debug',
++ "_get_controldata: found: %s",
++ Data::Dumper->new( [ \%controldata ] )->Terse(1)->Dump );
++
++ return %controldata if defined $controldata{'state'}
++ and defined $controldata{'tl'}
++ and defined $controldata{'redo'}
++ and defined $controldata{'wal_level'};
++
++ ocf_exit_reason( 'Could not read all data from the controldata file for "%s"',
++ $datadir );
++
++ ocf_log( 'debug',
++ "_get_controldata: controldata file: %s",
++ Data::Dumper->new( [ \%controldata ] )->Terse(1)->Dump, $ans );
++
++ exit $OCF_ERR_ARGS;
++}
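++
++# For reference, the four fields captured above show up in pg_controldata
++# output as lines such as (values here are only an example):
++#   Database cluster state:               in production
++#   Latest checkpoint's REDO location:    0/6000028
++#   Latest checkpoint's TimeLineID:       1
++#   wal_level setting:                    replica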
++
++# Read major version from datadir/PG_VERSION and return it as numeric version
++sub _get_pg_version {
++ my $fh;
++ my $PGVERSION;
++ my $PGVERNUM;
++
++ # check PG_VERSION
++ if ( ! -s "$datadir/PG_VERSION" ) {
++ ocf_exit_reason( 'PG_VERSION does not exist in "%s"', $datadir );
++ exit $OCF_ERR_ARGS;
++ }
++
++ unless ( open( $fh, '<', "$datadir/PG_VERSION" ) ) {
++ ocf_exit_reason( "Could not open file \"$datadir/PG_VERSION\": $!" );
++ exit $OCF_ERR_ARGS;
++ }
++
++ read( $fh, $PGVERSION, 32 );
++ close $fh;
++
++ chomp $PGVERSION;
++
++ $PGVERSION =~ /^(\d+)(?:\.(\d+))?$/;
++ $PGVERNUM = $1 * 10000;
++ $PGVERNUM += $2 * 100 if $1 < 10; # no 2nd num in the major version from v10
++
++ return $PGVERNUM;
++}
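++
++# With the formula above, a PG_VERSION file containing "9.6" yields 90600,
++# "10" yields 100000 and "12" yields 120000, which is what the $PGVER_*
++# constants are compared against.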
++
++# Use pg_controldata to check the state of the PostgreSQL server. This
++# function returns codes depending on this state, so we can find whether the
++# instance is a primary or a secondary, or use it to detect any inconsistency
++# that could indicate the instance has crashed.
++#
++sub _controldata_to_ocf {
++ my %cdata = _get_controldata();
++
++ while ( 1 ) {
++ ocf_log( 'debug', '_controldata: instance "%s" state is "%s"',
++ $OCF_RESOURCE_INSTANCE, $cdata{'state'} );
++
++ # Instance should be running as a primary.
++ return $OCF_RUNNING_MASTER if $cdata{'state'} eq "in production";
++
++ # Instance should be running as a secondary.
++ # This state includes warm standby (rejects connections attempts,
++ # including pg_isready)
++ return $OCF_SUCCESS if $cdata{'state'} eq "in archive recovery";
++
++
++ # The instance should be stopped.
++ # We don't care if it was a primary or secondary before, because we
++ # always start instances as secondaries, and then promote if necessary.
++ return $OCF_NOT_RUNNING if $cdata{'state'} eq "shut down"
++ or $cdata{'state'} eq "shut down in recovery";
++
++ # The state is "in crash recovery", "starting up" or "shutting down".
++ # This state should be transitional, so we wait and loop to check if
++ # it changes.
++ # If it does not, pacemaker will eventually abort with a timeout.
++ ocf_log( 'debug',
++ '_controldata: waiting for transitional state "%s" to finish',
++ $cdata{'state'} );
++ sleep 1;
++ %cdata = _get_controldata();
++ }
++
++ # If we reach this point, something went really wrong with this code or
++ # pg_controldata.
++ ocf_exit_reason( 'Unable to get instance "%s" state using pg_controldata',
++ $OCF_RESOURCE_INSTANCE );
++
++ return $OCF_ERR_INSTALLED ;
++}
++
++# Check the write_location of all secondaries, and adapt their master score so
++# that the instance closest to the master will be the selected candidate should
++# a promotion be triggered.
++# NOTE: This is only a hint to pacemaker! The candidate selected for promotion
++# actually re-checks that it is the best candidate and forces a re-election by
++# failing if a better one exists. This avoids a race condition between the call
++# of the monitor action and the promotion where another slave might have caught
++# up faster with the master.
++# NOTE: we cannot directly use the write_location, nor an lsn_diff value, as the
++# promotion score because Pacemaker considers any value greater than 1,000,000 as
++# INFINITY.
++#
++# This sub must be executed from a master monitor action.
++#
++sub _check_locations {
++ my $partition_nodes;
++ my $node_score;
++ my $row_num;
++ my $row;
++ my @rs;
++
++ # Set the master score if not already done
++ $node_score = _get_master_score();
++ _set_master_score( '1001' ) unless $node_score eq '1001';
++
++ # Ask crm_node what nodes are present in our current cluster partition
++ $partition_nodes = qx{ $CRM_NODE --partition };
++
++ @rs = @{ _get_lag_scores() };
++
++ $row_num = scalar @rs;
++
++ # If no lag are reported at this point, it means that there is no
++ # secondary instance connected.
++ ocf_log( 'warning', 'No secondary connected to the master' )
++ if $row_num == 0;
++
++ # For each standby connected, set their master score based on the following
++ # rule: the first known node/application, with the highest priority and
++ # an acceptable state.
++ while ( $row = shift @rs ) {
++
++ if ( $partition_nodes !~ /$row->[0]/ ) {
++ ocf_log( 'info', 'Ignoring unknown application_name/node "%s"',
++ $row->[0] );
++ next;
++ }
++
++ if ( $row->[0] eq $nodename ) {
++ ocf_log( 'warning', 'Streaming replication with myself!' );
++ next;
++ }
++
++ $node_score = _get_master_score( $row->[0] );
++
++ if ( $row->[3] =~ /^\s*(?:startup|backup)\s*$/ ) {
++ # We exclude any standby being in state backup (pg_basebackup) or
++ # startup (new standby or failing standby)
++ ocf_log( 'info', 'Forbidding promotion on "%s" in state "%s"',
++ $row->[0], $row->[3] );
++
++ _set_master_score( '-1', $row->[0] ) unless $node_score eq '-1';
++ }
++ else {
++ ocf_log( 'debug',
++ '_check_locations: checking "%s" promotion ability (current_score: %s, priority: %s, location: %s, lag: %s)',
++ $row->[0], $node_score, $row->[1], $row->[2], $row->[4] );
++
++ if ( $node_score ne $row->[1] ) {
++ if ( $row->[1] < -1 ) {
++ ocf_log( 'info', 'Update score of "%s" from %s to %s because replication lag (%s) is higher than given maxlag (%s).',
++ $row->[0], $node_score, $row->[1], $row->[4], $maxlag );
++ }
++ else {
++ ocf_log( 'info', 'Update score of "%s" from %s to %s because of a change in the replication lag (%s).',
++ $row->[0], $node_score, $row->[1], $row->[4] );
++ }
++ _set_master_score( $row->[1], $row->[0] );
++ }
++ else {
++ ocf_log( 'debug',
++ '_check_locations: "%s" keeps its current score of %s',
++ $row->[0], $row->[1] );
++ }
++ }
++
++ # Remove this node from the known nodes list.
++ $partition_nodes =~ s/(?:^|\s)$row->[0](?:\s|$)/ /g;
++ }
++
++ $partition_nodes =~ s/(?:^\s+)|(?:\s+$)//g;
++
++ # If there are still nodes in "partition_nodes", it means there is no
++ # corresponding line in "pg_stat_replication".
++ # Exclude these nodes that are not part of the cluster at this
++ # point.
++ foreach my $node (split /\s+/ => $partition_nodes) {
++ # Exclude the current node.
++ next if $node eq $nodename;
++
++ # do not warn if the master score is already set to -1000.
++ # this avoids log flooding (gh #138)
++ $node_score = _get_master_score( $node );
++ next if $node_score eq '-1000';
++
++ ocf_log( 'warning', '"%s" is not connected to the primary', $node );
++ _set_master_score( '-1000', $node );
++ }
++
++ return $OCF_SUCCESS;
++}
++
++# _check_switchover
++# Check if the pgsql switchover to the local node is safe.
++# This is supposed to be called **after** the master has been stopped or demoted.
++# This sub checks if the local standby received the shutdown checkpoint from the
++# old master to make sure it can take over the master role and the old master
++# will be able to catchup as a standby after.
++#
++# Returns 0 if switchover is safe
++# Returns 1 if switchover is not safe
++# Returns 2 for internal error
++sub _check_switchover {
++ my $has_sht_chk = 0;
++ my $last_redo;
++ my $last_lsn;
++ my $ans;
++ my $rc;
++ my $tl;
++ my %cdata;
++
++ $PGWALDUMP = "$bindir/pg_xlogdump" if $PGVERNUM < $PGVER_10;
++
++ ocf_log( 'info', 'Switchover in progress from "%s" to "%s".'
++ .' Need to check the last record in WAL',
++ $OCF_NOTIFY_ENV{'demote'}[0]{'uname'}, $nodename );
++
++ # check if we received the shutdown checkpoint of the master during its
++ # demote process.
++ # We need the last local checkpoint LSN and the last received LSN from
++ # master to check in the WAL between these addresses if we have a
++ # "checkpoint shutdown" using pg_xlogdump/pg_waldump.
++ #
++ # Force a checkpoint to make sure the controldata shows the very last TL
++ # and the master's shutdown checkpoint
++ _query( q{ CHECKPOINT }, {} );
++ %cdata = _get_controldata();
++ $tl = $cdata{'tl'};
++ $last_redo = $cdata{'redo'};
++
++ # Get the last received LSN from master
++ $last_lsn = _get_last_received_lsn();
++
++ unless ( defined $last_lsn ) {
++ ocf_exit_reason( 'Could not fetch last received LSN!' );
++
++ return 2;
++ }
++
++ $ans = qx{ $PGWALDUMP --path "$datadir" --timeline "$tl" \\
++ --start "$last_redo" --end "$last_lsn" 2>&1 };
++ $rc = $?;
++
++ ocf_log( 'debug',
++ '_check_switchover: %s rc: "%s", tl: "%s", last_chk: %s, last_lsn: %s, output: "%s"',
++ $PGWALDUMP, $rc, $tl, $last_redo, $last_lsn, $ans
++ );
++
++ if ( $rc == 0 and
++ $ans =~ m{^rmgr: XLOG.*desc: (?i:checkpoint)(?::|_SHUTDOWN) redo [0-9A-F/]+; tli $tl;.*; shutdown$}m
++ ) {
++ ocf_log( 'info', 'Slave received the shutdown checkpoint' );
++ return 0;
++ }
++
++ ocf_exit_reason(
++ 'Did not receive the shutdown checkpoint from the old master!' );
++
++ return 1;
++}
++
++# Check to confirm if the instance is really started as _pg_isready stated and
++# check if the instance is primary or secondary.
++#
++sub _confirm_role {
++ my $is_in_recovery;
++ my $rc;
++ my @rs;
++
++ $rc = _query( "SELECT pg_is_in_recovery()", \@rs );
++
++ $is_in_recovery = $rs[0][0];
++
++ if ( $rc == 0 ) {
++ # The query was executed, check the result.
++ if ( $is_in_recovery eq 't' ) {
++ # The instance is a secondary.
++ ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a secondary");
++ return $OCF_SUCCESS;
++ }
++ elsif ( $is_in_recovery eq 'f' ) {
++ # The instance is a primary.
++ ocf_log( 'debug', "_confirm_role: instance $OCF_RESOURCE_INSTANCE is a primary");
++ # Check lsn diff with current slaves if any
++ _check_locations() if $__OCF_ACTION eq 'monitor';
++ return $OCF_RUNNING_MASTER;
++ }
++
++ # This should not happen, raise a hard configuration error.
++ ocf_exit_reason(
++ 'Unexpected result from query to check if "%s" is a primary or a secondary: "%s"',
++ $OCF_RESOURCE_INSTANCE, $is_in_recovery );
++
++ return $OCF_ERR_CONFIGURED;
++ }
++ elsif ( $rc == 1 or $rc == 2 ) {
++ # psql could not connect to the instance.
++ # As pg_isready reported the instance was listening, this error
++ # could be a max_connections saturation. Just report a soft error.
++ ocf_exit_reason( 'psql could not connect to instance "%s"',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_ERR_GENERIC;
++ }
++
++ # The query failed (rc: 3) or bad parameters (rc: -1).
++ # This should not happen, raise a hard configuration error.
++ ocf_exit_reason(
++ 'The query to check if instance "%s" is a primary or a secondary failed (rc: %d)',
++ $OCF_RESOURCE_INSTANCE, $rc );
++
++ return $OCF_ERR_CONFIGURED;
++}
++
++
++# Check to confirm if the instance is really stopped as _pg_isready stated
++# and if it was properly shut down.
++#
++sub _confirm_stopped {
++ my $pgctlstatus_rc;
++ my $controldata_rc;
++
++ # Check the postmaster process status.
++ $pgctlstatus_rc = _pg_ctl_status();
++
++ if ( $pgctlstatus_rc == 0 ) {
++ # The PID file exists and the process is available.
++ # That should not be the case, return an error.
++ ocf_exit_reason(
++ 'Instance "%s" is not listening, but the process referenced in postmaster.pid exists',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_ERR_GENERIC;
++ }
++
++ # The PID file does not exist or the process is not available.
++ ocf_log( 'debug',
++ '_confirm_stopped: no postmaster process found for instance "%s"',
++ $OCF_RESOURCE_INSTANCE );
++
++ if ( -f "$datadir/backup_label" ) {
++ # We are probably on a freshly built secondary that was not started yet.
++ ocf_log( 'debug',
++ '_confirm_stopped: backup_label file exists: probably on a never started secondary',
++ );
++ return $OCF_NOT_RUNNING;
++ }
++
++ # Continue the check with pg_controldata.
++ $controldata_rc = _controldata_to_ocf();
++ if ( $controldata_rc == $OCF_RUNNING_MASTER ) {
++ # The controldata has not been updated to "shutdown".
++ # It should mean we had a crash on a primary instance.
++ ocf_exit_reason(
++ 'Instance "%s" controldata indicates a running primary instance, the instance has probably crashed',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_FAILED_MASTER;
++ }
++ elsif ( $controldata_rc == $OCF_SUCCESS ) {
++ # The controldata has not been updated to "shutdown in recovery".
++ # It should mean we had a crash on a secondary instance.
++ # There is no "FAILED_SLAVE" return code, so we return a generic error.
++ ocf_exit_reason(
++ 'Instance "%s" controldata indicates a running secondary instance, the instance has probably crashed',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_ERR_GENERIC;
++ }
++ elsif ( $controldata_rc == $OCF_NOT_RUNNING ) {
++ # The controldata state is consistent, the instance was probably
++ # properly shut down.
++ ocf_log( 'debug',
++ '_confirm_stopped: instance "%s" controldata indicates that the instance was properly shut down',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_NOT_RUNNING;
++ }
++
++ # Something went wrong with the controldata check.
++ ocf_exit_reason(
++ 'Could not get instance "%s" status from controldata (returned: %d)',
++ $OCF_RESOURCE_INSTANCE, $controldata_rc );
++
++ return $OCF_ERR_GENERIC;
++}
++
++############################################################
++#### OCF FUNCS
++
++
++
++=head1 SUPPORTED PARAMETERS
++
++=over
++
++=item B<pgdata>
++
++Location of the PGDATA of your instance
++
++(optional, string, default "/var/lib/pgsql/data")
++
++=item B<pghost>
++
++The socket directory or IP address to use to connect to the local instance
++
++(optional, string, default "/tmp")
++
++=item B<pgport>
++
++The port to connect to the local instance
++
++(optional, integer, default "5432")
++
++=item B<bindir>
++
++Location of the PostgreSQL binaries.
++
++(optional, string, default "/usr/bin")
++
++=item B<system_user>
++
++The system owner of your instance's process
++
++(optional, string, default "postgres")
++
++=item B<recovery_template>
++
++B<ONLY> for PostgreSQL 11 and below.
++
++The local template that will be copied as the C<$PGDATA/recovery.conf> file.
++This template file must exist on all nodes.
++
++With PostgreSQL 12 and higher, the cluster will refuse to start if this
++parameter is set or a template file is found.
++
++(optional, string, default "$PGDATA/recovery.conf.pcmk")
++
++=item B<maxlag>
++
++Maximum lag allowed on a standby before we set a negative master score on it.
++The calculation is based on the difference between the current xlog location on
++the master and the write location on the standby.
++
++(optional, integer, default "0" disables this feature)
++
++=item B<datadir>
++
++Path to the directory set in C<data_directory> from your postgresql.conf file.
++This parameter has the same default as PostgreSQL itself: the C<pgdata> parameter
++value.
++
++Unless you have a special PostgreSQL setup and you understand this parameter,
++B<ignore it>.
++
++(optional, string, defaults to the value of C<pgdata>)
++
++=item B
++
++Additional arguments given to the postgres process on startup. See
++"postgres --help" for available options. Useful when the postgresql.conf file
++is not in the data directory (PGDATA), eg.:
++
++ -c config_file=/etc/postgresql/9.3/main/postgresql.conf
++
++(optinal, string, default "")
++
++=back
++
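++For example, with PostgreSQL 12 and higher the agent expects settings similar
++to the following sketch in the instance configuration (e.g.
++C<postgresql.conf>); the connection host and the node name C<srv1> are
++placeholders:
++
++ recovery_target_timeline = 'latest'
++ primary_conninfo = 'host=192.168.122.50 application_name=srv1'
++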
++=cut
++
++sub ocf_meta_data {
++ print qq{
++<?xml version="1.0"?>
++<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
++<resource-agent name="pgsqlms">
++  <version>1.0</version>
++
++  <longdesc lang="en">
++    Resource script for PostgreSQL in replication. It manages PostgreSQL servers using streaming replication as an HA resource.
++  </longdesc>
++  <shortdesc lang="en">Manages PostgreSQL servers in replication</shortdesc>
++  <parameters>
++    <parameter name="system_user">
++      <longdesc lang="en">
++        System user account used to run the PostgreSQL server
++      </longdesc>
++      <shortdesc lang="en">PostgreSQL system User</shortdesc>
++      <content type="string" default="postgres" />
++    </parameter>
++    <parameter name="bindir">
++      <longdesc lang="en">
++        Path to the directory storing the PostgreSQL binaries. The agent uses psql, pg_isready, pg_controldata and pg_ctl.
++      </longdesc>
++      <shortdesc lang="en">Path to the PostgreSQL binaries</shortdesc>
++      <content type="string" default="/usr/bin" />
++    </parameter>
++    <parameter name="pgdata">
++      <longdesc lang="en">
++        Path to the data directory, e.g. PGDATA
++      </longdesc>
++      <shortdesc lang="en">Path to the data directory</shortdesc>
++      <content type="string" default="/var/lib/pgsql/data" />
++    </parameter>
++    <parameter name="datadir">
++      <longdesc lang="en">
++        Path to the directory set in data_directory from your postgresql.conf file. This parameter
++        has the same default as PostgreSQL itself: the pgdata parameter value. Unless you have a
++        special PostgreSQL setup and you understand this parameter, ignore it.
++      </longdesc>
++      <shortdesc lang="en">Path to the directory set in data_directory from your postgresql.conf file</shortdesc>
++      <content type="string" default="/var/lib/pgsql/data" />
++    </parameter>
++    <parameter name="pghost">
++      <longdesc lang="en">
++        Host IP address or unix socket folder the instance is listening on.
++      </longdesc>
++      <shortdesc lang="en">Instance IP or unix socket folder</shortdesc>
++      <content type="string" default="/tmp" />
++    </parameter>
++    <parameter name="pgport">
++      <longdesc lang="en">
++        Port the instance is listening on.
++      </longdesc>
++      <shortdesc lang="en">Instance port</shortdesc>
++      <content type="integer" default="5432" />
++    </parameter>
++    <parameter name="maxlag">
++      <longdesc lang="en">
++        Maximum lag allowed on a standby before we set a negative master score on it. The calculation
++        is based on the difference between the current LSN on the master and the LSN
++        written on the standby.
++        This parameter must be a valid positive number as described in PostgreSQL documentation.
++        See: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS-NUMERIC
++      </longdesc>
++      <shortdesc lang="en">Maximum write lag before we mark a standby as inappropriate to promote</shortdesc>
++      <content type="integer" default="0" />
++    </parameter>
++    <parameter name="recovery_template">
++      <longdesc lang="en">
++        Path to the recovery.conf template. This file is simply copied to \$PGDATA
++        before starting the instance as slave.
++        ONLY for PostgreSQL 11 and below. This parameter is IGNORED for
++        PostgreSQL 12 and higher. The cluster will refuse to start if a template
++        file is found.
++      </longdesc>
++      <shortdesc lang="en">Path to the recovery.conf template for PostgreSQL 11 and older</shortdesc>
++      <content type="string" default="\$PGDATA/recovery.conf.pcmk" />
++    </parameter>
++    <parameter name="start_opts">
++      <longdesc lang="en">
++        Additional arguments given to the postgres process on startup.
++        See "postgres --help" for available options. Useful when the
++        postgresql.conf file is not in the data directory (PGDATA), e.g.:
++        "-c config_file=/etc/postgresql/9.3/main/postgresql.conf".
++      </longdesc>
++      <shortdesc lang="en">Additional arguments given to the postgres process on startup</shortdesc>
++      <content type="string" default="" />
++    </parameter>
++  </parameters>
++  <actions>
++    <action name="start" timeout="60" />
++    <action name="stop" timeout="60" />
++    <action name="reload" timeout="20" />
++    <action name="promote" timeout="30" />
++    <action name="demote" timeout="120" />
++    <action name="monitor" depth="0" timeout="10" interval="15" role="Master" />
++    <action name="monitor" depth="0" timeout="10" interval="16" role="Slave" />
++    <action name="notify" timeout="60" />
++    <action name="meta-data" timeout="5" />
++    <action name="methods" timeout="5" />
++    <action name="validate-all" timeout="5" />
++  </actions>
++</resource-agent>
++ };
++ return $OCF_SUCCESS;
++}
++
++
++=head1 SUPPORTED ACTIONS
++
++This resource agent supports the following actions (operations):
++
++=over
++
++=item B<start>
++
++Starts the resource. Suggested minimum timeout: 60.
++
++=item B<stop>
++
++Stops the resource. Suggested minimum timeout: 60.
++
++=item B<reload>
++
++Suggested minimum timeout: 20.
++
++=item B<promote>
++
++Promotes the resource to the Master role. Suggested minimum timeout: 30.
++
++=item B<demote>
++
++Demotes the resource to the Slave role. Suggested minimum timeout: 120.
++
++=item B<monitor> (Master role)
++
++Performs a detailed status check. Suggested minimum timeout: 10.
++Suggested interval: 15.
++
++=item B<monitor> (Slave role)
++
++Performs a detailed status check. Suggested minimum timeout: 10.
++Suggested interval: 16.
++
++=item B<notify>
++
++Suggested minimum timeout: 60.
++
++=item B<meta-data>
++
++Retrieves resource agent metadata (internal use only).
++Suggested minimum timeout: 5.
++
++=item B<methods>
++
++Suggested minimum timeout: 5.
++
++=item B<validate-all>
++
++Performs a validation of the resource configuration.
++Suggested minimum timeout: 5.
++
++=back
++
++=cut
++
++sub ocf_methods {
++ print q{
++ start
++ stop
++ reload
++ promote
++ demote
++ monitor
++ notify
++ methods
++ meta-data
++ validate-all
++ };
++
++ return $OCF_SUCCESS;
++}
++
++############################################################
++#### RA FUNCS
++
++sub pgsql_validate_all {
++ my $fh;
++ my $ans = '';
++ my %cdata;
++
++ unless (
++ ocf_version_cmp( $ENV{"OCF_RESKEY_crm_feature_set"}, '3.0.9' ) == 2
++ ) {
++ ocf_exit_reason(
++ 'PAF %s is compatible with Pacemaker 1.1.13 and greater',
++ $VERSION
++ );
++ return $OCF_ERR_INSTALLED;
++ }
++
++ # check notify=true
++ $ans = qx{ $CRM_RESOURCE --resource "$OCF_RESOURCE_INSTANCE" \\
++ --meta --get-parameter notify 2>/dev/null };
++ chomp $ans;
++ unless ( lc($ans) =~ /^true$|^on$|^yes$|^y$|^1$/ ) {
++ ocf_exit_reason(
++ 'You must set meta parameter notify=true for your master resource'
++ );
++ return $OCF_ERR_INSTALLED;
++ }
++
++ # check master-max=1
++ unless (
++ defined $ENV{'OCF_RESKEY_CRM_meta_master_max'}
++ and $ENV{'OCF_RESKEY_CRM_meta_master_max'} eq '1'
++ ) {
++ ocf_exit_reason(
++ 'You must set meta parameter master-max=1 for your master resource'
++ );
++ return $OCF_ERR_INSTALLED;
++ }
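++ # Both meta parameters checked above (notify and master-max) are set on the
++ # master/clone resource definition; see the EXAMPLE CRM SHELL and EXAMPLE
++ # PCS sections at the end of this file.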
++
++ if ( $PGVERNUM >= $PGVER_12 ) {
++ # check PostgreSQL setup: checks related to v12 and after
++ my $guc;
++
++ # recovery.conf template must not exists
++ if ( -f $recovery_tpl ) {
++ ocf_exit_reason(
++ 'Recovery template file "%s" is forbidden for PostgreSQL 12 and above',
++ $recovery_tpl );
++ exit $OCF_ERR_ARGS;
++ }
++
++ # WARNING: you MUST put -C as first argument to bypass the root check
++ $guc = qx{ $POSTGRES -C recovery_target_timeline -D "$pgdata" $start_opts};
++ chomp $guc;
++ unless ( $guc eq 'latest' ) {
++ ocf_exit_reason(
++ q{Parameter "recovery_target_timeline" MUST be set to 'latest'. } .
++ q{It is currently set to '%s'}, $guc );
++ return $OCF_ERR_ARGS;
++ }
++
++ $guc = qx{ $POSTGRES -C primary_conninfo -D "$pgdata" $start_opts};
++ unless ($guc =~ /\bapplication_name='?$nodename'?\b/) {
++ ocf_exit_reason(
++ q{Parameter "primary_conninfo" MUST contain 'application_name=%s'. }.
++ q{It is currently set to '%s'}, $nodename, $guc );
++ return $OCF_ERR_ARGS;
++ }
++ }
++ else {
++ my @content;
++
++ # check recovery template
++ if ( ! -f $recovery_tpl ) {
++ ocf_exit_reason( 'Recovery template file "%s" does not exist',
++ $recovery_tpl );
++ return $OCF_ERR_ARGS;
++ }
++
++ # check content of the recovery template file
++ unless ( open( $fh, '<', $recovery_tpl ) ) {
++ ocf_exit_reason( 'Could not open file "%s": %s', $recovery_tpl, $! );
++ return $OCF_ERR_ARGS;
++ }
++ @content = <$fh>;
++ close $fh;
++
++
++ unless ( grep /^\s*standby_mode\s*=\s*'?on'?\s*$/, @content ) {
++ ocf_exit_reason(
++ 'Recovery template file must contain "standby_mode = on"' );
++ return $OCF_ERR_ARGS;
++ }
++
++ unless ( grep /^\s*recovery_target_timeline\s*=\s*'?latest'?\s*$/, @content ) {
++ ocf_exit_reason(
++ "Recovery template file must contain \"recovery_target_timeline = 'latest'\""
++ );
++ return $OCF_ERR_ARGS;
++ }
++
++ unless (
++ grep /^\s*primary_conninfo\s*=.*['\s]application_name=$nodename['\s]/,
++ @content
++ ) {
++ ocf_exit_reason(
++ 'Recovery template file must contain in primary_conninfo parameter "application_name=%s"',
++ $nodename );
++ return $OCF_ERR_ARGS;
++ }
++ }
++
++ unless ( looks_like_number($maxlag) ) {
++ ocf_exit_reason( 'maxlag is not a number: "%s"', $maxlag );
++ return $OCF_ERR_INSTALLED;
++ }
++
++ # check system user
++ unless ( defined getpwnam $system_user ) {
++ ocf_exit_reason( 'System user "%s" does not exist', $system_user );
++ return $OCF_ERR_ARGS;
++ }
++
++ # require 9.3 minimum
++ if ( $PGVERNUM < $PGVER_93 ) {
++ ocf_exit_reason( 'PostgreSQL 9.3 or later is required' );
++ return $OCF_ERR_INSTALLED;
++ }
++
++ # check binaries
++ unless ( -x $PGCTL and -x $PGPSQL and -x $PGCTRLDATA and -x $PGISREADY
++ and ( -x $PGWALDUMP or -x "$bindir/pg_xlogdump")
++ ) {
++ ocf_exit_reason(
++ "Missing one or more binaries. Check the following paths: %s, %s, %s, %s, %s or %s",
++ $PGCTL, $PGPSQL, $PGCTRLDATA, $PGISREADY, $PGWALDUMP, "$bindir/pg_xlogdump" );
++ return $OCF_ERR_ARGS;
++ }
++
++ # require wal_level >= hot_standby
++ %cdata = _get_controldata();
++ unless ( $cdata{'wal_level'} =~ m{hot_standby|logical|replica} ) {
++ ocf_exit_reason(
++ 'wal_level must be one of "hot_standby", "logical" or "replica"' );
++ return $OCF_ERR_ARGS;
++ }
++
++ return $OCF_SUCCESS;
++}
++
++
++# Start the PostgreSQL instance as a *secondary*
++#
++sub pgsql_start {
++ my $rc = pgsql_monitor();
++ my %cdata = _get_controldata();
++ my $prev_state = $cdata{'state'};
++
++ # Instance must be running as secondary or being stopped.
++ # Anything else is an error.
++ if ( $rc == $OCF_SUCCESS ) {
++ ocf_log( 'info', 'Instance "%s" already started',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_SUCCESS;
++ }
++ elsif ( $rc != $OCF_NOT_RUNNING ) {
++ ocf_exit_reason( 'Unexpected state for instance "%s" (returned %d)',
++ $OCF_RESOURCE_INSTANCE, $rc );
++ return $OCF_ERR_GENERIC;
++ }
++
++ #
++ # From here, the instance is NOT running for sure.
++ #
++
++ ocf_log( 'debug',
++ 'pgsql_start: instance "%s" is not running, starting it as a secondary',
++ $OCF_RESOURCE_INSTANCE );
++
++ # Must start as a standby, so enable recovery.
++ _enable_recovery();
++
++ # Start the instance as a secondary.
++ $rc = _pg_ctl_start();
++
++ if ( $rc == 0 ) {
++
++ # Wait for the start to finish.
++ sleep 1 while ( $rc = pgsql_monitor() ) == $OCF_NOT_RUNNING;
++
++ if ( $rc == $OCF_SUCCESS ) {
++ ocf_log( 'info', 'Instance "%s" started', $OCF_RESOURCE_INSTANCE );
++
++ # Check if a master score exists in the cluster.
++ # During the very first start of the cluster, no master score will
++ # exist on any of the existing slaves, unless an admin designated
++ # one of them using crm_master. If no master score exists, the cluster
++ # will not promote a master among the slaves.
++ # To solve this situation, we check if there is at least one master
++ # score existing on one node in the cluster. Do nothing if at least
++ # one master score is found among the clones of the resource. If no
++ # master score exists, set a score of 1 only if the resource was a
++ # master that was shut down before the start.
++ if ( $prev_state eq "shut down" and not _master_score_exists() ) {
++ ocf_log( 'info', 'No master score around. Set mine to 1' );
++
++ _set_master_score( '1' );
++ }
++
++ return $OCF_SUCCESS;
++ }
++
++ ocf_exit_reason(
++ 'Instance "%s" is not running as a slave (returned %d)',
++ $OCF_RESOURCE_INSTANCE, $rc );
++
++ return $OCF_ERR_GENERIC;
++ }
++
++ ocf_exit_reason( 'Instance "%s" failed to start (rc: %d)',
++ $OCF_RESOURCE_INSTANCE, $rc );
++
++ return $OCF_ERR_GENERIC;
++}
++
++# Stop the PostgreSQL instance
++#
++sub pgsql_stop {
++ my $rc;
++ my $state;
++ my $pidfile = "$datadir/postmaster.pid";
++ # Add 60s to the timeout or use a 24h timeout fallback to make sure
++ # Pacemaker will give up before us and take decisions
++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60;
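++ # For example, with a 120s stop timeout configured in Pacemaker, pg_ctl is
++ # given a 180s timeout, so Pacemaker always reacts first.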
++
++ # Instance must be running as secondary or primary or being stopped.
++ # Anything else is an error.
++ $rc = pgsql_monitor();
++ if ( $rc == $OCF_NOT_RUNNING ) {
++ ocf_log( 'info', 'Instance "%s" already stopped',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_SUCCESS;
++ }
++ elsif ( $rc != $OCF_SUCCESS and $rc != $OCF_RUNNING_MASTER ) {
++ ocf_exit_reason( 'Unexpected state for instance "%s" (returned %d)',
++ $OCF_RESOURCE_INSTANCE, $rc );
++ return $OCF_ERR_GENERIC;
++ }
++
++ #
++ # From here, the instance is running for sure.
++ #
++
++ ocf_log( 'debug', 'pgsql_stop: instance "%s" is running, stopping it',
++ $OCF_RESOURCE_INSTANCE );
++
++ # Try to quit with proper shutdown.
++
++
++ $rc = _runas( $PGCTL, '--pgdata', $pgdata, '-w', '--timeout', $timeout,
++ '-m', 'fast', 'stop' );
++
++ if ( $rc == 0 ) {
++ # Wait for the stop to finish.
++ sleep 1 while ( $rc = pgsql_monitor() ) != $OCF_NOT_RUNNING ;
++
++ ocf_log( 'info', 'Instance "%s" stopped', $OCF_RESOURCE_INSTANCE );
++
++ return $OCF_SUCCESS;
++ }
++
++ ocf_exit_reason( 'Instance "%s" failed to stop', $OCF_RESOURCE_INSTANCE );
++ return $OCF_ERR_GENERIC;
++}
++
++# Monitor the PostgreSQL instance
++#
++sub pgsql_monitor {
++ my $pgisready_rc;
++ my $controldata_rc;
++
++ ocf_log( 'debug', 'pgsql_monitor: monitor is a probe' ) if ocf_is_probe();
++
++ # First check, verify if the instance is listening.
++ $pgisready_rc = _pg_isready();
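++ # Reminder: _pg_isready() returns pg_isready's exit code: 0 when the server
++ # is accepting connections, 1 when it is rejecting them (e.g. during startup
++ # or shutdown), 2 when there is no response and 3 when no connection attempt
++ # was made (e.g. invalid parameters).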
++
++ if ( $pgisready_rc == 0 ) {
++ # The instance is listening.
++ # We confirm that the instance is up and return if it is a primary or a
++ # secondary
++ ocf_log( 'debug', 'pgsql_monitor: instance "%s" is listening',
++ $OCF_RESOURCE_INSTANCE );
++ return _confirm_role();
++ }
++
++ if ( $pgisready_rc == 1 ) {
++ # The attempt was rejected.
++ # This could happen in several cases:
++ # - at startup
++ # - during shutdown
++ # - during crash recovery
++ # - if instance is a warm standby
++ # Except for the warm standby case, this should be a transitional state.
++ # We try to confirm using pg_controldata.
++ ocf_log( 'debug',
++ 'pgsql_monitor: instance "%s" rejects connections - checking again...',
++ $OCF_RESOURCE_INSTANCE );
++ $controldata_rc = _controldata_to_ocf();
++
++ if ( $controldata_rc == $OCF_RUNNING_MASTER
++ or $controldata_rc == $OCF_SUCCESS
++ ) {
++ # This state indicates that pg_isready check should succeed.
++ # We check again.
++ ocf_log( 'debug',
++ 'pgsql_monitor: instance "%s" controldata shows a running status',
++ $OCF_RESOURCE_INSTANCE );
++
++ $pgisready_rc = _pg_isready();
++ if ( $pgisready_rc == 0 ) {
++ # Consistent with pg_controldata output.
++ # We can check if the instance is primary or secondary
++ ocf_log( 'debug', 'pgsql_monitor: instance "%s" is listening',
++ $OCF_RESOURCE_INSTANCE );
++ return _confirm_role();
++ }
++
++ # Still not consistent, raise an error.
++ # NOTE: if the instance is a warm standby, we end up here.
++ # TODO raise a hard error here?
++ ocf_exit_reason(
++ 'Instance "%s" controldata is not consistent with pg_isready (returned: %d)',
++ $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++ ocf_log( 'info',
++ 'If this instance is in warm standby, be aware this resource agent only supports hot standby' );
++
++ return $OCF_ERR_GENERIC;
++ }
++
++ if ( $controldata_rc == $OCF_NOT_RUNNING ) {
++ # This state indicates that pg_isready check should fail with rc 2.
++ # We check again.
++ $pgisready_rc = _pg_isready();
++ if ( $pgisready_rc == 2 ) {
++ # Consistent with pg_controldata output.
++ # We check the process status using pg_ctl status and check
++ # if it was properly shut down using pg_controldata.
++ ocf_log( 'debug',
++ 'pgsql_monitor: instance "%s" is not listening',
++ $OCF_RESOURCE_INSTANCE );
++ return _confirm_stopped();
++ }
++ # Still not consistent, raise an error.
++ # TODO raise a hard error here?
++ ocf_exit_reason(
++ 'Instance "%s" controldata is not consistent with pg_isready (returned: %d)',
++ $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++
++ return $OCF_ERR_GENERIC;
++ }
++
++ # Something went wrong with the controldata check, hard fail.
++ ocf_exit_reason(
++ 'Could not get instance "%s" status from controldata (returned: %d)',
++ $OCF_RESOURCE_INSTANCE, $controldata_rc );
++
++ return $OCF_ERR_INSTALLED;
++ }
++
++ elsif ( $pgisready_rc == 2 ) {
++ # The instance is not listening.
++ # We check the process status using pg_ctl status and check
++ # if it was properly shut down using pg_controldata.
++ ocf_log( 'debug', 'pgsql_monitor: instance "%s" is not listening',
++ $OCF_RESOURCE_INSTANCE );
++ return _confirm_stopped();
++ }
++
++ elsif ( $pgisready_rc == 3 ) {
++ # No attempt was made, probably a syntax error.
++ # Hard configuration error, we don't want to retry or failover here.
++ ocf_exit_reason(
++ 'Unknown error while checking if instance "%s" is listening (returned %d)',
++ $OCF_RESOURCE_INSTANCE, $pgisready_rc );
++
++ return $OCF_ERR_CONFIGURED;
++ }
++
++ ocf_exit_reason( 'Unexpected result when checking instance "%s" status',
++ $OCF_RESOURCE_INSTANCE );
++
++ return $OCF_ERR_GENERIC;
++}
++
++
++# Demote the PostgreSQL instance from primary to secondary
++# To demote a PostgreSQL instance, we must:
++# * stop it gracefully
++# * create recovery.conf with standby_mode = on
++# * start it
++#
++sub pgsql_demote {
++ my $rc;
++
++ $rc = pgsql_monitor();
++
++ # Running as primary. Normal, expected behavior.
++ if ( $rc == $OCF_RUNNING_MASTER ) {
++ ocf_log( 'debug', 'pgsql_demote: "%s" currently running as a primary',
++ $OCF_RESOURCE_INSTANCE ) ;
++ }
++ elsif ( $rc == $OCF_SUCCESS ) {
++ # Already running as secondary. Nothing to do.
++ ocf_log( 'debug',
++ 'pgsql_demote: "%s" currently running as a secondary',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_SUCCESS;
++ }
++ elsif ( $rc == $OCF_NOT_RUNNING ) {
++ # Instance is stopped. Nothing to do.
++ ocf_log( 'debug', 'pgsql_demote: "%s" currently shut down',
++ $OCF_RESOURCE_INSTANCE );
++ }
++ elsif ( $rc == $OCF_ERR_CONFIGURED ) {
++ # We actually prefer raising a hard or fatal error instead of leaving
++ # the CRM aborting its transition for a new one because of a soft error.
++ # The hard error will force the CRM to move the resource immediately.
++ return $OCF_ERR_CONFIGURED;
++ }
++ else {
++ return $OCF_ERR_GENERIC;
++ }
++
++ # TODO we need to make sure at least one slave is connected!!
++
++ # WARNING: if the resource state is stopped instead of master, the OCF RA
++ # developer guide advises returning OCF_ERR_GENERIC, misleading the CRM into
++ # a loop where it computes transitions of demote(failing)->stop->start->promote
++ # actions until failcount == migration-threshold.
++ # This is a really ugly trick to keep going with the demote action if the
++ # rsc is already stopped gracefully.
++ # See discussion "CRM trying to demote a stopped resource" on
++ # developers@clusterlabs.org
++ unless ( $rc == $OCF_NOT_RUNNING ) {
++ # Add 60s to the timeout or use a 24h timeout fallback to make sure
++ # Pacemaker will give up before us and take decisions
++ my $timeout = ( _get_action_timeout() || 60*60*24 ) + 60;
++
++ # WARNING the instance **MUST** be stopped gracefully.
++ # Do **not** use pg_stop() or service or systemctl here as these
++ # commands might force-stop the PostgreSQL instance using immediate
++ # after some timeout and return success, which is misleading.
++
++ $rc = _runas( $PGCTL, '--pgdata', $pgdata, '--mode', 'fast', '-w',
++ '--timeout', $timeout , 'stop' );
++
++ # No need to wait for stop to complete, this is handled in pg_ctl
++ # using the -w option.
++ unless ( $rc == 0 ) {
++ ocf_exit_reason( 'Failed to stop "%s" using pg_ctl (returned %d)',
++ $OCF_RESOURCE_INSTANCE, $rc );
++ return $OCF_ERR_GENERIC;
++ }
++
++ # Double check that the instance is stopped correctly.
++ $rc = pgsql_monitor();
++ unless ( $rc == $OCF_NOT_RUNNING ) {
++ ocf_exit_reason(
++ 'Unexpected "%s" state: monitor status (%d) disagree with pg_ctl return code',
++ $OCF_RESOURCE_INSTANCE, $rc );
++ return $OCF_ERR_GENERIC;
++ }
++ }
++
++ #
++ # At this point, the instance **MUST** be stopped gracefully.
++ #
++
++ # Note: We do not need to handle the recovery.conf file here as pgsql_start
++ # deals with that itself. Equally, no need to wait for the start to complete
++ # here, handled in pgsql_start.
++ $rc = pgsql_start();
++ if ( $rc == $OCF_SUCCESS ) {
++ ocf_log( 'info', 'pgsql_demote: "%s" started as a secondary',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_SUCCESS;
++ }
++
++ # NOTE: No need to double check the instance state as pgsql_start already uses
++ # pgsql_monitor to check the state before returning.
++
++ ocf_exit_reason( 'Starting "%s" as a standby failed (returned %d)',
++ $OCF_RESOURCE_INSTANCE, $rc );
++ return $OCF_ERR_GENERIC;
++}
++
++
++# Promote the secondary instance to primary
++#
++sub pgsql_promote {
++ my $rc;
++ my $cancel_switchover;
++
++ $rc = pgsql_monitor();
++
++ if ( $rc == $OCF_SUCCESS ) {
++ # Running as slave. Normal, expected behavior.
++ ocf_log( 'debug', 'pgsql_promote: "%s" currently running as a standby',
++ $OCF_RESOURCE_INSTANCE );
++ }
++ elsif ( $rc == $OCF_RUNNING_MASTER ) {
++ # Already a master. Unexpected, but not a problem.
++ ocf_log( 'info', '"%s" already running as a primary',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_SUCCESS;
++ }
++ elsif ( $rc == $OCF_NOT_RUNNING ) { # INFO this is not supposed to happen.
++ # Currently not running. Need to start before promoting.
++ ocf_log( 'info', '"%s" currently not running, starting it',
++ $OCF_RESOURCE_INSTANCE );
++
++ $rc = pgsql_start();
++ if ( $rc != $OCF_SUCCESS ) {
++ ocf_exit_reason( 'Failed to start the instance "%s"',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_ERR_GENERIC;
++ }
++ }
++ else {
++ ocf_exit_reason( 'Unexpected error, cannot promote "%s"',
++ $OCF_RESOURCE_INSTANCE );
++ return $OCF_ERR_GENERIC;
++ }
++
++ #
++ # At this point, the instance **MUST** be started as a secondary.
++ #
++
++ # Cancel the switchover if it has been considered not safe during the
++ # pre-promote action
++ $cancel_switchover = _get_priv_attr('cancel_switchover');
++ if ( $cancel_switchover ) { # if set to a non-empty, non-zero value
++ ocf_exit_reason( 'Switchover has been canceled from pre-promote action' );
++
++ _delete_priv_attr( 'cancel_switchover' );
++
++ return $OCF_ERR_GENERIC if $cancel_switchover eq '1';
++ return $OCF_ERR_ARGS; # ban the resource from the node if we have an
++ # internal error during _check_switchover
++ }
++
++ # Do not check for a better candidate if we try to recover the master.
++ # Recovery of a master is detected during the pre-promote action. It sets
++ # the private attribute 'recover_master' to '1' if this is a master recovery.
++ if ( _get_priv_attr( 'recover_master' ) eq '1' ) {
++ ocf_log( 'info', 'Recovering old master, no election needed');
++ }
++ else {
++
++ # The promotion is occurring on the best known candidate (highest
++ # master score), as chosen by pacemaker during the last working monitor
++ # on the previous master (see pgsql_monitor/_check_locations subs).
++ # To avoid any race condition between the last monitor action on the
++ # previous master and the **real** most up-to-date standby, we
++ # set each standby location during the "pre-promote" action, and store
++ # them using the "lsn_location" resource attribute.
++ #
++ # The best standby to promote would have the highest known LSN. If the
++ # current resource is not the best one, we need to modify the master
++ # scores accordingly, and abort the current promotion.
++ ocf_log( 'debug',
++ 'pgsql_promote: checking if current node is the best candidate for promotion' );
++
++ # Exclude nodes that are known to be unavailable (not in the current
++ # partition) using the "crm_node" command
++ my @active_nodes = split /\s+/ => _get_priv_attr( 'nodes' );
++ my $node_to_promote = '';
++ my $ans;
++ my $max_tl;
++ my $max_lsn;
++ my $node_tl;
++ my $node_lsn;
++ my $wal_num;
++ my $wal_off;
++
++ # Get the "lsn_location" attribute value for the current node, as set
++ # during the "pre-promote" action.
++ # It should be the greatest among the secondary instances.
++ $ans = _get_priv_attr( 'lsn_location' );
++
++ if ( $ans eq '' ) {
++ # This should not happen as the "lsn_location" attribute should have
++ # been updated during the "pre-promote" action.
++ ocf_exit_reason( 'Can not get current node LSN location' );
++ return $OCF_ERR_GENERIC;
++ }
++
++ chomp $ans;
++ ( $max_tl, $max_lsn ) = split /#/, $ans;
++
++ ocf_log( 'debug', 'pgsql_promote: current node TL#LSN location: %s#%s',
++ $max_tl, $max_lsn );
++
++ # Now we compare with the other available nodes.
++ foreach my $node ( @active_nodes ) {
++ # We exclude the current node from the check.
++ next if $node eq $nodename;
++
++ # Get the "lsn_location" attribute value for the node, as set during
++ # the "pre-promote" action.
++ # This is implemented as a loop as private attributes are asynchronously
++ # available from other nodes.
++ # see: https://github.com/ClusterLabs/PAF/issues/131
++ # NOTE: if a node did not set its lsn_location for some reason, this will end
++ # with a timeout and the whole promotion will start again.
++ WAIT_FOR_LSN: {
++ $ans = _get_priv_attr( 'lsn_location', $node );
++ if ( $ans eq '' ) {
++ ocf_log( 'info', 'pgsql_promote: waiting for LSN from %s', $node );
++ select( undef, undef, undef, 0.1 );
++ redo WAIT_FOR_LSN;
++ }
++ }
++
++ chomp $ans;
++ ( $node_tl, $node_lsn ) = split /#/, $ans;
++
++ ocf_log( 'debug',
++ 'pgsql_promote: comparing with "%s": TL#LSN is %s#%s',
++ $node, $node_tl, $node_lsn );
++
++ # If the node has a higher LSN, select it as the best candidate for
++ # promotion and keep looping to check the TL/LSN of the other nodes.
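++ # For example (illustrative decimal values): a node reporting "2#83886080"
++ # wins over one reporting "1#100000000" because the timeline is compared
++ # first; with equal timelines, the higher LSN wins.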
++ if ( $node_tl > $max_tl
++ or ( $node_tl == $max_tl and $node_lsn > $max_lsn )
++ ) {
++ ocf_log( 'debug',
++ 'pgsql_promote: "%s" is a better candidate to promote (%s#%s > %s#%s)',
++ $node, $node_tl, $node_lsn, $max_tl, $max_lsn );
++ $node_to_promote = $node;
++ $max_tl = $node_tl;
++ $max_lsn = $node_lsn;
++ }
++ }
++
++ # If any node has been selected, we adapt the master scores accordingly
++ # and break the current promotion.
++ if ( $node_to_promote ne '' ) {
++ ocf_exit_reason(
++ '%s is the best candidate to promote, aborting current promotion',
++ $node_to_promote );
++
++ # Reset current node master score.
++ _set_master_score( '1' );
++
++ # Set promotion candidate master score.
++ _set_master_score( '1000', $node_to_promote );
++
++ # We fail the promotion to trigger another promotion transition
++ # with the new scores.
++ return $OCF_ERR_GENERIC;
++ }
++
++ # Else, we will keep on promoting the current node.
++ }
++
++ unless (
++ # Promote the instance on the current node.
++ _runas( $PGCTL, '--pgdata', $pgdata, '-w', 'promote' ) == 0 )
++ {
++ ocf_exit_reason( 'Error during promotion command' );
++ return $OCF_ERR_GENERIC;
++ }
++
++ # The instance promotion is asynchronous, so we need to wait for this
++ # process to complete.
++ while ( pgsql_monitor() != $OCF_RUNNING_MASTER ) {
++ ocf_log( 'info', 'Waiting for the promote to complete' );
++ sleep 1;
++ }
++
++ ocf_log( 'info', 'Promote complete' );
++
++ return $OCF_SUCCESS;
++}
++
++# This action is called **before** the actual promotion when a failing master
++# is considered unreclaimable or recoverable, or when a new master must be
++# promoted (switchover or first start).
++# As with every "notify" action, it is executed almost simultaneously on all
++# available nodes.
++sub pgsql_notify_pre_promote {
++ my $rc;
++ my $node_tl;
++ my $node_lsn;
++ my %cdata;
++ my %active_nodes;
++ my $attr_nodes;
++
++ ocf_log( 'info', 'Promoting instance on node "%s"',
++ $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} );
++
++ # No need to do an election between slaves if this is recovery of the master
++ if ( _is_master_recover( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} ) ) {
++ ocf_log( 'warning', 'This is a master recovery!' );
++
++ _set_priv_attr( 'recover_master', '1' )
++ if $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename;
++
++ return $OCF_SUCCESS;
++ }
++
++ # Environment cleanup!
++ _delete_priv_attr( 'lsn_location' );
++ _delete_priv_attr( 'recover_master' );
++ _delete_priv_attr( 'nodes' );
++ _delete_priv_attr( 'cancel_switchover' );
++
++ # check for the last received entry of WAL from the master if we are
++ # the designated slave to promote
++ if ( _is_switchover( $nodename ) and scalar
++ grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'promote'} }
++ ) {
++ $rc = _check_switchover();
++
++ unless ( $rc == 0 ) {
++ # Shortcut the election process as the switchover will be
++ # canceled
++ _set_priv_attr( 'cancel_switchover', $rc );
++ return $OCF_SUCCESS; # return code is ignored during notify
++ }
++
++ # If the sub keeps going, that means the switchover is safe.
++ # Keep going with the election process in case the switchover was
++ # instructed to the wrong node.
++ # FIXME: should we allow a switchover to a lagging slave?
++ }
++
++ # We need to trigger an election between existing slaves to promote the best
++ # one based on its current LSN location. Each node sets a private attribute
++ # "lsn_location" with its TL and LSN location.
++ #
++ # During the following promote action, the designated standby for
++ # promotion uses these attributes to check if the instance to be promoted
++ # is the best one, so we can avoid a race condition between the last
++ # successful monitor on the previous master and the current promotion.
++
++ # As we can not break the transition from a notification action, we check
++ # during the promotion if each node's TL and LSN are valid.
++
++ # Force a checkpoint to make sure the controldata shows the very last TL
++ _query( q{ CHECKPOINT }, {} );
++ %cdata = _get_controldata();
++ $node_lsn = _get_last_received_lsn( 'in decimal' );
++
++ unless ( defined $node_lsn ) {
++ ocf_log( 'warning', 'Unknown current node LSN' );
++ # Return codes are ignored during notifications...
++ return $OCF_SUCCESS;
++ }
++
++ $node_lsn = "$cdata{'tl'}#$node_lsn";
++
++ ocf_log( 'info', 'Current node TL#LSN: %s', $node_lsn );
++
++ # Set the "lsn_location" attribute value for this node so we can use it
++ # during the following "promote" action.
++ _set_priv_attr( 'lsn_location', $node_lsn );
++
++ ocf_log( 'warning', 'Could not set the current node LSN' )
++ if $? != 0 ;
++
++ # If this node is the future master, keep track of the slaves that
++ # received the same notification to compare our LSN with them during
++ # promotion
++ if ( $OCF_NOTIFY_ENV{'promote'}[0]{'uname'} eq $nodename ) {
++ # Build the list of active nodes:
++ # master + slave + start - stop
++ # FIXME: Deal with rsc started during the same transition but **after**
++ # the promotion ?
++ $active_nodes{ $_->{'uname'} }++ foreach @{ $OCF_NOTIFY_ENV{'active'} },
++ @{ $OCF_NOTIFY_ENV{'start'} };
++ $active_nodes{ $_->{'uname'} }-- foreach @{ $OCF_NOTIFY_ENV{'stop'} };
++
++ $attr_nodes = join " "
++ => grep { $active_nodes{$_} > 0 } keys %active_nodes;
++
++ _set_priv_attr( 'nodes', $attr_nodes );
++ }
++
++ return $OCF_SUCCESS;
++}
++
++# This action is called after a promote action.
++sub pgsql_notify_post_promote {
++
++ # We have a new master (or the previous one recovered).
++ # Environment cleanup!
++ _delete_priv_attr( 'lsn_location' );
++ _delete_priv_attr( 'recover_master' );
++ _delete_priv_attr( 'nodes' );
++ _delete_priv_attr( 'cancel_switchover' );
++
++ return $OCF_SUCCESS;
++}
++
++# This is called before a demote occurs.
++sub pgsql_notify_pre_demote {
++ my $rc;
++ my %cdata;
++
++ # do nothing if the local node will not be demoted
++ return $OCF_SUCCESS unless scalar
++ grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'demote'} };
++
++ $rc = pgsql_monitor();
++
++ # do nothing if this is not a master recovery
++ return $OCF_SUCCESS unless _is_master_recover( $nodename )
++ and $rc == $OCF_FAILED_MASTER;
++
++ # in case of master crash, we need to detect if the CRM tries to recover
++ # the master clone. The usual transition is to do:
++ # demote->stop->start->promote
++ #
++ # There are multiple flaws with this transition:
++ # * the 1st and 2nd actions will fail because the instance is in the
++ # OCF_FAILED_MASTER state
++ # * the usual start action is dangerous as the instance will start with
++ # a recovery.conf instead of entering a normal recovery process
++ #
++ # To avoid this, we try to start the instance in recovery from here.
++ # If it succeeds, at least it will be demoted correctly with a normal
++ # status. If it fails, it will be caught by the next steps.
++
++ ocf_log( 'info', 'Trying to start failing master "%s"...',
++ $OCF_RESOURCE_INSTANCE );
++
++ # Either the instance managed to start or it couldn't.
++ # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't
++ # start, this error will be caught later during the various checks
++ _pg_ctl_start();
++
++ %cdata = _get_controldata();
++
++ ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
++
++ return $OCF_SUCCESS;
++}
++
++# This is called before a stop occurs.
++sub pgsql_notify_pre_stop {
++ my $rc;
++ my %cdata;
++
++ # do nothing if the local node will not be stopped
++ return $OCF_SUCCESS unless scalar
++ grep { $_->{'uname'} eq $nodename } @{ $OCF_NOTIFY_ENV{'stop'} };
++
++ $rc = _controldata_to_ocf();
++
++ # do nothing if this is not a slave recovery
++ return $OCF_SUCCESS unless _is_slave_recover( $nodename )
++ and $rc == $OCF_RUNNING_SLAVE;
++
++ # in case of a slave crash, we need to detect if the CRM tries to recover
++ # the slave clone. The usual transition is to do: stop->start
++ #
++ # This transition cannot work because the instance is in the
++ # OCF_ERR_GENERIC state. So the stop action will fail, leading most
++ # probably to a fencing action.
++ #
++ # To avoid this, we try to start the instance in recovery from here.
++ # If it succeeds, at least it will be stopped correctly with a normal
++ # status. If it fails, it will be caught by the next steps.
++
++ ocf_log( 'info', 'Trying to start failing slave "%s"...',
++ $OCF_RESOURCE_INSTANCE );
++
++ # Either the instance managed to start or it couldn't.
++ # We rely on the pg_ctl '-w' switch to take care of this. If it couldn't
++ # start, this error will be caught later during the various checks
++ _pg_ctl_start();
++
++ %cdata = _get_controldata();
++
++ ocf_log( 'info', 'State is "%s" after recovery attempt', $cdata{'state'} );
++
++ return $OCF_SUCCESS;
++}
++
++# Notify type actions, called on all available nodes before (pre) and after
++# (post) other actions, like promote, start, ...
++#
++sub pgsql_notify {
++ my $type_op;
++
++ ocf_log( 'debug', "pgsql_notify: environment variables: %s",
++ Data::Dumper->new( [ \%OCF_NOTIFY_ENV ] )->Sortkeys(1)->Terse(1)->Dump );
++
++ return unless %OCF_NOTIFY_ENV;
++
++ $type_op = "$OCF_NOTIFY_ENV{'type'}-$OCF_NOTIFY_ENV{'operation'}";
++
++ for ( $type_op ) {
++ if ( /^pre-promote$/ ) { return pgsql_notify_pre_promote() }
++ elsif ( /^post-promote$/ ) { return pgsql_notify_post_promote() }
++ elsif ( /^pre-demote$/ ) { return pgsql_notify_pre_demote() }
++ elsif ( /^pre-stop$/ ) { return pgsql_notify_pre_stop() }
++ }
++
++ return $OCF_SUCCESS;
++}
++
++# Action used to allow for online modification of resource parameters value.
++#
++sub pgsql_reload {
++
++ # No action necessary, the action declaration is enough to inform pacemaker
++ # that the modification of any non-unique parameter can be applied without
++ # having to restart the resource.
++ ocf_log( 'info', 'Instance "%s" reloaded', $OCF_RESOURCE_INSTANCE );
++ return $OCF_SUCCESS;
++
++}
++
++############################################################
++#### MAIN
++
++exit ocf_meta_data() if $__OCF_ACTION eq 'meta-data';
++exit ocf_methods() if $__OCF_ACTION eq 'methods';
++
++# Avoid "could not change directory" when executing commands as "system-user".
++chdir File::Spec->tmpdir();
++
++# mandatory sanity checks
++# check pgdata
++if ( ! -d $pgdata ) {
++ ocf_exit_reason( 'PGDATA "%s" does not exist', $pgdata );
++ exit $OCF_ERR_ARGS;
++}
++
++# check datadir
++if ( ! -d $datadir ) {
++ ocf_exit_reason( 'data_directory "%s" does not exist', $datadir );
++ exit $OCF_ERR_ARGS;
++}
++
++# Set PostgreSQL version
++$PGVERNUM = _get_pg_version();
++
++# Set current node name.
++$nodename = ocf_local_nodename();
++
++$exit_code = pgsql_validate_all();
++
++exit $exit_code if $exit_code != $OCF_SUCCESS or $__OCF_ACTION eq 'validate-all';
++
++# Run action
++for ( $__OCF_ACTION ) {
++ if ( /^start$/ ) { $exit_code = pgsql_start() }
++ elsif ( /^stop$/ ) { $exit_code = pgsql_stop() }
++ elsif ( /^monitor$/ ) { $exit_code = pgsql_monitor() }
++ elsif ( /^promote$/ ) { $exit_code = pgsql_promote() }
++ elsif ( /^demote$/ ) { $exit_code = pgsql_demote() }
++ elsif ( /^notify$/ ) { $exit_code = pgsql_notify() }
++ elsif ( /^reload$/ ) { $exit_code = pgsql_reload() }
++ else { $exit_code = $OCF_ERR_UNIMPLEMENTED }
++}
++
++exit $exit_code;
++
++
++=head1 EXAMPLE CRM SHELL
++
++The following is an example configuration for a pgsqlms resource using the
++crm(8) shell:
++
++ primitive pgsqld pgsqlms \
++ params pgdata="/var/lib/postgresql/9.6/main" \
++ bindir="/usr/lib/postgresql/9.6/bin" \
++ pghost="/var/run/postgresql" \
++ recovery_template="/etc/postgresql/9.6/main/recovery.conf.pcmk" \
++ start_opts="-c config_file=/etc/postgresql/9.6/main/postgresql.conf" \
++ op start timeout=60s \
++ op stop timeout=60s \
++ op promote timeout=30s \
++ op demote timeout=120s \
++ op monitor interval=15s timeout=10s role="Master" \
++ op monitor interval=16s timeout=10s role="Slave" \
++ op notify timeout=60s
++
++ ms pgsql-ha pgsqld meta notify=true
++
++
++=head1 EXAMPLE PCS
++
++The following is an example configuration for a pgsqlms resource using pcs(8):
++
++ pcs resource create pgsqld ocf:heartbeat:pgsqlms \
++ bindir=/usr/pgsql-9.6/bin pgdata=/var/lib/pgsql/9.6/data \
++ op start timeout=60s \
++ op stop timeout=60s \
++ op promote timeout=30s \
++ op demote timeout=120s \
++ op monitor interval=15s timeout=10s role="Master" \
++ op monitor interval=16s timeout=10s role="Slave" \
++ op notify timeout=60s --master notify=true
++
++=head1 SEE ALSO
++
++http://clusterlabs.org/
++
++=head1 AUTHOR
++
++Jehan-Guillaume de Rorthais and Mael Rimbault.
++
++=cut
+diff --color -uNr a/paf_LICENSE b/paf_LICENSE
+--- a/paf_LICENSE 1970-01-01 01:00:00.000000000 +0100
++++ b/paf_LICENSE 2021-04-14 09:16:39.083555835 +0200
+@@ -0,0 +1,19 @@
++Copyright (c) 2016-2020, Jehan-Guillaume de Rorthais, Mael Rimbault.
++
++Permission to use, copy, modify, and distribute this software and its
++documentation for any purpose, without fee, and without a written agreement
++is hereby granted, provided that the above copyright notice and this
++paragraph and the following two paragraphs appear in all copies.
++
++IN NO EVENT SHALL THE AUTHOR OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR
++DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
++LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
++DOCUMENTATION, EVEN IF THE AUTHOR OR DISTRIBUTORS HAVE BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGE.
++
++THE AUTHOR AND DISTRIBUTORS SPECIFICALLY DISCLAIMS ANY WARRANTIES,
++INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
++AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
++ON AN "AS IS" BASIS, AND THE AUTHOR AND DISTRIBUTORS HAS NO OBLIGATIONS TO
++PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
++
+diff --color -uNr a/paf_README.md b/paf_README.md
+--- a/paf_README.md 1970-01-01 01:00:00.000000000 +0100
++++ b/paf_README.md 2021-04-14 09:18:57.450968048 +0200
+@@ -0,0 +1,86 @@
++# PostgreSQL Automatic Failover
++
++High-Availability for Postgres, based on the industry references Pacemaker and
++Corosync.
++
++## Description
++
++Pacemaker is nowadays the industry reference for High Availability. As with
++Systemd, all Linux distributions moved (or are moving) to this unique
++Pacemaker+Corosync stack, removing all other existing high availability
++stacks (CMAN, RGManager, OpenAIS, ...). It is able to detect failures on
++various services and automatically decide to fail over the failing resource
++to another node when possible.
++
++To manage a specific service resource, Pacemaker interacts with it through a
++so-called "Resource Agent". Resource agents must comply with the OCF
++specification, which defines what they must implement (start, stop, promote,
++etc.), how they should behave and how they report their results to Pacemaker.
++
++PostgreSQL Automatic Failover is a new OCF resource agent dedicated to
++PostgreSQL. Its original goal is to keep a clear boundary between Pacemaker
++administration and PostgreSQL administration, to keep things simple,
++documented and yet powerful.
++
++Once your PostgreSQL cluster is built using internal streaming replication,
++PAF exposes to Pacemaker the current status of the PostgreSQL instance on
++each node: master, slave, stopped, catching up, etc. Should a failure occur
++on the master, Pacemaker tries to recover it by default. Should the failure
++be non-recoverable, PAF allows the slaves to elect the best of them (the
++closest one to the old master) and promote it as the new master. All of this
++thanks to the robust, feature-rich and, most importantly, experienced
++project: Pacemaker.
++
++For information about how to install this agent, see `INSTALL.md`.
++
++## Setup and requirements
++
++PAF supports PostgreSQL 9.3 and higher. It has been extensively tested under
++CentOS 6 and 7 in various scenarios.
++
++PAF has been written to give the administrator maximum control over their
++PostgreSQL configuration and architecture. Thus, you are 100% responsible
++for the master/slave creation and setup. The agent will NOT edit your setup.
++It only requires you to follow these pre-requisites:
++
++ * slaves __must__ be in hot_standby (accepting read-only connections);
++ * the following parameters __must__ be configured in the appropriate place:
++ * `standby_mode = on` (for PostgreSQL 11 and before)
++ * `recovery_target_timeline = 'latest'`
++ * `primary_conninfo` with `application_name` set to the node name as seen
++ in Pacemaker.
++ * these last parameters have been merged into the instance configuration
++ file with PostgreSQL 12. For PostgreSQL 11 and before, you __must__
++ provide a `recovery.conf` template file (see the example below).
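++
++A minimal `recovery.conf.pcmk` template for PostgreSQL 11 and before could look
++like the following sketch (the connection host and the node name `srv1` are
++placeholders to adapt to your setup):
++
++    standby_mode = on
++    recovery_target_timeline = 'latest'
++    primary_conninfo = 'host=192.168.122.50 application_name=srv1'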
++
++When setting up the resource in Pacemaker, here are the available parameters you
++can set:
++
++ * `bindir`: location of the PostgreSQL binaries (default: `/usr/bin`)
++ * `pgdata`: location of the PGDATA of your instance (default:
++ `/var/lib/pgsql/data`)
++ * `datadir`: path to the directory set in `data_directory` from your
++ postgresql.conf file. This parameter has the same default as PostgreSQL
++ itself: the `pgdata` parameter value. Unless you have a special PostgreSQL
++ setup and you understand this parameter, __ignore it__
++ * `pghost`: the socket directory or IP address to use to connect to the
++ local instance (default: `/tmp` or `/var/run/postgresql` for DEBIAN)
++ * `pgport`: the port to connect to the local instance (default: `5432`)
++ * `recovery_template`: __only__ for PostgreSQL 11 and before. The local
++ template that will be copied as the `PGDATA/recovery.conf` file. This
++ file must not exist on any node for PostgreSQL 12 and after.
++ (default: `$PGDATA/recovery.conf.pcmk`)
++ * `start_opts`: Additional arguments given to the postgres process on startup.
++ See "postgres --help" for available options. Useful when the postgresql.conf
++ file is not in the data directory (PGDATA), e.g.:
++ `-c config_file=/etc/postgresql/9.3/main/postgresql.conf`
++ * `system_user`: the system owner of your instance's process (default:
++ `postgres`)
++ * `maxlag`: maximum lag allowed on a standby before we set a negative master
++ score on it. The calculation is based on the difference between the current
++ xlog location on the master and the write location on the standby.
++ (default: 0, which disables this feature)
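++
++For example (assuming a resource named `pgsqld`, as in the manual page
++examples), a parameter such as `maxlag` can later be adjusted with pcs:
++
++    pcs resource update pgsqld maxlag=1000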
++
++For a demonstration about how to setup a cluster, see
++[http://clusterlabs.github.io/PAF/documentation.html](http://clusterlabs.github.io/PAF/documentation.html).
++
diff --git a/SOURCES/bz2021125-gcp-ilb-1-fix-log_enable.patch b/SOURCES/bz2021125-gcp-ilb-1-fix-log_enable.patch
new file mode 100644
index 0000000..b72f3e3
--- /dev/null
+++ b/SOURCES/bz2021125-gcp-ilb-1-fix-log_enable.patch
@@ -0,0 +1,29 @@
+From 9a7b47f1838e9d6e3c807e9db5312097adb5c499 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Fri, 5 Nov 2021 10:30:49 +0100
+Subject: [PATCH] gcp-ilb/Squid: fix issues detected by CI
+
+---
+ heartbeat/Squid.in | 2 +-
+ heartbeat/gcp-ilb | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb
+index 28484b241..48dc3ac4e 100755
+--- a/heartbeat/gcp-ilb
++++ b/heartbeat/gcp-ilb
+@@ -53,12 +53,12 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid"
+
+
+ #Validate command for logging
+-if $OCF_RESKEY_log_enable = "true"; then
++if [ $OCF_RESKEY_log_enable = "true" ]; then
+ if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then
+ logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params"
+ ocf_log debug "Logging command is: \'$logging_cmd\' "
+ else
+- $OCF_RESKEY_log_enable = "false"
++ OCF_RESKEY_log_enable="false"
+ ocf_log err "\'$logging_cmd\' is invalid. External logging disabled."
+
+ fi;
diff --git a/SOURCES/bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch b/SOURCES/bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch
new file mode 100644
index 0000000..8a7df42
--- /dev/null
+++ b/SOURCES/bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch
@@ -0,0 +1,51 @@
+From 14576f7ca02fb0abff188238ac019e88ab06e878 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 9 Nov 2021 11:49:36 +0100
+Subject: [PATCH] gcp-ilb: only check if log_cmd binary is available if
+ log_enable is true
+
+---
+ heartbeat/gcp-ilb | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/heartbeat/gcp-ilb b/heartbeat/gcp-ilb
+index 48dc3ac4e..f84f373b7 100755
+--- a/heartbeat/gcp-ilb
++++ b/heartbeat/gcp-ilb
+@@ -37,7 +37,7 @@ if type "socat" > /dev/null 2>&1; then
+ OCF_RESKEY_cat_default="socat"
+ else
+ OCF_RESKEY_cat_default="nc"
+-fi;
++fi
+
+
+ : ${OCF_RESKEY_cat=${OCF_RESKEY_cat_default}}
+@@ -53,7 +53,7 @@ pidfile="/var/run/$OCF_RESOURCE_INSTANCE.pid"
+
+
+ #Validate command for logging
+-if [ $OCF_RESKEY_log_enable = "true" ]; then
++if ocf_is_true "$OCF_RESKEY_log_enable"; then
+ if type $OCF_RESKEY_log_cmd > /dev/null 2>&1; then
+ logging_cmd="$OCF_RESKEY_log_cmd $OCF_RESKEY_log_params"
+ ocf_log debug "Logging command is: \'$logging_cmd\' "
+@@ -61,7 +61,7 @@ if [ $OCF_RESKEY_log_enable = "true" ]; then
+ OCF_RESKEY_log_enable="false"
+ ocf_log err "\'$logging_cmd\' is invalid. External logging disabled."
+
+- fi;
++ fi
+ fi
+
+
+@@ -285,7 +285,8 @@ ilb_stop() {
+
+ ilb_validate() {
+ check_binary "$OCF_RESKEY_cat"
+- check_binary "$OCF_RESKEY_log_cmd"
++
++ ocf_is_true "$OCF_RESKEY_log_enable" && check_binary "$OCF_RESKEY_log_cmd"
+
+ if ! ocf_is_decimal "$OCF_RESKEY_port"; then
+ ocf_exit_reason "$OCF_RESKEY_port is not a valid port"
diff --git a/SOURCES/bz2029704-1-db2-crm_attribute-use-forever.patch b/SOURCES/bz2029704-1-db2-crm_attribute-use-forever.patch
new file mode 100644
index 0000000..17f26c8
--- /dev/null
+++ b/SOURCES/bz2029704-1-db2-crm_attribute-use-forever.patch
@@ -0,0 +1,32 @@
+From 925180da2f41feddc5aac3c249563eb179b34029 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Mon, 22 Nov 2021 16:44:48 +0100
+Subject: [PATCH] db2: use -l forever instead of -t nodes -l reboot, as they
+ conflict with eachother
+
+---
+ heartbeat/db2 | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/db2 b/heartbeat/db2
+index 03146a957..fa2a45a5d 100755
+--- a/heartbeat/db2
++++ b/heartbeat/db2
+@@ -274,7 +274,7 @@ db2_fal_attrib() {
+ while read id node member
+ do
+ [ "$member" = member -a "$node" != "$me" ] || continue
+- crm_attribute -t nodes -l reboot --node=$node -n $attr -v "$3"
++ crm_attribute -l forever --node=$node -n $attr -v "$3"
+ rc=$?
+ ocf_log info "DB2 instance $instance($db2node/$db: setting attrib for FAL to $FIRST_ACTIVE_LOG @ $node"
+ [ $rc != 0 ] && break
+@@ -282,7 +282,7 @@ db2_fal_attrib() {
+ ;;
+
+ get)
+- crm_attribute -t nodes -l reboot -n $attr -G --quiet 2>&1
++ crm_attribute -l forever -n $attr -G --quiet 2>&1
+ rc=$?
+ if [ $rc != 0 ]
+ then
diff --git a/SOURCES/bz2029704-2-db2-fixes.patch b/SOURCES/bz2029704-2-db2-fixes.patch
new file mode 100644
index 0000000..c9fe8c3
--- /dev/null
+++ b/SOURCES/bz2029704-2-db2-fixes.patch
@@ -0,0 +1,32 @@
+From 75eaf06eea8957aa3941823955d1c8fa7933ab1d Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Wed, 23 Feb 2022 16:32:21 +0100
+Subject: [PATCH] db2: only warn when notify isnt set, and use
+ ocf_local_nodename() to get node name
+
+---
+ heartbeat/db2 | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/db2 b/heartbeat/db2
+index fa2a45a5d..ea24d33fc 100755
+--- a/heartbeat/db2
++++ b/heartbeat/db2
+@@ -267,7 +267,7 @@ db2_fal_attrib() {
+
+ case "$2" in
+ set)
+- me=$(uname -n)
++ me=$(ocf_local_nodename)
+
+ # loop over all member nodes and set attribute
+ crm_node -l |
+@@ -284,7 +284,7 @@ db2_fal_attrib() {
+ get)
+ crm_attribute -l forever -n $attr -G --quiet 2>&1
+ rc=$?
+- if [ $rc != 0 ]
++ if ! ocf_is_true "$OCF_RESKEY_CRM_meta_notify" && [ $rc != 0 ]
+ then
+ ocf_log warn "DB2 instance $instance($db2node/$db: can't retrieve attribute $attr, are you sure notifications are enabled ?"
+ fi
diff --git a/SOURCES/bz2029753-podman-remove-anonymous-volumes.patch b/SOURCES/bz2029753-podman-remove-anonymous-volumes.patch
new file mode 100644
index 0000000..f5ad417
--- /dev/null
+++ b/SOURCES/bz2029753-podman-remove-anonymous-volumes.patch
@@ -0,0 +1,22 @@
+From c6338011cf9ea69324f44c8c31a4ca2478aab35a Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 7 Dec 2021 08:59:50 +0100
+Subject: [PATCH] podman: remove anonymous volumes
+
+---
+ heartbeat/podman | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/podman b/heartbeat/podman
+index fd901c968..2b73857f1 100755
+--- a/heartbeat/podman
++++ b/heartbeat/podman
+@@ -251,7 +251,7 @@ remove_container()
+ return 0
+ fi
+ ocf_log notice "Cleaning up inactive container, ${CONTAINER}."
+- ocf_run podman rm $CONTAINER
++ ocf_run podman rm -v $CONTAINER
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ # due to a podman bug (rhbz#1841485), sometimes a stopped
diff --git a/SOURCES/bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch b/SOURCES/bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch
new file mode 100644
index 0000000..53d3299
--- /dev/null
+++ b/SOURCES/bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch
@@ -0,0 +1,43 @@
+From 7c54e4ecda33c90a1046c0688774f5b847ab10fe Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 7 Dec 2021 10:37:24 +0100
+Subject: [PATCH] Route: return OCF_NOT_RUNNING for probe action when interface
+ or route doesnt exist
+
+---
+ heartbeat/Route | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+diff --git a/heartbeat/Route b/heartbeat/Route
+index 8b390615a..7db41d0ae 100755
+--- a/heartbeat/Route
++++ b/heartbeat/Route
+@@ -227,15 +227,6 @@ route_stop() {
+ }
+
+ route_status() {
+- if [ -n "${OCF_RESKEY_device}" ]; then
+- # Must check if device exists or is gone.
+- # If device is gone, route is also unconfigured.
+- ip link show dev ${OCF_RESKEY_device} >/dev/null 2>&1
+- if [ $? -ne 0 ]; then
+- # Assume device does not exist, and short-circuit here.
+- return $OCF_NOT_RUNNING
+- fi
+- fi
+ show_output="$(ip $addr_family route show $(create_route_spec) 2>/dev/null)"
+ if [ $? -eq 0 ]; then
+ if [ -n "$show_output" ]; then
+@@ -251,7 +242,11 @@ route_status() {
+ else
+ # "ip route show" returned an error code. Assume something
+ # went wrong.
+- return $OCF_ERR_GENERIC
++ if ocf_is_probe; then
++ return $OCF_NOT_RUNNING
++ else
++ return $OCF_ERR_GENERIC
++ fi
+ fi
+ }
+
diff --git a/SOURCES/bz2055016-1-IPsrcaddr-dhcp-warning.patch b/SOURCES/bz2055016-1-IPsrcaddr-dhcp-warning.patch
new file mode 100644
index 0000000..34bad14
--- /dev/null
+++ b/SOURCES/bz2055016-1-IPsrcaddr-dhcp-warning.patch
@@ -0,0 +1,41 @@
+From 6d2ed7615614ede093f097189876d0f08553a43e Mon Sep 17 00:00:00 2001
+From: Reid Wahl
+Date: Mon, 14 Feb 2022 22:23:39 -0800
+Subject: [PATCH] IPsrcaddr: Add warning about DHCP
+
+If DHCP is enabled for the interface that serves OCF_RESKEY_ipaddress,
+then NetworkManager (and possibly dhclient in systems without NM;
+unsure) may later re-add a route that the IPsrcaddr resource replaced.
+This may cause the resource to fail or cause other unexpected behavior.
+
+So far this has been observed with a default route, albeit with an edge
+case of a configuration (OCF_RESKEY_ipaddress on a different subnet)
+that may not be totally valid. There are likely to be other situations
+as well where DHCP can cause conflicts with IPsrcaddr's manual updates
+via iproute. The safest option is to use only static configuration for
+the involved interface.
+
+Resolves: RHBZ#1654862
+
+Signed-off-by: Reid Wahl
+---
+ heartbeat/IPsrcaddr | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
+index ec868409f..fd7b6f68d 100755
+--- a/heartbeat/IPsrcaddr
++++ b/heartbeat/IPsrcaddr
+@@ -99,6 +99,12 @@ meta_data() {
+
+ Resource script for IPsrcaddr. It manages the preferred source address
+ modification.
++
++Note: DHCP should not be enabled for the interface serving the preferred
++source address. Enabling DHCP may result in unexpected behavior, such as
++the automatic addition of duplicate or conflicting routes. This may
++cause the IPsrcaddr resource to fail, or it may produce undesired
++behavior while the resource continues to run.
+
+ Manages the preferred source address for outgoing IP packets
+
diff --git a/SOURCES/bz2055016-2-IPsrcaddr-error-message-route-not-found.patch b/SOURCES/bz2055016-2-IPsrcaddr-error-message-route-not-found.patch
new file mode 100644
index 0000000..8a4a6fc
--- /dev/null
+++ b/SOURCES/bz2055016-2-IPsrcaddr-error-message-route-not-found.patch
@@ -0,0 +1,49 @@
+From 5a65f66ff803ad7ed15af958cc1efdde4d53dcb7 Mon Sep 17 00:00:00 2001
+From: Reid Wahl
+Date: Thu, 17 Feb 2022 03:53:21 -0800
+Subject: [PATCH] IPsrcaddr: Better error message when no matching route found
+
+If OCF_RESKEY_destination is not explicitly set and `ip route list`
+can't find a route matching the specifications, the NETWORK variable
+doesn't get set. This causes a certain failure of the start operation,
+because there is no PREFIX argument to `ip route replace` (syntax
+error). It may also cause unexpected behavior for stop operations (but
+not in all cases). During a monitor, this event can only happen if
+something has changed outside the cluster's control, and so is cause
+for warning there.
+
+Exit OCF_ERR_ARGS for start, log debug for probe, log warning for all
+other ops.
+
+Resolves: RHBZ#1654862
+
+Signed-off-by: Reid Wahl
+---
+ heartbeat/IPsrcaddr | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
+index fd7b6f68d..f0216722d 100755
+--- a/heartbeat/IPsrcaddr
++++ b/heartbeat/IPsrcaddr
+@@ -549,6 +549,20 @@ rc=$?
+ INTERFACE=`echo $findif_out | awk '{print $1}'`
+ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
+ NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
++
++ if [ -z "$NETWORK" ]; then
++ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
++ err_str="$err_str match $ipaddress' failed to find a matching route"
++
++ if [ "$__OCF_ACTION" = "start" ]; then
++ ocf_exit_reason "$err_str"
++ exit $OCF_ERR_ARGS
++ elif ! ocf_is_probe; then
++ ocf_log warn "$err_str"
++ else
++ ocf_log debug "$err_str"
++ fi
++ fi
+ else
+ NETWORK="$OCF_RESKEY_destination"
+ fi
diff --git a/SOURCES/bz2055016-3-IPsrcaddr-fix-indentation.patch b/SOURCES/bz2055016-3-IPsrcaddr-fix-indentation.patch
new file mode 100644
index 0000000..337943d
--- /dev/null
+++ b/SOURCES/bz2055016-3-IPsrcaddr-fix-indentation.patch
@@ -0,0 +1,56 @@
+From 0a197f1cd227e768837dff778a0c56fc1085d434 Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Mon, 21 Feb 2022 13:54:04 +0100
+Subject: [PATCH] IPsrcaddr: fix indentation in better error message code
+
+---
+ heartbeat/IPsrcaddr | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
+index f0216722d..c82adc0e9 100755
+--- a/heartbeat/IPsrcaddr
++++ b/heartbeat/IPsrcaddr
+@@ -542,27 +542,27 @@ fi
+ findif_out=`$FINDIF -C`
+ rc=$?
+ [ $rc -ne 0 ] && {
+- ocf_exit_reason "[$FINDIF -C] failed"
+- exit $rc
++ ocf_exit_reason "[$FINDIF -C] failed"
++ exit $rc
+ }
+
+ INTERFACE=`echo $findif_out | awk '{print $1}'`
+ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
+ NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
+
+- if [ -z "$NETWORK" ]; then
+- err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
+- err_str="$err_str match $ipaddress' failed to find a matching route"
+-
+- if [ "$__OCF_ACTION" = "start" ]; then
+- ocf_exit_reason "$err_str"
+- exit $OCF_ERR_ARGS
+- elif ! ocf_is_probe; then
+- ocf_log warn "$err_str"
+- else
+- ocf_log debug "$err_str"
+- fi
+- fi
++ if [ -z "$NETWORK" ]; then
++ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
++ err_str="$err_str match $ipaddress' failed to find a matching route"
++
++ if [ "$__OCF_ACTION" = "start" ]; then
++ ocf_exit_reason "$err_str"
++ exit $OCF_ERR_ARGS
++ elif ! ocf_is_probe; then
++ ocf_log warn "$err_str"
++ else
++ ocf_log debug "$err_str"
++ fi
++ fi
+ else
+ NETWORK="$OCF_RESKEY_destination"
+ fi
diff --git a/SOURCES/bz2055016-4-IPsrcaddr-fixes.patch b/SOURCES/bz2055016-4-IPsrcaddr-fixes.patch
new file mode 100644
index 0000000..c099fa5
--- /dev/null
+++ b/SOURCES/bz2055016-4-IPsrcaddr-fixes.patch
@@ -0,0 +1,117 @@
+From 50a596bfb977b18902dc62b99145bbd1a087690a Mon Sep 17 00:00:00 2001
+From: Oyvind Albrigtsen
+Date: Tue, 1 Mar 2022 11:06:07 +0100
+Subject: [PATCH] IPsrcaddr: fixes
+
+- use findif.sh to detect secondary interfaces
+- get metric and proto to update the correct route/update it correctly
+- match route using interface to fail when trying to update secondary
+ interfaces without specifying destination (would update default route
+ before)
+- also use PRIMARY_IP/OPTS during stop-action for default routes (to get
+ back to the exact routes we started with)
+- don't fail during stop-action if route doesn't exist
+- use [[:blank:]] for WS to follow POSIX standard (suggested by nrwahl)
+---
+ heartbeat/IPsrcaddr | 35 +++++++++++++++++++----------------
+ 1 file changed, 19 insertions(+), 16 deletions(-)
+
+diff --git a/heartbeat/IPsrcaddr b/heartbeat/IPsrcaddr
+index c82adc0e9..7dbf65ff5 100755
+--- a/heartbeat/IPsrcaddr
++++ b/heartbeat/IPsrcaddr
+@@ -52,6 +52,7 @@
+ # Initialization:
+ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++. ${OCF_FUNCTIONS_DIR}/findif.sh
+
+ # Defaults
+ OCF_RESKEY_ipaddress_default=""
+@@ -181,19 +182,21 @@ errorexit() {
+ #
+ # where the src clause "src Y.Y.Y.Y" may or may not be present
+
+-WS="[`echo -en ' \t'`]"
++WS="[[:blank:]]"
+ OCTET="[0-9]\{1,3\}"
+ IPADDR="\($OCTET\.\)\{3\}$OCTET"
+ SRCCLAUSE="src$WS$WS*\($IPADDR\)"
+ MATCHROUTE="\(.*${WS}\)\($SRCCLAUSE\)\($WS.*\|$\)"
+-FINDIF=$HA_BIN/findif
++METRICCLAUSE=".*\(metric$WS[^ ]\+\)"
++PROTOCLAUSE=".*\(proto$WS[^ ]\+\)"
++FINDIF=findif
+
+ # findif needs that to be set
+ export OCF_RESKEY_ip=$OCF_RESKEY_ipaddress
+
+ srca_read() {
+ # Capture matching route - doublequotes prevent word splitting...
+- ROUTE="`$CMDSHOW 2> /dev/null`" || errorexit "command '$CMDSHOW' failed"
++ ROUTE="`$CMDSHOW dev $INTERFACE 2> /dev/null`" || errorexit "command '$CMDSHOW' failed"
+
+ # ... so we can make sure there is only 1 matching route
+ [ 1 -eq `echo "$ROUTE" | wc -l` ] || \
+@@ -201,7 +204,7 @@ srca_read() {
+
+ # But there might still be no matching route
+ [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] && [ -z "$ROUTE" ] && \
+- ! ocf_is_probe && errorexit "no matching route exists"
++ ! ocf_is_probe && [ "$__OCF_ACTION" != stop ] && errorexit "no matching route exists"
+
+ # Sed out the source ip address if it exists
+ SRCIP=`echo $ROUTE | sed -n "s/$MATCHROUTE/\3/p"`
+@@ -232,8 +235,8 @@ srca_start() {
+ rc=$OCF_SUCCESS
+ ocf_log info "The ip route has been already set.($NETWORK, $INTERFACE, $ROUTE_WO_SRC)"
+ else
+- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE src $1 || \
+- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE src $1' failed"
++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC || \
++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $PROTO src $1 $METRIC' failed"
+
+ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
+ $CMDCHANGE $ROUTE_WO_SRC src $1 || \
+@@ -266,14 +269,11 @@ srca_stop() {
+
+ [ $rc = 2 ] && errorexit "The address you specified to stop does not match the preferred source address"
+
+- OPTS=""
+- if [ "$OCF_RESKEY_destination" != "0.0.0.0/0" ] ;then
+- PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')"
+- OPTS="proto kernel scope host src $PRIMARY_IP"
+- fi
++ PRIMARY_IP="$($IP2UTIL -4 -o addr show dev $INTERFACE primary | awk '{split($4,a,"/");print a[1]}')"
++ OPTS="proto kernel scope link src $PRIMARY_IP"
+
+- $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS || \
+- errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS' failed"
++ $IP2UTIL route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC || \
++ errorexit "command 'ip route replace $TABLE $NETWORK dev $INTERFACE $OPTS $METRIC' failed"
+
+ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
+ $CMDCHANGE $ROUTE_WO_SRC || \
+@@ -539,16 +539,19 @@ if [ $rc -ne $OCF_SUCCESS ]; then
+ esac
+ fi
+
+-findif_out=`$FINDIF -C`
++findif_out=`$FINDIF`
+ rc=$?
+ [ $rc -ne 0 ] && {
+- ocf_exit_reason "[$FINDIF -C] failed"
++ ocf_exit_reason "[$FINDIF] failed"
+ exit $rc
+ }
+
+ INTERFACE=`echo $findif_out | awk '{print $1}'`
++LISTROUTE=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress`
++METRIC=`echo $LISTROUTE | sed -n "s/$METRICCLAUSE/\1/p"`
++[ -z "$PROTO" ] && PROTO=`echo $LISTROUTE | sed -n "s/$PROTOCLAUSE/\1/p"`
+ if [ "$OCF_RESKEY_destination" = "0.0.0.0/0" ] ;then
+- NETWORK=`$IP2UTIL route list dev $INTERFACE scope link $PROTO match $ipaddress|grep -m 1 -o '^[^ ]*'`
++ NETWORK=`echo $LISTROUTE | grep -m 1 -o '^[^ ]*'`
+
+ if [ -z "$NETWORK" ]; then
+ err_str="command '$IP2UTIL route list dev $INTERFACE scope link $PROTO"
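
Tying the pieces of this fix together, a small read-only sketch (assuming a single matching route; eth0 and 192.168.122.50 are placeholders) shows the destination network and metric clause the agent would now preserve in its ip route replace call:

    IFACE=eth0               # placeholder interface
    SRCIP=192.168.122.50     # placeholder source address
    WS="[[:blank:]]"

    # The same route listing the agent stores in LISTROUTE
    LISTROUTE=$(ip route list dev "$IFACE" scope link match "$SRCIP")

    # First field of the route is the destination network
    NETWORK=$(echo "$LISTROUTE" | grep -m 1 -o '^[^ ]*')

    # Extract the "metric N" clause, if any, so it can be re-applied unchanged
    METRIC=$(echo "$LISTROUTE" | sed -n "s/.*\(metric$WS[^ ]*\).*/\1/p")

    echo "would run: ip route replace $NETWORK dev $IFACE src $SRCIP $METRIC"
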
diff --git a/SOURCES/ha-cloud-support-aliyun.patch b/SOURCES/ha-cloud-support-aliyun.patch
new file mode 100644
index 0000000..93d78aa
--- /dev/null
+++ b/SOURCES/ha-cloud-support-aliyun.patch
@@ -0,0 +1,12 @@
+diff --color -uNr a/heartbeat/aliyun-vpc-move-ip b/heartbeat/aliyun-vpc-move-ip
+--- a/heartbeat/aliyun-vpc-move-ip 2021-08-19 09:37:57.000000000 +0200
++++ b/heartbeat/aliyun-vpc-move-ip 2021-08-25 13:38:26.786626079 +0200
+@@ -17,7 +17,7 @@
+ OCF_RESKEY_interface_default="eth0"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_endpoint_default="vpc.aliyuncs.com"
+-OCF_RESKEY_aliyuncli_default="detect"
++OCF_RESKEY_aliyuncli_default="/usr/lib/fence-agents/support/aliyun/bin/aliyuncli"
+
+
+ : ${OCF_RESKEY_address=${OCF_RESKEY_address_default}}
diff --git a/SOURCES/ha-cloud-support-aws.patch b/SOURCES/ha-cloud-support-aws.patch
new file mode 100644
index 0000000..fbda111
--- /dev/null
+++ b/SOURCES/ha-cloud-support-aws.patch
@@ -0,0 +1,48 @@
+diff --color -uNr a/heartbeat/awseip b/heartbeat/awseip
+--- a/heartbeat/awseip 2020-12-03 14:31:17.000000000 +0100
++++ b/heartbeat/awseip 2021-02-15 16:47:36.624610378 +0100
+@@ -43,7 +43,7 @@
+ #
+ # Defaults
+ #
+-OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_api_delay_default="3"
+
+diff --color -uNr a/heartbeat/awsvip b/heartbeat/awsvip
+--- a/heartbeat/awsvip 2020-12-03 14:31:17.000000000 +0100
++++ b/heartbeat/awsvip 2021-02-15 16:47:48.960632484 +0100
+@@ -42,7 +42,7 @@
+ #
+ # Defaults
+ #
+-OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_api_delay_default="3"
+
+diff --color -uNr a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+--- a/heartbeat/aws-vpc-move-ip 2020-12-03 14:31:17.000000000 +0100
++++ b/heartbeat/aws-vpc-move-ip 2021-02-15 16:47:55.484644118 +0100
+@@ -35,7 +35,7 @@
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+ # Defaults
+-OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_region_default=""
+ OCF_RESKEY_ip_default=""
+diff --color -uNr a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in
+--- a/heartbeat/aws-vpc-route53.in 2020-12-03 14:31:17.000000000 +0100
++++ b/heartbeat/aws-vpc-route53.in 2021-02-15 16:47:59.808651828 +0100
+@@ -45,7 +45,7 @@
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+ # Defaults
+-OCF_RESKEY_awscli_default="/usr/bin/aws"
++OCF_RESKEY_awscli_default="/usr/lib/fence-agents/support/awscli/bin/aws"
+ OCF_RESKEY_profile_default="default"
+ OCF_RESKEY_hostedzoneid_default=""
+ OCF_RESKEY_fullname_default=""
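
These hunks only change the compiled-in default; awscli remains an ordinary resource parameter. A hypothetical pcs command (the resource name, the IP and the secondary_private_ip parameter are illustrative assumptions, while awscli and profile come straight from the defaults shown above) could point an awsvip resource back at a system-wide CLI:

    # Illustrative only; substitute real values for your deployment.
    pcs resource create awsvip ocf:heartbeat:awsvip \
            secondary_private_ip=10.0.0.50 \
            awscli=/usr/bin/aws profile=default \
            op monitor interval=30s
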
diff --git a/SOURCES/ha-cloud-support-gcloud.patch b/SOURCES/ha-cloud-support-gcloud.patch
new file mode 100644
index 0000000..95b0d7a
--- /dev/null
+++ b/SOURCES/ha-cloud-support-gcloud.patch
@@ -0,0 +1,33 @@
+diff --color -uNr a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in
+--- a/heartbeat/gcp-pd-move.in 2021-08-19 09:37:57.000000000 +0200
++++ b/heartbeat/gcp-pd-move.in 2021-08-25 13:50:54.461732967 +0200
+@@ -32,6 +32,7 @@
+ from ocf import logger
+
+ try:
++ sys.path.insert(0, '/usr/lib/fence-agents/support/google')
+ import googleapiclient.discovery
+ except ImportError:
+ pass
+diff --color -uNr a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in
+--- a/heartbeat/gcp-vpc-move-route.in 2021-08-19 09:37:57.000000000 +0200
++++ b/heartbeat/gcp-vpc-move-route.in 2021-08-25 13:51:17.489797999 +0200
+@@ -45,6 +45,7 @@
+ from ocf import *
+
+ try:
++ sys.path.insert(0, '/usr/lib/fence-agents/support/google')
+ import googleapiclient.discovery
+ import pyroute2
+ try:
+diff --color -uNr a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in
+--- a/heartbeat/gcp-vpc-move-vip.in 2021-08-19 09:37:57.000000000 +0200
++++ b/heartbeat/gcp-vpc-move-vip.in 2021-08-25 13:51:35.012847487 +0200
+@@ -29,6 +29,7 @@
+ from ocf import *
+
+ try:
++ sys.path.insert(0, '/usr/lib/fence-agents/support/google')
+ import googleapiclient.discovery
+ try:
+ from google.oauth2.service_account import Credentials as ServiceAccountCredentials
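
Because these agents only extend sys.path before importing googleapiclient, a quick shell check (the support path is taken from the hunks above; python3 matches the interpreter the spec selects) can confirm the bundled library is actually importable:

    # Prints the module location when the ha-cloud-support bundle is present.
    PYTHONPATH=/usr/lib/fence-agents/support/google \
            python3 -c 'import googleapiclient.discovery as d; print(d.__file__)'
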
diff --git a/SOURCES/nova-compute-wait-NovaEvacuate.patch b/SOURCES/nova-compute-wait-NovaEvacuate.patch
new file mode 100644
index 0000000..0e7b605
--- /dev/null
+++ b/SOURCES/nova-compute-wait-NovaEvacuate.patch
@@ -0,0 +1,787 @@
+diff --color -uNr a/doc/man/Makefile.am b/doc/man/Makefile.am
+--- a/doc/man/Makefile.am 2021-08-25 09:51:53.037906134 +0200
++++ b/doc/man/Makefile.am 2021-08-25 09:48:44.578408475 +0200
+@@ -97,6 +97,8 @@
+ ocf_heartbeat_ManageRAID.7 \
+ ocf_heartbeat_ManageVE.7 \
+ ocf_heartbeat_NodeUtilization.7 \
++ ocf_heartbeat_nova-compute-wait.7 \
++ ocf_heartbeat_NovaEvacuate.7 \
+ ocf_heartbeat_Pure-FTPd.7 \
+ ocf_heartbeat_Raid1.7 \
+ ocf_heartbeat_Route.7 \
+diff --color -uNr a/heartbeat/Makefile.am b/heartbeat/Makefile.am
+--- a/heartbeat/Makefile.am 2021-08-25 09:51:53.038906137 +0200
++++ b/heartbeat/Makefile.am 2021-08-25 09:48:44.588408501 +0200
+@@ -29,6 +29,8 @@
+
+ ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat
+
++ospdir = $(OCF_RA_DIR_PREFIX)/openstack
++
+ dtddir = $(datadir)/$(PACKAGE_NAME)
+ dtd_DATA = ra-api-1.dtd metadata.rng
+
+@@ -50,6 +52,9 @@
+ send_ua_SOURCES = send_ua.c IPv6addr_utils.c
+ send_ua_LDADD = $(LIBNETLIBS)
+
++osp_SCRIPTS = nova-compute-wait \
++ NovaEvacuate
++
+ ocf_SCRIPTS = AoEtarget \
+ AudibleAlarm \
+ ClusterMon \
+diff --color -uNr a/heartbeat/nova-compute-wait b/heartbeat/nova-compute-wait
+--- a/heartbeat/nova-compute-wait 1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/nova-compute-wait 2021-08-25 09:50:14.626646141 +0200
+@@ -0,0 +1,345 @@
++#!/bin/sh
++#
++#
++# nova-compute-wait agent manages compute daemons.
++#
++# Copyright (c) 2015
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of version 2 of the GNU General Public License as
++# published by the Free Software Foundation.
++#
++# This program is distributed in the hope that it would be useful, but
++# WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
++#
++# Further, this software is distributed without any warranty that it is
++# free of the rightful claim of any third person regarding infringement
++# or the like. Any license provided herein, whether implied or
++# otherwise, applies only to this software file. Patent licenses, if
++# any, provided herein do not apply to combinations of this program with
++# other software, or any other product whatsoever.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write the Free Software Foundation,
++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++#
++
++#######################################################################
++# Initialization:
++
++
++###
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++###
++
++: ${__OCF_ACTION=$1}
++
++#######################################################################
++
++meta_data() {
++ cat <<END
++
++
++1.0
++
++
++OpenStack Nova Compute Server.
++
++OpenStack Nova Compute Server
++
++
++
++
++
++Deprecated option not in use
++
++Deprecated
++
++
++
++
++
++Deprecated option not in use
++
++Deprecated
++
++
++
++
++Deprecated option not in use
++
++Deprecated
++
++
++
++
++
++Deprecated option not in use
++
++Deprecated
++
++
++
++
++
++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
++
++DNS domain
++
++
++
++
++
++Deprecated option not in use
++
++Deprecated
++
++
++
++
++
++Deprecated option not in use
++
++Deprecated
++
++
++
++
++
++How long to wait for nova to finish evacuating instances elsewhere
++before starting nova-compute. Only used when the agent detects
++evacuations might be in progress.
++
++You may need to increase the start timeout when increasing this value.
++
++Delay to allow evacuations time to complete
++
++
++
++
++
++
++
++
++
++
++
++
++
++END
++}
++
++#######################################################################
++
++# don't exit on TERM, to test that lrmd makes sure that we do exit
++trap sigterm_handler TERM
++sigterm_handler() {
++ ocf_log info "They use TERM to bring us down. No such luck."
++ return
++}
++
++nova_usage() {
++ cat </run/systemd/system/openstack-nova-compute.service.d/unfence-20.conf
++[Service]
++ExecStartPost=/sbin/fence_compute ${fence_options} -o on -n ${NOVA_HOST}
++EOF
++}
++
++nova_validate() {
++ rc=$OCF_SUCCESS
++
++ check_binary crudini
++ check_binary nova-compute
++ check_binary fence_compute
++
++ if [ ! -f /etc/nova/nova.conf ]; then
++ ocf_exit_reason "/etc/nova/nova.conf not found"
++ exit $OCF_ERR_CONFIGURED
++ fi
++
++ # Is the state directory writable?
++ state_dir=$(dirname $statefile)
++ touch "$state_dir/$$"
++ if [ $? != 0 ]; then
++ ocf_exit_reason "Invalid state directory: $state_dir"
++ return $OCF_ERR_ARGS
++ fi
++ rm -f "$state_dir/$$"
++
++ NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null)
++ if [ $? = 1 ]; then
++ short_host=$(uname -n | awk -F. '{print $1}')
++ if [ "x${OCF_RESKEY_domain}" != x ]; then
++ NOVA_HOST=${short_host}.${OCF_RESKEY_domain}
++ else
++ NOVA_HOST=$(uname -n)
++ fi
++ fi
++
++ if [ $rc != $OCF_SUCCESS ]; then
++ exit $rc
++ fi
++ return $rc
++}
++
++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
++
++: ${OCF_RESKEY_evacuation_delay=120}
++case $__OCF_ACTION in
++meta-data) meta_data
++ exit $OCF_SUCCESS
++ ;;
++usage|help) nova_usage
++ exit $OCF_SUCCESS
++ ;;
++esac
++
++case $__OCF_ACTION in
++start) nova_validate; nova_start;;
++stop) nova_stop;;
++monitor) nova_validate; nova_monitor;;
++notify) nova_notify;;
++validate-all) exit $OCF_SUCCESS;;
++*) nova_usage
++ exit $OCF_ERR_UNIMPLEMENTED
++ ;;
++esac
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
++
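
The nova_validate logic above derives the host name to unfence from nova.conf, falling back to uname. A standalone sketch of that lookup (crudini and /etc/nova/nova.conf come from the agent; the domain value is a placeholder):

    OCF_RESKEY_domain=example.com   # placeholder; normally a resource parameter

    # Mirror of the agent's NOVA_HOST detection
    NOVA_HOST=$(crudini --get /etc/nova/nova.conf DEFAULT host 2>/dev/null)
    if [ $? -ne 0 ]; then
            short_host=$(uname -n | awk -F. '{print $1}')
            if [ -n "$OCF_RESKEY_domain" ]; then
                    NOVA_HOST=${short_host}.${OCF_RESKEY_domain}
            else
                    NOVA_HOST=$(uname -n)
            fi
    fi
    echo "nova-compute-wait would unfence host: $NOVA_HOST"
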
+diff --color -uNr a/heartbeat/NovaEvacuate b/heartbeat/NovaEvacuate
+--- a/heartbeat/NovaEvacuate 1970-01-01 01:00:00.000000000 +0100
++++ b/heartbeat/NovaEvacuate 2021-08-25 09:50:23.780670326 +0200
+@@ -0,0 +1,400 @@
++#!/bin/bash
++#
++# Copyright 2015 Red Hat, Inc.
++#
++# Description: Manages evacuation of nodes running nova-compute
++#
++# Authors: Andrew Beekhof
++#
++# Support: openstack@lists.openstack.org
++# License: Apache Software License (ASL) 2.0
++#
++
++
++#######################################################################
++# Initialization:
++
++###
++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
++###
++
++: ${__OCF_ACTION=$1}
++
++#######################################################################
++
++meta_data() {
++ cat <<END
++
++
++1.0
++
++
++Facility for tracking a list of compute nodes and reliably evacuating the ones that fence_evacuate has flagged.
++
++Evacuator for OpenStack Nova Compute Server
++
++
++
++
++
++Authorization URL for connecting to keystone in admin context
++
++Authorization URL
++
++
++
++
++
++Username for connecting to keystone in admin context
++
++Username
++
++
++
++
++
++Password for connecting to keystone in admin context
++
++Password
++
++
++
++
++
++Tenant name for connecting to keystone in admin context.
++Note that with Keystone V3 tenant names are only unique within a domain.
++
++Tenant name
++
++
++
++
++
++DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN
++
++DNS domain
++
++
++
++
++
++Nova API location (internal, public or admin URL)
++
++Nova API location (internal, public or admin URL)
++
++
++
++
++
++Region name for connecting to nova.
++
++Region name
++
++
++
++
++
++Explicitly allow client to perform "insecure" TLS (https) requests.
++The server's certificate will not be verified against any certificate authorities.
++This option should be used with caution.
++
++Allow insecure TLS requests
++
++
++
++
++
++Indicate that nova storage for instances is not shared across compute
++nodes. This must match the reality of how nova storage is configured!
++Otherwise VMs could end up in error state upon evacuation. When
++storage is non-shared, instances on dead hypervisors will be rebuilt
++from their original image or volume, so anything on ephemeral storage
++will be lost.
++
++Disable shared storage recovery for instances
++
++
++
++
++
++Enable extra logging from the evacuation process
++
++Enable debug logging
++
++
++
++
++
++Allows delaying the nova evacuate API call, e.g. to give a storage array time to clean
++up eventual locks/leases.
++
++Nova evacuate delay
++
++
++
++
++
++
++
++
++
++
++
++
++
++END
++}
++
++#######################################################################
++
++# don't exit on TERM, to test that lrmd makes sure that we do exit
++trap sigterm_handler TERM
++sigterm_handler() {
++ ocf_log info "They use TERM to bring us down. No such luck."
++ return
++}
++
++evacuate_usage() {
++ cat < >(grep -v "attribute does not exist" 1>&2) |
++ sed 's/ value=""/ value="no"/' |
++ tr '="' ' ' |
++ awk '{print $4" "$6}'
++ )
++ return $OCF_SUCCESS
++}
++
++evacuate_validate() {
++ rc=$OCF_SUCCESS
++ fence_options=""
++
++ if ! have_binary fence_evacuate; then
++ check_binary fence_compute
++ fi
++
++ # Is the state directory writable?
++ state_dir=$(dirname $statefile)
++ touch "$state_dir/$$"
++ if [ $? != 0 ]; then
++ ocf_exit_reason "Invalid state directory: $state_dir"
++ return $OCF_ERR_ARGS
++ fi
++ rm -f "$state_dir/$$"
++
++ if [ -z "${OCF_RESKEY_auth_url}" ]; then
++ ocf_exit_reason "auth_url not configured"
++ exit $OCF_ERR_CONFIGURED
++ fi
++
++ fence_options="${fence_options} -k ${OCF_RESKEY_auth_url}"
++
++ if [ -z "${OCF_RESKEY_username}" ]; then
++ ocf_exit_reason "username not configured"
++ exit $OCF_ERR_CONFIGURED
++ fi
++
++ fence_options="${fence_options} -l ${OCF_RESKEY_username}"
++
++ if [ -z "${OCF_RESKEY_password}" ]; then
++ ocf_exit_reason "password not configured"
++ exit $OCF_ERR_CONFIGURED
++ fi
++
++ fence_options="${fence_options} -p ${OCF_RESKEY_password}"
++
++ if [ -z "${OCF_RESKEY_tenant_name}" ]; then
++ ocf_exit_reason "tenant_name not configured"
++ exit $OCF_ERR_CONFIGURED
++ fi
++
++ fence_options="${fence_options} -t ${OCF_RESKEY_tenant_name}"
++
++ if [ -n "${OCF_RESKEY_domain}" ]; then
++ fence_options="${fence_options} -d ${OCF_RESKEY_domain}"
++ fi
++
++ if [ -n "${OCF_RESKEY_region_name}" ]; then
++ fence_options="${fence_options} \
++ --region-name ${OCF_RESKEY_region_name}"
++ fi
++
++ if [ -n "${OCF_RESKEY_insecure}" ]; then
++ if ocf_is_true "${OCF_RESKEY_insecure}"; then
++ fence_options="${fence_options} --insecure"
++ fi
++ fi
++
++ if [ -n "${OCF_RESKEY_no_shared_storage}" ]; then
++ if ocf_is_true "${OCF_RESKEY_no_shared_storage}"; then
++ fence_options="${fence_options} --no-shared-storage"
++ fi
++ fi
++
++ if [ -n "${OCF_RESKEY_verbose}" ]; then
++ if ocf_is_true "${OCF_RESKEY_verbose}"; then
++ fence_options="${fence_options} --verbose"
++ fi
++ fi
++
++ if [ -n "${OCF_RESKEY_endpoint_type}" ]; then
++ case ${OCF_RESKEY_endpoint_type} in
++ adminURL|publicURL|internalURL)
++ ;;
++ *)
++ ocf_exit_reason "endpoint_type ${OCF_RESKEY_endpoint_type}" \
++ "not valid. Use adminURL or publicURL or internalURL"
++ exit $OCF_ERR_CONFIGURED
++ ;;
++ esac
++ fence_options="${fence_options} -e ${OCF_RESKEY_endpoint_type}"
++ fi
++
++ if [ $rc != $OCF_SUCCESS ]; then
++ exit $rc
++ fi
++ return $rc
++}
++
++statefile="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.active"
++
++case $__OCF_ACTION in
++ start)
++ evacuate_validate
++ evacuate_start
++ ;;
++ stop)
++ evacuate_stop
++ ;;
++ monitor)
++ evacuate_validate
++ evacuate_monitor
++ ;;
++ meta-data)
++ meta_data
++ exit $OCF_SUCCESS
++ ;;
++ usage|help)
++ evacuate_usage
++ exit $OCF_SUCCESS
++ ;;
++ validate-all)
++ exit $OCF_SUCCESS
++ ;;
++ *)
++ evacuate_usage
++ exit $OCF_ERR_UNIMPLEMENTED
++ ;;
++esac
++rc=$?
++ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
++exit $rc
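
Given the parameters evacuate_validate checks above, a hypothetical Pacemaker setup for the evacuator (every value here is a placeholder; the ocf:openstack provider directory comes from the Makefile hunk at the top of this patch) might look like:

    # Placeholder credentials and endpoint; adjust to the real deployment.
    pcs resource create nova-evacuate ocf:openstack:NovaEvacuate \
            auth_url=http://keystone.example.com:5000/v3 \
            username=admin password=secret tenant_name=admin \
            no_shared_storage=true
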
diff --git a/SPECS/resource-agents.spec b/SPECS/resource-agents.spec
new file mode 100644
index 0000000..05de3a5
--- /dev/null
+++ b/SPECS/resource-agents.spec
@@ -0,0 +1,1132 @@
+#
+# All modifications and additions to the file contributed by third parties
+# remain the property of their copyright owners, unless otherwise agreed
+# upon. The license for this file, and modifications and additions to the
+# file, is the same license as for the pristine package itself (unless the
+# license for the pristine package is not an Open Source License, in which
+# case the license is the MIT License). An "Open Source License" is a
+# license that conforms to the Open Source Definition (Version 1.9)
+# published by the Open Source Initiative.
+#
+
+# Below is the script used to generate a new source file
+# from the resource-agent upstream git repo.
+#
+# TAG=$(git log --pretty="format:%h" -n 1)
+# distdir="ClusterLabs-resource-agents-${TAG}"
+# TARFILE="${distdir}.tar.gz"
+# rm -rf $TARFILE $distdir
+# git archive --prefix=$distdir/ HEAD | gzip > $TARFILE
+#
+
+%global upstream_prefix ClusterLabs-resource-agents
+%global upstream_version fd0720f7
+
+# Whether this platform defaults to using systemd as an init system
+# (needs to be evaluated prior to BuildRequires being enumerated and
+# installed as it's intended to conditionally select some of these, and
+# for that there are only few indicators with varying reliability:
+# - presence of systemd-defined macros (when building in a full-fledged
+# environment, which is not the case with ordinary mock-based builds)
+# - systemd-aware rpm as manifested with the presence of particular
+# macro (rpm itself will trivially always be present when building)
+# - existence of /usr/lib/os-release file, which is something heavily
+# propagated by systemd project
+# - when not good enough, there's always a possibility to check
+# particular distro-specific macros (incl. version comparison)
+%define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \
+ } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \
+ } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?))
+
+# determine the ras-set to process based on configure invocation
+%bcond_with rgmanager
+%bcond_without linuxha
+
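
The two %bcond lines above mean the rgmanager agent set is disabled and the linux-ha set enabled by default, and both can be flipped at rpmbuild time; a hedged example (the srpm file name, including the dist tag, is illustrative):

    # Default build: linux-ha agents only (--with-ras-set=linux-ha).
    rpmbuild --rebuild resource-agents-4.10.0-9.el9.src.rpm

    # Also build the rgmanager set, which makes %configure pass --with-ras-set=all.
    rpmbuild --rebuild --with rgmanager resource-agents-4.10.0-9.el9.src.rpm
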
+Name: resource-agents
+Summary: Open Source HA Reusable Cluster Resource Scripts
+Version: 4.10.0
+Release: 9%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}
+License: GPLv2+ and LGPLv2+
+URL: https://github.com/ClusterLabs/resource-agents
+Source0: %{upstream_prefix}-%{upstream_version}.tar.gz
+Patch0: nova-compute-wait-NovaEvacuate.patch
+Patch1: bz1952005-pgsqlms-new-ra.patch
+Patch2: bz2021125-gcp-ilb-1-fix-log_enable.patch
+Patch3: bz2021125-gcp-ilb-2-only-check-log_cmd-if-log-enabled.patch
+Patch4: bz2029796-Route-return-OCF_NOT_RUNNING-missing-route.patch
+Patch5: bz2029704-1-db2-crm_attribute-use-forever.patch
+Patch6: bz2029704-2-db2-fixes.patch
+Patch7: bz2029753-podman-remove-anonymous-volumes.patch
+Patch8: bz2055016-1-IPsrcaddr-dhcp-warning.patch
+Patch9: bz2055016-2-IPsrcaddr-error-message-route-not-found.patch
+Patch10: bz2055016-3-IPsrcaddr-fix-indentation.patch
+Patch11: bz2055016-4-IPsrcaddr-fixes.patch
+
+# bundled ha-cloud-support libs
+Patch500: ha-cloud-support-aws.patch
+Patch501: ha-cloud-support-aliyun.patch
+Patch502: ha-cloud-support-gcloud.patch
+
+Obsoletes: heartbeat-resources <= %{version}
+Provides: heartbeat-resources = %{version}
+
+# Build dependencies
+BuildRequires: make
+BuildRequires: automake autoconf pkgconfig gcc
+BuildRequires: libxslt glib2-devel
+BuildRequires: systemd
+BuildRequires: which
+
+%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version}
+BuildRequires: python3-devel
+%else
+BuildRequires: python-devel
+%endif
+
+# for pgsqlms
+BuildRequires: perl-devel perl-English perl-FindBin
+
+%ifarch x86_64
+BuildRequires: ha-cloud-support
+%endif
+
+%if 0%{?fedora} || 0%{?centos} || 0%{?rhel}
+BuildRequires: docbook-style-xsl docbook-dtds
+%if 0%{?rhel} == 0
+BuildRequires: libnet-devel
+%endif
+%endif
+
+%if 0%{?suse_version}
+BuildRequires: libnet-devel
+BuildRequires: libglue-devel
+BuildRequires: libxslt docbook_4 docbook-xsl-stylesheets
+%endif
+
+## Runtime deps
+# system tools shared by several agents
+Requires: /bin/bash /usr/bin/grep /bin/sed /bin/gawk
+Requires: /bin/ps /usr/bin/pkill /usr/bin/hostname /usr/bin/netstat
+Requires: /usr/sbin/fuser /bin/mount
+Requires: which
+
+# Filesystem / fs.sh / netfs.sh
+Requires: /sbin/fsck
+Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4
+Requires: /usr/sbin/fsck.xfs
+Requires: /sbin/mount.nfs /sbin/mount.nfs4
+%if 0%{?fedora} < 33 || (0%{?rhel} && 0%{?rhel} < 9) || (0%{?centos} && 0%{?centos} < 9) || 0%{?suse_version}
+%if (0%{?rhel} && 0%{?rhel} < 8) || (0%{?centos} && 0%{?centos} < 8)
+Requires: /usr/sbin/mount.cifs
+%else
+Recommends: /usr/sbin/mount.cifs
+%endif
+%endif
+
+# IPaddr2
+Requires: /sbin/ip
+
+# LVM / lvm.sh
+Requires: /usr/sbin/lvm
+
+# nfsserver / netfs.sh
+Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd
+
+# ocf.py
+Requires: python3
+
+# rgmanager
+%if %{with rgmanager}
+# ip.sh
+Requires: /usr/sbin/ethtool
+Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6
+
+# nfsexport.sh
+Requires: /sbin/findfs
+Requires: /sbin/quotaon /sbin/quotacheck
+%endif
+
+%description
+A set of scripts to interface with several services to operate in a
+High Availability environment for both Pacemaker and rgmanager
+service managers.
+
+%ifarch x86_64
+%package cloud
+License: GPLv2+ and LGPLv2+
+Summary: Cloud resource agents
+Requires: %{name} = %{version}-%{release}
+Requires: ha-cloud-support
+Requires: socat
+Provides: resource-agents-aliyun
+Obsoletes: resource-agents-aliyun <= %{version}
+Provides: resource-agents-gcp
+Obsoletes: resource-agents-gcp <= %{version}
+
+%description cloud
+Cloud resource agents allow cloud instances to be managed
+in a cluster environment.
+%endif
+
+%package paf
+License: PostgreSQL
+Summary: PostgreSQL Automatic Failover (PAF) resource agent
+Requires: %{name} = %{version}-%{release}
+Requires: perl-interpreter perl-English perl-FindBin
+
+%description paf
+PostgreSQL Automatic Failover (PAF) resource agents allow PostgreSQL
+databases to be managed in a cluster environment.
+
+%prep
+%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos} == 0 && 0%{?rhel} == 0
+%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
+exit 1
+%endif
+%setup -q -n %{upstream_prefix}-%{upstream_version}
+%patch0 -p1 -F1
+%patch1 -p1
+%patch2 -p1
+%patch3 -p1
+%patch4 -p1
+%patch5 -p1
+%patch6 -p1
+%patch7 -p1
+%patch8 -p1
+%patch9 -p1
+%patch10 -p1
+%patch11 -p1
+
+# bundled ha-cloud-support libs
+%patch500 -p1
+%patch501 -p1
+%patch502 -p1
+
+chmod 755 heartbeat/nova-compute-wait
+chmod 755 heartbeat/NovaEvacuate
+chmod 755 heartbeat/pgsqlms
+
+%build
+if [ ! -f configure ]; then
+ ./autogen.sh
+fi
+
+%if 0%{?fedora} >= 11 || 0%{?centos} > 5 || 0%{?rhel} > 5
+CFLAGS="$(echo '%{optflags}')"
+%global conf_opt_fatal "--enable-fatal-warnings=no"
+%else
+CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}"
+%global conf_opt_fatal "--enable-fatal-warnings=yes"
+%endif
+
+%if %{with rgmanager}
+%global rasset rgmanager
+%endif
+%if %{with linuxha}
+%global rasset linux-ha
+%endif
+%if %{with rgmanager} && %{with linuxha}
+%global rasset all
+%endif
+
+export CFLAGS
+
+%configure \
+%if 0%{?fedora} || 0%{?centos} > 7 || 0%{?rhel} > 7 || 0%{?suse_version}
+ PYTHON="%{__python3}" \
+%endif
+%ifarch x86_64
+ PYTHONPATH="%{_usr}/lib/fence-agents/support/google" \
+%endif
+ %{conf_opt_fatal} \
+%if %{defined _unitdir}
+ SYSTEMD_UNIT_DIR=%{_unitdir} \
+%endif
+%if %{defined _tmpfilesdir}
+ SYSTEMD_TMPFILES_DIR=%{_tmpfilesdir} \
+ --with-rsctmpdir=/run/resource-agents \
+%endif
+ --with-pkg-name=%{name} \
+ --with-ras-set=%{rasset}
+
+%if %{defined jobs}
+JFLAGS="$(echo '-j%{jobs}')"
+%else
+JFLAGS="$(echo '%{_smp_mflags}')"
+%endif
+
+make $JFLAGS
+
+%install
+rm -rf %{buildroot}
+make install DESTDIR=%{buildroot}
+
+## tree fixup
+# remove docs (there is only one and they should come from doc sections in files)
+rm -rf %{buildroot}/usr/share/doc/resource-agents
+
+%files
+%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog
+%if %{with linuxha}
+%doc heartbeat/README.galera
+%doc doc/README.webapps
+%doc %{_datadir}/%{name}/ra-api-1.dtd
+%doc %{_datadir}/%{name}/metadata.rng
+%endif
+
+%if %{with rgmanager}
+%{_datadir}/cluster
+%{_sbindir}/rhev-check.sh
+%endif
+
+%if %{with linuxha}
+%dir %{_usr}/lib/ocf
+%dir %{_usr}/lib/ocf/resource.d
+%dir %{_usr}/lib/ocf/lib
+
+%{_usr}/lib/ocf/lib/heartbeat
+
+%{_usr}/lib/ocf/resource.d/heartbeat
+%{_usr}/lib/ocf/resource.d/openstack
+
+%{_datadir}/pkgconfig/%{name}.pc
+
+%if %{defined _unitdir}
+%{_unitdir}/resource-agents-deps.target
+%endif
+%if %{defined _tmpfilesdir}
+%{_tmpfilesdir}/%{name}.conf
+%endif
+
+%dir %{_datadir}/%{name}
+%dir %{_datadir}/%{name}/ocft
+%{_datadir}/%{name}/ocft/configs
+%{_datadir}/%{name}/ocft/caselib
+%{_datadir}/%{name}/ocft/README
+%{_datadir}/%{name}/ocft/README.zh_CN
+%{_datadir}/%{name}/ocft/helpers.sh
+%exclude %{_datadir}/%{name}/ocft/runocft
+%exclude %{_datadir}/%{name}/ocft/runocft.prereq
+
+%{_sbindir}/ocft
+
+%{_includedir}/heartbeat
+
+%if %{defined _tmpfilesdir}
+%dir %attr (1755, root, root) /run/resource-agents
+%else
+%dir %attr (1755, root, root) %{_var}/run/resource-agents
+%endif
+
+%{_mandir}/man7/*.7*
+
+###
+# Supported, but in another sub package
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/aliyun-vpc-move-ip*
+%exclude /usr/lib/ocf/resource.d/heartbeat/aws*
+%exclude /usr/lib/ocf/resource.d/heartbeat/azure-*
+%exclude %{_mandir}/man7/*aliyun-vpc-move-ip*
+%exclude /usr/lib/ocf/resource.d/heartbeat/gcp*
+%exclude %{_mandir}/man7/*gcp*
+%exclude /usr/lib/ocf/resource.d/heartbeat/pgsqlms
+%exclude %{_mandir}/man7/*pgsqlms*
+%exclude %{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
+
+###
+# Moved to separate packages
+###
+%exclude /usr/lib/ocf/resource.d/heartbeat/SAP*
+%exclude /usr/lib/ocf/lib/heartbeat/sap*
+%exclude %{_mandir}/man7/*SAP*
+
+###
+# Unsupported
+###
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AoEtarget
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/AudibleAlarm
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ClusterMon
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/EvmsSCC
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Evmsd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ICP
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/IPaddr
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LVM
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/LinuxSCSI
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageRAID
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ManageVE
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Pure-FTPd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Raid1
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ServeRAID
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SphinxSearchDaemon
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Stateful
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/SysInfo
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/VIPArip
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WAS6
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/WinPopup
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/Xen
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ZFS
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/anything
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/asterisk
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/clvm
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dnsupdate
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/docker*
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dovecot
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/dummypy
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/eDir88
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/fio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ids
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iface-bridge
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ipsec
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/iscsi
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jboss
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/jira
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/kamailio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ldirectord
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxc
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/lxd-info
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/machine-info
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mariadb
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mdraid
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/minio
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mpathpersist
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/mysql-proxy
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/nvmet-*
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/openstack-cinder-volume
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/openstack-floating-ip
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/openstack-info
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/openstack-virtual-ip
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/ovsmonitor
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pgagent
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pingd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/pound
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/proftpd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rkt
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/rsyslog
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/scsi2reservation
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sfex
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/sg_persist
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/smb-share
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/syslog-ng
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/varnish
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vmware
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/vsftpd
+%exclude %{_usr}/lib/ocf/resource.d/heartbeat/zabbixserver
+%exclude %{_mandir}/man7/ocf_heartbeat_AoEtarget.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_AudibleAlarm.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ClusterMon.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_EvmsSCC.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Evmsd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ICP.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_IPaddr.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_LVM.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_LinuxSCSI.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ManageRAID.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ManageVE.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Pure-FTPd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Raid1.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ServeRAID.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_SphinxSearchDaemon.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Stateful.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_SysInfo.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_VIPArip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WAS.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WAS6.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_WinPopup.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_Xen.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ZFS.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_anything.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_asterisk.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_clvm.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_dnsupdate.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_docker*.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_dovecot.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_dummypy.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_eDir88.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_fio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ids.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iface-bridge.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ipsec.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_iscsi.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_jboss.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_jira.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_kamailio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_lxc.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_lxd-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_machine-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mariadb.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mdraid.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_minio.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mpathpersist.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_mysql-proxy.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_nvmet-*.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-cinder-volume.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-floating-ip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-info.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_openstack-virtual-ip.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_ovsmonitor.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pgagent.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pingd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_pound.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_proftpd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rkt.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_rsyslog.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_scsi2reservation.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_sfex.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_sg_persist.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_smb-share.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_syslog-ng.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_varnish.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_vmware.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_vsftpd.7.gz
+%exclude %{_mandir}/man7/ocf_heartbeat_zabbixserver.7.gz
+
+###
+# Other excluded files.
+###
+# This tool has to be updated for the new pacemaker lrmd.
+%exclude %{_sbindir}/ocf-tester
+%exclude %{_mandir}/man8/ocf-tester.8*
+# ldirectord is not supported
+%exclude /etc/ha.d/resource.d/ldirectord
+%exclude /etc/rc.d/init.d/ldirectord
+%exclude %{_unitdir}/ldirectord.service
+%exclude /etc/logrotate.d/ldirectord
+%exclude /usr/sbin/ldirectord
+%exclude %{_mandir}/man8/ldirectord.8.gz
+
+# For compatibility with pre-existing agents
+%dir %{_sysconfdir}/ha.d
+%{_sysconfdir}/ha.d/shellfuncs
+
+%{_libexecdir}/heartbeat
+%endif
+
+%ifarch x86_64
+%files cloud
+/usr/lib/ocf/resource.d/heartbeat/aliyun-*
+%{_mandir}/man7/*aliyun-*
+/usr/lib/ocf/resource.d/heartbeat/aws*
+%{_mandir}/man7/*aws*
+/usr/lib/ocf/resource.d/heartbeat/azure-*
+%{_mandir}/man7/*azure-*
+/usr/lib/ocf/resource.d/heartbeat/gcp-*
+%{_mandir}/man7/*gcp-*
+%exclude /usr/lib/ocf/resource.d/heartbeat/gcp-vpc-move-ip
+%exclude %{_mandir}/man7/*gcp-vpc-move-ip*
+%endif
+
+%files paf
+%doc paf_README.md
+%license paf_LICENSE
+%defattr(-,root,root)
+%{_usr}/lib/ocf/resource.d/heartbeat/pgsqlms
+%{_mandir}/man7/*pgsqlms*
+%{_usr}/lib/ocf/lib/heartbeat/OCF_*.pm
+
+%changelog
+* Thu Mar 3 2022 Oyvind Albrigtsen - 4.10.0-9
+- IPsrcaddr: add warning about possible issues when used with DHCP,
+ and add error message when matching route not found
+
+ Resolves: rhbz#2055016
+
+* Wed Feb 23 2022 Oyvind Albrigtsen - 4.10.0-7
+- db2: use -l forever to fix crm_attribute issue
+
+ Resolves: rhbz#2029704
+
+* Wed Jan 5 2022 Oyvind Albrigtsen - 4.10.0-5
+- podman: remove anonymous volumes
+
+ Resolves: rhbz#2029753
+
+* Tue Dec 7 2021 Oyvind Albrigtsen - 4.10.0-4
+- Route: return OCF_NOT_RUNNING for probe action when interface
+ or route doesn't exist
+
+ Resolves: rhbz#2029796
+
+* Tue Nov 9 2021 Oyvind Albrigtsen - 4.10.0-3
+- gcp-ilb: new resource agent
+
+ Resolves: rhbz#2021125
+
+* Wed Nov 3 2021 Oyvind Albrigtsen - 4.10.0-1
+- Rebase to resource-agents 4.10.0 upstream release.
+- nfsserver: add nfs_server_scope parameter
+
+ Resolves: rhbz#1995918
+ Resolves: rhbz#2015171
+
+* Thu Oct 7 2021 Oyvind Albrigtsen - 4.9.0-2
+- Rebase to resource-agents 4.9.0 upstream release.
+- Add "which" dependency
+- All agents: set correct agent and OCF version in metadata
+
+ Resolves: rhbz#1995918
+ Resolves: rhbz#2011142
+ Resolves: rhbz#2003118
+
+* Thu Aug 26 2021 Oyvind Albrigtsen - 4.8.0-13
+- nfsnotify: fix default value for "notify_args"
+
+ Resolves: rhbz#1998039
+
+* Wed Aug 25 2021 Oyvind Albrigtsen - 4.8.0-12
+- Create cloud subpackage
+- Add nova-compute-wait/NovaEvacuate
+- nfsserver: fix nfs-convert issue
+
+ Resolves: rhbz#1997548, rhbz#1997576, rhbz#1991855
+
+* Mon Aug 16 2021 Oyvind Albrigtsen - 4.8.0-11
+- Filesystem: force_unmount: remove "Default value" text from metadata
+
+ Resolves: rhbz#1993900
+
+* Tue Aug 10 2021 Oyvind Albrigtsen - 4.8.0-10
+- pgsqlms: new resource agent
+
+ Resolves: rhbz#1952005
+
+* Tue Aug 10 2021 Mohan Boddu - 4.8.0-8.1
+- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
+ Related: rhbz#1991688
+
+* Tue Jun 29 2021 Oyvind Albrigtsen - 4.8.0-8
+- Exclude SAP agents that are in separate -sap subpackage
+
+ Resolves: rhbz#1977208
+
+* Mon May 17 2021 Oyvind Albrigtsen - 4.8.0-3
+- Remove redhat-lsb-core dependency (lsb_release)
+
+ Resolves: rhbz#1961539
+
+* Wed Apr 21 2021 Oyvind Albrigtsen - 4.8.0-2
+- Solve build issues
+
+ Resolves: rhbz#1951253
+
+* Fri Apr 16 2021 Mohan Boddu - 4.8.0-1.1
+- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937
+
+* Tue Mar 16 2021 Oyvind Albrigtsen - 4.7.0-7
+- Filesystem: change force_unmount default to safe for RHEL9+ (1843578)
+
+* Wed Mar 3 2021 Oyvind Albrigtsen - 4.7.0-5
+- Exclude unsupported agents
+
+* Wed Feb 24 2021 Oyvind Albrigtsen - 4.7.0-4
+- remove ldirectord subpackage
+
+ Resolves: rhbz#1932218
+
+* Tue Feb 16 2021 Oyvind Albrigtsen - 4.7.0-3
+- add BuildRequires for google lib
+- use HA cloud support supplied awscli
+
+* Wed Jan 27 2021 Fedora Release Engineering - 4.7.0-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
+
+* Wed Dec 9 2020 Oyvind Albrigtsen - 4.7.0-1
+- Rebase to resource-agents 4.7.0 upstream release.
+
+* Mon Aug 24 2020 Oyvind Albrigtsen - 4.6.1-4
+- spec: improvements from upstream project
+
+* Mon Aug 24 2020 Oyvind Albrigtsen - 4.6.1-3
+- ldirectord: add dependency for perl-IO-Socket-INET6
+
+ Resolves: rhbz#1868063
+
+* Wed Jul 29 2020 Fedora Release Engineering - 4.6.1-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
+
+* Fri Jul 24 2020 Oyvind Albrigtsen - 4.6.1-2
+- Make Samba/CIFS dependency weak for Fedora 32 and remove the
+ dependency from 33+
+
+* Thu Jun 18 2020 Oyvind Albrigtsen - 4.6.1-1
+- Rebase to resource-agents 4.6.1 upstream release.
+
+* Thu Jun 18 2020 Oyvind Albrigtsen - 4.6.0-1
+- Rebase to resource-agents 4.6.0 upstream release.
+
+* Mon Mar 9 2020 Oyvind Albrigtsen - 4.5.0-1
+- Rebase to resource-agents 4.5.0 upstream release.
+
+* Thu Jan 30 2020 Fedora Release Engineering - 4.4.0-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
+
+* Wed Oct 23 2019 Oyvind Albrigtsen - 4.4.0-1
+- Rebase to resource-agents 4.4.0 upstream release.
+
+* Fri Jul 26 2019 Fedora Release Engineering - 4.3.0-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
+
+* Fri Jun 21 2019 Oyvind Albrigtsen - 4.3.0-1
+- Rebase to resource-agents 4.3.0 upstream release.
+
+* Fri May 24 2019 Oyvind Albrigtsen - 4.2.0-4
+- Fix build issues
+
+* Fri Mar 15 2019 Oyvind Albrigtsen - 4.2.0-3
+- systemd-tmpfiles: change path to /run/resource-agents
+
+ Resolves: rhbz#1688865
+
+* Sat Feb 02 2019 Fedora Release Engineering - 4.2.0-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
+
+* Wed Oct 24 2018 Oyvind Albrigtsen - 4.2.0-1
+- Rebase to resource-agents 4.2.0 upstream release.
+- spec: fix missing systemd config files
+
+* Sat Jul 14 2018 Fedora Release Engineering - 4.1.1-1.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
+
+* Sun Mar 18 2018 Iryna Shcherbina - 4.1.1-1.1
+- Update Python 2 dependency declarations to new packaging standards
+ (See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3)
+
+* Tue Mar 13 2018 Oyvind Albrigtsen - 4.1.1-1
+- Rebase to resource-agents 4.1.1 upstream release.
+
+* Mon Feb 19 2018 Oyvind Albrigtsen - 4.1.0-2
+- Add gcc to BuildRequires
+
+* Fri Feb 09 2018 Igor Gnatenko - 4.1.0-1.1
+- Escape macros in %%changelog
+
+* Wed Jan 10 2018 Oyvind Albrigtsen - 4.1.0-1
+- Rebase to resource-agents 4.1.0 upstream release.
+
+* Thu Aug 03 2017 Fedora Release Engineering - 4.0.1-1.3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
+
+* Thu Jul 27 2017 Fedora Release Engineering - 4.0.1-1.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
+
+* Sat Feb 11 2017 Fedora Release Engineering - 4.0.1-1.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild
+
+* Thu Feb 2 2017 Oyvind Albrigtsen - 4.0.1-1
+- Rebase to resource-agents 4.0.1 upstream release.
+
+* Wed Feb 1 2017 Oyvind Albrigtsen - 4.0.0-2
+- galera: remove "long SST monitoring" support due to corner-case issues
+
+* Tue Jan 31 2017 Oyvind Albrigtsen - 4.0.0-1
+- Rebase to resource-agents 4.0.0 upstream release.
+
+* Thu Dec 15 2016 Oyvind Albrigtsen - 3.9.7-6
+- Add netstat dependency
+
+* Tue Feb 9 2016 Oyvind Albrigtsen - 3.9.7-4
+- Rebase to resource-agents 3.9.7 upstream release.
+
+* Thu Feb 04 2016 Fedora Release Engineering - 3.9.6-2.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Thu Jun 18 2015 Fedora Release Engineering - 3.9.6-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
+
+* Mon Apr 20 2015 David Vossel - 3.9.6-2
+- Rebase to latest upstream code in order to pull in rabbitmq-cluster agent
+
+* Fri Feb 13 2015 David Vossel - 3.9.6-1
+- Rebase to resource-agents 3.9.6 upstream release.
+
+* Sun Aug 17 2014 Fedora Release Engineering - 3.9.5-12.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
+
+* Sun Jun 08 2014 Fedora Release Engineering - 3.9.5-12.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
+
+* Wed Apr 30 2014 David Vossel - 3.9.5-12
+- Sync with latest upstream.
+
+* Thu Jan 2 2014 David Vossel - 3.9.5-11
+- Sync with latest upstream.
+
+* Sun Oct 20 2013 David Vossel - 3.9.5-10
+- Fix build system for rawhide.
+
+* Wed Oct 16 2013 David Vossel - 3.9.5-9
+- Remove rgmanager agents from build.
+
+* Sun Aug 04 2013 Fedora Release Engineering - 3.9.5-8
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild
+
+* Wed Jul 17 2013 Petr Pisar - 3.9.5-7
+- Perl 5.18 rebuild
+
+* Tue Jun 18 2013 David Vossel - 3.9.5-6
+- Restores rsctmp directory to upstream default.
+
+* Tue Jun 18 2013 David Vossel - 3.9.5-5
+- Merges redhat provider into heartbeat provider. Remove
+ rgmanager's redhat provider.
+
+ Resolves: rhbz#917681
+ Resolves: rhbz#928890
+ Resolves: rhbz#952716
+ Resolves: rhbz#960555
+
+* Tue Mar 12 2013 David Vossel - 3.9.5-3
+- Fixes build system error with conditional logic involving
+ IPv6addr and updates spec file to build against rhel 7 as
+ well as fedora 19.
+
+* Mon Mar 11 2013 David Vossel - 3.9.5-2
+- Resolves rhbz#915050
+
+* Mon Mar 11 2013 David Vossel - 3.9.5-1
+- New upstream release.
+
+* Fri Jan 25 2013 Kevin Fenzi - 3.9.2-5
+- Fix cifs mount requires
+
+* Mon Nov 12 2012 Chris Feist - 3.9.2-4
+- Removed version number after dist
+
+* Mon Oct 29 2012 Chris Feist - 3.9.2-3.8
+- Remove cluster-glue-libs-devel
+- Disable IPv6addr & sfex to fix deps on libplumgpl & libplum (due to
+ disappearance of cluster-glue in F18)
+
+* Sat Jul 21 2012 Fedora Release Engineering - 3.9.2-3.5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Thu Jul 05 2012 Chris Feist - 3.9.2-3.4
+- Fix location of lvm (change from /sbin to /usr/sbin)
+
+* Wed Apr 04 2012 Jon Ciesla - 3.9.2-3.3
+- Rebuilt to fix rawhide dependency issues (caused by move of fsck from
+ /sbin to /usr/sbin).
+
+* Fri Mar 30 2012 Jon Ciesla - 3.9.2-3.1
+- libnet rebuild.
+
+* Sat Jan 14 2012 Fedora Release Engineering - 3.9.2-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild
+
+* Fri Jul 8 2011 Fabio M. Di Nitto - 3.9.2-2
+- add post call to resource-agents to integrate with cluster 3.1.4
+
+* Thu Jun 30 2011 Fabio M. Di Nitto - 3.9.2-1
+- new upstream release
+- fix 2 regressions from 3.9.1
+
+* Mon Jun 20 2011 Fabio M. Di Nitto - 3.9.1-1
+- new upstream release
+- import spec file from upstream
+
+* Tue Mar 1 2011 Fabio M. Di Nitto - 3.1.1-1
+- new upstream release 3.1.1 and 1.0.4
+
+* Wed Feb 09 2011 Fedora Release Engineering - 3.1.0-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Thu Dec 2 2010 Fabio M. Di Nitto - 3.1.0-1
+- new upstream release
+- spec file update:
+ Update upstream URL
+ Update source URL
+ use standard configure macro
+ use standard make invocation
+
+* Thu Oct 7 2010 Fabio M. Di Nitto - 3.0.17-1
+- new upstream release
+ Resolves: rhbz#632595, rhbz#633856, rhbz#632385, rhbz#628013
+ Resolves: rhbz#621313, rhbz#595383, rhbz#580492, rhbz#605733
+ Resolves: rhbz#636243, rhbz#591003, rhbz#637913, rhbz#634718
+ Resolves: rhbz#617247, rhbz#617247, rhbz#617234, rhbz#631943
+ Resolves: rhbz#639018
+
+* Thu Oct 7 2010 Andrew Beekhof - 3.0.16-2
+- new upstream release of the Pacemaker agents: 71b1377f907c
+
+* Thu Sep 2 2010 Fabio M. Di Nitto - 3.0.16-1
+- new upstream release
+ Resolves: rhbz#619096, rhbz#614046, rhbz#620679, rhbz#619680
+ Resolves: rhbz#621562, rhbz#621694, rhbz#608887, rhbz#622844
+ Resolves: rhbz#623810, rhbz#617306, rhbz#623816, rhbz#624691
+ Resolves: rhbz#622576
+
+* Thu Jul 29 2010 Fabio M. Di Nitto - 3.0.14-1
+- new upstream release
+ Resolves: rhbz#553383, rhbz#557563, rhbz#578625, rhbz#591003
+ Resolves: rhbz#593721, rhbz#593726, rhbz#595455, rhbz#595547
+ Resolves: rhbz#596918, rhbz#601315, rhbz#604298, rhbz#606368
+ Resolves: rhbz#606470, rhbz#606480, rhbz#606754, rhbz#606989
+ Resolves: rhbz#607321, rhbz#608154, rhbz#608887, rhbz#609181
+ Resolves: rhbz#609866, rhbz#609978, rhbz#612097, rhbz#612110
+ Resolves: rhbz#612165, rhbz#612941, rhbz#614127, rhbz#614356
+ Resolves: rhbz#614421, rhbz#614457, rhbz#614961, rhbz#615202
+ Resolves: rhbz#615203, rhbz#615255, rhbz#617163, rhbz#617566
+ Resolves: rhbz#618534, rhbz#618703, rhbz#618806, rhbz#618814
+
+* Mon Jun 7 2010 Fabio M. Di Nitto - 3.0.13-1
+- new upstream release
+ Resolves: rhbz#592103, rhbz#593108, rhbz#578617, rhbz#594626
+ Resolves: rhbz#594511, rhbz#596046, rhbz#594111, rhbz#597002
+ Resolves: rhbz#599643
+
+* Tue May 18 2010 Andrew Beekhof - 3.0.12-2
+- libnet is not available on RHEL
+- Do not package ldirectord on RHEL
+ Resolves: rhbz#577264
+
+* Mon May 10 2010 Fabio M. Di Nitto - 3.0.12-1
+- new upstream release
+ Resolves: rhbz#585217, rhbz#586100, rhbz#581533, rhbz#582753
+ Resolves: rhbz#582754, rhbz#585083, rhbz#587079, rhbz#588890
+ Resolves: rhbz#588925, rhbz#583789, rhbz#589131, rhbz#588010
+ Resolves: rhbz#576871, rhbz#576871, rhbz#590000, rhbz#589823
+
+* Mon May 10 2010 Andrew Beekhof - 3.0.12-1
+- New pacemaker agents upstream release: a7c0f35916bf
+ + High: pgsql: properly implement pghost parameter
+ + High: RA: mysql: fix syntax error
+ + High: SAPInstance RA: do not rely on op target rc when monitoring clones (lf#2371)
+ + High: set the HA_RSCTMP directory to /var/run/resource-agents (lf#2378)
+ + Medium: IPaddr/IPaddr2: add a description of the assumption in meta-data
+ + Medium: IPaddr: return the correct code if interface delete failed
+ + Medium: nfsserver: rpc.statd as the notify cmd does not work with -v (thanks to Carl Lewis)
+ + Medium: oracle: reduce output from sqlplus to the last line for queries (bnc#567815)
+ + Medium: pgsql: implement "config" parameter
+ + Medium: RA: iSCSITarget: follow changed IET access policy
+
+* Wed Apr 21 2010 Fabio M. Di Nitto - 3.0.11-1
+- new upstream release
+ Resolves: rhbz#583945, rhbz#581047, rhbz#576330, rhbz#583017
+ Resolves: rhbz#583019, rhbz#583948, rhbz#584003, rhbz#582017
+ Resolves: rhbz#555901, rhbz#582754, rhbz#582573, rhbz#581533
+- Switch to file based Requires.
+ Also address several other problems related to missing runtime
+ components in different agents.
+ With the current Requires: set, we guarantee all basic functionalities
+ out of the box for lvm/fs/clusterfs/netfs/networking.
+ Resolves: rhbz#570008
+
+* Sat Apr 17 2010 Andrew Beekhof - 3.0.10-2
+- New pacemaker agents upstream release
+ + High: RA: vmware: fix set_environment() invocation (LF 2342)
+ + High: RA: vmware: update to version 0.2
+ + Medium: Filesystem: prefer /proc/mounts to /etc/mtab for non-bind mounts (lf#2388)
+ + Medium: IPaddr2: don't bring the interface down on stop (thanks to Lars Ellenberg)
+ + Medium: IPsrcaddr: modify the interface route (lf#2367)
+ + Medium: ldirectord: Allow multiple email addresses (LF 2168)
+ + Medium: ldirectord: fix setting defaults for configfile and ldirectord (lf#2328)
+ + Medium: meta-data: improve timeouts in most resource agents
+ + Medium: nfsserver: use default values (lf#2321)
+ + Medium: ocf-shellfuncs: don't log but print to stderr if connected to a terminal
+ + Medium: ocf-shellfuncs: don't output to stderr if using syslog
+ + Medium: oracle/oralsnr: improve exit codes if the environment isn't valid
+ + Medium: RA: iSCSILogicalUnit: fix monitor for STGT
+ + Medium: RA: make sure that OCF_RESKEY_CRM_meta_interval is always defined (LF 2284)
+ + Medium: RA: ManageRAID: require bash
+ + Medium: RA: ManageRAID: require bash
+ + Medium: RA: VirtualDomain: bail out early if config file can't be read during probe (Novell 593988)
+ + Medium: RA: VirtualDomain: fix incorrect use of __OCF_ACTION
+ + Medium: RA: VirtualDomain: improve error messages
+ + Medium: RA: VirtualDomain: spin on define until we definitely have a domain name
+ + Medium: Route: add route table parameter (lf#2335)
+ + Medium: sfex: don't use pid file (lf#2363,bnc#585416)
+ + Medium: sfex: exit with success on stop if sfex has never been started (bnc#585416)
+
+* Fri Apr 9 2010 Fabio M. Di Nitto - 3.0.10-1
+- New rgmanager resource agents upstream release
+ Resolves: rhbz#519491, rhbz#570525, rhbz#571806, rhbz#574027
+ Resolves: rhbz#574215, rhbz#574886, rhbz#576322, rhbz#576335
+ Resolves: rhbz#575103, rhbz#577856, rhbz#577874, rhbz#578249
+ Resolves: rhbz#578625, rhbz#578626, rhbz#578628, rhbz#578626
+ Resolves: rhbz#579621, rhbz#579623, rhbz#579625, rhbz#579626
+ Resolves: rhbz#579059
+
+* Wed Mar 24 2010 Andrew Beekhof - 3.0.9-2
+- Resolves: rhbz#572993 - Patched build process to correctly generate ldirectord man page
+- Resolves: rhbz#574732 - Add libnet-devel as a dependency to ensure IPaddrv6 is built
+
+* Mon Mar 1 2010 Fabio M. Di Nitto - 3.0.9-1
+- New rgmanager resource agents upstream release
+ Resolves: rhbz#455300, rhbz#568446, rhbz#561862, rhbz#536902
+ Resolves: rhbz#512171, rhbz#519491
+
+* Mon Feb 22 2010 Fabio M. Di Nitto - 3.0.8-1
+- New rgmanager resource agents upstream release
+ Resolves: rhbz#548133, rhbz#565907, rhbz#545602, rhbz#555901
+ Resolves: rhbz#564471, rhbz#515717, rhbz#557128, rhbz#536157
+ Resolves: rhbz#455300, rhbz#561416, rhbz#562237, rhbz#537201
+ Resolves: rhbz#536962, rhbz#553383, rhbz#556961, rhbz#555363
+ Resolves: rhbz#557167, rhbz#459630, rhbz#532808, rhbz#556603
+ Resolves: rhbz#554968, rhbz#555047
+- spec file update:
+ * update spec file copyright date
+ * use bz2 tarball
+
+* Fri Jan 15 2010 Fabio M. Di Nitto - 3.0.7-2
+- Add python as BuildRequires
+
+* Mon Jan 11 2010 Fabio M. Di Nitto - 3.0.7-1
+- New rgmanager resource agents upstream release
+ Resolves: rhbz#526286, rhbz#533461
+
+* Mon Jan 11 2010 Andrew Beekhof - 3.0.6-2
+- Update Pacemaker agents to upstream version: c76b4a6eb576
+ + High: RA: VirtualDomain: fix forceful stop (LF 2283)
+ + High: apache: monitor operation of depth 10 for web applications (LF 2234)
+ + Medium: IPaddr2: CLUSTERIP/iptables rule not always inserted on failed monitor (LF 2281)
+ + Medium: RA: Route: improve validate (LF 2232)
+ + Medium: mark obsolete RAs as deprecated (LF 2244)
+ + Medium: mysql: escalate stop to KILL if regular shutdown doesn't work
+
+* Mon Dec 7 2009 Fabio M. Di Nitto - 3.0.6-1
+- New rgmanager resource agents upstream release
+- spec file update:
+ * use global instead of define
+ * use new Source0 url
+ * use %%name macro more aggressively
+
+* Mon Dec 7 2009 Andrew Beekhof - 3.0.5-2
+- Update Pacemaker agents to upstream version: bc00c0b065d9
+ + High: RA: introduce OCF_FUNCTIONS_DIR, allow it to be overridden (LF2239)
+ + High: doc: add man pages for all RAs (LF2237)
+ + High: syslog-ng: new RA
+ + High: vmware: make meta-data work and several cleanups (LF 2212)
+ + Medium: .ocf-shellfuncs: add ocf_is_probe function
+ + Medium: Dev: make RAs executable (LF2239)
+ + Medium: IPv6addr: ifdef out the ip offset hack for libnet v1.1.4 (LF 2034)
+ + Medium: add mercurial repository version information to .ocf-shellfuncs
+ + Medium: build: add perl-MailTools runtime dependency to ldirectord package (LF 1469)
+ + Medium: iSCSITarget, iSCSILogicalUnit: support LIO
+ + Medium: nfsserver: use check_binary properly in validate (LF 2211)
+ + Medium: nfsserver: validate should not check if nfs_shared_infodir exists (thanks to eelco@procolix.com) (LF 2219)
+ + Medium: oracle/oralsnr: export variables properly
+ + Medium: pgsql: remove the previous backup_label if it exists
+ + Medium: postfix: fix double stop (thanks to Dinh N. Quoc)
+ + RA: LVM: Make monitor operation quiet in logs (bnc#546353)
+ + RA: Xen: Remove instance_attribute "allow_migrate" (bnc#539968)
+ + ldirectord: OCF agent: overhaul
+
+* Fri Nov 20 2009 Fabio M. Di Nitto - 3.0.5-1
+- New rgmanager resource agents upstream release
+- Allow pacemaker to use rgmanager resource agents
+
+* Wed Oct 28 2009 Andrew Beekhof - 3.0.4-2
+- Update Pacemaker agents to upstream version: e2338892f59f
+ + High: send_arp - turn on unsolicited mode for compatibility with the libnet version's exit codes
+ + High: Trap sigterm for compatibility with the libnet version of send_arp
+ + Medium: Bug - lf#2147: IPaddr2: behave if the interface is down
+ + Medium: IPv6addr: recognize network masks properly
+ + Medium: RA: VirtualDomain: avoid needlessly invoking "virsh define"
+
+* Wed Oct 21 2009 Fabio M. Di Nitto - 3.0.4-1
+- New rgmanager resource agents upstream release
+
+* Mon Oct 12 2009 Andrew Beekhof - 3.0.3-3
+- Update Pacemaker agents to upstream version: 099c0e5d80db
+ + Add the ha_parameter function back into .ocf-shellfuncs.
+ + Bug bnc#534803 - Provide a default for MAILCMD
+ + Fix use of undefined macro @HA_NOARCHDATAHBDIR@
+ + High (LF 2138): IPsrcaddr: replace 0/0 with proper ip prefix (thanks to Michael Ricordeau and Michael Schwartzkopff)
+ + Import shellfuncs from heartbeat as badly written RAs use it
+ + Medium (LF 2173): nfsserver: exit properly in nfsserver_validate
+ + Medium: RA: Filesystem: implement monitor operation
+ + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable
+ + Medium: RA: VirtualDomain: loop on status if libvirtd is unreachable (addendum)
+ + Medium: RA: iSCSILogicalUnit: use a 16-byte default SCSI ID
+ + Medium: RA: iSCSITarget: be more persistent deleting targets on stop
+ + Medium: RA: portblock: add per-IP filtering capability
+ + Medium: mysql-proxy: log_level and keepalive parameters
+ + Medium: oracle: drop spurious output from sqlplus
+ + RA: Filesystem: allow configuring smbfs mounts as clones
+
+* Wed Sep 23 2009 Fabio M. Di Nitto - 3.0.3-1
+- New rgmanager resource agents upstream release
+
+* Thu Aug 20 2009 Fabio M. Di Nitto - 3.0.1-1
+- New rgmanager resource agents upstream release
+
+* Tue Aug 18 2009 Andrew Beekhof - 3.0.0-16
+- Create an ldirectord package
+- Update Pacemaker agents to upstream version: 2198dc90bec4
+ + Build: Import ldirectord.
+ + Ensure HA_VARRUNDIR has a value to substitute
+ + High: Add findif tool (mandatory for IPaddr/IPaddr2)
+ + High: IPv6addr: new nic and cidr_netmask parameters
+ + High: postfix: new resource agent
+ + Include license information
+ + Low (LF 2159): Squid: make the regexp match the output of netstat more precisely
+ + Low: configure: Fix package name.
+ + Low: ldirectord: add dependency on $remote_fs.
+ + Low: ldirectord: add mandatory required header to init script.
+ + Medium (LF 2165): IPaddr2: remove all colons from the mac address before passing it to send_arp
+ + Medium: VirtualDomain: destroy domain shortly before timeout expiry
+ + Medium: shellfuncs: Make the mktemp wrappers work.
+ + Remove references to Echo function
+ + Remove references to heartbeat shellfuncs.
+ + Remove useless path lookups
+ + findif: actually include the right header. Simplify configure.
+ + ldirectord: Remove superfluous configure artifact.
+ + ocf-tester: Fix package reference and path to DTD.
+
+* Tue Aug 11 2009 Ville Skyttä - 3.0.0-15
+- Use bzipped upstream hg tarball.
+
+* Wed Jul 29 2009 Fabio M. Di Nitto - 3.0.0-14
+- Merge Pacemaker cluster resource agents:
+ * Add Source1.
+ * Drop noarch. We have real binaries now.
+ * Update BuildRequires.
+ * Update all relevant prep/build/install/files/description sections.
+
+* Sun Jul 26 2009 Fedora Release Engineering - 3.0.0-13
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
+
+* Wed Jul 8 2009 Fabio M. Di Nitto - 3.0.0-12
+- spec file updates:
+ * Update copyright header
+ * final release: undefine alphatag
+
+* Thu Jul 2 2009 Fabio M. Di Nitto - 3.0.0-11.rc4
+- New upstream release.
+
+* Sat Jun 20 2009 Fabio M. Di Nitto - 3.0.0-10.rc3
+- New upstream release.
+
+* Wed Jun 10 2009 Fabio M. Di Nitto - 3.0.0-9.rc2
+- New upstream release + git94df30ca63e49afb1e8aeede65df8a3e5bcd0970
+
+* Tue Mar 24 2009 Fabio M. Di Nitto - 3.0.0-8.rc1
+- New upstream release.
+- Update BuildRoot usage to preferred versions/names
+
+* Mon Mar 9 2009 Fabio M. Di Nitto - 3.0.0-7.beta1
+- New upstream release.
+
+* Fri Mar 6 2009 Fabio M. Di Nitto - 3.0.0-6.alpha7
+- New upstream release.
+
+* Tue Mar 3 2009 Fabio M. Di Nitto - 3.0.0-5.alpha6
+- New upstream release.
+
+* Tue Feb 24 2009 Fabio M. Di Nitto - 3.0.0-4.alpha5
+- Drop Conflicts with rgmanager.
+
+* Mon Feb 23 2009 Fabio M. Di Nitto - 3.0.0-3.alpha5
+- New upstream release.
+
+* Thu Feb 19 2009 Fabio M. Di Nitto - 3.0.0-2.alpha4
+- Add comments on how to build this package.
+
+* Thu Feb 5 2009 Fabio M. Di Nitto - 3.0.0-1.alpha4
+- New upstream release.
+- Fix datadir/cluster directory ownership.
+
+* Tue Jan 27 2009 Fabio M. Di Nitto - 3.0.0-1.alpha3
+- Initial packaging