|
|
581d9d |
diff -uNr a/heartbeat/SAPHana b/heartbeat/SAPHana
|
|
|
581d9d |
--- a/heartbeat/SAPHana 2016-04-26 12:01:55.620889964 +0200
|
|
|
581d9d |
+++ b/heartbeat/SAPHana 2016-04-26 12:03:17.240897137 +0200
|
|
|
581d9d |
@@ -2,9 +2,9 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
# SAPHana
|
|
|
581d9d |
#
|
|
|
581d9d |
-# Description: Manages two single SAP HANA Instance in System Replication
|
|
|
581d9d |
+# Description: Manages two single SAP HANA Instance in System Replication
|
|
|
581d9d |
# Planned: do also manage scale-up scenarios
|
|
|
581d9d |
-# currently the SAPHana is dependent of the analysis of
|
|
|
581d9d |
+# currently the SAPHana is dependent of the analysis of
|
|
|
581d9d |
# SAPHanaTopology
|
|
|
581d9d |
# For supported scenarios please read the README file provided
|
|
|
581d9d |
# in the same software package (rpm)
|
|
|
581d9d |
@@ -16,16 +16,17 @@
|
|
|
581d9d |
# Support: linux@sap.com
|
|
|
581d9d |
# License: GNU General Public License (GPL)
|
|
|
581d9d |
# Copyright: (c) 2013,2014 SUSE Linux Products GmbH
|
|
|
581d9d |
+# Copyright: (c) 2015 SUSE Linux GmbH
|
|
|
581d9d |
#
|
|
|
581d9d |
-# An example usage:
|
|
|
581d9d |
+# An example usage:
|
|
|
581d9d |
# See usage() function below for more details...
|
|
|
581d9d |
#
|
|
|
581d9d |
# OCF instance parameters:
|
|
|
581d9d |
-# OCF_RESKEY_SID
|
|
|
581d9d |
-# OCF_RESKEY_InstanceNumber
|
|
|
581d9d |
-# OCF_RESKEY_DIR_EXECUTABLE (optional, well known directories will be searched by default)
|
|
|
581d9d |
-# OCF_RESKEY_DIR_PROFILE (optional, well known directories will be searched by default)
|
|
|
581d9d |
-# OCF_RESKEY_INSTANCE_PROFILE (optional, well known directories will be searched by default)
|
|
|
581d9d |
+# OCF_RESKEY_SID
|
|
|
581d9d |
+# OCF_RESKEY_InstanceNumber
|
|
|
581d9d |
+# OCF_RESKEY_DIR_EXECUTABLE (optional, well known directories will be searched by default)
|
|
|
581d9d |
+# OCF_RESKEY_DIR_PROFILE (optional, well known directories will be searched by default)
|
|
|
581d9d |
+# OCF_RESKEY_INSTANCE_PROFILE (optional, well known directories will be searched by default)
|
|
|
581d9d |
# OCF_RESKEY_PREFER_SITE_TAKEOVER (optional, default is no)
|
|
|
581d9d |
# OCF_RESKEY_DUPLICATE_PRIMARY_TIMEOUT (optional, time difference needed between two last-primary-tiemstampe (lpt))
|
|
|
581d9d |
# OCF_RESKEY_SAPHanaFilter (optional, should only be set if been told by support or for debugging purposes)
|
|
|
581d9d |
@@ -71,7 +72,7 @@
|
|
|
581d9d |
info )
|
|
|
581d9d |
case "$shf" in
|
|
|
581d9d |
all) skip=0
|
|
|
581d9d |
- ;;
|
|
|
581d9d |
+ ;;
|
|
|
581d9d |
none )
|
|
|
581d9d |
skip=1
|
|
|
581d9d |
;;
|
|
|
581d9d |
@@ -80,13 +81,13 @@
|
|
|
581d9d |
mtype=${mtype#fh}
|
|
|
581d9d |
echo "$shf"| grep -iq ${mtype}; search=$?
|
|
|
581d9d |
if [ $search -eq 0 ]; then
|
|
|
581d9d |
- skip=0
|
|
|
581d9d |
+ skip=0
|
|
|
581d9d |
else
|
|
|
581d9d |
skip=1
|
|
|
581d9d |
fi
|
|
|
581d9d |
;;
|
|
|
581d9d |
esac
|
|
|
581d9d |
- ;;
|
|
|
581d9d |
+ ;;
|
|
|
581d9d |
esac
|
|
|
581d9d |
if [ $skip -eq 0 ]; then
|
|
|
581d9d |
ocf_log "$level" "$message"
|
|
|
581d9d |
@@ -103,8 +104,8 @@
|
|
|
581d9d |
local rc=0
|
|
|
581d9d |
methods=$(saphana_methods)
|
|
|
581d9d |
methods=$(echo $methods | tr ' ' '|')
|
|
|
581d9d |
- cat <<-!
|
|
|
581d9d |
- usage: $0 ($methods)
|
|
|
581d9d |
+ cat <<-EOF
|
|
|
581d9d |
+ usage: $0 ($methods)
|
|
|
581d9d |
|
|
|
581d9d |
$0 manages a SAP HANA Instance as an HA resource.
|
|
|
581d9d |
|
|
|
581d9d |
@@ -118,8 +119,17 @@
|
|
|
581d9d |
The 'validate-all' operation reports whether the parameters are valid
|
|
|
581d9d |
The 'methods' operation reports on the methods $0 supports
|
|
|
581d9d |
|
|
|
581d9d |
- !
|
|
|
581d9d |
- return $rc
|
|
|
581d9d |
+EOF
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
+}
|
|
|
581d9d |
+
|
|
|
581d9d |
+function backup_global_and_nameserver() {
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
+ local rc=0
|
|
|
581d9d |
+ cp /hana/shared/LNX/global/hdb/custom/config/global.ini /hana/shared/LNX/global/hdb/custom/config/global.ini.$(date +"%s")
|
|
|
581d9d |
+ cp /hana/shared/LNX/global/hdb/custom/config/nameserver.ini /hana/shared/LNX/global/hdb/custom/config/nameserver.ini.$(date +"%s")
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -130,11 +140,12 @@
|
|
|
581d9d |
function saphana_meta_data() {
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
local rc=0
|
|
|
581d9d |
- cat <
|
|
|
581d9d |
+#
|
|
|
581d9d |
+ cat <
|
|
|
581d9d |
|
|
|
581d9d |
|
|
|
581d9d |
<resource-agent name="SAPHana">
|
|
|
581d9d |
-<version>0.149.7</version>
|
|
|
581d9d |
+<version>0.151.1</version>
|
|
|
581d9d |
|
|
|
581d9d |
<shortdesc lang="en">Manages two SAP HANA instances in system replication (SR).</shortdesc>
|
|
|
581d9d |
<longdesc lang="en">
|
|
|
581d9d |
@@ -157,7 +168,7 @@
|
|
|
581d9d |
2. landscapeHostConfiguration
|
|
|
581d9d |
The interface is used to monitor a HANA system. The python script is named landscapeHostConfiguration.py.
|
|
|
581d9d |
landscapeHostConfiguration.py has some detailed output about HANA system status
|
|
|
581d9d |
- and node roles. For our monitor the overall status is relevant. This overall
|
|
|
581d9d |
+ and node roles. For our monitor the overall status is relevant. This overall
|
|
|
581d9d |
status is reported by the returncode of the script:
|
|
|
581d9d |
0: Internal Fatal, 1: ERROR, 2: WARNING, 3: INFO, 4: OK
|
|
|
581d9d |
The SAPHana resource agent will interpret returncodes 0 as FATAL, 1 as not-running or ERROR and and returncodes 2+3+4 as RUNNING.
|
|
|
581d9d |
@@ -168,14 +179,14 @@
|
|
|
581d9d |
system replication takeover (sr_takeover) or to register a former primary to a newer one (sr_register).
|
|
|
581d9d |
|
|
|
581d9d |
4. hdbsql / systemReplicationStatus
|
|
|
581d9d |
- Interface is SQL query into HANA (system replication table). The hdbsql query will be replaced by a python script
|
|
|
581d9d |
+ Interface is SQL query into HANA (system replication table). The hdbsql query will be replaced by a python script
|
|
|
581d9d |
"systemReplicationStatus.py" in SAP HANA SPS8 or 9.
|
|
|
581d9d |
As long as we need to use hdbsql you need to setup secure store users for linux user root to be able to
|
|
|
581d9d |
access the SAP HANA database. You need to configure a secure store user key "SAPHANA${SID}SR" which can connect the SAP
|
|
|
581d9d |
- HANA database:
|
|
|
581d9d |
+ HANA database:
|
|
|
581d9d |
|
|
|
581d9d |
5. saphostctrl
|
|
|
581d9d |
- The interface saphostctrl uses the function ListInstances to figure out the virtual host name of the
|
|
|
581d9d |
+ The interface saphostctrl uses the function ListInstances to figure out the virtual host name of the
|
|
|
581d9d |
SAP HANA instance. This is the hostname used during the HANA installation.
|
|
|
581d9d |
|
|
|
581d9d |
</longdesc>
|
|
|
581d9d |
@@ -207,7 +218,7 @@
|
|
|
581d9d |
</parameter>
|
|
|
581d9d |
<parameter name="DUPLICATE_PRIMARY_TIMEOUT" unique="0" required="0">
|
|
|
581d9d |
<shortdesc lang="en">Time difference needed between to primary time stamps, if a dual-primary situation occurs</shortdesc>
|
|
|
581d9d |
- <longdesc lang="en">Time difference needed between to primary time stamps,
|
|
|
581d9d |
+ <longdesc lang="en">Time difference needed between to primary time stamps,
|
|
|
581d9d |
if a dual-primary situation occurs. If the time difference is
|
|
|
581d9d |
less than the time gap, then the cluster hold one or both instances in a "WAITING" status. This is to give an admin
|
|
|
581d9d |
a chance to react on a failover. A failed former primary will be registered after the time difference is passed. After
|
|
|
581d9d |
@@ -231,12 +242,8 @@
|
|
|
581d9d |
<content type="string" default="" />
|
|
|
581d9d |
</parameter>
|
|
|
581d9d |
<parameter name="SAPHanaFilter" unique="0" required="0">
|
|
|
581d9d |
- <shortdesc lang="en">Define SAPHana resource agent messages to be printed</shortdesc>
|
|
|
581d9d |
- <longdesc lang="en">Define SAPHana resource agent messages to be printed.
|
|
|
581d9d |
- This parameter should only be set if requested by support. The default is sufficient for normal operation.
|
|
|
581d9d |
- Values: ra-act-lpa-dec-flow
|
|
|
581d9d |
- You could specify any combination of the above values like "ra-act-flow"
|
|
|
581d9d |
- </longdesc>
|
|
|
581d9d |
+ <shortdesc lang="en">OUTDATED PARAMETER</shortdesc>
|
|
|
581d9d |
+ <longdesc lang="en">OUTDATED PARAMETER</longdesc>
|
|
|
581d9d |
<content type="string" default="" />
|
|
|
581d9d |
</parameter>
|
|
|
581d9d |
</parameters>
|
|
|
581d9d |
@@ -271,7 +278,7 @@
|
|
|
581d9d |
for m in start stop status monitor promote demote notify validate-all methods meta-data usage; do
|
|
|
581d9d |
echo "$m"
|
|
|
581d9d |
done
|
|
|
581d9d |
- return $rc
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -298,7 +305,7 @@
|
|
|
581d9d |
local remoteNode=""
|
|
|
581d9d |
local rc=1
|
|
|
581d9d |
for cl in ${otherNodes[@]}; do
|
|
|
581d9d |
- vHost=$(get_hana_attribute $cl ${ATTR_NAME_HANA_VHOST[@]})
|
|
|
581d9d |
+ vHost=$(get_hana_attribute $cl ${ATTR_NAME_HANA_VHOST[@]} "$cl")
|
|
|
581d9d |
if [ "$vHost" = "$remoteHost" ]; then # we found the correct node
|
|
|
581d9d |
remoteNode=$cl
|
|
|
581d9d |
rc=0
|
|
|
581d9d |
@@ -347,9 +354,10 @@
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
-# function: get_hana_attribute
|
|
|
581d9d |
+# function: get_hana_attribute
|
|
|
581d9d |
# params: NODE ATTR [STORE]
|
|
|
581d9d |
# globals: -
|
|
|
581d9d |
+# output: attribute value
|
|
|
581d9d |
#
|
|
|
581d9d |
function get_hana_attribute()
|
|
|
581d9d |
{
|
|
|
581d9d |
@@ -358,14 +366,20 @@
|
|
|
581d9d |
local attr_node=$1
|
|
|
581d9d |
local attr_name=$2
|
|
|
581d9d |
local attr_store=${3:-reboot} # DONE: PRIO5 get this (optional) from parameter
|
|
|
581d9d |
- local attr_default=${5:-}
|
|
|
581d9d |
+ local attr_default=${4:-}
|
|
|
581d9d |
+ local dstr
|
|
|
581d9d |
local attr_val=""
|
|
|
581d9d |
- attr_val=$(crm_attribute -N ${attr_node} -G -n "$attr_name" -l $attr_store -q -d "$attr_default"); rc=$?
|
|
|
581d9d |
- if [ $debug_attributes -eq 1 ]; then
|
|
|
581d9d |
- dstr=$(date)
|
|
|
581d9d |
- echo "$dstr: SAPHana: crm_attribute -N ${attr_node} -G -n \"$attr_name\" -l $attr_store -q --> $attr_val" >> /var/log/fhATTRIBUTE
|
|
|
581d9d |
- fi
|
|
|
581d9d |
- echo "$attr_val"
|
|
|
581d9d |
+ dstr=$(date)
|
|
|
581d9d |
+ case "$attr_store" in
|
|
|
581d9d |
+ reboot | forever )
|
|
|
581d9d |
+ echo "$dstr: SAPHana: crm_attribute -N ${attr_node} -G -n \"$attr_name\" -l $attr_store -q" >> /var/log/fhATTRIBUTE
|
|
|
581d9d |
+ crm_attribute -N ${attr_node} -G -n "$attr_name" -l $attr_store -q -d "$attr_default" 2>>/var/log/fhATTRIBUTE; rc=$?
|
|
|
581d9d |
+ ;;
|
|
|
581d9d |
+ props )
|
|
|
581d9d |
+ echo "$dstr: SAPHana: crm_attribute -G -n \"$attr_name\" -t crm_config -q" >> /var/log/fhATTRIBUTE
|
|
|
581d9d |
+ crm_attribute -G -n "$attr_name" -t crm_config -q -d "$attr_default" 2>>/var/log/fhATTRIBUTE; rc=$?
|
|
|
581d9d |
+ ;;
|
|
|
581d9d |
+ esac
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
@@ -388,11 +402,17 @@
|
|
|
581d9d |
attr_old=$(get_hana_attribute $attr_node $attr_name $attr_store $attr_default); get_rc=$?
|
|
|
581d9d |
if [ "$attr_old" != "$attr_value" ]; then
|
|
|
581d9d |
super_ocf_log debug "DBG: SET attribute $attr_name for node ${attr_node} to ${attr_value} former ($attr_old) get_rc=$get_rc "
|
|
|
581d9d |
- crm_attribute -N $attr_node -v $attr_value -n "$attr_name" -l $attr_store; rc=$?
|
|
|
581d9d |
- if [ $debug_attributes -eq 1 ]; then
|
|
|
581d9d |
- dstr=$(date)
|
|
|
581d9d |
- echo "$dstr: SAPHana: crm_attribute -N $attr_node -v $attr_value -n \"$attr_name\" -l $attr_store" >> /var/log/fhATTRIBUTE
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ dstr=$(date)
|
|
|
581d9d |
+ case "$attr_store" in
|
|
|
581d9d |
+ reboot | forever )
|
|
|
581d9d |
+ echo "$dstr: SAPHana: crm_attribute -N $attr_node -v $attr_value -n \"$attr_name\" -l $attr_store" >> /var/log/fhATTRIBUTE
|
|
|
581d9d |
+ crm_attribute -N $attr_node -v $attr_value -n "$attr_name" -l $attr_store 2>>/var/log/fhATTRIBUTE; rc=$?
|
|
|
581d9d |
+ ;;
|
|
|
581d9d |
+ props )
|
|
|
581d9d |
+ echo "$dstr: SAPHana: crm_attribute -v $attr_value -n \"$attr_name\" -t crm_config -s SAPHanaSR" >> /var/log/fhATTRIBUTE
|
|
|
581d9d |
+ crm_attribute -v $attr_value -n "$attr_name" -t crm_config -s SAPHanaSR 2>>/var/log/fhATTRIBUTE; rc=$?
|
|
|
581d9d |
+ ;;
|
|
|
581d9d |
+ esac
|
|
|
581d9d |
else
|
|
|
581d9d |
super_ocf_log debug "DBG: LET attribute $attr_name for node ${attr_node} still be ${attr_value}"
|
|
|
581d9d |
rc=0
|
|
|
581d9d |
@@ -408,7 +428,8 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
function assert() {
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
- local err_msg=$1 local default_rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
+ local err_msg=$1
|
|
|
581d9d |
+ local default_rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
# DONE: Check, if we need to destinguish between probe and others
|
|
|
581d9d |
if ocf_is_probe; then
|
|
|
581d9d |
default_exit=$OCF_NOT_RUNNING
|
|
|
581d9d |
@@ -435,7 +456,7 @@
|
|
|
581d9d |
local score=0
|
|
|
581d9d |
if [ -n "$1" ]; then
|
|
|
581d9d |
score=$1
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
# DONE: PRIO2: Only adjust master if value is really different (try to check that)
|
|
|
581d9d |
oldscore=$(${HA_SBIN_DIR}/crm_master -G -q -l reboot)
|
|
|
581d9d |
if [ "$oldscore" != "$score" ]; then
|
|
|
581d9d |
@@ -452,7 +473,7 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
# function: scoring_crm_master - score instance due to role ans sync match (table SCORING_TABLE_PREFERRED_SITE_TAKEOVER)
|
|
|
581d9d |
# params: NODE_ROLES NODE_SYNC_STATUS
|
|
|
581d9d |
-# globals: SCORING_TABLE_PREFERRED_SITE_TAKEOVER[@],
|
|
|
581d9d |
+# globals: SCORING_TABLE_PREFERRED_SITE_TAKEOVER[@],
|
|
|
581d9d |
#
|
|
|
581d9d |
scoring_crm_master()
|
|
|
581d9d |
{
|
|
|
581d9d |
@@ -467,7 +488,7 @@
|
|
|
581d9d |
if grep "$rolePatt" <<< "$roles"; then
|
|
|
581d9d |
if grep "$syncPatt" <<< "$sync"; then
|
|
|
581d9d |
skip=1
|
|
|
581d9d |
- myScore=$score
|
|
|
581d9d |
+ myScore=$score
|
|
|
581d9d |
fi
|
|
|
581d9d |
fi
|
|
|
581d9d |
fi
|
|
|
581d9d |
@@ -496,7 +517,7 @@
|
|
|
581d9d |
# function: saphana_init - initialize variables for the resource agent
|
|
|
581d9d |
# params: InstanceName
|
|
|
581d9d |
# globals: OCF_*(r), SID(w), sid(rw), sidadm(w), InstanceName(w), InstanceNr(w), SAPVIRHOST(w), PreferSiteTakeover(w),
|
|
|
581d9d |
-# globals: sr_name(w), remoteHost(w), otherNodes(w)
|
|
|
581d9d |
+# globals: sr_name(w), remoteHost(w), otherNodes(w), rem_SR_name(w)
|
|
|
581d9d |
# globals: ATTR_NAME_HANA_SYNC_STATUS(w), ATTR_NAME_HANA_CLONE_STATE(w)
|
|
|
581d9d |
# globals: DIR_EXECUTABLE(w), SAPSTARTSRV(w), SAPCONTROL(w), DIR_PROFILE(w), SAPSTARTPROFILE(w), LD_LIBRARY_PATH(w), PATH(w)
|
|
|
581d9d |
# globals: LPA_DIRECTORY(w), SIDInstanceName(w), remoteNode(w), hdbSrQueryTimeout(w)
|
|
|
581d9d |
@@ -506,6 +527,8 @@
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
local rc=$OCF_SUCCESS
|
|
|
581d9d |
local vName
|
|
|
581d9d |
+ local clN
|
|
|
581d9d |
+ # local site
|
|
|
581d9d |
# two parameter models (for transition only)
|
|
|
581d9d |
# OLD: InstanceName
|
|
|
581d9d |
# NEW: SID InstanceNumber
|
|
|
581d9d |
@@ -528,11 +551,10 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
# if saphostctrl does not know the answer, try to fallback to attribute provided by SAPHanaTopology
|
|
|
581d9d |
#
|
|
|
581d9d |
- vName=$(get_hana_attribute ${NODENAME} ${ATTR_NAME_HANA_VHOST[@]});
|
|
|
581d9d |
+ vName=$(get_hana_attribute ${NODENAME} ${ATTR_NAME_HANA_VHOST[@]} "$NODENAME");
|
|
|
581d9d |
fi
|
|
|
581d9d |
SAPVIRHOST=${vName}
|
|
|
581d9d |
PreferSiteTakeover="$OCF_RESKEY_PREFER_SITE_TAKEOVER"
|
|
|
581d9d |
- SAPHanaFilter="${OCF_RESKEY_SAPHanaFilter:-ra-act-dec-lpa}"
|
|
|
581d9d |
AUTOMATED_REGISTER="${OCF_RESKEY_AUTOMATED_REGISTER:-false}"
|
|
|
581d9d |
LPA_DIRECTORY=/var/lib/SAPHanaRA
|
|
|
581d9d |
LPA_ATTR=("lpa_${sid}_lpt" "forever")
|
|
|
581d9d |
@@ -591,6 +613,8 @@
|
|
|
581d9d |
*openais* ) otherNodes=($(crm_node -l | awk '$3 == "member" { if ($2 != me) { print $2 }}' me=${NODENAME}));;
|
|
|
581d9d |
*cman* ) otherNodes=($(crm_node -l | awk '{for (i=1; i<=NF; i++) { if ($i != me) { print $i }}}' me=${NODENAME}));;
|
|
|
581d9d |
esac
|
|
|
581d9d |
+ #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
|
|
|
581d9d |
remoteHost=$(get_hana_attribute ${NODENAME} ${ATTR_NAME_HANA_REMOTEHOST[@]});
|
|
|
581d9d |
if [ -z "$remoteHost" ]; then
|
|
|
581d9d |
@@ -611,9 +635,13 @@
|
|
|
581d9d |
# ATTR_NAME_HANA_SITE
|
|
|
581d9d |
sr_name=$(get_hana_attribute ${NODENAME} ${ATTR_NAME_HANA_SITE[@]});
|
|
|
581d9d |
sr_mode=$(get_hana_attribute "${NODENAME}" ${ATTR_NAME_HANA_SRMODE[@]})
|
|
|
581d9d |
+
|
|
|
581d9d |
if [ -z "$sr_mode" ]; then
|
|
|
581d9d |
sr_mode="sync"
|
|
|
581d9d |
fi
|
|
|
581d9d |
+ if [ -n "$remoteNode" ]; then
|
|
|
581d9d |
+ rem_SR_name=$(get_hana_attribute ${remoteNode} ${ATTR_NAME_HANA_SITE[@]});
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
super_ocf_log debug "DBG: sr_name=$sr_name, remoteHost=$remoteHost, remoteNode=$remoteNode, sr_mode=$sr_mode"
|
|
|
581d9d |
# optional OCF parameters, we try to guess which directories are correct
|
|
|
581d9d |
if [ -z "$OCF_RESKEY_DIR_EXECUTABLE" ]
|
|
|
581d9d |
@@ -706,7 +734,7 @@
|
|
|
581d9d |
then
|
|
|
581d9d |
runninginst=$(echo "$output" | grep '^0 : ' | cut -d' ' -f3)
|
|
|
581d9d |
if [ "$runninginst" != "$InstanceName" ]
|
|
|
581d9d |
- then
|
|
|
581d9d |
+ then
|
|
|
581d9d |
super_ocf_log warn "ACT: sapstartsrv is running for instance $runninginst, that service will be killed"
|
|
|
581d9d |
restart=1
|
|
|
581d9d |
else
|
|
|
581d9d |
@@ -784,38 +812,113 @@
|
|
|
581d9d |
node_full_status=$(su - ${sidadm} -c "hdbnsutil -sr_state" 2>/dev/null )
|
|
|
581d9d |
node_status=$(echo "$node_full_status" | awk '$1=="mode:" {print $2}')
|
|
|
581d9d |
super_ocf_log debug "DBG: check_for_primary: node_status=$node_status"
|
|
|
581d9d |
+ # TODO: PRIO2: Maybe we need to use a fallback interface when hdbnsitil does not answer properly -> lookup in config files?
|
|
|
581d9d |
+ # This might also solve some problems when we could not figure-out the ilocal or remote site name
|
|
|
581d9d |
for i in 1 2 3 4 5 6 7 8 9; do
|
|
|
581d9d |
case "$node_status" in
|
|
|
581d9d |
- primary )
|
|
|
581d9d |
- super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_PRIMARY"
|
|
|
581d9d |
- return $HANA_STATE_PRIMARY;;
|
|
|
581d9d |
+ primary )
|
|
|
581d9d |
+ super_ocf_log info "FLOW: $FUNCNAME rc=HANA_STATE_PRIMARY"
|
|
|
581d9d |
+ return $HANA_STATE_PRIMARY;;
|
|
|
581d9d |
syncmem | sync | async )
|
|
|
581d9d |
- super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_SECONDARY"
|
|
|
581d9d |
- return $HANA_STATE_SECONDARY;;
|
|
|
581d9d |
- none ) # have seen that mode on second side BEFEORE we registered it as replica
|
|
|
581d9d |
- super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_STANDALONE"
|
|
|
581d9d |
- return $HANA_STATE_STANDALONE;;
|
|
|
581d9d |
+ super_ocf_log info "FLOW: $FUNCNAME rc=HANA_STATE_SECONDARY"
|
|
|
581d9d |
+ return $HANA_STATE_SECONDARY;;
|
|
|
581d9d |
+ none ) # have seen that mode on second side BEFEORE we registered it as replica
|
|
|
581d9d |
+ super_ocf_log info "FLOW: $FUNCNAME rc=HANA_STATE_STANDALONE"
|
|
|
581d9d |
+ return $HANA_STATE_STANDALONE;;
|
|
|
581d9d |
* )
|
|
|
581d9d |
- super_ocf_log err "ACT: check_for_primary: we didn't expect node_status to be: <$node_status>"
|
|
|
581d9d |
- dump=$( echo $node_status | hexdump -C );
|
|
|
581d9d |
- super_ocf_log err "ACT: check_for_primary: we didn't expect node_status to be: DUMP <$dump>"
|
|
|
581d9d |
- node_full_status=$(su - ${sidadm} -c "hdbnsutil -sr_state" 2>/dev/null )
|
|
|
581d9d |
- node_status=$(echo "$node_full_status" | awk '$1=="mode:" {print $2}')
|
|
|
581d9d |
- super_ocf_log debug "DEC: check_for_primary: loop=$i: node_status=$node_status"
|
|
|
581d9d |
- # TODO: PRIO1: Maybe we need to keep the old value for P/S/N, if hdbnsutil just crashes
|
|
|
581d9d |
+ super_ocf_log err "ACT: check_for_primary: we didn't expect node_status to be: <$node_status>"
|
|
|
581d9d |
+ dump=$( echo $node_status | hexdump -C );
|
|
|
581d9d |
+ super_ocf_log err "ACT: check_for_primary: we didn't expect node_status to be: DUMP <$dump>"
|
|
|
581d9d |
+ node_full_status=$(su - ${sidadm} -c "hdbnsutil -sr_state" 2>/dev/null )
|
|
|
581d9d |
+ node_status=$(echo "$node_full_status" | awk '$1=="mode:" {print $2}')
|
|
|
581d9d |
+ super_ocf_log debug "DEC: check_for_primary: loop=$i: node_status=$node_status"
|
|
|
581d9d |
+ # TODO: PRIO1: Maybe we need to keep the old value for P/S/N, if hdbnsutil just crashes
|
|
|
581d9d |
esac;
|
|
|
581d9d |
done
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
+# function: analyze_hana_sync_statusSRS
|
|
|
581d9d |
+# params: -
|
|
|
581d9d |
+# globals: DIR_EXECUTABLE(r), FULL_SR_STATUS(w), remoteNode
|
|
|
581d9d |
+#
|
|
|
581d9d |
+# systemReplicationStatus.py return-codes:
|
|
|
581d9d |
+# NoHSR = 10
|
|
|
581d9d |
+# Error = 11
|
|
|
581d9d |
+# Unkown = 12
|
|
|
581d9d |
+# Initializing = 13
|
|
|
581d9d |
+# Syncing = 14
|
|
|
581d9d |
+# Active = 15
|
|
|
581d9d |
+function analyze_hana_sync_statusSRS()
|
|
|
581d9d |
+{
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
+ local rc=-1 srRc=0 all_nodes_other_side="" n="" siteParam=""
|
|
|
581d9d |
+ if [ -n "$rem_SR_name" ]; then
|
|
|
581d9d |
+ siteParam="--site=$rem_SR_name"
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ FULL_SR_STATUS=$(su - $sidadm -c "python $DIR_EXECUTABLE/python_support/systemReplicationStatus.py $siteParam" 2>/dev/null); srRc=$?
|
|
|
581d9d |
+ super_ocf_log info "DEC $FUNCNAME systemReplicationStatus.py (to site '$rem_SR_name')-> $srRc"
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME systemReplicationStatus.py (to site '$rem_SR_name')-> $srRc"
|
|
|
581d9d |
+ #
|
|
|
581d9d |
+ # TODO: PRIO2: Here we might also need to filter additional sites (if multi tier should be supported)
|
|
|
581d9d |
+ # And is the check for return code capable for chains?
|
|
|
581d9d |
+ #
|
|
|
581d9d |
+ if [ $srRc -eq 15 ]; then
|
|
|
581d9d |
+ # Fix for a HANA BUG, where a non-working SR resulted in RC 15:
|
|
|
581d9d |
+ if grep -q "ACTIVE" <<< "$FULL_SR_STATUS"; then
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME SOK"
|
|
|
581d9d |
+ set_hana_attribute "$remoteNode" "SOK" ${ATTR_NAME_HANA_SYNC_STATUS[@]}
|
|
|
581d9d |
+ super_ocf_log info "ACT site=$sr_name, seting SOK for secondary (1)"
|
|
|
581d9d |
+ lpa_set_lpt 30 "$remoteNode"
|
|
|
581d9d |
+ rc=0;
|
|
|
581d9d |
+ else
|
|
|
581d9d |
+ # ok we should be careful and set secondary to SFAIL
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME SFAIL"
|
|
|
581d9d |
+ set_hana_attribute "$remoteNode" "SFAIL" ${ATTR_NAME_HANA_SYNC_STATUS[@]}
|
|
|
581d9d |
+ super_ocf_log info "ACT site=$sr_name, seting SFAIL for secondary (6) - srRc=$srRc lss=$lss No ACTIVES found in cmd output"
|
|
|
581d9d |
+ # TODO: PRIO1 - P004: need to check LSS again to avoid dying primary to block (SFAIL) secondary
|
|
|
581d9d |
+ lpa_set_lpt 10 "$remoteNode"
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ elif [ $srRc -le 11 ]; then # 11 and 10
|
|
|
581d9d |
+ # if systemReplicationStatus is ERROR and landscapeHostConfiguration is down than do NOT set SFAIL
|
|
|
581d9d |
+ get_hana_landscape_status; lss=$?
|
|
|
581d9d |
+ if [ $lss -lt 2 ]; then
|
|
|
581d9d |
+ # keep everithing like it was
|
|
|
581d9d |
+ rc=2
|
|
|
581d9d |
+ else
|
|
|
581d9d |
+ # ok we should be careful and set secondary to SFAIL
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME SFAIL"
|
|
|
581d9d |
+ set_hana_attribute "$remoteNode" "SFAIL" ${ATTR_NAME_HANA_SYNC_STATUS[@]}
|
|
|
581d9d |
+ super_ocf_log info "ACT site=$sr_name, seting SFAIL for secondary (5) - srRc=$srRc lss=$lss"
|
|
|
581d9d |
+ # TODO: PRIO1 - P004: need to check LSS again to avoid dying primary to block (SFAIL) secondary
|
|
|
581d9d |
+ lpa_set_lpt 10 "$remoteNode"
|
|
|
581d9d |
+ rc=1
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ else
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME SFAIL"
|
|
|
581d9d |
+ set_hana_attribute "$remoteNode" "SFAIL" ${ATTR_NAME_HANA_SYNC_STATUS[@]}
|
|
|
581d9d |
+ super_ocf_log info "ACT site=$sr_name, seting SFAIL for secondary (2) - srRc=$srRc"
|
|
|
581d9d |
+ # TODO: PRIO1 - P004: need to check LSS again to avoid dying primary to block (SFAIL) secondary
|
|
|
581d9d |
+ lpa_set_lpt 10 "$remoteNode"
|
|
|
581d9d |
+ rc=1;
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME PRIM+LPA"
|
|
|
581d9d |
+ super_ocf_log info "DBG PRIM"
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
+}
|
|
|
581d9d |
+
|
|
|
581d9d |
#
|
|
|
581d9d |
-# function: analyze_hana_sync_status - query and check hana system replication status
|
|
|
581d9d |
+####
|
|
|
581d9d |
+#### OLD HDBSQL STUFF FOR SPS6,7,8 AND SCALE-UP ONLY
|
|
|
581d9d |
+####
|
|
|
581d9d |
+# function: analyze_hana_sync_statusSQL - query and check hana system replication status
|
|
|
581d9d |
# params: -
|
|
|
581d9d |
# globals: DIR_EXECUTABLE(r), remoteHost(r)
|
|
|
581d9d |
# get the HANA sync status
|
|
|
581d9d |
-#
|
|
|
581d9d |
-function analyze_hana_sync_status()
|
|
|
581d9d |
+#
|
|
|
581d9d |
+function analyze_hana_sync_statusSQL()
|
|
|
581d9d |
{
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
local -a clusterNodes=()
|
|
|
581d9d |
@@ -863,35 +966,9 @@
|
|
|
581d9d |
# TODO PRIO1: REMOVE remoteNode dependency - set SFAIL
|
|
|
581d9d |
set_hana_attribute "$remoteNode" "SFAIL" ${ATTR_NAME_HANA_SYNC_STATUS[@]}
|
|
|
581d9d |
fi
|
|
|
581d9d |
- # first get a list of all secondary hosts, than a list of all secondary hosts, if the is ANY failure at this site
|
|
|
581d9d |
- # TODO: PRIO9: for first we assume there is only ONE secondary site (like ROT)
|
|
|
581d9d |
- # TODO: PRIO3: should we loop over all cluster nodes fetching their roles-attribute? To minimize sql-queries?
|
|
|
581d9d |
- #
|
|
|
581d9d |
- all_secondary_hosts=$(timeout $hdbSrQueryTimeout hdbsql -a -x -U $secUser $query_secondaries ); sqlrc=$?
|
|
|
581d9d |
- all_secondary_hosts=$(echo $all_secondary_hosts | dequote);
|
|
|
581d9d |
- if [ "$sqlrc" -eq 0 ]; then
|
|
|
581d9d |
- all_broken_secondary_hosts=$(timeout $hdbSrQueryTimeout hdbsql -a -x -U $secUser $query_failed_secondaries); sqlrc=$?
|
|
|
581d9d |
- all_broken_secondary_hosts=$(echo $all_broken_secondary_hosts | dequote);
|
|
|
581d9d |
- if [ "$sqlrc" -eq 0 ]; then
|
|
|
581d9d |
- if [ -n "$all_broken_secondary_hosts" ]; then
|
|
|
581d9d |
- #
|
|
|
581d9d |
- # we have a broken secondary site - set all hosts to "SFAIL"
|
|
|
581d9d |
- #
|
|
|
581d9d |
- # Note: since HANA hostname can be different from nodename we need to check all vhost attributes
|
|
|
581d9d |
- for n in $all_broken_secondary_hosts; do
|
|
|
581d9d |
- for cl in ${otherNodes[@]}; do
|
|
|
581d9d |
- vHost=$(get_hana_attribute $cl ${ATTR_NAME_HANA_VHOST[@]})
|
|
|
581d9d |
- if [ "$vHost" = "$n" ]; then # we found the correct node
|
|
|
581d9d |
- set_hana_attribute $cl "SFAIL" ${ATTR_NAME_HANA_SYNC_STATUS[@]}
|
|
|
581d9d |
- fi
|
|
|
581d9d |
- done
|
|
|
581d9d |
- done
|
|
|
581d9d |
- fi
|
|
|
581d9d |
- fi
|
|
|
581d9d |
- fi
|
|
|
581d9d |
else
|
|
|
581d9d |
case "$sqlrc" in
|
|
|
581d9d |
- 19 )
|
|
|
581d9d |
+ 19 )
|
|
|
581d9d |
# return codes 19: license error -> set SFAIL!
|
|
|
581d9d |
# DONE: PRIO1: We should NOT set SFAIL, if HDB is exactly broken now
|
|
|
581d9d |
# When HDB breaks during monitor this could prevent a prositive remote failover
|
|
|
581d9d |
@@ -901,7 +978,7 @@
|
|
|
581d9d |
done
|
|
|
581d9d |
;;
|
|
|
581d9d |
esac
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
@@ -932,10 +1009,18 @@
|
|
|
581d9d |
local remoteInstance="";
|
|
|
581d9d |
remoteInstance=$InstanceNr
|
|
|
581d9d |
if ocf_is_true ${AUTOMATED_REGISTER}; then
|
|
|
581d9d |
+ #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
super_ocf_log info "ACT: REGISTER: hdbnsutil -sr_register --remoteHost=$remoteHost --remoteInstance=$remoteInstance --mode=$sr_mode --name=$sr_name"
|
|
|
581d9d |
+ #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
su - $sidadm -c "hdbnsutil -sr_register --remoteHost=$remoteHost --remoteInstance=$remoteInstance --mode=$sr_mode --name=$sr_name"; rc=$?
|
|
|
581d9d |
+ # backup_global_and_nameserver
|
|
|
581d9d |
else
|
|
|
581d9d |
- super_ocf_log info "ACT: IGNORE REGISTER because AUTOMATED_REGISTER is set to FALSE"
|
|
|
581d9d |
+ super_ocf_log info "ACT: SAPHANA DROP REGISTER because AUTOMATED_REGISTER is set to FALSE"
|
|
|
581d9d |
rc=1
|
|
|
581d9d |
fi
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
@@ -945,7 +1030,7 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
# function: saphana_status - pure status check
|
|
|
581d9d |
# params: -
|
|
|
581d9d |
-# globals: SIDInstanceName, OCF_*,
|
|
|
581d9d |
+# globals: SIDInstanceName, OCF_*,
|
|
|
581d9d |
function saphana_status() {
|
|
|
581d9d |
local binDeam="hdb.sap${SIDInstanceName}" rc=0
|
|
|
581d9d |
binDeam=${binDeam:0:15} # Process name is limited to the first 15 characters
|
|
|
581d9d |
@@ -956,13 +1041,13 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
# function: saphana_start - start a hana instance
|
|
|
581d9d |
# params: -
|
|
|
581d9d |
-# globals: OCF_*, SAPCONTROL, InstanceNr, SID, InstanceName,
|
|
|
581d9d |
+# globals: OCF_*, SAPCONTROL, InstanceNr, SID, InstanceName,
|
|
|
581d9d |
#
|
|
|
581d9d |
function saphana_start() {
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
local rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
local output=""
|
|
|
581d9d |
- local loopcount=0
|
|
|
581d9d |
+ local loopcount=0
|
|
|
581d9d |
check_sapstartsrv
|
|
|
581d9d |
rc=$?
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -1000,11 +1085,11 @@
|
|
|
581d9d |
# saphana_stop: Stop the SAP instance
|
|
|
581d9d |
#
|
|
|
581d9d |
function saphana_stop() {
|
|
|
581d9d |
- super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
- local output=""
|
|
|
581d9d |
- local rc=0
|
|
|
581d9d |
- check_sapstartsrv; rc=$?
|
|
|
581d9d |
- if [ $rc -eq $OCF_SUCCESS ]; then
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
+ local output=""
|
|
|
581d9d |
+ local rc=0
|
|
|
581d9d |
+ check_sapstartsrv; rc=$?
|
|
|
581d9d |
+ if [ $rc -eq $OCF_SUCCESS ]; then
|
|
|
581d9d |
output=$($SAPCONTROL -nr $InstanceNr -function Stop)
|
|
|
581d9d |
rc=$?
|
|
|
581d9d |
super_ocf_log info "ACT: Stopping SAP Instance $SID-$InstanceName: $output"
|
|
|
581d9d |
@@ -1032,7 +1117,7 @@
|
|
|
581d9d |
# function: saphana_validate - validation of (some) variables/parameters
|
|
|
581d9d |
# params: -
|
|
|
581d9d |
# globals: OCF_*(r), SID(r), InstanceName(r), InstanceNr(r), SAPVIRHOST(r)
|
|
|
581d9d |
-# saphana_validate: Check the symantic of the input parameters
|
|
|
581d9d |
+# saphana_validate: Check the symantic of the input parameters
|
|
|
581d9d |
#
|
|
|
581d9d |
function saphana_validate() {
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
@@ -1060,12 +1145,12 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
# function: saphana_start_primary - handle startup of PRIMARY in M/S
|
|
|
581d9d |
# params:
|
|
|
581d9d |
-# globals: OCF_*(r), NODENAME, ATTR_NAME_*, HANA_STATE_*,
|
|
|
581d9d |
+# globals: OCF_*(r), NODENAME, ATTR_NAME_*, HANA_STATE_*,
|
|
|
581d9d |
#
|
|
|
581d9d |
function saphana_start_primary()
|
|
|
581d9d |
{
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
- local primary_status sync_attr score_master rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
+ local primary_status sync_attr score_master rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
local lss sqlrc;
|
|
|
581d9d |
local rc=0
|
|
|
581d9d |
local lpa_dec=4
|
|
|
581d9d |
@@ -1074,7 +1159,7 @@
|
|
|
581d9d |
# we will be a master (PRIMARY) so checking, if the is an OTHER master
|
|
|
581d9d |
#
|
|
|
581d9d |
super_ocf_log debug "DBG: saphana_primary - check_for_primary reports HANA_STATE_PRIMARY"
|
|
|
581d9d |
- #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
lpa_init_lpt $HANA_STATE_PRIMARY
|
|
|
581d9d |
lpa_check_lpt_status; lpa_dec=$?
|
|
|
581d9d |
get_hana_landscape_status; lss=$?
|
|
|
581d9d |
@@ -1139,7 +1224,7 @@
|
|
|
581d9d |
1 ) # landcape says we are down, lets start and adjust scores and return code
|
|
|
581d9d |
super_ocf_log info "LPA: landcape: DOWN, LPA: start ==> start instance"
|
|
|
581d9d |
saphana_start
|
|
|
581d9d |
- rc=$?
|
|
|
581d9d |
+ rc=$?
|
|
|
581d9d |
LPTloc=$(date '+%s')
|
|
|
581d9d |
lpa_set_lpt $LPTloc
|
|
|
581d9d |
;;
|
|
|
581d9d |
@@ -1152,7 +1237,7 @@
|
|
|
581d9d |
# DONE: PRIO3: check if this reaction is correct - tell cluster about failed start
|
|
|
581d9d |
super_ocf_log info "LPA: landcape: UP, LPA: register ==> take down"
|
|
|
581d9d |
set_crm_master -inf
|
|
|
581d9d |
- rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
+ rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
;;
|
|
|
581d9d |
1 ) # lets try to register
|
|
|
581d9d |
# DONE: PRIO2: Like Action in start_secondary
|
|
|
581d9d |
@@ -1160,7 +1245,7 @@
|
|
|
581d9d |
super_ocf_log info "DEC: AN OTHER HANA IS AVAILABLE ==> LETS REGISTER"
|
|
|
581d9d |
set_crm_master 0
|
|
|
581d9d |
if wait_for_primary_master 1; then
|
|
|
581d9d |
- register_hana_secondary
|
|
|
581d9d |
+ register_hana_secondary
|
|
|
581d9d |
check_for_primary; primary_status=$?
|
|
|
581d9d |
if [ $primary_status -eq $HANA_STATE_SECONDARY ]; then
|
|
|
581d9d |
super_ocf_log info "ACT: Register successful"
|
|
|
581d9d |
@@ -1169,11 +1254,11 @@
|
|
|
581d9d |
set_crm_master 0
|
|
|
581d9d |
saphana_start_secondary
|
|
|
581d9d |
rc=$?
|
|
|
581d9d |
- lpa_set_lpt 30
|
|
|
581d9d |
+ lpa_set_lpt 10
|
|
|
581d9d |
else
|
|
|
581d9d |
super_ocf_log err "ACT: Register failed"
|
|
|
581d9d |
rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
else
|
|
|
581d9d |
# lets check next monitor, if we can register
|
|
|
581d9d |
rc=$OCF_SUCCESS
|
|
|
581d9d |
@@ -1185,6 +1270,9 @@
|
|
|
581d9d |
case "$lss" in
|
|
|
581d9d |
2 | 3 | 4 ) # as we ARE up we just keep it up
|
|
|
581d9d |
# TODO: PRIO3: I now change from "just keep it up to take that down"
|
|
|
581d9d |
+# TODO: PRIO1 differ lpt_advice!!
|
|
|
581d9d |
+# 2 => DOWN
|
|
|
581d9d |
+# 3 => KEEP
|
|
|
581d9d |
# TODO: PRIO3: OCF_SUCCESS, OCF_NOT_RUNNING or OCF_ERR_xxxx ?
|
|
|
581d9d |
set_crm_master -9000
|
|
|
581d9d |
#scoring_crm_master "$my_role" "$my_sync"
|
|
|
581d9d |
@@ -1193,7 +1281,7 @@
|
|
|
581d9d |
1 ) # we are down, so we should wait --> followup in next monitor
|
|
|
581d9d |
super_ocf_log info "LPA: landcape: DOWN, LPA: wait ==> keep waiting"
|
|
|
581d9d |
# TODO: PRIO3: Check, if WAITING is correct here
|
|
|
581d9d |
- set_hana_attribute ${NODENAME} "WAITING" ${ATTR_NAME_HANA_CLONE_STATE[@]}
|
|
|
581d9d |
+ set_hana_attribute ${NODENAME} "WAITING4LPA" ${ATTR_NAME_HANA_CLONE_STATE[@]}
|
|
|
581d9d |
set_crm_master -9000
|
|
|
581d9d |
rc=$OCF_SUCCESS
|
|
|
581d9d |
;;
|
|
|
581d9d |
@@ -1202,7 +1290,7 @@
|
|
|
581d9d |
fail ) # process a lpa FAIL
|
|
|
581d9d |
super_ocf_log info "LPA: LPA reports FAIL"
|
|
|
581d9d |
set_crm_master -inf
|
|
|
581d9d |
- rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
+ rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
;;
|
|
|
581d9d |
esac
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
@@ -1278,12 +1366,12 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
# function: saphana_start_secondary - handle startup of PRIMARY in M/S
|
|
|
581d9d |
# params:
|
|
|
581d9d |
-# globals: OCF_*(r), NODENAME, ATTR_NAME_*,
|
|
|
581d9d |
+# globals: OCF_*(r), NODENAME, ATTR_NAME_*,
|
|
|
581d9d |
#
|
|
|
581d9d |
function saphana_start_secondary()
|
|
|
581d9d |
{
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
- local primary_status sync_attr score_master rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
+ local primary_status sync_attr score_master rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
local sqlrc;
|
|
|
581d9d |
set_crm_master 0
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -1291,9 +1379,9 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
lpa_push_lpt 10
|
|
|
581d9d |
lpa_set_lpt 10
|
|
|
581d9d |
- #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
####### LPA - end
|
|
|
581d9d |
- #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
#
|
|
|
581d9d |
# we would be slave (secondary)
|
|
|
581d9d |
# we first need to check, if there are Master Nodes, because the Scecondary only starts
|
|
|
581d9d |
@@ -1311,16 +1399,16 @@
|
|
|
581d9d |
# It seams the stating secondary could not start because of stopping primary
|
|
|
581d9d |
# so this is a WAITING situation
|
|
|
581d9d |
super_ocf_log info "ACT: PRIMARY seams to be down now ==> WAITING"
|
|
|
581d9d |
- set_hana_attribute ${NODENAME} "WAITING" ${ATTR_NAME_HANA_CLONE_STATE[@]}
|
|
|
581d9d |
+ set_hana_attribute ${NODENAME} "WAITING4PRIM" ${ATTR_NAME_HANA_CLONE_STATE[@]}
|
|
|
581d9d |
set_crm_master -INFINITY
|
|
|
581d9d |
rc=$OCF_SUCCESS
|
|
|
581d9d |
fi
|
|
|
581d9d |
else
|
|
|
581d9d |
- lpa_set_lpt 30
|
|
|
581d9d |
+ lpa_set_lpt 10
|
|
|
581d9d |
fi
|
|
|
581d9d |
else
|
|
|
581d9d |
super_ocf_log info "ACT: wait_for_primary_master ==> WAITING"
|
|
|
581d9d |
- set_hana_attribute ${NODENAME} "WAITING" ${ATTR_NAME_HANA_CLONE_STATE[@]}
|
|
|
581d9d |
+ set_hana_attribute ${NODENAME} "WAITING4PRIM" ${ATTR_NAME_HANA_CLONE_STATE[@]}
|
|
|
581d9d |
set_crm_master -INFINITY
|
|
|
581d9d |
rc=$OCF_SUCCESS
|
|
|
581d9d |
fi
|
|
|
581d9d |
@@ -1329,11 +1417,71 @@
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
+# function: saphana_check_local_instance
|
|
|
581d9d |
+# params:
|
|
|
581d9d |
+# output:
|
|
|
581d9d |
+# rc: rc=0 (UP) rc=1 (DOWN)
|
|
|
581d9d |
+# globals:
|
|
|
581d9d |
+#
|
|
|
581d9d |
+function saphana_check_local_instance()
|
|
|
581d9d |
+{
|
|
|
581d9d |
+ local rc=1
|
|
|
581d9d |
+ local count=0
|
|
|
581d9d |
+ local SERVNO
|
|
|
581d9d |
+ local output
|
|
|
581d9d |
+ local MONITOR_SERVICES="hdbnameserver|hdbdaemon" # TODO: PRIO1: exact list of Services
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
+ check_sapstartsrv
|
|
|
581d9d |
+ rc=$?
|
|
|
581d9d |
+ if [ $rc -eq $OCF_SUCCESS ]
|
|
|
581d9d |
+ then
|
|
|
581d9d |
+ output=$($SAPCONTROL -nr $InstanceNr -function GetProcessList -format script)
|
|
|
581d9d |
+ # we have to parse the output, because the returncode doesn't tell anything about the instance status
|
|
|
581d9d |
+ for SERVNO in `echo "$output" | grep '^[0-9] ' | cut -d' ' -f1 | sort -u`
|
|
|
581d9d |
+ do
|
|
|
581d9d |
+ local COLOR=`echo "$output" | grep "^$SERVNO dispstatus: " | cut -d' ' -f3`
|
|
|
581d9d |
+ local SERVICE=`echo "$output" | grep "^$SERVNO name: " | cut -d' ' -f3`
|
|
|
581d9d |
+ local STATE=0
|
|
|
581d9d |
+ local SEARCH
|
|
|
581d9d |
+
|
|
|
581d9d |
+ case $COLOR in
|
|
|
581d9d |
+ GREEN|YELLOW) STATE=$OCF_SUCCESS;;
|
|
|
581d9d |
+ *) STATE=$OCF_NOT_RUNNING;;
|
|
|
581d9d |
+ esac
|
|
|
581d9d |
+
|
|
|
581d9d |
+ SEARCH=`echo "$MONITOR_SERVICES" | sed 's/\+/\\\+/g' | sed 's/\./\\\./g'`
|
|
|
581d9d |
+ if [ `echo "$SERVICE" | egrep -c "$SEARCH"` -eq 1 ]
|
|
|
581d9d |
+ then
|
|
|
581d9d |
+ if [ $STATE -eq $OCF_NOT_RUNNING ]
|
|
|
581d9d |
+ then
|
|
|
581d9d |
+ [ "$MONLOG" != "NOLOG" ] && ocf_log err "SAP instance service $SERVICE is not running with status $COLOR !"
|
|
|
581d9d |
+ rc=$STATE
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ count=1
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ done
|
|
|
581d9d |
+
|
|
|
581d9d |
+ if [ $count -eq 0 -a $rc -eq $OCF_SUCCESS ]
|
|
|
581d9d |
+ then
|
|
|
581d9d |
+ if ocf_is_probe
|
|
|
581d9d |
+ then
|
|
|
581d9d |
+ rc=1
|
|
|
581d9d |
+ else
|
|
|
581d9d |
+ [ "$MONLOG" != "NOLOG" ] && ocf_log err "The SAP instance does not run any services which this RA could monitor!"
|
|
|
581d9d |
+ rc=1
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
+}
|
|
|
581d9d |
+
|
|
|
581d9d |
+#
|
|
|
581d9d |
# function: lpa_get_lpt - get lpt from cluster
|
|
|
581d9d |
# params: NODE
|
|
|
581d9d |
# output: LPT
|
|
|
581d9d |
# rc: rc=0: OK, rc=1: InternalERROR, rc=2: ERROR
|
|
|
581d9d |
-# globals: LPA_ATTR_*,
|
|
|
581d9d |
+# globals: LPA_ATTR_*,
|
|
|
581d9d |
#
|
|
|
581d9d |
function lpa_get_lpt() {
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
@@ -1348,7 +1496,7 @@
|
|
|
581d9d |
rc=2
|
|
|
581d9d |
fi
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
- return $rc
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -1372,7 +1520,7 @@
|
|
|
581d9d |
rc=0
|
|
|
581d9d |
fi
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
- return $rc
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -1398,7 +1546,7 @@
|
|
|
581d9d |
rc=2
|
|
|
581d9d |
fi
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
- return $rc
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -1422,15 +1570,15 @@
|
|
|
581d9d |
rc=2
|
|
|
581d9d |
else
|
|
|
581d9d |
rc=0
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
- return $rc
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
# function: lpa_init_lpt - initialize local lpt, if needed
|
|
|
581d9d |
# params: HANA_STATE
|
|
|
581d9d |
-# globals: HANA_STATE_*(r), LPA_DIRECTORY(r), sid(r), NODENAME(r),
|
|
|
581d9d |
+# globals: HANA_STATE_*(r), LPA_DIRECTORY(r), sid(r), NODENAME(r),
|
|
|
581d9d |
# lpa_init_lpt
|
|
|
581d9d |
#
|
|
|
581d9d |
# Returncodes:
|
|
|
581d9d |
@@ -1439,7 +1587,7 @@
|
|
|
581d9d |
# Initializing (if NO local LPT-file):
|
|
|
581d9d |
# SECONDARY sets to 0
|
|
|
581d9d |
# PRIMARY sets to 1
|
|
|
581d9d |
-#
|
|
|
581d9d |
+#
|
|
|
581d9d |
function lpa_init_lpt() {
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
local rc=1
|
|
|
581d9d |
@@ -1458,11 +1606,11 @@
|
|
|
581d9d |
LPTloc=10
|
|
|
581d9d |
lpa_push_lpt "10"; rc=$?
|
|
|
581d9d |
else
|
|
|
581d9d |
- rc=2
|
|
|
581d9d |
+ rc=2
|
|
|
581d9d |
fi
|
|
|
581d9d |
lpa_set_lpt $LPTloc
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
- return $rc
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -1472,6 +1620,10 @@
|
|
|
581d9d |
# lpa_check_lpt_status
|
|
|
581d9d |
#
|
|
|
581d9d |
# Returncodes:
|
|
|
581d9d |
+# 0: start
|
|
|
581d9d |
+# 1: register than start
|
|
|
581d9d |
+# 2: wait4gab
|
|
|
581d9d |
+# 3: wait4other
|
|
|
581d9d |
#
|
|
|
581d9d |
# Initializing (if NO local LPT-file):
|
|
|
581d9d |
# SECONDARY sets to 10
|
|
|
581d9d |
@@ -1480,20 +1632,20 @@
|
|
|
581d9d |
# LPRlocal OR LPTremore ARE real lpt (>1000)
|
|
|
581d9d |
# THEN:
|
|
|
581d9d |
# Bigger LPR wins, if delta-gab is OK
|
|
|
581d9d |
-# LPTlocal >> LPTremore ===> rc=0 (start)
|
|
|
581d9d |
+# LPTlocal >> LPTremore ===> rc=0 (start)
|
|
|
581d9d |
# LPTRemote >> LPTlocal ===> rc=1 (register)
|
|
|
581d9d |
-# Stalemate in all other cases ==> STALEMATE-HANDLING ===> rc=2 (wait)
|
|
|
581d9d |
+# Stalemate in all other cases ==> STALEMATE-HANDLING ===> rc=2 (wait4gab)
|
|
|
581d9d |
# LPRlocal AND LPTremore ARE NOT real lpt (<=1000)
|
|
|
581d9d |
# THEN:
|
|
|
581d9d |
# Bigger LPT wins
|
|
|
581d9d |
-# LPTlocal > LPTremore ===> rc=0 (start)
|
|
|
581d9d |
+# LPTlocal > LPTremore ===> rc=0 (start)
|
|
|
581d9d |
# LPTRemote > LPTlocal ===> rc=1 (register)
|
|
|
581d9d |
-# Stalemate in all other cases ==> STALEMATE-HANDLING ===> rc=2 (wait)
|
|
|
581d9d |
+# Stalemate in all other cases ==> STALEMATE-HANDLING ===> rc=2 (wait4gab)
|
|
|
581d9d |
# LPTRemote is not initialized or node not kown in cluster (crm_mon -l) (0)
|
|
|
581d9d |
# TODO: PRIO1: Need to introduce a return-code 3 for remote sides lpa not ready
|
|
|
581d9d |
# THEN:
|
|
|
581d9d |
# WAIT ==> like STALEMATE-HANDLING ===> rc=2 (wait)
|
|
|
581d9d |
-#
|
|
|
581d9d |
+#
|
|
|
581d9d |
function lpa_check_lpt_status() {
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
local rc=0
|
|
|
581d9d |
@@ -1501,6 +1653,8 @@
|
|
|
581d9d |
local LPTrem=-1
|
|
|
581d9d |
local LPTMark=1000
|
|
|
581d9d |
local delta=0
|
|
|
581d9d |
+ local remSn_name=""
|
|
|
581d9d |
+ local remHost=""
|
|
|
581d9d |
#
|
|
|
581d9d |
# First GET LPT from ATTR-FILE-DEFAULT
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -1550,7 +1704,20 @@
|
|
|
581d9d |
fi
|
|
|
581d9d |
fi
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
- return $rc
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
+}
|
|
|
581d9d |
+
|
|
|
581d9d |
+# function: is_the_master_nameserver
|
|
|
581d9d |
+# params: -
|
|
|
581d9d |
+# rc: 0: yes, local node is THE master nameserver
|
|
|
581d9d |
+# 1: else
|
|
|
581d9d |
+# globals:
|
|
|
581d9d |
+function is_the_master_nameserver()
|
|
|
581d9d |
+{
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
+ local rc=0
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
+ return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
#
|
|
|
581d9d |
@@ -1574,11 +1741,12 @@
|
|
|
581d9d |
check_for_primary; primary_status=$?
|
|
|
581d9d |
if [ $primary_status -eq $HANA_STATE_PRIMARY ]; then
|
|
|
581d9d |
saphana_start_primary; rc=$?
|
|
|
581d9d |
- else
|
|
|
581d9d |
+ else
|
|
|
581d9d |
+ lpa_set_lpt 10
|
|
|
581d9d |
saphana_start_secondary; rc=$?
|
|
|
581d9d |
- lpa_set_lpt 30
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
fi
|
|
|
581d9d |
+ super_ocf_log info "FLOW $FUNCNAME rc=$rc"
|
|
|
581d9d |
return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
|
|
|
581d9d |
@@ -1596,7 +1764,7 @@
|
|
|
581d9d |
check_for_primary; primary_status=$?
|
|
|
581d9d |
if [ $primary_status -eq $HANA_STATE_SECONDARY ]; then
|
|
|
581d9d |
lpa_set_lpt 10
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
saphana_stop; rc=$?
|
|
|
581d9d |
return $rc
|
|
|
581d9d |
}
|
|
|
581d9d |
@@ -1637,7 +1805,7 @@
|
|
|
581d9d |
DEMOTED )
|
|
|
581d9d |
promoted=0;
|
|
|
581d9d |
;;
|
|
|
581d9d |
- WAITING )
|
|
|
581d9d |
+ WAITING* )
|
|
|
581d9d |
# DONE: lpa_check_lpt_status to come out of here :)
|
|
|
581d9d |
# DONE: PRIO2: CHECK IF THE FIX FOR COMING OUT OF WAITING IS CORRECT
|
|
|
581d9d |
get_hana_landscape_status; lss=$?
|
|
|
581d9d |
@@ -1648,7 +1816,8 @@
|
|
|
581d9d |
lpa_set_lpt $LPTloc
|
|
|
581d9d |
fi
|
|
|
581d9d |
lpa_check_lpt_status; lparc=$?
|
|
|
581d9d |
- if [ $lparc -ne 2 ]; then
|
|
|
581d9d |
+ # TODO: PRIO1: Need to differ lpa_check_lpt_status return codes
|
|
|
581d9d |
+ if [ $lparc -lt 2 ]; then
|
|
|
581d9d |
# lpa - no need to wait any longer - lets try a new start
|
|
|
581d9d |
saphana_start_clone
|
|
|
581d9d |
rc=$?
|
|
|
581d9d |
@@ -1663,7 +1832,7 @@
|
|
|
581d9d |
super_ocf_log info "LPA: Dual primary detected and AUTOMATED_REGISTER='false' ==> WAITING"
|
|
|
581d9d |
fi
|
|
|
581d9d |
return $OCF_SUCCESS
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
promoted=0;
|
|
|
581d9d |
;;
|
|
|
581d9d |
UNDEFINED )
|
|
|
581d9d |
@@ -1682,13 +1851,13 @@
|
|
|
581d9d |
get_hana_landscape_status; lss=$?
|
|
|
581d9d |
super_ocf_log debug "DBG: saphana_monitor_clone: get_hana_landscape_status=$lss"
|
|
|
581d9d |
case "$lss" in
|
|
|
581d9d |
- 0 ) # FATAL or ERROR
|
|
|
581d9d |
+ 0 ) # FATAL or ERROR
|
|
|
581d9d |
rc=$OCF_ERR_GENERIC
|
|
|
581d9d |
;;
|
|
|
581d9d |
- 1 ) # DOWN or ERROR
|
|
|
581d9d |
+ 1 ) # DOWN or ERROR
|
|
|
581d9d |
# DONE: PRIO2: Maybe we need to differ between 0 and 1. While 0 is a fatal sap error, 1 is down/error
|
|
|
581d9d |
if ocf_is_probe; then
|
|
|
581d9d |
- #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
# leave master score untouched, only set return code
|
|
|
581d9d |
#
|
|
|
581d9d |
rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
@@ -1699,7 +1868,7 @@
|
|
|
581d9d |
# For Migration it would be good to decrease master score
|
|
|
581d9d |
# For Reload locally we should NOT adjust the master score
|
|
|
581d9d |
# ===> Should we rely on the migration threshold?
|
|
|
581d9d |
- # set_crm_master
|
|
|
581d9d |
+ # set_crm_master
|
|
|
581d9d |
if ocf_is_true "${PreferSiteTakeover}" ; then
|
|
|
581d9d |
#
|
|
|
581d9d |
# DONE: PRIO1: first check, if remote site is already (and still) in sync
|
|
|
581d9d |
@@ -1708,7 +1877,7 @@
|
|
|
581d9d |
# TODO PRIO1: REMOVE remoteNode dependency - get_sync_status
|
|
|
581d9d |
remoteSync=$(get_hana_attribute $remoteNode ${ATTR_NAME_HANA_SYNC_STATUS[@]})
|
|
|
581d9d |
case "$remoteSync" in
|
|
|
581d9d |
- SOK )
|
|
|
581d9d |
+ SOK | PRIM )
|
|
|
581d9d |
super_ocf_log info "DEC: PreferSiteTakeover selected so decrease promotion score here (and reset lpa)"
|
|
|
581d9d |
set_crm_master 5
|
|
|
581d9d |
if check_for_primary_master; then
|
|
|
581d9d |
@@ -1718,11 +1887,11 @@
|
|
|
581d9d |
SFAIL )
|
|
|
581d9d |
super_ocf_log info "DEC: PreferSiteTakeover selected BUT remoteHost is not in sync (SFAIL) ==> local restart preferred"
|
|
|
581d9d |
;;
|
|
|
581d9d |
- * )
|
|
|
581d9d |
+ * )
|
|
|
581d9d |
super_ocf_log info "DEC: PreferSiteTakeover selected BUT remoteHost is not in sync ($remoteSync) ==> local restart preferred"
|
|
|
581d9d |
;;
|
|
|
581d9d |
- esac
|
|
|
581d9d |
- else
|
|
|
581d9d |
+ esac
|
|
|
581d9d |
+ else
|
|
|
581d9d |
# TODO: PRIO5: SCALE-OUT ONLY? Implement for local restart
|
|
|
581d9d |
# It maybe that for the local restart we only need to decrease the secondaries promotion score
|
|
|
581d9d |
#super_ocf_log info "DEC: PreferSiteTakeover selected so decrease promotion score here"
|
|
|
581d9d |
@@ -1765,8 +1934,12 @@
|
|
|
581d9d |
case "$my_role" in
|
|
|
581d9d |
[12]:P:*:master:* ) # primary is down or may not anser hdbsql query so drop analyze_hana_sync_status
|
|
|
581d9d |
;;
|
|
|
581d9d |
- [34]:P:*:master:* ) # primary is up and should now be able to anser hdbsql query
|
|
|
581d9d |
- analyze_hana_sync_status
|
|
|
581d9d |
+ [34]:P:*:*:* ) # primary is up and should now be able to anser hdbsql query
|
|
|
581d9d |
+ if [ -f $DIR_EXECUTABLE/python_support/systemReplicationStatus.py ]; then
|
|
|
581d9d |
+ analyze_hana_sync_statusSRS
|
|
|
581d9d |
+ else
|
|
|
581d9d |
+ analyze_hana_sync_statusSQL
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
;;
|
|
|
581d9d |
esac
|
|
|
581d9d |
rem_role=$(get_hana_attribute ${remoteNode} ${ATTR_NAME_HANA_ROLES[@]})
|
|
|
581d9d |
@@ -1776,9 +1949,9 @@
|
|
|
581d9d |
[234]:P:* ) # dual primary, but other instance marked as PROMOTED by the cluster
|
|
|
581d9d |
lpa_check_lpt_status; again_lpa_rc=$?
|
|
|
581d9d |
if [ $again_lpa_rc -eq 2 ]; then
|
|
|
581d9d |
- super_ocf_log info "DEC: Dual primary detected, other instance is PROMOTED and lpa stalemate ==> local restart"
|
|
|
581d9d |
- lpa_set_lpt 10
|
|
|
581d9d |
- lpa_push_lpt 10
|
|
|
581d9d |
+ super_ocf_log info "DEC: Dual primary detected, other instance is PROMOTED and lpa stalemate ==> local restart"
|
|
|
581d9d |
+ lpa_set_lpt 10
|
|
|
581d9d |
+ lpa_push_lpt 10
|
|
|
581d9d |
rc=$OCF_NOT_RUNNING
|
|
|
581d9d |
fi
|
|
|
581d9d |
;;
|
|
|
581d9d |
@@ -1812,13 +1985,13 @@
|
|
|
581d9d |
function saphana_monitor_secondary()
|
|
|
581d9d |
{
|
|
|
581d9d |
super_ocf_log info "FLOW $FUNCNAME ($*)"
|
|
|
581d9d |
- local rc=$OCF_ERR_GENERIC
|
|
|
581d9d |
- local promoted=0
|
|
|
581d9d |
+ local rc=$OCF_ERR_GENERIC
|
|
|
581d9d |
+ local promoted=0
|
|
|
581d9d |
local init_attribute=0
|
|
|
581d9d |
local lss
|
|
|
581d9d |
#
|
|
|
581d9d |
# OK, we are running as HANA SECONDARY
|
|
|
581d9d |
- #
|
|
|
581d9d |
+ #
|
|
|
581d9d |
if ! lpa_get_lpt ${NODENAME}; then
|
|
|
581d9d |
lpa_set_lpt 10
|
|
|
581d9d |
lpa_push_lpt 10
|
|
|
581d9d |
@@ -1863,7 +2036,7 @@
|
|
|
581d9d |
super_ocf_log debug "DBG: saphana_monitor_clone: HANA_STATE_SECONDARY"
|
|
|
581d9d |
#
|
|
|
581d9d |
# old method was: saphana_monitor - new method is get_hana_landscape_status
|
|
|
581d9d |
- get_hana_landscape_status; lss=$?
|
|
|
581d9d |
+ get_hana_landscape_status; lss=$?
|
|
|
581d9d |
super_ocf_log debug "DBG: saphana_monitor_clone: get_hana_landscape_status=$lss"
|
|
|
581d9d |
case "$lss" in
|
|
|
581d9d |
0 ) # FATAL
|
|
|
581d9d |
@@ -1919,11 +2092,11 @@
|
|
|
581d9d |
# a) returning 7 here and force cluster a restart of the slave
|
|
|
581d9d |
# b) starting the instance here inside the monitor -> may result in longer runtime, timeouts
|
|
|
581d9d |
#
|
|
|
581d9d |
- # first check with the status function (OS tools) if there could be something like a SAP instance running
|
|
|
581d9d |
- # as we do not know here, if we are in master or slave state we do not want to start our monitoring
|
|
|
581d9d |
- # agents (sapstartsrv) on the wrong host
|
|
|
581d9d |
- local rc=$OCF_ERR_GENERIC
|
|
|
581d9d |
- local promoted=0
|
|
|
581d9d |
+ # first check with the status function (OS tools) if there could be something like a SAP instance running
|
|
|
581d9d |
+ # as we do not know here, if we are in master or slave state we do not want to start our monitoring
|
|
|
581d9d |
+ # agents (sapstartsrv) on the wrong host
|
|
|
581d9d |
+ local rc=$OCF_ERR_GENERIC
|
|
|
581d9d |
+ local promoted=0
|
|
|
581d9d |
local init_attribute=0
|
|
|
581d9d |
local lpaRc=0
|
|
|
581d9d |
local mRc=0
|
|
|
581d9d |
@@ -1973,7 +2146,7 @@
|
|
|
581d9d |
# function: saphana_promote_clone - promote a hana clone
|
|
|
581d9d |
# params: -
|
|
|
581d9d |
# globals: OCF_*(r), NODENAME(r), HANA_STATE_*, SID(r), InstanceName(r),
|
|
|
581d9d |
-# saphana_promote_clone:
|
|
|
581d9d |
+# saphana_promote_clone:
|
|
|
581d9d |
# In a Master/Slave configuration get Master being the primary OR by running hana takeover
|
|
|
581d9d |
#
|
|
|
581d9d |
function saphana_promote_clone() {
|
|
|
581d9d |
@@ -2017,7 +2190,7 @@
|
|
|
581d9d |
rc=$OCF_SUCCESS;
|
|
|
581d9d |
else
|
|
|
581d9d |
rc=$OCF_FAILED_MASTER
|
|
|
581d9d |
- fi
|
|
|
581d9d |
+ fi
|
|
|
581d9d |
;;
|
|
|
581d9d |
* )
|
|
|
581d9d |
super_ocf_log err "ACT: HANA SYNC STATUS IS NOT 'SOK' SO THIS HANA SITE COULD NOT BE PROMOTED"
|
|
|
581d9d |
@@ -2039,10 +2212,10 @@
|
|
|
581d9d |
#
|
|
|
581d9d |
# function: saphana_demote_clone - demote a hana clone instance
|
|
|
581d9d |
# params: -
|
|
|
581d9d |
-# globals: OCF_*(r), NODENAME(r),
|
|
|
581d9d |
+# globals: OCF_*(r), NODENAME(r),
|
|
|
581d9d |
# saphana_demote_clone
|
|
|
581d9d |
-# the HANA System Replication (SR) runs in a Master/Slave
|
|
|
581d9d |
-# While we could not change a HANA instance to be really demoted, we only mark the status for
|
|
|
581d9d |
+# the HANA System Replication (SR) runs in a Master/Slave
|
|
|
581d9d |
+# While we could not change a HANA instance to be really demoted, we only mark the status for
|
|
|
581d9d |
# correct monitor return codes
|
|
|
581d9d |
#
|
|
|
581d9d |
function saphana_demote_clone() {
|
|
|
581d9d |
@@ -2056,9 +2229,9 @@
}

#
-# function: main - main function to operate
+# function: main - main function to operate
# params: ACTION
-# globals: OCF_*(r), SID(w), sidadm(w), InstanceName(w), SAPVIRHOST(w), DIR_EXECUTABLE(w),
+# globals: OCF_*(r), SID(w), sidadm(w), InstanceName(w), SAPVIRHOST(w), DIR_EXECUTABLE(w),
# globals: SAPSTARTSRV(w), SAPCONTROL(w), DIR_PROFILE(w), SAPSTARTPROFILE(w), ACTION(w), CLACT(w), ra_rc(rw), $0(r), %ENV(r)
#

@@ -2073,7 +2246,7 @@
SAPCONTROL=""
DIR_PROFILE=""
SAPSTARTPROFILE=""
-SAPHanaFilter="${OCF_RESKEY_SAPHanaFilter:-ra-act-dec-lpa}"
+SAPHanaFilter="ra-act-dec-lpa"

NODENAME=$(crm_node -n)

@@ -2100,7 +2273,7 @@
exit $OCF_SUCCESS;;
*);;
esac
-saphana_init
+saphana_init

if ! ocf_is_root
then
@@ -2141,7 +2314,7 @@
saphana_$ACTION$CLACT
ra_rc=$?
;;
- validate-all)
+ validate-all)
saphana_validate
ra_rc=$?
;;
@@ -2149,12 +2322,13 @@
lpa_check_lpt_status
ra_rc=$?
;;
- *) # seams to be a unknown request
- saphana_methods
+ *) # seams to be a unknown request
+ saphana_methods
ra_rc=$OCF_ERR_UNIMPLEMENTED
;;
esac
timeE=$(date '+%s')
(( timeR = timeE - timeB ))
+#super_ocf_log info "RA ==== SAPHanaFilter=$SAPHanaFilter"
super_ocf_log info "RA ==== end action $ACTION$CLACT with rc=${ra_rc} ($THE_VERSION) (${timeR}s)===="
exit ${ra_rc}
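The SAPHana hunks above end in the usual OCF dispatcher: the single command-line argument selects the action, the matching saphana_* function runs, and the script exits with one of the OCF_* return codes while logging the elapsed time. As a reading aid only, here is a minimal sketch of that dispatch pattern (my_start, my_stop, my_monitor and friends are placeholders, not functions from this patch):

    #!/bin/bash
    # Minimal OCF-style dispatcher sketch -- illustrative, not part of the patch.
    : ${OCF_FUNCTIONS_DIR:=${OCF_ROOT}/lib/heartbeat}
    . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs

    ACTION=$1
    timeB=$(date '+%s')
    case "$ACTION" in
        start)        my_start;     rc=$? ;;
        stop)         my_stop;      rc=$? ;;
        monitor)      my_monitor;   rc=$? ;;
        validate-all) my_validate;  rc=$? ;;
        meta-data)    my_meta_data; rc=$OCF_SUCCESS ;;
        *)            my_methods;   rc=$OCF_ERR_UNIMPLEMENTED ;;
    esac
    timeE=$(date '+%s')
    ocf_log info "end action $ACTION with rc=$rc ($((timeE - timeB))s)"
    exit $rc
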
diff -uNr a/heartbeat/SAPHanaTopology b/heartbeat/SAPHanaTopology
--- a/heartbeat/SAPHanaTopology 2016-04-26 12:01:55.620889964 +0200
+++ b/heartbeat/SAPHanaTopology 2016-04-26 12:03:18.033887556 +0200
@@ -16,7 +16,7 @@
# Copyright: (c) 2014 SUSE Linux Products GmbH
# (c) 2015 SUSE Linux GmbH
#
-# An example usage:
+# An example usage:
# See usage() function below for more details...
#
# OCF instance parameters:
@@ -41,7 +41,6 @@
HANA_STATE_DEFECT=3

debug_attributes=0
-
SH=/bin/sh

#
@@ -57,7 +56,7 @@
local shf="${SAPHanaFilter:-all}"
#ocf_log "info" "super_ocf_log: f:$shf l:$level m:$message"
# message levels: (dbg)|info|warn|err|error
- #
+ #
# message types: (ACT|RA|FLOW|DBG|LPA|DEC
case "$level" in
dbg | debug | warn | err | error ) skip=0
@@ -65,7 +64,7 @@
info )
case "$shf" in
all) skip=0
- ;;
+ ;;
none )
skip=1
;;
@@ -74,13 +73,13 @@
mtype=${mtype#fh}
echo "$shf"| grep -iq ${mtype}; search=$?
if [ $search -eq 0 ]; then
- skip=0
+ skip=0
else
skip=1
fi
;;
esac
- ;;
+ ;;
esac
if [ $skip -eq 0 ]; then
ocf_log "$level" "$message"
@@ -126,15 +125,15 @@


<resource-agent name="SAPHanaTopology">
- <version>0.149.6</version>
+ <version>0.151.1</version>
<shortdesc lang="en">Analyzes SAP HANA System Replication Topology.</shortdesc>
<longdesc lang="en">This RA analyzes the SAP HANA topology and "sends" all findings via the node status attributes to
all nodes in the cluster. These attributes are taken by the SAPHana RA to control the SAP Hana Databases.
In addition it starts and monitors the local saphostagent.

-1. Interface to monitor a HANA system: landscapeHostConfiguration.py
+1. Interface to monitor a HANA system: landscapeHostConfiguration.py
landscapeHostConfiguration.py has some detailed output about HANA system status
-and node roles. For our monitor the overall status is relevant. This overall
+and node roles. For our monitor the overall status is relevant. This overall
status is reported by the returncode of the script:
0: Internal Fatal
1: ERROR
@@ -150,7 +149,7 @@
system replication takeover (sr_takeover) or to register a former primary to a newer one (sr_register).

3. saphostctrl
- The interface saphostctrl uses the function ListInstances to figure out the virtual host name of the
+ The interface saphostctrl uses the function ListInstances to figure out the virtual host name of the
SAP HANA instance. This is the hostname used during the HANA installation.
</longdesc>
<parameters>
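The return-code convention sketched in the long description above can be checked by hand. A hedged example follows: the su call, the python_support path below the instance exe directory, and the placeholder SID "HA1"/"ha1adm" are assumptions, and the full code list 0-4 follows current SAPHanaSR documentation rather than the two codes visible inside this hunk's context.

    # Illustrative only: run the landscape check as the <sid>adm user and map its
    # return code to the overall state the cluster monitors care about.
    su - ha1adm -c "python /usr/sap/HA1/HDB10/exe/python_support/landscapeHostConfiguration.py" >/dev/null
    case $? in
        0) echo "internal fatal" ;;
        1) echo "ERROR"          ;;
        2) echo "WARNING"        ;;
        3) echo "INFO"           ;;
        4) echo "OK"             ;;
    esac
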
@@ -172,13 +171,8 @@
<content type="string" default="" />
</parameter>
<parameter name="SAPHanaFilter" unique="0" required="0">
- <shortdesc lang="en">Define type of SAPHanaTopology RA messages to be printed</shortdesc>
- <longdesc lang="en">Define type of SAPHanaTopology RA messages to be printed.
-Define SAPHana resource agent messages to be printed.
- This parameter should only be set if requested by support. The default is sufficient for normal operation.
- Values: ra-act-lpa-dec-flow
- You could specify any combination of the above values like "ra-act-flow"
- </longdesc>
+ <shortdesc lang="en">OUTDATED</shortdesc>
+ <longdesc lang="en">OUTDATED</longdesc>
<content type="string" default="" />
</parameter>
</parameters>
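With the per-resource SAPHanaFilter parameter declared OUTDATED here, the later hunks in this patch read the filter from a cluster-wide property instead (see the ATTR_NAME_HANA_FILTER definition and the "props" branch of get_hana_attribute below). A hedged usage sketch for administrators; the lowercase SID "ha1" is a placeholder and the command lines simply mirror the crm_attribute calls introduced further down:

    # Illustrative only: set the global message filter as a cluster property ...
    crm_attribute -v "ra-act-dec-lpa" -n "hana_ha1_glob_filter" -t crm_config -s SAPHanaSR
    # ... and read it back the way the "props" branch of get_hana_attribute does.
    crm_attribute -G -n "hana_ha1_glob_filter" -t crm_config -q -d "ra-act-dec-lpa"
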
@@ -197,7 +191,7 @@
}

#
-# function: get_hana_attribute
+# function: get_hana_attribute
# params: NODE ATTR [STORE]
# globals: -
#
@@ -208,16 +202,19 @@
local attr_node=$1
local attr_name=$2
local attr_store=${3:-reboot} # DONE: PRIO5 get this (optional) from parameter
- local attr_val=""
- attr_val=$(crm_attribute -N ${attr_node} -G -n "$attr_name" -l $attr_store -q); rc=$?
- if [ $debug_attributes -eq 1 ]; then
- dstr=$(date)
- echo "$dstr: SAPHanaTopology: crm_attribute -N ${attr_node} -G -n \"$attr_name\" -l $attr_store -q --> $attr_val" >> /var/log/fhATTRIBUTE
- fi
- echo "$attr_val"
- if [ $rc -ne 0 ]; then
- super_ocf_log debug "DBG: ATTRIBUTE-FAILURE: crm_attribute -N $attr_node -G -n "$attr_name" -l $attr_store -q"
- fi
+ local attr_default=${4:-}
+ local dstr
+ dstr=$(date)
+ case "$attr_store" in
+ reboot | forever )
+ echo "$dstr: SAPHanaTopology: crm_attribute -N ${attr_node} -G -n \"$attr_name\" -l $attr_store -q" >> /var/log/fhATTRIBUTE
+ crm_attribute -N ${attr_node} -G -n "$attr_name" -l $attr_store -q -d "$attr_default" 2>>/var/log/fhATTRIBUTE; rc=$?
+ ;;
+ props )
+ echo "$dstr: SAPHanaTopology: crm_attribute -G -n \"$attr_name\" -t crm_config -q" >> /var/log/fhATTRIBUTE
+ crm_attribute -G -n "$attr_name" -t crm_config -q -d "$attr_default" 2>>/var/log/fhATTRIBUTE; rc=$?
+ ;;
+ esac
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
return $rc
}
@@ -234,19 +231,24 @@
local attr_value=$2
local attr_name=$3
local attr_store=${4:-reboot} # DONE: PRIO5 get this (optional) from parameter
+ local attr_default=${5:-}
local rc=1
- local attr_old
- attr_old=$(get_hana_attribute $attr_node $attr_name $attr_store); get_rc=$?
+ local attr_old=""
+ local dstr
+ dstr=$(date)
+ attr_old=$(get_hana_attribute $attr_node $attr_name $attr_store $attr_default); get_rc=$?
if [ "$attr_old" != "$attr_value" ]; then
super_ocf_log debug "DBG: SET attribute $attr_name for node ${attr_node} to ${attr_value} former ($attr_old) get_rc=$get_rc "
- if [ $debug_attributes -eq 1 ]; then
- dstr=$(date)
- echo "$dstr: SAPHanaTopology: crm_attribute -N $attr_node -v $attr_value -n \"$attr_name\" -l $attr_store" >> /var/log/fhATTRIBUTE
- fi
- crm_attribute -N $attr_node -v "$attr_value" -n "$attr_name" -l $attr_store; rc=$?
- if [ $rc -ne 0 ]; then
- super_ocf_log debug "DBG: ATTRIBUTE-FAILURE: crm_attribute -N $attr_node -v $attr_value -n "$attr_name" -l $attr_store"
- fi
+ case "$attr_store" in
+ reboot | forever )
+ echo "$dstr: SAPHanaTopology: crm_attribute -N $attr_node -v $attr_value -n \"$attr_name\" -l $attr_store" >> /var/log/fhATTRIBUTE
+ crm_attribute -N $attr_node -v $attr_value -n "$attr_name" -l $attr_store 2>>/var/log/fhATTRIBUTE; rc=$?
+ ;;
+ props )
+ echo "$dstr: SAPHanaTopology: crm_attribute -v $attr_value -n \"$attr_name\" -t crm_config -s SAPHanaSR" >> /var/log/fhATTRIBUTE
+ crm_attribute -v $attr_value -n "$attr_name" -t crm_config -s SAPHanaSR 2>>/var/log/fhATTRIBUTE; rc=$?
+ ;;
+ esac
else
super_ocf_log debug "DBG: LET attribute $attr_name for node ${attr_node} still be ${attr_value}"
rc=0
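The two helpers above distinguish three attribute stores: "reboot" and "forever" are per-node attributes (crm_attribute -N <node> -l <store>), while "props" lands in the cluster configuration (crm_attribute -t crm_config). A short sketch of reading the node-scoped stores the same way, with -d supplying a default when the attribute is not yet set; the node name "suse01" and the SID "ha1" are placeholders:

    # Illustrative only: a reboot-scoped attribute (transient, lost on node restart) ...
    crm_attribute -N suse01 -G -n "hana_ha1_sync_state" -l reboot -q -d ""
    # ... versus a forever-scoped attribute (persisted in the CIB across reboots).
    crm_attribute -N suse01 -G -n "hana_ha1_vhost" -l forever -q -d ""
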
@@ -299,7 +301,7 @@
#
# yes it is a clone config - check, if its configured well
#
- if [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ] ; then
+ if [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ] ; then
super_ocf_log err "ACT: Clone options misconfigured. (expect: clone_node_max=1)"
exit $OCF_ERR_CONFIGURED
fi
@@ -314,8 +316,8 @@
#
# function: sht_init - initialize variables for the resource agent
# params: -
-# globals: OCF_*(r), SID(w), sid(rw), sidadm(w), InstanceName(w), InstanceNr(w),
-# globals: meta_notify_master_uname(w), HANA_SR_TOLOPOGY(w), sr_name(w), remoteHost(w)
+# globals: OCF_*(r), SID(w), sid(rw), sidadm(w), InstanceName(w), InstanceNr(w),
+# globals: meta_notify_master_uname(w), HANA_SR_TOLOPOGY(w), sr_name(w), remoteHost(w)
# globals: ATTR_NAME_HANA_SYNC_STATUS(w), ATTR_NAME_HANA_PRIMARY_AT(w), ATTR_NAME_HANA_CLONE_STATE(w)
# globals: DIR_EXECUTABLE(w), SAPSTARTSRV(w), SAPCONTROL(w), DIR_PROFILE(w), SAPSTARTPROFILE(w), LD_LIBRARY_PATH(w), PATH(w), nodelist(w)
# sht_init : Define global variables with default values, if optional parameters are not set
@@ -327,6 +329,8 @@
local myInstanceName=""
local rc=$OCF_SUCCESS
local hdbANSWER=""
+ local siteID
+ local siteNAME
HOSTEXECNAME=saphostexec
USRSAP=/usr/sap
SAPSERVICE_PATH=${USRSAP}/sapservices
@@ -340,10 +344,9 @@
super_ocf_log debug "DBG2: Used new method to get SID ($SID) and InstanceNr ($InstanceNr)"
sid=$(echo "$SID" | tr [:upper:] [:lower:])
sidadm="${sid}adm"
- SAPHanaFilter="${OCF_RESKEY_SAPHanaFilter:-ra-act-dec-lpa}"
ocf_env=$(env | grep 'OCF_RESKEY_CRM')
super_ocf_log debug "DBG3: OCF: $ocf_env"
- ATTR_NAME_HANA_SYNC_STATUS=("hana_${sid}_sync_state" "reboot") # SOK, SFAIL, UNKNOWN?
+ ATTR_NAME_HANA_SYNC_STATUS=("hana_${sid}_sync_state" "reboot") # SOK, SFAIL, UNKNOWN?
ATTR_NAME_HANA_PRIMARY_AT=("hana_${sid}_primary_at" "reboot") # Not really used
ATTR_NAME_HANA_CLONE_STATE=("hana_${sid}_clone_state" "reboot") # UKNOWN?, DEMOTED, PROMOTED
ATTR_NAME_HANA_REMOTEHOST=("hana_${sid}_remoteHost" "forever")
@@ -352,8 +355,14 @@
ATTR_NAME_HANA_SRMODE=("hana_${sid}_srmode" "forever")
ATTR_NAME_HANA_VHOST=("hana_${sid}_vhost" "forever")
ATTR_NAME_HANA_STATUS=("hana_${sid}_status" "reboot")
-
+ #
+ # new "central" attributes
+ #
+ ATTR_NAME_HANA_FILTER=("hana_${sid}_glob_filter" "props" "ra-act-dec-lpa")
# optional OCF parameters, we try to guess which directories are correct
+
+ SAPHanaFilter=$(get_hana_attribute "X" ${ATTR_NAME_HANA_FILTER[@]})
+
if [ -z "$OCF_RESKEY_DIR_EXECUTABLE" ]
then
DIR_EXECUTABLE="/usr/sap/$SID/$InstanceName/exe"
@@ -387,19 +396,32 @@
# we need: mode=primary|sync|syncmem|...; site name=<site>; mapping/<me>=<site>/<node> (multiple lines)
case $(crm_attribute --type crm_config --name cluster-infrastructure -q) in
*corosync* ) nodelist=$(crm_node -l | awk '{ print $2 }');;
- *openais* ) nodelist=$(crm_node -l | awk '/member/ {print $2}');;
- *cman* ) nodelist=$(crm_node -l);;
+ *openais* ) nodelist=$(crm_node -l | awk '/member/ {print $2}');;
+ *cman* ) nodelist=$(crm_node -l);;
esac
#### SAP-CALL
- hdbANSWER=$(su - ${sidadm} -c "hdbnsutil -sr_state --sapcontrol=1" 2>/dev/null)
- super_ocf_log debug "DBG2: hdbANSWER=\$\(su - ${sidadm} -c \"hdbnsutil -sr_state --sapcontrol=1\"\)"
- site=$(echo "$hdbANSWER" | awk -F= '/site name/ {print $2}')
+ # hdbnsutil was a bit unstable in some tests so we recall the tool, if it fails to report the srmode
+ for i in 1 2 3 4 5 6 7 8 9; do
+ hdbANSWER=$(su - ${sidadm} -c "hdbnsutil -sr_state --sapcontrol=1" 2>/dev/null)
+ super_ocf_log debug "DBG2: hdbANSWER=\$\(su - ${sidadm} -c \"hdbnsutil -sr_state --sapcontrol=1\"\)"
+ srmode=$(echo "$hdbANSWER" | awk -F= '/mode/ {print $2}')
+ case "$srmode" in
+ primary | syncmem | sync | async | none )
+ # we can leave the loop as we already got a result
+ break
+ ;;
+ * )
+ # lets pause a bit to give hdbnsutil a chance to answer next time
+ sleep 2
+ ;;
+ esac
+ done
+ # TODO PRIO3: Implement a file lookup, if we did not get a result
+ siteID=$(echo "$hdbANSWER" | awk -F= '/site id/ {print $2}')
+ siteNAME=$(echo "$hdbANSWER" | awk -F= '/site name/ {print $2}')
+ site=$siteNAME
srmode=$(echo "$hdbANSWER" | awk -F= '/mode/ {print $2}')
- if [ $debug_attributes -eq 1 ]; then
- dstr=$(date)
- echo "$dstr: SAPHanaTopology: srmode=$srmode" >> /var/log/fhATTRIBUTE
- fi
- MAPPING=$(echo "$hdbANSWER" | awk -F[=/] '$1 == "mapping" && $3 != site { print $4 }' site=$site)
+ MAPPING=$(echo "$hdbANSWER" | awk -F[=/] '$1 ~ "mapping" && $3 !~ site { print $4 }' site=$site)
super_ocf_log debug "DBG: site=$site, mode=$srmode, MAPPING=$MAPPING"
#
# filter all non-cluster mappings
@@ -413,12 +435,12 @@
echo $hanaVHost;
fi;
done;
- done )
+ done )
super_ocf_log info "DEC: site=$site, mode=$srmode, MAPPING=$MAPPING, hanaRemoteHost=$hanaRemoteHost"
super_ocf_log debug "DBG: site=$site, mode=$srmode, MAPPING=$MAPPING, hanaRemoteHost=$hanaRemoteHost"
super_ocf_log info "FLOW $FUNCNAME rc=$OCF_SUCCESS"
return $OCF_SUCCESS
-}
+}

#
# function: check_for_primary - check if local SAP HANA is configured as primary
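The sr_state parsing above relies on the --sapcontrol=1 output of hdbnsutil being a flat key=value list. The exact output differs between HANA revisions, so the sample below is an assumption that merely matches what the awk patterns in this hunk expect (host and site names are invented):

    # Illustrative only: feed a fabricated sr_state answer through the same awk
    # extractions used in sht_init above.
    hdbANSWER=$(printf '%s\n' \
        "mode=syncmem" \
        "site id=2" \
        "site name=SITEB" \
        "mapping/suse02=SITEA/suse01" \
        "mapping/suse02=SITEB/suse02")
    echo "$hdbANSWER" | awk -F= '/mode/ {print $2}'        # -> syncmem
    echo "$hdbANSWER" | awk -F= '/site name/ {print $2}'   # -> SITEB
    echo "$hdbANSWER" | awk -F[=/] '$1 ~ "mapping" && $3 !~ site { print $4 }' site=SITEB   # -> suse01 (the remote host)
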
@@ -428,32 +450,30 @@
function check_for_primary() {
super_ocf_log info "FLOW $FUNCNAME ($*)"
local rc=0
- # DONE: Change stderr location!!
- #sidadm=lnxadm
- #node_status=$(check_for_primary_single)
- node_status=$srmode
- super_ocf_log debug "DBG2: check_for_primary: node_status=$node_status"
- super_ocf_log debug "DBG: check_for_primary: node_status=$node_status"
- for i in 1 2 3 4 5 6 7 8 9; do
- case "$node_status" in
- primary )
+ node_status=$srmode
+ super_ocf_log debug "DBG2: check_for_primary: node_status=$node_status"
+ super_ocf_log debug "DBG: check_for_primary: node_status=$node_status"
+ for i in 1 2 3 4 5 6 7 8 9; do
+ case "$node_status" in
+ primary )
super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_PRIMARY"
return $HANA_STATE_PRIMARY;;
syncmem | sync | async )
super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_SECONDARY"
return $HANA_STATE_SECONDARY;;
- none ) # have seen that mode on second side BEFEORE we registered it as replica
+ none ) # have seen that mode on second side BEFEORE we registered it as replica
super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_STANDALONE"
return $HANA_STATE_STANDALONE;;
* )
- super_ocf_log err "ACT: check_for_primary: we didn't expect node_status to be: <$node_status>"
- dump=$( echo $node_status | hexdump -C );
- super_ocf_log err "ACT: check_for_primary: we didn't expect node_status to be: DUMP <$dump>"
- #### SAP-CALL
- node_full_status=$(su - ${sidadm} -c "hdbnsutil -sr_state" 2>/dev/null )
- node_status=$(echo "$node_full_status" | awk '$1=="mode:" {print $2}')
- super_ocf_log info "DEC: check_for_primary: loop=$i: node_status=$node_status"
- # TODO: PRIO1: Maybe we need to keep the old value for P/S/N, if hdbnsutil just crashes
+ # TODO: PRIO1: Should we set SFAIL?
+ # TODO: PRIO2: Maybe we need to keep the old value for P/S/N, if hdbnsutil just crashes
+ dump=$( echo $node_status | hexdump -C );
+ super_ocf_log err "ACT: check_for_primary: we didn't expect node_status to be: DUMP: <$dump>"
+ #### SAP-CALL
+ node_full_status=$(su - ${sidadm} -c "hdbnsutil -sr_state" 2>/dev/null )
+ node_status=$(echo "$node_full_status" | awk '$1=="mode:" {print $2}')
+ super_ocf_log info "DEC: check_for_primary: loop=$i: node_status=$node_status"
+ # TODO: PRIO1: Maybe we need to keep the old value for P/S/N, if hdbnsutil just crashes
esac;
done
super_ocf_log info "FLOW $FUNCNAME rc=HANA_STATE_DEFECT"
@@ -464,7 +484,7 @@
#
# function: start_saphostagent
# params: -
-# globals:
+# globals: HOSTEXEC_PATH(r), HOSTEXEC_PROFILE_PATH(r)
#
function start_saphostagent()
{
@@ -478,7 +498,7 @@
#
# function: stop_saphostagent
# params: -
-# globals:
+# globals: HOSTEXEC_PATH(r)
#
function stop_saphostagent()
{
@@ -496,6 +516,8 @@
function check_saphostagent()
{
local rc=1
+ # TODO: PRIO3: should the path been removed like "saphostexec" instead of "/usr/sap/hostctrl/exe/saphostexec"
+ # or should we use ${HOSTEXEC_PATH} instead?
pgrep -f /usr/sap/hostctrl/exe/saphostexec; rc=$?
return $rc
}
@@ -509,15 +531,16 @@
# sht_start : Start the SAP HANA instance
#
function sht_start() {
-
super_ocf_log info "FLOW $FUNCNAME ($*)"

local rc=$OCF_NOT_RUNNING
local output=""
- local loopcount=0
+ local loopcount=0

- mkdir -p /var/lib/SAPHana
- touch /var/lib/SAPHana/SAPTopologyON
+ # TODO: PRIO3: move the string "$HA_RSCTMP/SAPHana/SAPTopologyON" to a variable
+ # TODO: PRIO3: move the file to the clusters tmp directory?
+ mkdir -p $HA_RSCTMP/SAPHana
+ touch $HA_RSCTMP/SAPHana/SAPTopologyON
if ! check_saphostagent; then
start_saphostagent
fi
@@ -532,16 +555,16 @@
# function: sht_stop - stop a hana instance
# params: -
# globals: OCF_*(r), SAPCONTROL(r), SID(r), InstanceName(r)
-# sht_stop: Stop the SAP instance
+# sht_stop: Stop the SAP HANA Topology Resource
#
function sht_stop() {
super_ocf_log info "FLOW $FUNCNAME ($*)"
local output=""
local rc=0

- rm /var/lib/SAPHana/SAPTopologyON
+ rm $HA_RSCTMP/SAPHana/SAPTopologyON
rc=$OCF_SUCCESS
-
+
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
return $rc
}
@@ -557,13 +580,13 @@
super_ocf_log info "FLOW $FUNCNAME ($*)"
local rc=0

- if [ -f /var/lib/SAPHana/SAPTopologyON ]; then
+ if [ -f $HA_RSCTMP/SAPHana/SAPTopologyON ]; then
rc=$OCF_SUCCESS
else
rc=$OCF_NOT_RUNNING
fi

- super_ocf_log info "FLOW $FUNCNAME rc=$rc"
+ super_ocf_log info "FLOW $FUNCNAME rc=$rc"
return $rc
}

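The sht_start/sht_stop/sht_monitor hunks above replace the hard-coded /var/lib/SAPHana marker with a file under $HA_RSCTMP, the runtime directory the OCF shell functions provide for resource agents (typically under /run, so it does not survive a reboot). A condensed sketch of the marker-file pattern, assuming ocf-shellfuncs has been sourced so HA_RSCTMP is set:

    # Illustrative only: the start/stop/monitor marker-file pattern used above.
    STATE_DIR="$HA_RSCTMP/SAPHana"
    STATE_FILE="$STATE_DIR/SAPTopologyON"

    topology_start()   { mkdir -p "$STATE_DIR" && touch "$STATE_FILE"; }
    topology_stop()    { rm -f "$STATE_FILE"; }
    topology_monitor() { [ -f "$STATE_FILE" ]; }   # exit 0 if "running", 1 otherwise
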
@@ -575,37 +598,37 @@
# sht_status: Lightweight check of SAP instance only with OS tools
#
function sht_status() {
- super_ocf_log info "FLOW $FUNCNAME ($*)"
- local rc=0
+ super_ocf_log info "FLOW $FUNCNAME ($*)"
+ local rc=0

- sht_monitor; rc=$?
- return $rc
+ sht_monitor; rc=$?
+ return $rc
}


#
# function: sht_validate - validation of (some) variables/parameters
# params: -
-# globals: OCF_*(r), SID(r), InstanceName(r), InstanceNr(r),
-# sht_validate: Check the symantic of the input parameters
+# globals: OCF_*(r), SID(r), InstanceName(r), InstanceNr(r),
+# sht_validate: Check the symantic of the input parameters
#
function sht_validate() {
- super_ocf_log info "FLOW $FUNCNAME ($*)"
- local rc=$OCF_SUCCESS
- if [ $(echo "$SID" | grep -c '^[A-Z][A-Z0-9][A-Z0-9]$') -ne 1 ]
- then
- super_ocf_log err "ACT: Parsing instance profile name: '$SID' is not a valid SID!"
- rc=$OCF_ERR_ARGS
- fi
+ super_ocf_log info "FLOW $FUNCNAME ($*)"
+ local rc=$OCF_SUCCESS
+ if [ $(echo "$SID" | grep -c '^[A-Z][A-Z0-9][A-Z0-9]$') -ne 1 ]
+ then
+ super_ocf_log err "ACT: Parsing instance profile name: '$SID' is not a valid SID!"
+ rc=$OCF_ERR_ARGS
+ fi

- if [ $(echo "$InstanceNr" | grep -c '^[0-9][0-9]$') -ne 1 ]
- then
- super_ocf_log err "ACT: Parsing instance profile name: '$InstanceNr' is not a valid instance number!"
- rc=$OCF_ERR_ARGS
- fi
+ if [ $(echo "$InstanceNr" | grep -c '^[0-9][0-9]$') -ne 1 ]
+ then
+ super_ocf_log err "ACT: Parsing instance profile name: '$InstanceNr' is not a valid instance number!"
+ rc=$OCF_ERR_ARGS
+ fi

- super_ocf_log info "FLOW $FUNCNAME rc=$rc"
- return $rc
+ super_ocf_log info "FLOW $FUNCNAME rc=$rc"
+ return $rc
}

#
@@ -661,15 +684,15 @@

if ocf_is_probe; then
super_ocf_log debug "DBG2: PROBE ONLY"
+ sht_monitor; rc=$?
else
super_ocf_log debug "DBG2: REGULAR MONITOR"
if ! check_saphostagent; then
start_saphostagent
fi
- fi
#
# First check, if we are PRIMARY or SECONDARY
- #
+ #
super_ocf_log debug "DBG2: HANA SID $SID"
super_ocf_log debug "DBG2: HANA InstanceName $InstanceName"
super_ocf_log debug "DBG2: HANA InstanceNr $InstanceNr"
@@ -721,8 +744,8 @@
set_hana_attribute ${NODENAME} "$site" ${ATTR_NAME_HANA_SITE[@]}
fi
case "$hanaPrim" in
- P ) ;;
- S ) # only secondary may propargate its sync status
+ P ) ;;
+ S ) # only secondary may propargate its sync status
case $(crm_attribute --type crm_config --name cluster-infrastructure -q) in
*corosync* ) nodelist=$(crm_node -l | awk '{ print $2 }');;
*openais* ) nodelist=$(crm_node -l | awk '/member/ {print $2}');;
@@ -732,8 +755,10 @@
for n in ${nodelist}; do
set_hana_attribute ${n} "$srmode" ${ATTR_NAME_HANA_SRMODE[@]}
done
- ;;
+ ;;
esac
+ #
+ fi # end ocf_is_NOT_probe
super_ocf_log info "FLOW $FUNCNAME rc=$rc"
return $rc
}
@@ -752,7 +777,7 @@
}

#
-# function: main - main function to operate
+# function: main - main function to operate
# params: ACTION
# globals: OCF_*(r), SID(w), sidadm(w), InstanceName(w), DIR_EXECUTABLE(w), ACTION(w), CLACT(w), ra_rc(rw), $0(r), %ENV(r)
#
@@ -763,7 +788,7 @@
InstanceName=""
InstanceNr=""
DIR_EXECUTABLE=""
-SAPHanaFilter="${OCF_RESKEY_SAPHanaFilter:-ra-act-dec-lpa}"
+SAPHanaFilter="ra-act-dec-lpa"
NODENAME=$(crm_node -n)

if [ $# -ne 1 ]
@@ -785,11 +810,11 @@
exit $OCF_SUCCESS;;
notify) sht_notify
exit $OCF_SUCCESS;;
- admin-setup) admin-setup
- exit $OCF_SUCCESS;;
+ admin-setup) admin-setup
+ exit $OCF_SUCCESS;;
*);;
esac
-sht_init
+sht_init

if ! ocf_is_root
then
@@ -810,7 +835,6 @@
exit $OCF_ERR_ARGS
fi

-
if is_clone
then
CLACT=_clone
@@ -830,12 +854,12 @@
sht_$ACTION$CLACT
ra_rc=$?
;;
- validate-all)
+ validate-all)
sht_validate
ra_rc=$?
;;
- *) # seams to be a unknown request
- sht_methods
+ *) # seams to be a unknown request
+ sht_methods
ra_rc=$OCF_ERR_UNIMPLEMENTED
;;
esac