diff --git a/.gitignore b/.gitignore index 302b56c..6e6662f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ SOURCES/nagios-agents-metadata-105ab8a.tar.gz -SOURCES/pacemaker-4b1f869.tar.gz +SOURCES/pacemaker-2deceaa.tar.gz diff --git a/.pacemaker.metadata b/.pacemaker.metadata index 1c52241..6a9af04 100644 --- a/.pacemaker.metadata +++ b/.pacemaker.metadata @@ -1,2 +1,2 @@ ea6c0a27fd0ae8ce02f84a11f08a0d79377041c3 SOURCES/nagios-agents-metadata-105ab8a.tar.gz -dfd19e7ec7aa96520f4948fc37d48ea69835bbdb SOURCES/pacemaker-4b1f869.tar.gz +78c94fdcf59cfb064d4433e1b8f71fd856eeec5f SOURCES/pacemaker-2deceaa.tar.gz diff --git a/SOURCES/001-rules.patch b/SOURCES/001-rules.patch new file mode 100644 index 0000000..0133975 --- /dev/null +++ b/SOURCES/001-rules.patch @@ -0,0 +1,4947 @@ +From 2f10dde2f2a0ac7a3d74cb2f398be1deaba75615 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 6 Apr 2020 11:22:50 -0400 +Subject: [PATCH 01/17] Feature: scheduler: Add new expression_type values. + +--- + include/crm/pengine/rules.h | 4 +++- + lib/pengine/rules.c | 6 ++++++ + 2 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h +index ebd3148..37f092b 100644 +--- a/include/crm/pengine/rules.h ++++ b/include/crm/pengine/rules.h +@@ -28,7 +28,9 @@ enum expression_type { + loc_expr, + role_expr, + time_expr, +- version_expr ++ version_expr, ++ rsc_expr, ++ op_expr + }; + + typedef struct pe_re_match_data { +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index fa9a222..130bada 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -189,6 +189,12 @@ find_expression_type(xmlNode * expr) + if (safe_str_eq(tag, "date_expression")) { + return time_expr; + ++ } else if (safe_str_eq(tag, "rsc_expression")) { ++ return rsc_expr; ++ ++ } else if (safe_str_eq(tag, "op_expression")) { ++ return op_expr; ++ + } else if (safe_str_eq(tag, XML_TAG_RULE)) { + return nested_rule; + +-- +1.8.3.1 + + +From bc7491e5226af2a2e7f1a9b2d61892d3af0767fe Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 3 Apr 2020 15:03:23 -0400 +Subject: [PATCH 02/17] Refactor: scheduler: Add new pe__eval_*_expr functions. + +These new functions all take the same input arguments - an xmlNodePtr +and a pe_rule_eval_data_t. This latter type holds all the parameters +that could possibly be useful for evaluating some rule. Most functions +will only need a few items out of this structure. + +Then, implement pe_test_*_expression in terms of these new functions. +--- + include/crm/pengine/common.h | 37 ++- + include/crm/pengine/rules.h | 13 - + include/crm/pengine/rules_internal.h | 5 + + lib/pengine/rules.c | 592 +++++++++++++++++++---------------- + 4 files changed, 363 insertions(+), 284 deletions(-) + +diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h +index 48c2b66..3a770b7 100644 +--- a/include/crm/pengine/common.h ++++ b/include/crm/pengine/common.h +@@ -1,5 +1,5 @@ + /* +- * Copyright 2004-2019 the Pacemaker project contributors ++ * Copyright 2004-2020 the Pacemaker project contributors + * + * The version control history for this file may have further details. 
+ * +@@ -15,6 +15,9 @@ extern "C" { + #endif + + # include <glib.h> ++# include <regex.h> ++ ++# include <crm/common/iso8601.h> + + extern gboolean was_processing_error; + extern gboolean was_processing_warning; +@@ -131,6 +134,38 @@ recovery2text(enum rsc_recovery_type type) + return "Unknown"; + } + ++typedef struct pe_re_match_data { ++ char *string; ++ int nregs; ++ regmatch_t *pmatch; ++} pe_re_match_data_t; ++ ++typedef struct pe_match_data { ++ pe_re_match_data_t *re; ++ GHashTable *params; ++ GHashTable *meta; ++} pe_match_data_t; ++ ++typedef struct pe_rsc_eval_data { ++ const char *standard; ++ const char *provider; ++ const char *agent; ++} pe_rsc_eval_data_t; ++ ++typedef struct pe_op_eval_data { ++ const char *op_name; ++ guint interval; ++} pe_op_eval_data_t; ++ ++typedef struct pe_rule_eval_data { ++ GHashTable *node_hash; ++ enum rsc_role_e role; ++ crm_time_t *now; ++ pe_match_data_t *match_data; ++ pe_rsc_eval_data_t *rsc_data; ++ pe_op_eval_data_t *op_data; ++} pe_rule_eval_data_t; ++ + #ifdef __cplusplus + } + #endif +diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h +index 37f092b..d7bdbf9 100644 +--- a/include/crm/pengine/rules.h ++++ b/include/crm/pengine/rules.h +@@ -15,7 +15,6 @@ extern "C" { + #endif + + # include <glib.h> +-# include <regex.h> + + # include <crm/crm.h> + # include <crm/common/iso8601.h> +@@ -33,18 +32,6 @@ enum expression_type { + op_expr + }; + +-typedef struct pe_re_match_data { +- char *string; +- int nregs; +- regmatch_t *pmatch; +-} pe_re_match_data_t; +- +-typedef struct pe_match_data { +- pe_re_match_data_t *re; +- GHashTable *params; +- GHashTable *meta; +-} pe_match_data_t; +- + enum expression_type find_expression_type(xmlNode * expr); + + gboolean pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, +diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h +index fd65c1e..8a22108 100644 +--- a/include/crm/pengine/rules_internal.h ++++ b/include/crm/pengine/rules_internal.h +@@ -21,6 +21,11 @@ void pe_free_alert_list(GListPtr alert_list); + + crm_time_t *pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec); + ++gboolean pe__eval_attr_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); ++int pe__eval_date_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data, ++ crm_time_t *next_change); ++gboolean pe__eval_role_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); ++ + int pe_eval_date_expression(xmlNode *time_expr, + crm_time_t *now, + crm_time_t *next_change); +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index 130bada..3f316c2 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -219,201 +219,34 @@ find_expression_type(xmlNode * expr) + } + + gboolean +-pe_test_role_expression(xmlNode * expr, enum rsc_role_e role, crm_time_t * now) ++pe_test_role_expression(xmlNode *expr, enum rsc_role_e role, crm_time_t *now) + { +- gboolean accept = FALSE; +- const char *op = NULL; +- const char *value = NULL; +- +- if (role == RSC_ROLE_UNKNOWN) { +- return accept; +- } +- +- value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); +- op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); +- +- if (safe_str_eq(op, "defined")) { +- if (role > RSC_ROLE_STARTED) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "not_defined")) { +- if (role < RSC_ROLE_SLAVE && role > RSC_ROLE_UNKNOWN) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "eq")) { +- if (text2role(value) == role) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "ne")) { +- // Test "ne" only with promotable clone roles +- if (role < RSC_ROLE_SLAVE && role > 
RSC_ROLE_UNKNOWN) { +- accept = FALSE; +- +- } else if (text2role(value) != role) { +- accept = TRUE; +- } +- } +- return accept; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = role, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ return pe__eval_role_expr(expr, &rule_data); + } + + gboolean + pe_test_attr_expression(xmlNode *expr, GHashTable *hash, crm_time_t *now, + pe_match_data_t *match_data) + { +- gboolean accept = FALSE; +- gboolean attr_allocated = FALSE; +- int cmp = 0; +- const char *h_val = NULL; +- GHashTable *table = NULL; +- +- const char *op = NULL; +- const char *type = NULL; +- const char *attr = NULL; +- const char *value = NULL; +- const char *value_source = NULL; +- +- attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE); +- op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); +- value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); +- type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); +- value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE); +- +- if (attr == NULL || op == NULL) { +- pe_err("Invalid attribute or operation in expression" +- " (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value)); +- return FALSE; +- } +- +- if (match_data) { +- if (match_data->re) { +- char *resolved_attr = pe_expand_re_matches(attr, match_data->re); +- +- if (resolved_attr) { +- attr = (const char *) resolved_attr; +- attr_allocated = TRUE; +- } +- } +- +- if (safe_str_eq(value_source, "param")) { +- table = match_data->params; +- } else if (safe_str_eq(value_source, "meta")) { +- table = match_data->meta; +- } +- } +- +- if (table) { +- const char *param_name = value; +- const char *param_value = NULL; +- +- if (param_name && param_name[0]) { +- if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) { +- value = param_value; +- } +- } +- } +- +- if (hash != NULL) { +- h_val = (const char *)g_hash_table_lookup(hash, attr); +- } +- +- if (attr_allocated) { +- free((char *)attr); +- attr = NULL; +- } +- +- if (value != NULL && h_val != NULL) { +- if (type == NULL) { +- if (safe_str_eq(op, "lt") +- || safe_str_eq(op, "lte") +- || safe_str_eq(op, "gt") +- || safe_str_eq(op, "gte")) { +- type = "number"; +- +- } else { +- type = "string"; +- } +- crm_trace("Defaulting to %s based comparison for '%s' op", type, op); +- } +- +- if (safe_str_eq(type, "string")) { +- cmp = strcasecmp(h_val, value); +- +- } else if (safe_str_eq(type, "number")) { +- int h_val_f = crm_parse_int(h_val, NULL); +- int value_f = crm_parse_int(value, NULL); +- +- if (h_val_f < value_f) { +- cmp = -1; +- } else if (h_val_f > value_f) { +- cmp = 1; +- } else { +- cmp = 0; +- } +- +- } else if (safe_str_eq(type, "version")) { +- cmp = compare_version(h_val, value); +- +- } +- +- } else if (value == NULL && h_val == NULL) { +- cmp = 0; +- } else if (value == NULL) { +- cmp = 1; +- } else { +- cmp = -1; +- } +- +- if (safe_str_eq(op, "defined")) { +- if (h_val != NULL) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "not_defined")) { +- if (h_val == NULL) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "eq")) { +- if ((h_val == value) || cmp == 0) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "ne")) { +- if ((h_val == NULL && value != NULL) +- || (h_val != NULL && value == NULL) +- || cmp != 0) { +- accept = TRUE; +- } +- +- } else if (value == NULL || h_val == NULL) { +- // The comparison is meaningless from this point on +- accept = FALSE; +- +- } else if (safe_str_eq(op, 
"lt")) { +- if (cmp < 0) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "lte")) { +- if (cmp <= 0) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "gt")) { +- if (cmp > 0) { +- accept = TRUE; +- } +- +- } else if (safe_str_eq(op, "gte")) { +- if (cmp >= 0) { +- accept = TRUE; +- } +- } +- +- return accept; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = match_data, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ return pe__eval_attr_expr(expr, &rule_data); + } + + /* As per the nethack rules: +@@ -587,10 +420,18 @@ pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec) + * \return TRUE if date expression is in effect at given time, FALSE otherwise + */ + gboolean +-pe_test_date_expression(xmlNode *time_expr, crm_time_t *now, +- crm_time_t *next_change) ++pe_test_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) + { +- switch (pe_eval_date_expression(time_expr, now, next_change)) { ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ switch (pe__eval_date_expr(expr, &rule_data, next_change)) { + case pcmk_rc_within_range: + case pcmk_rc_ok: + return TRUE; +@@ -623,86 +464,18 @@ crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t) + * \return Standard Pacemaker return code + */ + int +-pe_eval_date_expression(xmlNode *time_expr, crm_time_t *now, +- crm_time_t *next_change) ++pe_eval_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) + { +- crm_time_t *start = NULL; +- crm_time_t *end = NULL; +- const char *value = NULL; +- const char *op = crm_element_value(time_expr, "operation"); +- +- xmlNode *duration_spec = NULL; +- xmlNode *date_spec = NULL; +- +- // "undetermined" will also be returned for parsing errors +- int rc = pcmk_rc_undetermined; +- +- crm_trace("Testing expression: %s", ID(time_expr)); +- +- duration_spec = first_named_child(time_expr, "duration"); +- date_spec = first_named_child(time_expr, "date_spec"); +- +- value = crm_element_value(time_expr, "start"); +- if (value != NULL) { +- start = crm_time_new(value); +- } +- value = crm_element_value(time_expr, "end"); +- if (value != NULL) { +- end = crm_time_new(value); +- } +- +- if (start != NULL && end == NULL && duration_spec != NULL) { +- end = pe_parse_xml_duration(start, duration_spec); +- } +- +- if ((op == NULL) || safe_str_eq(op, "in_range")) { +- if ((start == NULL) && (end == NULL)) { +- // in_range requires at least one of start or end +- } else if ((start != NULL) && (crm_time_compare(now, start) < 0)) { +- rc = pcmk_rc_before_range; +- crm_time_set_if_earlier(next_change, start); +- } else if ((end != NULL) && (crm_time_compare(now, end) > 0)) { +- rc = pcmk_rc_after_range; +- } else { +- rc = pcmk_rc_within_range; +- if (end && next_change) { +- // Evaluation doesn't change until second after end +- crm_time_add_seconds(end, 1); +- crm_time_set_if_earlier(next_change, end); +- } +- } +- +- } else if (safe_str_eq(op, "date_spec")) { +- rc = pe_cron_range_satisfied(now, date_spec); +- // @TODO set next_change appropriately +- +- } else if (safe_str_eq(op, "gt")) { +- if (start == NULL) { +- // gt requires start +- } else if (crm_time_compare(now, start) > 0) { +- rc = pcmk_rc_within_range; +- } else { +- rc = pcmk_rc_before_range; +- +- // Evaluation doesn't change until second after start +- crm_time_add_seconds(start, 1); +- 
crm_time_set_if_earlier(next_change, start); +- } +- +- } else if (safe_str_eq(op, "lt")) { +- if (end == NULL) { +- // lt requires end +- } else if (crm_time_compare(now, end) < 0) { +- rc = pcmk_rc_within_range; +- crm_time_set_if_earlier(next_change, end); +- } else { +- rc = pcmk_rc_after_range; +- } +- } +- +- crm_time_free(start); +- crm_time_free(end); +- return rc; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ return pe__eval_date_expr(expr, &rule_data, next_change); + } + + // Information about a block of nvpair elements +@@ -1111,6 +884,285 @@ pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version + } + #endif + ++gboolean ++pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) ++{ ++ gboolean accept = FALSE; ++ gboolean attr_allocated = FALSE; ++ int cmp = 0; ++ const char *h_val = NULL; ++ GHashTable *table = NULL; ++ ++ const char *op = NULL; ++ const char *type = NULL; ++ const char *attr = NULL; ++ const char *value = NULL; ++ const char *value_source = NULL; ++ ++ attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE); ++ op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); ++ value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); ++ type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); ++ value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE); ++ ++ if (attr == NULL || op == NULL) { ++ pe_err("Invalid attribute or operation in expression" ++ " (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value)); ++ return FALSE; ++ } ++ ++ if (rule_data->match_data) { ++ if (rule_data->match_data->re) { ++ char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re); ++ ++ if (resolved_attr) { ++ attr = (const char *) resolved_attr; ++ attr_allocated = TRUE; ++ } ++ } ++ ++ if (safe_str_eq(value_source, "param")) { ++ table = rule_data->match_data->params; ++ } else if (safe_str_eq(value_source, "meta")) { ++ table = rule_data->match_data->meta; ++ } ++ } ++ ++ if (table) { ++ const char *param_name = value; ++ const char *param_value = NULL; ++ ++ if (param_name && param_name[0]) { ++ if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) { ++ value = param_value; ++ } ++ } ++ } ++ ++ if (rule_data->node_hash != NULL) { ++ h_val = (const char *)g_hash_table_lookup(rule_data->node_hash, attr); ++ } ++ ++ if (attr_allocated) { ++ free((char *)attr); ++ attr = NULL; ++ } ++ ++ if (value != NULL && h_val != NULL) { ++ if (type == NULL) { ++ if (safe_str_eq(op, "lt") ++ || safe_str_eq(op, "lte") ++ || safe_str_eq(op, "gt") ++ || safe_str_eq(op, "gte")) { ++ type = "number"; ++ ++ } else { ++ type = "string"; ++ } ++ crm_trace("Defaulting to %s based comparison for '%s' op", type, op); ++ } ++ ++ if (safe_str_eq(type, "string")) { ++ cmp = strcasecmp(h_val, value); ++ ++ } else if (safe_str_eq(type, "number")) { ++ int h_val_f = crm_parse_int(h_val, NULL); ++ int value_f = crm_parse_int(value, NULL); ++ ++ if (h_val_f < value_f) { ++ cmp = -1; ++ } else if (h_val_f > value_f) { ++ cmp = 1; ++ } else { ++ cmp = 0; ++ } ++ ++ } else if (safe_str_eq(type, "version")) { ++ cmp = compare_version(h_val, value); ++ ++ } ++ ++ } else if (value == NULL && h_val == NULL) { ++ cmp = 0; ++ } else if (value == NULL) { ++ cmp = 1; ++ } else { ++ cmp = -1; ++ } ++ ++ if (safe_str_eq(op, "defined")) { ++ if (h_val != NULL) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, 
"not_defined")) { ++ if (h_val == NULL) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "eq")) { ++ if ((h_val == value) || cmp == 0) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "ne")) { ++ if ((h_val == NULL && value != NULL) ++ || (h_val != NULL && value == NULL) ++ || cmp != 0) { ++ accept = TRUE; ++ } ++ ++ } else if (value == NULL || h_val == NULL) { ++ // The comparison is meaningless from this point on ++ accept = FALSE; ++ ++ } else if (safe_str_eq(op, "lt")) { ++ if (cmp < 0) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "lte")) { ++ if (cmp <= 0) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "gt")) { ++ if (cmp > 0) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "gte")) { ++ if (cmp >= 0) { ++ accept = TRUE; ++ } ++ } ++ ++ return accept; ++} ++ ++int ++pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) ++{ ++ crm_time_t *start = NULL; ++ crm_time_t *end = NULL; ++ const char *value = NULL; ++ const char *op = crm_element_value(expr, "operation"); ++ ++ xmlNode *duration_spec = NULL; ++ xmlNode *date_spec = NULL; ++ ++ // "undetermined" will also be returned for parsing errors ++ int rc = pcmk_rc_undetermined; ++ ++ crm_trace("Testing expression: %s", ID(expr)); ++ ++ duration_spec = first_named_child(expr, "duration"); ++ date_spec = first_named_child(expr, "date_spec"); ++ ++ value = crm_element_value(expr, "start"); ++ if (value != NULL) { ++ start = crm_time_new(value); ++ } ++ value = crm_element_value(expr, "end"); ++ if (value != NULL) { ++ end = crm_time_new(value); ++ } ++ ++ if (start != NULL && end == NULL && duration_spec != NULL) { ++ end = pe_parse_xml_duration(start, duration_spec); ++ } ++ ++ if ((op == NULL) || safe_str_eq(op, "in_range")) { ++ if ((start == NULL) && (end == NULL)) { ++ // in_range requires at least one of start or end ++ } else if ((start != NULL) && (crm_time_compare(rule_data->now, start) < 0)) { ++ rc = pcmk_rc_before_range; ++ crm_time_set_if_earlier(next_change, start); ++ } else if ((end != NULL) && (crm_time_compare(rule_data->now, end) > 0)) { ++ rc = pcmk_rc_after_range; ++ } else { ++ rc = pcmk_rc_within_range; ++ if (end && next_change) { ++ // Evaluation doesn't change until second after end ++ crm_time_add_seconds(end, 1); ++ crm_time_set_if_earlier(next_change, end); ++ } ++ } ++ ++ } else if (safe_str_eq(op, "date_spec")) { ++ rc = pe_cron_range_satisfied(rule_data->now, date_spec); ++ // @TODO set next_change appropriately ++ ++ } else if (safe_str_eq(op, "gt")) { ++ if (start == NULL) { ++ // gt requires start ++ } else if (crm_time_compare(rule_data->now, start) > 0) { ++ rc = pcmk_rc_within_range; ++ } else { ++ rc = pcmk_rc_before_range; ++ ++ // Evaluation doesn't change until second after start ++ crm_time_add_seconds(start, 1); ++ crm_time_set_if_earlier(next_change, start); ++ } ++ ++ } else if (safe_str_eq(op, "lt")) { ++ if (end == NULL) { ++ // lt requires end ++ } else if (crm_time_compare(rule_data->now, end) < 0) { ++ rc = pcmk_rc_within_range; ++ crm_time_set_if_earlier(next_change, end); ++ } else { ++ rc = pcmk_rc_after_range; ++ } ++ } ++ ++ crm_time_free(start); ++ crm_time_free(end); ++ return rc; ++} ++ ++gboolean ++pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) ++{ ++ gboolean accept = FALSE; ++ const char *op = NULL; ++ const char *value = NULL; ++ ++ if (rule_data->role == RSC_ROLE_UNKNOWN) { ++ return accept; ++ } ++ ++ value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); ++ op = 
crm_element_value(expr, XML_EXPR_ATTR_OPERATION); ++ ++ if (safe_str_eq(op, "defined")) { ++ if (rule_data->role > RSC_ROLE_STARTED) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "not_defined")) { ++ if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "eq")) { ++ if (text2role(value) == rule_data->role) { ++ accept = TRUE; ++ } ++ ++ } else if (safe_str_eq(op, "ne")) { ++ // Test "ne" only with promotable clone roles ++ if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) { ++ accept = FALSE; ++ ++ } else if (text2role(value) != rule_data->role) { ++ accept = TRUE; ++ } ++ } ++ return accept; ++} ++ + // Deprecated functions kept only for backward API compatibility + gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now); + gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role, +-- +1.8.3.1 + + +From 56a1337a54f3ba8a175ff3252658e1e43f7c670b Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 28 Apr 2020 14:34:40 -0400 +Subject: [PATCH 03/17] Feature: scheduler: Add new rule tests for op_defaults + and rsc_defaults. + +These are like all the other rule evaluating functions, but they do not +have any wrappers for the older style API. +--- + include/crm/pengine/rules_internal.h | 2 ++ + lib/pengine/rules.c | 68 ++++++++++++++++++++++++++++++++++++ + 2 files changed, 70 insertions(+) + +diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h +index 8a22108..f60263a 100644 +--- a/include/crm/pengine/rules_internal.h ++++ b/include/crm/pengine/rules_internal.h +@@ -24,7 +24,9 @@ crm_time_t *pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec); + gboolean pe__eval_attr_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); + int pe__eval_date_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data, + crm_time_t *next_change); ++gboolean pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data); + gboolean pe__eval_role_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); ++gboolean pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data); + + int pe_eval_date_expression(xmlNode *time_expr, + crm_time_t *now, +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index 3f316c2..a5af57a 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -1123,6 +1123,38 @@ pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t * + } + + gboolean ++pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) { ++ const char *name = crm_element_value(expr, XML_NVPAIR_ATTR_NAME); ++ const char *interval_s = crm_element_value(expr, XML_LRM_ATTR_INTERVAL); ++ guint interval; ++ ++ crm_trace("Testing op_defaults expression: %s", ID(expr)); ++ ++ if (rule_data->op_data == NULL) { ++ crm_trace("No operations data provided"); ++ return FALSE; ++ } ++ ++ interval = crm_parse_interval_spec(interval_s); ++ if (interval == 0 && errno != 0) { ++ crm_trace("Could not parse interval: %s", interval_s); ++ return FALSE; ++ } ++ ++ if (interval_s != NULL && interval != rule_data->op_data->interval) { ++ crm_trace("Interval doesn't match: %d != %d", interval, rule_data->op_data->interval); ++ return FALSE; ++ } ++ ++ if (!crm_str_eq(name, rule_data->op_data->op_name, TRUE)) { ++ crm_trace("Name doesn't match: %s != %s", name, rule_data->op_data->op_name); ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++gboolean + pe__eval_role_expr(xmlNodePtr expr, 
pe_rule_eval_data_t *rule_data) + { + gboolean accept = FALSE; +@@ -1163,6 +1195,42 @@ pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) + return accept; + } + ++gboolean ++pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) ++{ ++ const char *class = crm_element_value(expr, XML_AGENT_ATTR_CLASS); ++ const char *provider = crm_element_value(expr, XML_AGENT_ATTR_PROVIDER); ++ const char *type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); ++ ++ crm_trace("Testing rsc_defaults expression: %s", ID(expr)); ++ ++ if (rule_data->rsc_data == NULL) { ++ crm_trace("No resource data provided"); ++ return FALSE; ++ } ++ ++ if (class != NULL && ++ !crm_str_eq(class, rule_data->rsc_data->standard, TRUE)) { ++ crm_trace("Class doesn't match: %s != %s", class, rule_data->rsc_data->standard); ++ return FALSE; ++ } ++ ++ if ((provider == NULL && rule_data->rsc_data->provider != NULL) || ++ (provider != NULL && rule_data->rsc_data->provider == NULL) || ++ !crm_str_eq(provider, rule_data->rsc_data->provider, TRUE)) { ++ crm_trace("Provider doesn't match: %s != %s", provider, rule_data->rsc_data->provider); ++ return FALSE; ++ } ++ ++ if (type != NULL && ++ !crm_str_eq(type, rule_data->rsc_data->agent, TRUE)) { ++ crm_trace("Agent doesn't match: %s != %s", type, rule_data->rsc_data->agent); ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ + // Deprecated functions kept only for backward API compatibility + gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now); + gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role, +-- +1.8.3.1 + + +From 5a4da3f77feee0d3bac50e9adc4eb4b35724dfb2 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 28 Apr 2020 14:41:08 -0400 +Subject: [PATCH 04/17] Refactor: scheduler: Reimplement core rule eval + functions. + +The core functions of pe_evaluate_rules, pe_test_rule, and +pe_test_expression have been turned into new, similarly named functions +that take a pe_rule_eval_data_t as an argument. The old ones still +exist as wrappers around the new ones. +--- + include/crm/pengine/rules.h | 7 ++ + lib/pengine/rules.c | 259 ++++++++++++++++++++++++++------------------ + 2 files changed, 162 insertions(+), 104 deletions(-) + +diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h +index d7bdbf9..a74c629 100644 +--- a/include/crm/pengine/rules.h ++++ b/include/crm/pengine/rules.h +@@ -61,6 +61,13 @@ GHashTable *pe_unpack_versioned_parameters(xmlNode *versioned_params, const char + + char *pe_expand_re_matches(const char *string, pe_re_match_data_t * match_data); + ++gboolean pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, ++ crm_time_t *next_change); ++gboolean pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, ++ crm_time_t *next_change); ++gboolean pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, ++ crm_time_t *next_change); ++ + #ifndef PCMK__NO_COMPAT + /* Everything here is deprecated and kept only for public API backward + * compatibility. It will be moved to compatibility.h when 2.1.0 is released. 
+diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index a5af57a..a6353ef 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -38,25 +38,16 @@ gboolean + pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now, + crm_time_t *next_change) + { +- // If there are no rules, pass by default +- gboolean ruleset_default = TRUE; +- +- for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE); +- rule != NULL; rule = crm_next_same_xml(rule)) { ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + +- ruleset_default = FALSE; +- if (pe_test_rule(rule, node_hash, RSC_ROLE_UNKNOWN, now, next_change, +- NULL)) { +- /* Only the deprecated "lifetime" element of location constraints +- * may contain more than one rule at the top level -- the schema +- * limits a block of nvpairs to a single top-level rule. So, this +- * effectively means that a lifetime is active if any rule it +- * contains is active. +- */ +- return TRUE; +- } +- } +- return ruleset_default; ++ return pe_eval_rules(ruleset, &rule_data, next_change); + } + + gboolean +@@ -64,44 +55,16 @@ pe_test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role, + crm_time_t *now, crm_time_t *next_change, + pe_match_data_t *match_data) + { +- xmlNode *expr = NULL; +- gboolean test = TRUE; +- gboolean empty = TRUE; +- gboolean passed = TRUE; +- gboolean do_and = TRUE; +- const char *value = NULL; +- +- rule = expand_idref(rule, NULL); +- value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP); +- if (safe_str_eq(value, "or")) { +- do_and = FALSE; +- passed = FALSE; +- } +- +- crm_trace("Testing rule %s", ID(rule)); +- for (expr = __xml_first_child_element(rule); expr != NULL; +- expr = __xml_next_element(expr)) { +- +- test = pe_test_expression(expr, node_hash, role, now, next_change, +- match_data); +- empty = FALSE; +- +- if (test && do_and == FALSE) { +- crm_trace("Expression %s/%s passed", ID(rule), ID(expr)); +- return TRUE; +- +- } else if (test == FALSE && do_and) { +- crm_trace("Expression %s/%s failed", ID(rule), ID(expr)); +- return FALSE; +- } +- } +- +- if (empty) { +- crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule)); +- } ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = role, ++ .now = now, ++ .match_data = match_data, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + +- crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed"); +- return passed; ++ return pe_eval_expr(rule, &rule_data, next_change); + } + + /*! 
+@@ -125,56 +88,16 @@ pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role, + crm_time_t *now, crm_time_t *next_change, + pe_match_data_t *match_data) + { +- gboolean accept = FALSE; +- const char *uname = NULL; +- +- switch (find_expression_type(expr)) { +- case nested_rule: +- accept = pe_test_rule(expr, node_hash, role, now, next_change, +- match_data); +- break; +- case attr_expr: +- case loc_expr: +- /* these expressions can never succeed if there is +- * no node to compare with +- */ +- if (node_hash != NULL) { +- accept = pe_test_attr_expression(expr, node_hash, now, match_data); +- } +- break; +- +- case time_expr: +- accept = pe_test_date_expression(expr, now, next_change); +- break; +- +- case role_expr: +- accept = pe_test_role_expression(expr, role, now); +- break; +- +-#if ENABLE_VERSIONED_ATTRS +- case version_expr: +- if (node_hash && g_hash_table_lookup_extended(node_hash, +- CRM_ATTR_RA_VERSION, +- NULL, NULL)) { +- accept = pe_test_attr_expression(expr, node_hash, now, NULL); +- } else { +- // we are going to test it when we have ra-version +- accept = TRUE; +- } +- break; +-#endif +- +- default: +- CRM_CHECK(FALSE /* bad type */ , return FALSE); +- accept = FALSE; +- } +- if (node_hash) { +- uname = g_hash_table_lookup(node_hash, CRM_ATTR_UNAME); +- } ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = role, ++ .now = now, ++ .match_data = match_data, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + +- crm_trace("Expression %s %s on %s", +- ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes"); +- return accept; ++ return pe_eval_subexpr(expr, &rule_data, next_change); + } + + enum expression_type +@@ -885,6 +808,134 @@ pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version + #endif + + gboolean ++pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) ++{ ++ // If there are no rules, pass by default ++ gboolean ruleset_default = TRUE; ++ ++ for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE); ++ rule != NULL; rule = crm_next_same_xml(rule)) { ++ ++ ruleset_default = FALSE; ++ if (pe_eval_expr(rule, rule_data, next_change)) { ++ /* Only the deprecated "lifetime" element of location constraints ++ * may contain more than one rule at the top level -- the schema ++ * limits a block of nvpairs to a single top-level rule. So, this ++ * effectively means that a lifetime is active if any rule it ++ * contains is active. 
++ */ ++ return TRUE; ++ } ++ } ++ ++ return ruleset_default; ++} ++ ++gboolean ++pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) ++{ ++ xmlNode *expr = NULL; ++ gboolean test = TRUE; ++ gboolean empty = TRUE; ++ gboolean passed = TRUE; ++ gboolean do_and = TRUE; ++ const char *value = NULL; ++ ++ rule = expand_idref(rule, NULL); ++ value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP); ++ if (safe_str_eq(value, "or")) { ++ do_and = FALSE; ++ passed = FALSE; ++ } ++ ++ crm_trace("Testing rule %s", ID(rule)); ++ for (expr = __xml_first_child_element(rule); expr != NULL; ++ expr = __xml_next_element(expr)) { ++ ++ test = pe_eval_subexpr(expr, rule_data, next_change); ++ empty = FALSE; ++ ++ if (test && do_and == FALSE) { ++ crm_trace("Expression %s/%s passed", ID(rule), ID(expr)); ++ return TRUE; ++ ++ } else if (test == FALSE && do_and) { ++ crm_trace("Expression %s/%s failed", ID(rule), ID(expr)); ++ return FALSE; ++ } ++ } ++ ++ if (empty) { ++ crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule)); ++ } ++ ++ crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed"); ++ return passed; ++} ++ ++gboolean ++pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) ++{ ++ gboolean accept = FALSE; ++ const char *uname = NULL; ++ ++ switch (find_expression_type(expr)) { ++ case nested_rule: ++ accept = pe_eval_expr(expr, rule_data, next_change); ++ break; ++ case attr_expr: ++ case loc_expr: ++ /* these expressions can never succeed if there is ++ * no node to compare with ++ */ ++ if (rule_data->node_hash != NULL) { ++ accept = pe__eval_attr_expr(expr, rule_data); ++ } ++ break; ++ ++ case time_expr: ++ accept = pe_test_date_expression(expr, rule_data->now, next_change); ++ break; ++ ++ case role_expr: ++ accept = pe__eval_role_expr(expr, rule_data); ++ break; ++ ++ case rsc_expr: ++ accept = pe__eval_rsc_expr(expr, rule_data); ++ break; ++ ++ case op_expr: ++ accept = pe__eval_op_expr(expr, rule_data); ++ break; ++ ++#if ENABLE_VERSIONED_ATTRS ++ case version_expr: ++ if (rule_data->node_hash && ++ g_hash_table_lookup_extended(rule_data->node_hash, ++ CRM_ATTR_RA_VERSION, NULL, NULL)) { ++ accept = pe__eval_attr_expr(expr, rule_data); ++ } else { ++ // we are going to test it when we have ra-version ++ accept = TRUE; ++ } ++ break; ++#endif ++ ++ default: ++ CRM_CHECK(FALSE /* bad type */ , return FALSE); ++ accept = FALSE; ++ } ++ if (rule_data->node_hash) { ++ uname = g_hash_table_lookup(rule_data->node_hash, CRM_ATTR_UNAME); ++ } ++ ++ crm_trace("Expression %s %s on %s", ++ ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes"); ++ return accept; ++} ++ ++gboolean + pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) + { + gboolean accept = FALSE; +-- +1.8.3.1 + + +From ea6318252164578fd27dcef657e80f5225337a4b Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 7 Apr 2020 15:57:06 -0400 +Subject: [PATCH 05/17] Refactor: scheduler: Add rule_data to unpack_data_s. + +This is just to get rid of a couple extra arguments to some internal +functions and make them look like the external functions. 
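[Illustration, not part of the patch: after this commit, the unpack helpers reach all rule parameters through one pointer. A sketch of the resulting struct, reconstructed from the diff below:

    typedef struct unpack_data_s {
        gboolean overwrite;              /* whether to replace existing values with the same name */
        void *hash;                      /* where extracted name/value pairs are stored */
        crm_time_t *next_change;         /* if not NULL, set to when rule evaluation will change */
        pe_rule_eval_data_t *rule_data;  /* replaces the former node_hash and now members */
        xmlNode *top;                    /* XML document root, used to expand id-refs */
    } unpack_data_t;]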
+--- + lib/pengine/rules.c | 65 ++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 44 insertions(+), 21 deletions(-) + +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index a6353ef..2709d68 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -555,10 +555,9 @@ add_versioned_attributes(xmlNode * attr_set, xmlNode * versioned_attrs) + + typedef struct unpack_data_s { + gboolean overwrite; +- GHashTable *node_hash; + void *hash; +- crm_time_t *now; + crm_time_t *next_change; ++ pe_rule_eval_data_t *rule_data; + xmlNode *top; + } unpack_data_t; + +@@ -568,14 +567,14 @@ unpack_attr_set(gpointer data, gpointer user_data) + sorted_set_t *pair = data; + unpack_data_t *unpack_data = user_data; + +- if (!pe_evaluate_rules(pair->attr_set, unpack_data->node_hash, +- unpack_data->now, unpack_data->next_change)) { ++ if (!pe_eval_rules(pair->attr_set, unpack_data->rule_data, ++ unpack_data->next_change)) { + return; + } + + #if ENABLE_VERSIONED_ATTRS +- if (get_versioned_rule(pair->attr_set) && !(unpack_data->node_hash && +- g_hash_table_lookup_extended(unpack_data->node_hash, ++ if (get_versioned_rule(pair->attr_set) && !(unpack_data->rule_data->node_hash && ++ g_hash_table_lookup_extended(unpack_data->rule_data->node_hash, + CRM_ATTR_RA_VERSION, NULL, NULL))) { + // we haven't actually tested versioned expressions yet + return; +@@ -593,8 +592,8 @@ unpack_versioned_attr_set(gpointer data, gpointer user_data) + sorted_set_t *pair = data; + unpack_data_t *unpack_data = user_data; + +- if (pe_evaluate_rules(pair->attr_set, unpack_data->node_hash, +- unpack_data->now, unpack_data->next_change)) { ++ if (pe_eval_rules(pair->attr_set, unpack_data->rule_data, ++ unpack_data->next_change)) { + add_versioned_attributes(pair->attr_set, unpack_data->hash); + } + } +@@ -658,19 +657,17 @@ make_pairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, + * \param[in] top XML document root (used to expand id-ref's) + * \param[in] xml_obj XML element containing blocks of nvpair elements + * \param[in] set_name If not NULL, only use blocks of this element type +- * \param[in] node_hash Node attributes to use when evaluating rules + * \param[out] hash Where to store extracted name/value pairs + * \param[in] always_first If not NULL, process block with this ID first + * \param[in] overwrite Whether to replace existing values with same name +- * \param[in] now Time to use when evaluating rules ++ * \param[in] rule_data Matching parameters to use when unpacking + * \param[out] next_change If not NULL, set to when rule evaluation will change + * \param[in] unpack_func Function to call to unpack each block + */ + static void + unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, +- GHashTable *node_hash, void *hash, +- const char *always_first, gboolean overwrite, +- crm_time_t *now, crm_time_t *next_change, ++ void *hash, const char *always_first, gboolean overwrite, ++ pe_rule_eval_data_t *rule_data, crm_time_t *next_change, + GFunc unpack_func) + { + GList *pairs = make_pairs(top, xml_obj, set_name, always_first); +@@ -678,11 +675,10 @@ unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, + if (pairs) { + unpack_data_t data = { + .hash = hash, +- .node_hash = node_hash, +- .now = now, + .overwrite = overwrite, + .next_change = next_change, + .top = top, ++ .rule_data = rule_data + }; + + g_list_foreach(pairs, unpack_func, &data); +@@ -709,8 +705,17 @@ pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, + const char 
*always_first, gboolean overwrite, + crm_time_t *now, crm_time_t *next_change) + { +- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, always_first, +- overwrite, now, next_change, unpack_attr_set); ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, ++ overwrite, &rule_data, next_change, unpack_attr_set); + } + + #if ENABLE_VERSIONED_ATTRS +@@ -720,8 +725,17 @@ pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj, + xmlNode *hash, crm_time_t *now, + crm_time_t *next_change) + { +- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, NULL, FALSE, +- now, next_change, unpack_versioned_attr_set); ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, ++ &rule_data, next_change, unpack_versioned_attr_set); + } + #endif + +@@ -1366,6 +1380,15 @@ unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name, + const char *always_first, gboolean overwrite, + crm_time_t *now) + { +- unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, always_first, +- overwrite, now, NULL, unpack_attr_set); ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = node_hash, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, ++ overwrite, &rule_data, NULL, unpack_attr_set); + } +-- +1.8.3.1 + + +From 54646db6f5e4f1bb141b35798bcad5c3cc025afe Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 8 Apr 2020 10:41:41 -0400 +Subject: [PATCH 06/17] Refactor: scheduler: Change args to + pe__unpack_dataset_nvpairs. + +It should now take a pe_rule_eval_data_t instead of various separate +arguments. This will allow passing further data that needs to be tested +against in the future (such as rsc_defaults and op_defaults). It's also +convenient to make versions of pe_unpack_nvpairs and +pe_unpack_versioned_attributes that take the same arguments. + +Then, adapt callers of pe__unpack_dataset_nvpairs to pass the new +argument. 
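[Illustration, not part of the patch, assembled from the hunks below: the caller-side pattern this commit establishes. Each caller builds one pe_rule_eval_data_t and passes it wherever separate node_hash/now arguments went before, as in get_rsc_attributes():

    pe_rule_eval_data_t rule_data = {
        .node_hash = node ? node->details->attrs : NULL,  /* attributes to match, may be NULL */
        .role = RSC_ROLE_UNKNOWN,   /* no role-based matching */
        .now = data_set->now,       /* evaluation time for date expressions */
        .match_data = NULL,         /* no regex/parameter substitution */
        .rsc_data = NULL,           /* not matching rsc_expression */
        .op_data = NULL             /* not matching op_expression */
    };

    pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
                               meta_hash, NULL, FALSE, data_set);]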
+--- + include/crm/pengine/internal.h | 2 +- + include/crm/pengine/rules.h | 9 +++++++ + lib/pengine/complex.c | 41 ++++++++++++++++++++++------- + lib/pengine/rules.c | 23 ++++++++++++++-- + lib/pengine/unpack.c | 33 ++++++++++++++++++++--- + lib/pengine/utils.c | 60 +++++++++++++++++++++++++++++++----------- + 6 files changed, 137 insertions(+), 31 deletions(-) + +diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h +index 189ba7b..3e59502 100644 +--- a/include/crm/pengine/internal.h ++++ b/include/crm/pengine/internal.h +@@ -460,7 +460,7 @@ void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set); + void pe__register_messages(pcmk__output_t *out); + + void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, +- GHashTable *node_hash, GHashTable *hash, ++ pe_rule_eval_data_t *rule_data, GHashTable *hash, + const char *always_first, gboolean overwrite, + pe_working_set_t *data_set); + +diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h +index a74c629..cbae8ed 100644 +--- a/include/crm/pengine/rules.h ++++ b/include/crm/pengine/rules.h +@@ -46,12 +46,21 @@ gboolean pe_test_expression(xmlNode *expr, GHashTable *node_hash, + crm_time_t *next_change, + pe_match_data_t *match_data); + ++void pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, ++ pe_rule_eval_data_t *rule_data, GHashTable *hash, ++ const char *always_first, gboolean overwrite, ++ crm_time_t *next_change); ++ + void pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, + GHashTable *node_hash, GHashTable *hash, + const char *always_first, gboolean overwrite, + crm_time_t *now, crm_time_t *next_change); + + #if ENABLE_VERSIONED_ATTRS ++void pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, ++ const char *set_name, pe_rule_eval_data_t *rule_data, ++ xmlNode *hash, crm_time_t *next_change); ++ + void pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj, + const char *set_name, GHashTable *node_hash, + xmlNode *hash, crm_time_t *now, +diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c +index 16f3a71..d91c95e 100644 +--- a/lib/pengine/complex.c ++++ b/lib/pengine/complex.c +@@ -95,10 +95,17 @@ void + get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + pe_node_t * node, pe_working_set_t * data_set) + { +- GHashTable *node_hash = NULL; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + + if (node) { +- node_hash = node->details->attrs; ++ rule_data.node_hash = node->details->attrs; + } + + if (rsc->xml) { +@@ -112,7 +119,7 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + } + } + +- pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, node_hash, ++ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data, + meta_hash, NULL, FALSE, data_set); + + /* set anything else based on the parent */ +@@ -122,20 +129,27 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + + /* and finally check the defaults */ + pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS, +- node_hash, meta_hash, NULL, FALSE, data_set); ++ &rule_data, meta_hash, NULL, FALSE, data_set); + } + + void + get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + pe_node_t * node, pe_working_set_t * data_set) + { +- GHashTable *node_hash = NULL; ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = 
RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; + + if (node) { +- node_hash = node->details->attrs; ++ rule_data.node_hash = node->details->attrs; + } + +- pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, node_hash, ++ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data, + meta_hash, NULL, FALSE, data_set); + + /* set anything else based on the parent */ +@@ -145,7 +159,7 @@ get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + } else { + /* and finally check the defaults */ + pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS, +- node_hash, meta_hash, NULL, FALSE, data_set); ++ &rule_data, meta_hash, NULL, FALSE, data_set); + } + } + +@@ -376,6 +390,15 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, + bool remote_node = FALSE; + bool has_versioned_params = FALSE; + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + crm_log_xml_trace(xml_obj, "Processing resource input..."); + + if (id == NULL) { +@@ -706,7 +729,7 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, + + (*rsc)->utilization = crm_str_table_new(); + +- pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, NULL, ++ pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data, + (*rsc)->utilization, NULL, FALSE, data_set); + + /* data_set->resources = g_list_append(data_set->resources, (*rsc)); */ +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index 2709d68..7575011 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -686,6 +686,16 @@ unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, + } + } + ++void ++pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, ++ pe_rule_eval_data_t *rule_data, GHashTable *hash, ++ const char *always_first, gboolean overwrite, ++ crm_time_t *next_change) ++{ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, ++ overwrite, rule_data, next_change, unpack_attr_set); ++} ++ + /*! 
+ * \brief Extract nvpair blocks contained by an XML element into a hash table + * +@@ -714,12 +724,21 @@ pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, + .op_data = NULL + }; + +- unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, +- overwrite, &rule_data, next_change, unpack_attr_set); ++ pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash, ++ always_first, overwrite, next_change); + } + + #if ENABLE_VERSIONED_ATTRS + void ++pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name, ++ pe_rule_eval_data_t *rule_data, xmlNode *hash, ++ crm_time_t *next_change) ++{ ++ unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, rule_data, ++ next_change, unpack_versioned_attr_set); ++} ++ ++void + pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj, + const char *set_name, GHashTable *node_hash, + xmlNode *hash, crm_time_t *now, +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index 532a3e6..8784857 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -188,9 +188,18 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) + const char *value = NULL; + GHashTable *config_hash = crm_str_table_new(); + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + data_set->config_hash = config_hash; + +- pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, NULL, config_hash, ++ pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash, + CIB_OPTIONS_FIRST, FALSE, data_set); + + verify_pe_options(data_set->config_hash); +@@ -515,6 +524,15 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) + const char *type = NULL; + const char *score = NULL; + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + for (xml_obj = __xml_first_child_element(xml_nodes); xml_obj != NULL; + xml_obj = __xml_next_element(xml_obj)) { + +@@ -547,7 +565,7 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) + handle_startup_fencing(data_set, new_node); + + add_node_attrs(xml_obj, new_node, FALSE, data_set); +- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, NULL, ++ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data, + new_node->details->utilization, NULL, + FALSE, data_set); + +@@ -3698,6 +3716,15 @@ add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, + { + const char *cluster_name = NULL; + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + g_hash_table_insert(node->details->attrs, + strdup(CRM_ATTR_UNAME), strdup(node->details->uname)); + +@@ -3719,7 +3746,7 @@ add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, + strdup(cluster_name)); + } + +- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, NULL, ++ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data, + node->details->attrs, NULL, overwrite, data_set); + + if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) { +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index c9b45e0..d01936d 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -597,10 +597,19 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + 
+ if (is_set(action->flags, pe_action_have_node_attrs) == FALSE + && action->node != NULL && action->op_entry != NULL) { ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = action->node->details->attrs, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + pe_set_action_bit(action, pe_action_have_node_attrs); + pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, +- action->node->details->attrs, +- action->extra, NULL, FALSE, data_set); ++ &rule_data, action->extra, NULL, ++ FALSE, data_set); + } + + if (is_set(action->flags, pe_action_pseudo)) { +@@ -873,6 +882,15 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set + const char *timeout = NULL; + int timeout_ms = 0; + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); + child != NULL; child = crm_next_same_xml(child)) { + if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) { +@@ -884,7 +902,7 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set + if (timeout == NULL && data_set->op_defaults) { + GHashTable *action_meta = crm_str_table_new(); + pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, +- NULL, action_meta, NULL, FALSE, data_set); ++ &rule_data, action_meta, NULL, FALSE, data_set); + timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); + } + +@@ -964,10 +982,19 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + pe_rsc_action_details_t *rsc_details = NULL; + #endif + ++ pe_rule_eval_data_t rule_data = { ++ .node_hash = NULL, ++ .role = RSC_ROLE_UNKNOWN, ++ .now = data_set->now, ++ .match_data = NULL, ++ .rsc_data = NULL, ++ .op_data = NULL ++ }; ++ + CRM_CHECK(action && action->rsc, return); + + // Cluster-wide +- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL, ++ pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data, + action->meta, NULL, FALSE, data_set); + + // Probe timeouts default differently, so handle timeout default later +@@ -981,19 +1008,20 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + xmlAttrPtr xIter = NULL; + + // take precedence over defaults +- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, NULL, ++ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data, + action->meta, NULL, TRUE, data_set); + + #if ENABLE_VERSIONED_ATTRS + rsc_details = pe_rsc_action_details(action); +- pe_unpack_versioned_attributes(data_set->input, xml_obj, +- XML_TAG_ATTR_SETS, NULL, +- rsc_details->versioned_parameters, +- data_set->now, NULL); +- pe_unpack_versioned_attributes(data_set->input, xml_obj, +- XML_TAG_META_SETS, NULL, +- rsc_details->versioned_meta, +- data_set->now, NULL); ++ ++ pe_eval_versioned_attributes(data_set->input, xml_obj, ++ XML_TAG_ATTR_SETS, &rule_data, ++ rsc_details->versioned_parameters, ++ NULL); ++ pe_eval_versioned_attributes(data_set->input, xml_obj, ++ XML_TAG_META_SETS, &rule_data, ++ rsc_details->versioned_meta, ++ NULL); + #endif + + /* Anything set as an XML property has highest precedence. 
+@@ -2693,14 +2721,14 @@ pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set) + */ + void + pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, +- GHashTable *node_hash, GHashTable *hash, ++ pe_rule_eval_data_t *rule_data, GHashTable *hash, + const char *always_first, gboolean overwrite, + pe_working_set_t *data_set) + { + crm_time_t *next_change = crm_time_new_undefined(); + +- pe_unpack_nvpairs(data_set->input, xml_obj, set_name, node_hash, hash, +- always_first, overwrite, data_set->now, next_change); ++ pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash, ++ always_first, overwrite, next_change); + if (crm_time_is_defined(next_change)) { + time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); + +-- +1.8.3.1 + + +From ad06f60bae1fcb5d204fa18a0b21ade78aaee5f4 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 8 Apr 2020 13:43:26 -0400 +Subject: [PATCH 07/17] Refactor: scheduler: unpack_operation should be static. + +--- + lib/pengine/utils.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index d01936d..c345875 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -23,8 +23,8 @@ + extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); + void print_str_str(gpointer key, gpointer value, gpointer user_data); + gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); +-void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, +- pe_working_set_t * data_set); ++static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, ++ pe_working_set_t * data_set); + static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, + gboolean include_disabled); + +@@ -968,7 +968,7 @@ unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, + * \param[in] container Resource that contains affected resource, if any + * \param[in] data_set Cluster state + */ +-void ++static void + unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, + pe_working_set_t * data_set) + { +-- +1.8.3.1 + + +From 7e57d955c9209af62dffc0639c50d51121028c26 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 8 Apr 2020 14:58:35 -0400 +Subject: [PATCH 08/17] Refactor: scheduler: Pass interval to unpack_operation. 
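[Illustration, not part of the patch: the corresponding caller change in custom_action(), condensed from the diff below. The interval is now parsed from the operation key once and handed down:

    guint interval_ms = 0;

    parse_op_key(key, NULL, NULL, &interval_ms);  /* derive the interval from the op key */
    unpack_operation(action, action->op_entry, rsc->container, data_set,
                     interval_ms);]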
+ +--- + lib/pengine/utils.c | 36 ++++++++++++++---------------------- + 1 file changed, 14 insertions(+), 22 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index c345875..1e3b0bd 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -24,7 +24,7 @@ extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); + void print_str_str(gpointer key, gpointer value, gpointer user_data); + gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); + static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, +- pe_working_set_t * data_set); ++ pe_working_set_t * data_set, guint interval_ms); + static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, + gboolean include_disabled); + +@@ -568,9 +568,13 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + } + + if (rsc != NULL) { ++ guint interval_ms = 0; ++ + action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); ++ parse_op_key(key, NULL, NULL, &interval_ms); + +- unpack_operation(action, action->op_entry, rsc->container, data_set); ++ unpack_operation(action, action->op_entry, rsc->container, data_set, ++ interval_ms); + + if (save_action) { + rsc->actions = g_list_prepend(rsc->actions, action); +@@ -963,20 +967,20 @@ unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, + * and start delay values as integer milliseconds), requirements, and + * failure policy. + * +- * \param[in,out] action Action to unpack into +- * \param[in] xml_obj Operation XML (or NULL if all defaults) +- * \param[in] container Resource that contains affected resource, if any +- * \param[in] data_set Cluster state ++ * \param[in,out] action Action to unpack into ++ * \param[in] xml_obj Operation XML (or NULL if all defaults) ++ * \param[in] container Resource that contains affected resource, if any ++ * \param[in] data_set Cluster state ++ * \param[in] interval_ms How frequently to perform the operation + */ + static void + unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, +- pe_working_set_t * data_set) ++ pe_working_set_t * data_set, guint interval_ms) + { +- guint interval_ms = 0; + int timeout = 0; + char *value_ms = NULL; + const char *value = NULL; +- const char *field = NULL; ++ const char *field = XML_LRM_ATTR_INTERVAL; + char *default_timeout = NULL; + #if ENABLE_VERSIONED_ATTRS + pe_rsc_action_details_t *rsc_details = NULL; +@@ -1038,23 +1042,11 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + g_hash_table_remove(action->meta, "id"); + + // Normalize interval to milliseconds +- field = XML_LRM_ATTR_INTERVAL; +- value = g_hash_table_lookup(action->meta, field); +- if (value != NULL) { +- interval_ms = crm_parse_interval_spec(value); +- +- } else if ((xml_obj == NULL) && !strcmp(action->task, RSC_STATUS)) { +- /* An orphaned recurring monitor will not have any XML. However, we +- * want the interval to be set, so the action can be properly detected +- * as a recurring monitor. Parse it from the key in this case. 
+- */ +- parse_op_key(action->uuid, NULL, NULL, &interval_ms); +- } + if (interval_ms > 0) { + value_ms = crm_strdup_printf("%u", interval_ms); + g_hash_table_replace(action->meta, strdup(field), value_ms); + +- } else if (value) { ++ } else if (g_hash_table_lookup(action->meta, field) != NULL) { + g_hash_table_remove(action->meta, field); + } + +-- +1.8.3.1 + + +From e4c411d9674e222647dd3ed31714c369f54ccad1 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 9 Apr 2020 16:15:17 -0400 +Subject: [PATCH 09/17] Feature: scheduler: Pass rsc_defaults and op_defaults + data. + +See: rhbz#1628701. +--- + lib/pengine/complex.c | 8 +++++++- + lib/pengine/utils.c | 15 +++++++++++++-- + 2 files changed, 20 insertions(+), 3 deletions(-) + +diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c +index d91c95e..1f06348 100644 +--- a/lib/pengine/complex.c ++++ b/lib/pengine/complex.c +@@ -95,12 +95,18 @@ void + get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, + pe_node_t * node, pe_working_set_t * data_set) + { ++ pe_rsc_eval_data_t rsc_rule_data = { ++ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS), ++ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER), ++ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE) ++ }; ++ + pe_rule_eval_data_t rule_data = { + .node_hash = NULL, + .role = RSC_ROLE_UNKNOWN, + .now = data_set->now, + .match_data = NULL, +- .rsc_data = NULL, ++ .rsc_data = &rsc_rule_data, + .op_data = NULL + }; + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 1e3b0bd..d5309ed 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -986,13 +986,24 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + pe_rsc_action_details_t *rsc_details = NULL; + #endif + ++ pe_rsc_eval_data_t rsc_rule_data = { ++ .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS), ++ .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER), ++ .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE) ++ }; ++ ++ pe_op_eval_data_t op_rule_data = { ++ .op_name = action->task, ++ .interval = interval_ms ++ }; ++ + pe_rule_eval_data_t rule_data = { + .node_hash = NULL, + .role = RSC_ROLE_UNKNOWN, + .now = data_set->now, + .match_data = NULL, +- .rsc_data = NULL, +- .op_data = NULL ++ .rsc_data = &rsc_rule_data, ++ .op_data = &op_rule_data + }; + + CRM_CHECK(action && action->rsc, return); +-- +1.8.3.1 + + +From 57eedcad739071530f01e1fd691734f7681a08a1 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 17 Apr 2020 12:30:51 -0400 +Subject: [PATCH 10/17] Feature: xml: Add rsc_expression and op_expression to + the XML schema. 
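Taken together, the preceding refactors converge on a single calling pattern: build a pe_rule_eval_data_t on the stack, point its rsc_data and op_data members at per-resource and per-operation descriptors, and pass the whole structure to the nvpair-unpacking helpers so that rsc_expression and op_expression rules can match. A condensed sketch assembled from the hunks above (a fragment of unpack_operation(); interval_ms is the parameter introduced by the previous patch):

    pe_rsc_eval_data_t rsc_rule_data = {
        .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
        .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
        .agent    = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
    };

    pe_op_eval_data_t op_rule_data = {
        .op_name  = action->task,
        .interval = interval_ms
    };

    pe_rule_eval_data_t rule_data = {
        .node_hash  = NULL,
        .role       = RSC_ROLE_UNKNOWN,
        .now        = data_set->now,
        .match_data = NULL,
        .rsc_data   = &rsc_rule_data,   /* lets rsc_expression rules match */
        .op_data    = &op_rule_data     /* lets op_expression rules match */
    };

    /* cluster-wide op_defaults, evaluated against this resource/operation */
    pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
                               &rule_data, action->meta, NULL, FALSE, data_set);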
+ +--- + cts/cli/regression.upgrade.exp | 7 +- + cts/cli/regression.validity.exp | 22 ++- + xml/constraints-next.rng | 4 +- + xml/nodes-3.4.rng | 44 +++++ + xml/nvset-3.4.rng | 63 ++++++ + xml/options-3.4.rng | 111 +++++++++++ + xml/resources-3.4.rng | 425 ++++++++++++++++++++++++++++++++++++++++ + xml/rule-3.4.rng | 165 ++++++++++++++++ + 8 files changed, 833 insertions(+), 8 deletions(-) + create mode 100644 xml/nodes-3.4.rng + create mode 100644 xml/nvset-3.4.rng + create mode 100644 xml/options-3.4.rng + create mode 100644 xml/resources-3.4.rng + create mode 100644 xml/rule-3.4.rng + +diff --git a/cts/cli/regression.upgrade.exp b/cts/cli/regression.upgrade.exp +index 28ca057..50b22df 100644 +--- a/cts/cli/regression.upgrade.exp ++++ b/cts/cli/regression.upgrade.exp +@@ -79,8 +79,11 @@ update_validation debug: Configuration valid for schema: pacemaker-3.2 + update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 + update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) + update_validation debug: Configuration valid for schema: pacemaker-3.3 +-update_validation trace: Stopping at pacemaker-3.3 +-update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.3 ++update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 ++update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) ++update_validation debug: Configuration valid for schema: pacemaker-3.4 ++update_validation trace: Stopping at pacemaker-3.4 ++update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.4 + =#=#=#= Current cib after: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#= + + +diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp +index 46e54b5..4407074 100644 +--- a/cts/cli/regression.validity.exp ++++ b/cts/cli/regression.validity.exp +@@ -105,7 +105,11 @@ update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) + element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order + element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + update_validation trace: pacemaker-3.3 validation failed +-Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.3 ++update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) ++element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order ++element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ++update_validation trace: pacemaker-3.4 validation failed ++Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.4 + =#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Invalid configuration (78) =#=#=#= + * Passed: crm_simulate - Run crm_simulate with invalid CIB (enum violation) + =#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= +@@ -198,7 +202,10 @@ update_validation trace: pacemaker-3.2 validation failed + update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) + element cib: Relax-NG validity error : Invalid attribute validate-with for element cib + update_validation trace: pacemaker-3.3 validation failed +-Cannot upgrade configuration 
(claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.3 ++update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) ++element cib: Relax-NG validity error : Invalid attribute validate-with for element cib ++update_validation trace: pacemaker-3.4 validation failed ++Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.4 + =#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Invalid configuration (78) =#=#=#= + * Passed: crm_simulate - Run crm_simulate with invalid CIB (unrecognized validate-with) + =#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= +@@ -286,8 +293,11 @@ update_validation debug: Configuration valid for schema: pacemaker-3.2 + update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 + update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) + update_validation debug: Configuration valid for schema: pacemaker-3.3 +-update_validation trace: Stopping at pacemaker-3.3 +-update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.3 ++update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 ++update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) ++update_validation debug: Configuration valid for schema: pacemaker-3.4 ++update_validation trace: Stopping at pacemaker-3.4 ++update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.4 + unpack_resources error: Resource start-up disabled since no STONITH resources have been defined + unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option + unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +@@ -393,6 +403,8 @@ element rsc_order: Relax-NG validity error : Invalid attribute first-action for + element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order + element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ++element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order ++element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + =#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#= + + +@@ -450,6 +462,8 @@ validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attrib + validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order + validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order ++validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order ++validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order + unpack_resources error: Resource start-up disabled since no STONITH resources have been defined + unpack_resources error: Either configure some or disable STONITH with 
the stonith-enabled option + unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +diff --git a/xml/constraints-next.rng b/xml/constraints-next.rng +index 7e0d98e..1fa3e75 100644 +--- a/xml/constraints-next.rng ++++ b/xml/constraints-next.rng +@@ -43,7 +43,7 @@ + + + +- ++ + + + +@@ -255,7 +255,7 @@ + + + +- ++ + + + +diff --git a/xml/nodes-3.4.rng b/xml/nodes-3.4.rng +new file mode 100644 +index 0000000..0132c72 +--- /dev/null ++++ b/xml/nodes-3.4.rng +@@ -0,0 +1,44 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ member ++ ping ++ remote ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/xml/nvset-3.4.rng b/xml/nvset-3.4.rng +new file mode 100644 +index 0000000..91a7d23 +--- /dev/null ++++ b/xml/nvset-3.4.rng +@@ -0,0 +1,63 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/xml/options-3.4.rng b/xml/options-3.4.rng +new file mode 100644 +index 0000000..22330d8 +--- /dev/null ++++ b/xml/options-3.4.rng +@@ -0,0 +1,111 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ cluster-infrastructure ++ ++ ++ ++ ++ ++ heartbeat ++ openais ++ classic openais ++ classic openais (with plugin) ++ cman ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ cluster-infrastructure ++ cluster_recheck_interval ++ dc_deadtime ++ default-action-timeout ++ default_action_timeout ++ default-migration-threshold ++ default_migration_threshold ++ default-resource-failure-stickiness ++ default_resource_failure_stickiness ++ default-resource-stickiness ++ default_resource_stickiness ++ election_timeout ++ expected-quorum-votes ++ is-managed-default ++ is_managed_default ++ no_quorum_policy ++ notification-agent ++ notification-recipient ++ remove_after_stop ++ shutdown_escalation ++ startup_fencing ++ stonith_action ++ stonith_enabled ++ stop_orphan_actions ++ stop_orphan_resources ++ symmetric_cluster ++ transition_idle_timeout ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/xml/resources-3.4.rng b/xml/resources-3.4.rng +new file mode 100644 +index 0000000..fbb4b65 +--- /dev/null ++++ b/xml/resources-3.4.rng +@@ -0,0 +1,425 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ isolation ++ isolation-host ++ isolation-instance ++ isolation-wrapper ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ([0-9\-]+) ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ requires ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ Stopped ++ Started ++ Slave 
++ Master ++ ++ ++ ++ ++ ++ ++ ignore ++ block ++ stop ++ restart ++ standby ++ fence ++ restart-container ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ocf ++ ++ ++ ++ ++ lsb ++ heartbeat ++ stonith ++ upstart ++ service ++ systemd ++ nagios ++ ++ ++ ++ ++ +diff --git a/xml/rule-3.4.rng b/xml/rule-3.4.rng +new file mode 100644 +index 0000000..5d1daf0 +--- /dev/null ++++ b/xml/rule-3.4.rng +@@ -0,0 +1,165 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ or ++ and ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ lt ++ gt ++ lte ++ gte ++ eq ++ ne ++ defined ++ not_defined ++ ++ ++ ++ ++ ++ ++ ++ ++ string ++ number ++ version ++ ++ ++ ++ ++ ++ ++ literal ++ param ++ meta ++ ++ ++ ++ ++ ++ ++ ++ ++ in_range ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ gt ++ ++ ++ ++ lt ++ ++ ++ ++ ++ ++ date_spec ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From b0e2345d92fb7cf42c133b24457eeb07126db8a0 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 27 Apr 2020 16:24:22 -0400 +Subject: [PATCH 11/17] Fix: scheduler: Change trace output in populate_hash. + +Only show the "Setting attribute:" text when it comes time to actually +set the attribute. Also show the value being set. This makes it +clearer that an attribute is actually being set, not just that the +function is processing something. +--- + lib/pengine/rules.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c +index 7575011..b0fca55 100644 +--- a/lib/pengine/rules.c ++++ b/lib/pengine/rules.c +@@ -463,7 +463,6 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN + name = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_NAME); + } + +- crm_trace("Setting attribute: %s", name); + value = crm_element_value(an_attr, XML_NVPAIR_ATTR_VALUE); + if (value == NULL) { + value = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_VALUE); +@@ -471,7 +470,6 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN + + if (name == NULL || value == NULL) { + continue; +- + } + + old_value = g_hash_table_lookup(hash, name); +@@ -484,6 +482,7 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN + continue; + + } else if (old_value == NULL) { ++ crm_trace("Setting attribute: %s = %s", name, value); + g_hash_table_insert(hash, strdup(name), strdup(value)); + + } else if (overwrite) { +-- +1.8.3.1 + + +From d35854384b231c79b8aba1ce4c5caf5dd51ec982 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 1 May 2020 15:45:31 -0400 +Subject: [PATCH 12/17] Test: scheduler: Add a regression test for op_defaults. 
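The populate_hash() trace change above is easier to see outside diff form. Before, the function logged "Setting attribute" for every nvpair it examined, including ones later skipped; after, it logs only when an attribute is actually inserted, and includes the value. Abridged from the hunks (the overwrite and skip branches are unchanged and not shown):

    old_value = g_hash_table_lookup(hash, name);
    ...
    } else if (old_value == NULL) {
        /* Only here do we know the attribute is really being set */
        crm_trace("Setting attribute: %s = %s", name, value);
        g_hash_table_insert(hash, strdup(name), strdup(value));
    }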
+ +--- + cts/cts-scheduler.in | 3 + + cts/scheduler/op-defaults.dot | 33 ++++++ + cts/scheduler/op-defaults.exp | 211 ++++++++++++++++++++++++++++++++++++++ + cts/scheduler/op-defaults.scores | 11 ++ + cts/scheduler/op-defaults.summary | 46 +++++++++ + cts/scheduler/op-defaults.xml | 87 ++++++++++++++++ + 6 files changed, 391 insertions(+) + create mode 100644 cts/scheduler/op-defaults.dot + create mode 100644 cts/scheduler/op-defaults.exp + create mode 100644 cts/scheduler/op-defaults.scores + create mode 100644 cts/scheduler/op-defaults.summary + create mode 100644 cts/scheduler/op-defaults.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 5d72205..b83f812 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -962,6 +962,9 @@ TESTS = [ + [ "shutdown-lock", "Ensure shutdown lock works properly" ], + [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], + ], ++ [ ++ [ "op-defaults", "Test op_defaults conditional expressions " ], ++ ], + + # @TODO: If pacemaker implements versioned attributes, uncomment these tests + #[ +diff --git a/cts/scheduler/op-defaults.dot b/cts/scheduler/op-defaults.dot +new file mode 100644 +index 0000000..5536c15 +--- /dev/null ++++ b/cts/scheduler/op-defaults.dot +@@ -0,0 +1,33 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_60000 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_60000 cluster02" [ style = bold] ++"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_0 cluster01" -> "ip-rsc2_start_0 cluster01" [ style = bold] ++"ip-rsc2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_0 cluster02" -> "ip-rsc2_start_0 cluster01" [ style = bold] ++"ip-rsc2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_10000 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_start_0 cluster01" -> "ip-rsc2_monitor_10000 cluster01" [ style = bold] ++"ip-rsc2_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster01" -> "ip-rsc_start_0 cluster02" [ style = bold] ++"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster02" -> "ip-rsc_start_0 cluster02" [ style = bold] ++"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_20000 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_start_0 cluster02" -> "ip-rsc_monitor_20000 cluster02" [ style = bold] ++"ip-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] 
++"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/op-defaults.exp b/cts/scheduler/op-defaults.exp +new file mode 100644 +index 0000000..b81eacb +--- /dev/null ++++ b/cts/scheduler/op-defaults.exp +@@ -0,0 +1,211 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/op-defaults.scores b/cts/scheduler/op-defaults.scores +new file mode 100644 +index 0000000..1c622f0 +--- /dev/null ++++ b/cts/scheduler/op-defaults.scores +@@ -0,0 +1,11 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: ip-rsc2 allocation score on cluster01: 0 ++pcmk__native_allocate: ip-rsc2 allocation score on cluster02: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 +diff --git a/cts/scheduler/op-defaults.summary b/cts/scheduler/op-defaults.summary +new file mode 100644 +index 0000000..b580939 +--- /dev/null ++++ b/cts/scheduler/op-defaults.summary +@@ -0,0 +1,46 @@ ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ ip-rsc (ocf::heartbeat:IPaddr2): Stopped ++ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ * Start ip-rsc ( cluster02 ) ++ * Start ip-rsc2 ( cluster01 ) ++ * Start dummy-rsc ( cluster02 ) ++ * Start ping-rsc-ping ( cluster01 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: ip-rsc monitor on cluster02 ++ * Resource action: ip-rsc monitor on cluster01 ++ * Resource action: ip-rsc2 monitor on cluster02 ++ * Resource action: ip-rsc2 monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: ping-rsc-ping monitor on cluster02 ++ * Resource action: ping-rsc-ping monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ * Resource action: ip-rsc start on cluster02 ++ * Resource action: ip-rsc2 start on cluster01 ++ * Resource action: dummy-rsc start on cluster02 ++ * Resource action: ping-rsc-ping start on cluster01 ++ * Resource action: ip-rsc monitor=20000 on cluster02 ++ * Resource 
action: ip-rsc2 monitor=10000 on cluster01 ++ * Resource action: dummy-rsc monitor=60000 on cluster02 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ ip-rsc (ocf::heartbeat:IPaddr2): Started cluster02 ++ ip-rsc2 (ocf::heartbeat:IPaddr2): Started cluster01 ++ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 ++ ping-rsc-ping (ocf::pacemaker:ping): Started cluster01 ++ +diff --git a/cts/scheduler/op-defaults.xml b/cts/scheduler/op-defaults.xml +new file mode 100644 +index 0000000..ae3b248 +--- /dev/null ++++ b/cts/scheduler/op-defaults.xml +@@ -0,0 +1,87 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From 67067927bc1b8e000c06d2b5a4ae6b9223ca13c7 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 13 May 2020 10:40:34 -0400 +Subject: [PATCH 13/17] Test: scheduler: Add a regression test for + rsc_defaults. + +--- + cts/cts-scheduler.in | 3 +- + cts/scheduler/rsc-defaults.dot | 18 ++++++ + cts/scheduler/rsc-defaults.exp | 124 +++++++++++++++++++++++++++++++++++++ + cts/scheduler/rsc-defaults.scores | 11 ++++ + cts/scheduler/rsc-defaults.summary | 38 ++++++++++++ + cts/scheduler/rsc-defaults.xml | 78 +++++++++++++++++++++++ + 6 files changed, 271 insertions(+), 1 deletion(-) + create mode 100644 cts/scheduler/rsc-defaults.dot + create mode 100644 cts/scheduler/rsc-defaults.exp + create mode 100644 cts/scheduler/rsc-defaults.scores + create mode 100644 cts/scheduler/rsc-defaults.summary + create mode 100644 cts/scheduler/rsc-defaults.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index b83f812..9022ce9 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -963,7 +963,8 @@ TESTS = [ + [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], + ], + [ +- [ "op-defaults", "Test op_defaults conditional expressions " ], ++ [ "op-defaults", "Test op_defaults conditional expressions" ], ++ [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], + ], + + # @TODO: If pacemaker implements versioned attributes, uncomment these tests +diff --git a/cts/scheduler/rsc-defaults.dot b/cts/scheduler/rsc-defaults.dot +new file mode 100644 +index 0000000..d776614 +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.dot +@@ -0,0 +1,18 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster02" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" 
fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster02" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/rsc-defaults.exp b/cts/scheduler/rsc-defaults.exp +new file mode 100644 +index 0000000..4aec360 +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.exp +@@ -0,0 +1,124 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/rsc-defaults.scores b/cts/scheduler/rsc-defaults.scores +new file mode 100644 +index 0000000..e7f1bab +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.scores +@@ -0,0 +1,11 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster01: -INFINITY ++pcmk__native_allocate: ip-rsc allocation score on cluster02: -INFINITY ++pcmk__native_allocate: ip-rsc2 allocation score on cluster01: -INFINITY ++pcmk__native_allocate: ip-rsc2 allocation score on cluster02: -INFINITY ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 +diff --git a/cts/scheduler/rsc-defaults.summary b/cts/scheduler/rsc-defaults.summary +new file mode 100644 +index 0000000..0066f2e +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.summary +@@ -0,0 +1,38 @@ ++2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ ip-rsc (ocf::heartbeat:IPaddr2): Stopped (disabled) ++ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped (disabled) ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ * Start ping-rsc-ping ( cluster02 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: ip-rsc monitor on cluster02 ++ * Resource action: ip-rsc monitor on cluster01 ++ * Resource action: ip-rsc2 monitor on cluster02 ++ * Resource action: ip-rsc2 monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: ping-rsc-ping monitor on cluster02 ++ * Resource action: ping-rsc-ping monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ * Resource action: ping-rsc-ping start on cluster02 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ ip-rsc (ocf::heartbeat:IPaddr2): Stopped (disabled) ++ ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped (disabled) ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) ++ ping-rsc-ping (ocf::pacemaker:ping): Started cluster02 ++ +diff --git a/cts/scheduler/rsc-defaults.xml 
b/cts/scheduler/rsc-defaults.xml +new file mode 100644 +index 0000000..38cae8b +--- /dev/null ++++ b/cts/scheduler/rsc-defaults.xml +@@ -0,0 +1,78 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From bcfe068ccb3f3cb6cc3509257fbc4a59bc2b1a41 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 13 May 2020 12:47:35 -0400 +Subject: [PATCH 14/17] Test: scheduler: Add a regression test for op_defaults + with an AND expr. + +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/op-defaults-2.dot | 33 ++++++ + cts/scheduler/op-defaults-2.exp | 211 ++++++++++++++++++++++++++++++++++++ + cts/scheduler/op-defaults-2.scores | 11 ++ + cts/scheduler/op-defaults-2.summary | 46 ++++++++ + cts/scheduler/op-defaults-2.xml | 73 +++++++++++++ + 6 files changed, 375 insertions(+) + create mode 100644 cts/scheduler/op-defaults-2.dot + create mode 100644 cts/scheduler/op-defaults-2.exp + create mode 100644 cts/scheduler/op-defaults-2.scores + create mode 100644 cts/scheduler/op-defaults-2.summary + create mode 100644 cts/scheduler/op-defaults-2.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 9022ce9..669b344 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -964,6 +964,7 @@ TESTS = [ + ], + [ + [ "op-defaults", "Test op_defaults conditional expressions" ], ++ [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], + [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], + ], + +diff --git a/cts/scheduler/op-defaults-2.dot b/cts/scheduler/op-defaults-2.dot +new file mode 100644 +index 0000000..5c67bd8 +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.dot +@@ -0,0 +1,33 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_10000 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_10000 cluster02" [ style = bold] ++"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster01" -> "ip-rsc_start_0 cluster02" [ style = bold] ++"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_0 cluster02" -> "ip-rsc_start_0 cluster02" [ style = bold] ++"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_monitor_20000 cluster02" [ style=bold color="green" fontcolor="black"] ++"ip-rsc_start_0 cluster02" -> "ip-rsc_monitor_20000 cluster02" [ style = bold] ++"ip-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] 
++"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] ++"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"rsc-passes_monitor_0 cluster01" -> "rsc-passes_start_0 cluster01" [ style = bold] ++"rsc-passes_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"rsc-passes_monitor_0 cluster02" -> "rsc-passes_start_0 cluster01" [ style = bold] ++"rsc-passes_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"rsc-passes_monitor_10000 cluster01" [ style=bold color="green" fontcolor="black"] ++"rsc-passes_start_0 cluster01" -> "rsc-passes_monitor_10000 cluster01" [ style = bold] ++"rsc-passes_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/op-defaults-2.exp b/cts/scheduler/op-defaults-2.exp +new file mode 100644 +index 0000000..4324fde +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.exp +@@ -0,0 +1,211 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/op-defaults-2.scores b/cts/scheduler/op-defaults-2.scores +new file mode 100644 +index 0000000..180c8b4 +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.scores +@@ -0,0 +1,11 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: ip-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 ++pcmk__native_allocate: rsc-passes allocation score on cluster01: 0 ++pcmk__native_allocate: rsc-passes allocation score on cluster02: 0 +diff --git a/cts/scheduler/op-defaults-2.summary b/cts/scheduler/op-defaults-2.summary +new file mode 100644 +index 0000000..16a68be +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.summary +@@ -0,0 +1,46 @@ ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ ip-rsc (ocf::heartbeat:IPaddr2): Stopped ++ rsc-passes (ocf::heartbeat:IPaddr2): Stopped ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ * Start ip-rsc ( cluster02 ) ++ * Start rsc-passes ( cluster01 ) ++ * Start dummy-rsc ( cluster02 ) ++ * Start ping-rsc-ping ( cluster01 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: ip-rsc monitor on cluster02 ++ * Resource action: ip-rsc monitor on cluster01 ++ * Resource action: 
rsc-passes monitor on cluster02 ++ * Resource action: rsc-passes monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: ping-rsc-ping monitor on cluster02 ++ * Resource action: ping-rsc-ping monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ * Resource action: ip-rsc start on cluster02 ++ * Resource action: rsc-passes start on cluster01 ++ * Resource action: dummy-rsc start on cluster02 ++ * Resource action: ping-rsc-ping start on cluster01 ++ * Resource action: ip-rsc monitor=20000 on cluster02 ++ * Resource action: rsc-passes monitor=10000 on cluster01 ++ * Resource action: dummy-rsc monitor=10000 on cluster02 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ ip-rsc (ocf::heartbeat:IPaddr2): Started cluster02 ++ rsc-passes (ocf::heartbeat:IPaddr2): Started cluster01 ++ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 ++ ping-rsc-ping (ocf::pacemaker:ping): Started cluster01 ++ +diff --git a/cts/scheduler/op-defaults-2.xml b/cts/scheduler/op-defaults-2.xml +new file mode 100644 +index 0000000..9f3c288 +--- /dev/null ++++ b/cts/scheduler/op-defaults-2.xml +@@ -0,0 +1,73 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From 017b783c2037d641c40a39dd7ec3a9eba0aaa6df Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 13 May 2020 15:18:28 -0400 +Subject: [PATCH 15/17] Doc: Pacemaker Explained: Add documentation for + rsc_expr and op_expr. + +--- + doc/Pacemaker_Explained/en-US/Ch-Rules.txt | 174 +++++++++++++++++++++++++++++ + 1 file changed, 174 insertions(+) + +diff --git a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt +index 9d617f6..5df5f82 100644 +--- a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt ++++ b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt +@@ -522,6 +522,124 @@ You may wish to write +end="2005-03-31T23:59:59"+ to avoid confusion. + ------- + ===== + ++== Resource Expressions == ++ ++An +rsc_expression+ is a rule condition based on a resource agent's properties. ++This rule is only valid within an +rsc_defaults+ or +op_defaults+ context. None ++of the matching attributes of +class+, +provider+, and +type+ are required. If ++one is omitted, all values of that attribute will match. For instance, omitting +++type+ means every type will match. ++ ++.Attributes of an rsc_expression Element ++[width="95%",cols="2m,<5",options="header",align="center"] ++|========================================================= ++ ++|Field ++|Description ++ ++|id ++|A unique name for the expression (required) ++ indexterm:[XML attribute,id attribute,rsc_expression element] ++ indexterm:[XML element,rsc_expression element,id attribute] ++ ++|class ++|The standard name to be matched against resource agents ++ indexterm:[XML attribute,class attribute,rsc_expression element] ++ indexterm:[XML element,rsc_expression element,class attribute] ++ ++|provider ++|If given, the vendor to be matched against resource agents. This ++ only makes sense for agents using the OCF spec. 
++ indexterm:[XML attribute,provider attribute,rsc_expression element] ++ indexterm:[XML element,rsc_expression element,provider attribute] ++ ++|type ++|The name of the resource agent to be matched ++ indexterm:[XML attribute,type attribute,rsc_expression element] ++ indexterm:[XML element,rsc_expression element,type attribute] ++ ++|========================================================= ++ ++=== Example Resource-Based Expressions === ++ ++A small sample of how resource-based expressions can be used: ++ ++.True for all ocf:heartbeat:IPaddr2 resources ++==== ++[source,XML] ++---- ++ ++ ++ ++---- ++==== ++ ++.Provider doesn't apply to non-OCF resources ++==== ++[source,XML] ++---- ++ ++ ++ ++---- ++==== ++ ++== Operation Expressions == ++ ++An +op_expression+ is a rule condition based on an action of some resource ++agent. This rule is only valid within an +op_defaults+ context. ++ ++.Attributes of an op_expression Element ++[width="95%",cols="2m,<5",options="header",align="center"] ++|========================================================= ++ ++|Field ++|Description ++ ++|id ++|A unique name for the expression (required) ++ indexterm:[XML attribute,id attribute,op_expression element] ++ indexterm:[XML element,op_expression element,id attribute] ++ ++|name ++|The action name to match against. This can be any action supported by ++ the resource agent; common values include +monitor+, +start+, and +stop+ ++ (required). ++ indexterm:[XML attribute,name attribute,op_expression element] ++ indexterm:[XML element,op_expression element,name attribute] ++ ++|interval ++|The interval of the action to match against. If not given, only ++ the name attribute will be used to match. ++ indexterm:[XML attribute,interval attribute,op_expression element] ++ indexterm:[XML element,op_expression element,interval attribute] ++ ++|========================================================= ++ ++=== Example Operation-Based Expressions === ++ ++A small sample of how operation-based expressions can be used: ++ ++.True for all monitor actions ++==== ++[source,XML] ++---- ++ ++ ++ ++---- ++==== ++ ++.True for all monitor actions with a 10 second interval ++==== ++[source,XML] ++---- ++ ++ ++ ++---- ++==== ++ + == Using Rules to Determine Resource Location == + indexterm:[Rule,Determine Resource Location] + indexterm:[Resource,Location,Determine by Rules] +@@ -710,6 +828,62 @@ Rules may be used similarly in +instance_attributes+ or +utilization+ blocks. + Any single block may directly contain only a single rule, but that rule may + itself contain any number of rules. + +++rsc_expression+ and +op_expression+ blocks may additionally be used to set defaults ++on either a single resource or across an entire class of resources with a single ++rule. +rsc_expression+ may be used to select resource agents within both +rsc_defaults+ ++and +op_defaults+, while +op_expression+ may only be used within +op_defaults+. If ++multiple rules succeed for a given resource agent, the last one specified will be ++the one that takes effect. As with any other rule, boolean operations may be used ++to make more complicated expressions. 
++ ++.Set all IPaddr2 resources to stopped ++===== ++[source,XML] ++------- ++ ++ ++ ++ ++ ++ ++ ++ ++------- ++===== ++ ++.Set all monitor action timeouts to 7 seconds ++===== ++[source,XML] ++------- ++ ++ ++ ++ ++ ++ ++ ++ ++------- ++===== ++ ++.Set the monitor action timeout on all IPaddr2 resources with a given monitor interval to 8 seconds ++===== ++[source,XML] ++------- ++ ++ ++ ++ ++ ++ ++ ++ ++ ++------- ++===== ++ + === Using Rules to Control Cluster Options === + indexterm:[Rule,Controlling Cluster Options] + indexterm:[Cluster,Setting Options with Rules] +-- +1.8.3.1 + + +From b8dd16c5e454445f73416ae8b74649545ee1b472 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 13 May 2020 16:26:21 -0400 +Subject: [PATCH 16/17] Test: scheduler: Add a test for multiple rules applying + to the same resource. + +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/op-defaults-3.dot | 14 +++++++ + cts/scheduler/op-defaults-3.exp | 83 +++++++++++++++++++++++++++++++++++++ + cts/scheduler/op-defaults-3.scores | 5 +++ + cts/scheduler/op-defaults-3.summary | 26 ++++++++++++ + cts/scheduler/op-defaults-3.xml | 54 ++++++++++++++++++++++++ + 6 files changed, 183 insertions(+) + create mode 100644 cts/scheduler/op-defaults-3.dot + create mode 100644 cts/scheduler/op-defaults-3.exp + create mode 100644 cts/scheduler/op-defaults-3.scores + create mode 100644 cts/scheduler/op-defaults-3.summary + create mode 100644 cts/scheduler/op-defaults-3.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 669b344..2c2d14f 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -965,6 +965,7 @@ TESTS = [ + [ + [ "op-defaults", "Test op_defaults conditional expressions" ], + [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], ++ [ "op-defaults-3", "Test op_defaults precedence" ], + [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], + ], + +diff --git a/cts/scheduler/op-defaults-3.dot b/cts/scheduler/op-defaults-3.dot +new file mode 100644 +index 0000000..382f630 +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.dot +@@ -0,0 +1,14 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_10000 cluster02" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_10000 cluster02" [ style = bold] ++"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/op-defaults-3.exp b/cts/scheduler/op-defaults-3.exp +new file mode 100644 +index 0000000..6d567dc +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.exp +@@ -0,0 +1,83 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git 
a/cts/scheduler/op-defaults-3.scores b/cts/scheduler/op-defaults-3.scores +new file mode 100644 +index 0000000..0a5190a +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.scores +@@ -0,0 +1,5 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 +diff --git a/cts/scheduler/op-defaults-3.summary b/cts/scheduler/op-defaults-3.summary +new file mode 100644 +index 0000000..a83eb15 +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.summary +@@ -0,0 +1,26 @@ ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ * Start dummy-rsc ( cluster02 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ * Resource action: dummy-rsc start on cluster02 ++ * Resource action: dummy-rsc monitor=10000 on cluster02 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 ++ +diff --git a/cts/scheduler/op-defaults-3.xml b/cts/scheduler/op-defaults-3.xml +new file mode 100644 +index 0000000..4a8912e +--- /dev/null ++++ b/cts/scheduler/op-defaults-3.xml +@@ -0,0 +1,54 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From b9ccde16609e7d005ac0578a603da97a1808704a Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 15 May 2020 13:48:47 -0400 +Subject: [PATCH 17/17] Test: scheduler: Add a test for rsc_defaults not + specifying type. 
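The behavior this last test exercises — an rsc_expression attribute that is omitted matches every value — reduces to NULL-tolerant string comparison. A hypothetical, self-contained illustration (match_if_given() is not a Pacemaker function; it only models the documented "omitted means match all" rule, which is consistent with the test summary below where both ocf::pacemaker resources end up unmanaged):

    #include <stdbool.h>
    #include <string.h>

    /* Models the documented rule: an omitted (NULL) expression
     * attribute acts as a wildcard.
     */
    static bool
    match_if_given(const char *wanted, const char *actual)
    {
        return (wanted == NULL)
               || ((actual != NULL) && (strcmp(wanted, actual) == 0));
    }

    /* An rsc_expression with class="ocf" and provider="pacemaker" but
     * no type would then match any ocf:pacemaker agent:
     *
     *   match_if_given("ocf", standard)
     *       && match_if_given("pacemaker", provider)
     *       && match_if_given(NULL, agent)   // always true
     */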
+ +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/rsc-defaults-2.dot | 11 ++++++ + cts/scheduler/rsc-defaults-2.exp | 72 ++++++++++++++++++++++++++++++++++++ + cts/scheduler/rsc-defaults-2.scores | 7 ++++ + cts/scheduler/rsc-defaults-2.summary | 27 ++++++++++++++ + cts/scheduler/rsc-defaults-2.xml | 52 ++++++++++++++++++++++++++ + 6 files changed, 170 insertions(+) + create mode 100644 cts/scheduler/rsc-defaults-2.dot + create mode 100644 cts/scheduler/rsc-defaults-2.exp + create mode 100644 cts/scheduler/rsc-defaults-2.scores + create mode 100644 cts/scheduler/rsc-defaults-2.summary + create mode 100644 cts/scheduler/rsc-defaults-2.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 2c2d14f..346ada2 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -967,6 +967,7 @@ TESTS = [ + [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], + [ "op-defaults-3", "Test op_defaults precedence" ], + [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], ++ [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ], + ], + + # @TODO: If pacemaker implements versioned attributes, uncomment these tests +diff --git a/cts/scheduler/rsc-defaults-2.dot b/cts/scheduler/rsc-defaults-2.dot +new file mode 100644 +index 0000000..b43c5e6 +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.dot +@@ -0,0 +1,11 @@ ++ digraph "g" { ++"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] ++"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] ++"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/rsc-defaults-2.exp b/cts/scheduler/rsc-defaults-2.exp +new file mode 100644 +index 0000000..e9e1b5f +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.exp +@@ -0,0 +1,72 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/rsc-defaults-2.scores b/cts/scheduler/rsc-defaults-2.scores +new file mode 100644 +index 0000000..4b70f54 +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.scores +@@ -0,0 +1,7 @@ ++Allocation scores: ++pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 ++pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 ++pcmk__native_allocate: fencing allocation score on cluster01: 0 ++pcmk__native_allocate: fencing allocation score on cluster02: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 ++pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 +diff --git a/cts/scheduler/rsc-defaults-2.summary b/cts/scheduler/rsc-defaults-2.summary +new file mode 100644 +index 0000000..46a2a2d +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.summary +@@ -0,0 +1,27 @@ ++ ++Current cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Stopped ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped 
(unmanaged) ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped (unmanaged) ++ ++Transition Summary: ++ * Start fencing ( cluster01 ) ++ ++Executing cluster transition: ++ * Resource action: fencing monitor on cluster02 ++ * Resource action: fencing monitor on cluster01 ++ * Resource action: dummy-rsc monitor on cluster02 ++ * Resource action: dummy-rsc monitor on cluster01 ++ * Resource action: ping-rsc-ping monitor on cluster02 ++ * Resource action: ping-rsc-ping monitor on cluster01 ++ * Resource action: fencing start on cluster01 ++ ++Revised cluster status: ++Online: [ cluster01 cluster02 ] ++ ++ fencing (stonith:fence_xvm): Started cluster01 ++ dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) ++ ping-rsc-ping (ocf::pacemaker:ping): Stopped (unmanaged) ++ +diff --git a/cts/scheduler/rsc-defaults-2.xml b/cts/scheduler/rsc-defaults-2.xml +new file mode 100644 +index 0000000..a160fae +--- /dev/null ++++ b/cts/scheduler/rsc-defaults-2.xml +@@ -0,0 +1,52 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + diff --git a/SOURCES/001-status-deletion.patch b/SOURCES/001-status-deletion.patch deleted file mode 100644 index ca35c21..0000000 --- a/SOURCES/001-status-deletion.patch +++ /dev/null @@ -1,420 +0,0 @@ -From 6c529bb624ad548f66ce6ef1fa80b77c688918f4 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 22 Nov 2019 16:39:54 -0600 -Subject: [PATCH 1/4] Refactor: controller: rename struct recurring_op_s to - active_op_t - -... because it holds both recurring and pending non-recurring actions, -and the name was confusing ---- - daemons/controld/controld_execd.c | 18 +++++++++--------- - daemons/controld/controld_execd_state.c | 4 ++-- - daemons/controld/controld_lrm.h | 8 ++++---- - 3 files changed, 15 insertions(+), 15 deletions(-) - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 9e8dd36..48f35dd 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -403,7 +403,7 @@ lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, - GHashTableIter gIter; - const char *key = NULL; - rsc_history_t *entry = NULL; -- struct recurring_op_s *pending = NULL; -+ active_op_t *pending = NULL; - - crm_debug("Checking for active resources before exit"); - -@@ -909,7 +909,7 @@ static gboolean - lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) - { - const char *rsc = user_data; -- struct recurring_op_s *pending = value; -+ active_op_t *pending = value; - - if (crm_str_eq(rsc, pending->rsc_id, TRUE)) { - crm_info("Removing op %s:%d for deleted resource %s", -@@ -1137,7 +1137,7 @@ cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, - { - int rc = pcmk_ok; - char *local_key = NULL; -- struct recurring_op_s *pending = NULL; -+ active_op_t *pending = NULL; - - CRM_CHECK(op != 0, return FALSE); - CRM_CHECK(rsc_id != NULL, return FALSE); -@@ -1203,7 +1203,7 @@ cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) - { - gboolean remove = FALSE; - struct cancel_data *data = user_data; -- struct recurring_op_s *op = (struct recurring_op_s *)value; -+ active_op_t *op = value; - - if (crm_str_eq(op->op_key, data->key, TRUE)) { - data->done = TRUE; -@@ -2107,7 +2107,7 @@ stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) - { - gboolean remove = FALSE; - struct stop_recurring_action_s *event = user_data; 
-- struct recurring_op_s *op = (struct recurring_op_s *)value; -+ active_op_t *op = value; - - if ((op->interval_ms != 0) - && crm_str_eq(op->rsc_id, event->rsc->id, TRUE)) { -@@ -2124,7 +2124,7 @@ stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) - { - gboolean remove = FALSE; - lrm_state_t *lrm_state = user_data; -- struct recurring_op_s *op = (struct recurring_op_s *)value; -+ active_op_t *op = value; - - if (op->interval_ms != 0) { - crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, -@@ -2297,9 +2297,9 @@ do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operat - * for them to complete during shutdown - */ - char *call_id_s = make_stop_id(rsc->id, call_id); -- struct recurring_op_s *pending = NULL; -+ active_op_t *pending = NULL; - -- pending = calloc(1, sizeof(struct recurring_op_s)); -+ pending = calloc(1, sizeof(active_op_t)); - crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); - - pending->call_id = call_id; -@@ -2517,7 +2517,7 @@ did_lrm_rsc_op_fail(lrm_state_t *lrm_state, const char * rsc_id, - - void - process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, -- struct recurring_op_s *pending, xmlNode *action_xml) -+ active_op_t *pending, xmlNode *action_xml) - { - char *op_id = NULL; - char *op_key = NULL; -diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c -index 0e21d18..473da97 100644 ---- a/daemons/controld/controld_execd_state.c -+++ b/daemons/controld/controld_execd_state.c -@@ -44,7 +44,7 @@ free_deletion_op(gpointer value) - static void - free_recurring_op(gpointer value) - { -- struct recurring_op_s *op = (struct recurring_op_s *)value; -+ active_op_t *op = value; - - free(op->user_data); - free(op->rsc_id); -@@ -61,7 +61,7 @@ fail_pending_op(gpointer key, gpointer value, gpointer user_data) - { - lrmd_event_data_t event = { 0, }; - lrm_state_t *lrm_state = user_data; -- struct recurring_op_s *op = (struct recurring_op_s *)value; -+ active_op_t *op = value; - - crm_trace("Pre-emptively failing " CRM_OP_FMT " on %s (call=%s, %s)", - op->rsc_id, op->op_type, op->interval_ms, -diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h -index 598682b..27df5d7 100644 ---- a/daemons/controld/controld_lrm.h -+++ b/daemons/controld/controld_lrm.h -@@ -33,8 +33,8 @@ typedef struct resource_history_s { - - void history_free(gpointer data); - --/* TODO - Replace this with lrmd_event_data_t */ --struct recurring_op_s { -+// In-flight action (recurring or pending) -+typedef struct active_op_s { - guint interval_ms; - int call_id; - gboolean remove; -@@ -45,7 +45,7 @@ struct recurring_op_s { - char *op_key; - char *user_data; - GHashTable *params; --}; -+} active_op_t; - - typedef struct lrm_state_s { - const char *node_name; -@@ -164,4 +164,4 @@ void remote_ra_process_maintenance_nodes(xmlNode *xml); - gboolean remote_ra_controlling_guest(lrm_state_t * lrm_state); - - void process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, -- struct recurring_op_s *pending, xmlNode *action_xml); -+ active_op_t *pending, xmlNode *action_xml); --- -1.8.3.1 - - -From 93a59f1df8fe11d365032d75f10cb4189ad2f1f8 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 22 Nov 2019 16:45:31 -0600 -Subject: [PATCH 2/4] Refactor: controller: convert active_op_t booleans to - bitmask - ---- - daemons/controld/controld_execd.c | 11 +++++------ - daemons/controld/controld_lrm.h | 8 ++++++-- - 2 files changed, 11 insertions(+), 8 deletions(-) - 
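As context for the hunk that follows: this patch collapses the two separate gboolean fields (remove, cancelled) into a single uint32_t flags word whose named bits come from the new active_op_e enum, tested and set via Pacemaker's set_bit()/is_set() helpers. A minimal self-contained sketch of that idiom follows; the macros here are simplified stand-ins for illustration, not the project's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the set_bit()/is_set() helpers used in the
 * patch; illustrative only, not Pacemaker's real macros. */
#define set_bit(word, bit)  ((word) |= (bit))
#define is_set(word, bit)   (((word) & (bit)) == (bit))

/* Bit values as introduced by this patch */
enum active_op_e {
    active_op_remove    = (1 << 0),
    active_op_cancelled = (1 << 1),
};

int main(void)
{
    uint32_t flags = 0;     /* replaces two separate gboolean fields */

    set_bit(flags, active_op_cancelled);
    if (is_set(flags, active_op_cancelled)) {
        printf("operation already cancelled\n");
    }
    if (!is_set(flags, active_op_remove)) {
        set_bit(flags, active_op_remove);   /* schedule for removal */
    }
    return 0;
}

Packing the booleans this way keeps the struct compact and leaves room for additional per-operation states later without adding new fields.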
-diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 48f35dd..2c9d9c0 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -1148,18 +1148,17 @@ cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, - pending = g_hash_table_lookup(lrm_state->pending_ops, key); - - if (pending) { -- if (remove && pending->remove == FALSE) { -- pending->remove = TRUE; -+ if (remove && is_not_set(pending->flags, active_op_remove)) { -+ set_bit(pending->flags, active_op_remove); - crm_debug("Scheduling %s for removal", key); - } - -- if (pending->cancelled) { -+ if (is_set(pending->flags, active_op_cancelled)) { - crm_debug("Operation %s already cancelled", key); - free(local_key); - return FALSE; - } -- -- pending->cancelled = TRUE; -+ set_bit(pending->flags, active_op_cancelled); - - } else { - crm_info("No pending op found for %s", key); -@@ -2652,7 +2651,7 @@ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, - crm_err("Recurring operation %s was cancelled without transition information", - op_key); - -- } else if (pending->remove) { -+ } else if (is_set(pending->flags, active_op_remove)) { - /* This recurring operation was cancelled (by us) and pending, and we - * have been waiting for it to finish. - */ -diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h -index 27df5d7..3ab7048 100644 ---- a/daemons/controld/controld_lrm.h -+++ b/daemons/controld/controld_lrm.h -@@ -33,12 +33,16 @@ typedef struct resource_history_s { - - void history_free(gpointer data); - -+enum active_op_e { -+ active_op_remove = (1 << 0), -+ active_op_cancelled = (1 << 1), -+}; -+ - // In-flight action (recurring or pending) - typedef struct active_op_s { - guint interval_ms; - int call_id; -- gboolean remove; -- gboolean cancelled; -+ uint32_t flags; // bitmask of active_op_e - time_t start_time; - char *rsc_id; - char *op_type; --- -1.8.3.1 - - -From 4d087d021d325e26b41a9b36b5b190dc7b25334c Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 22 Nov 2019 16:58:25 -0600 -Subject: [PATCH 3/4] Refactor: controller: remove unused argument - ---- - daemons/controld/controld_execd.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 2c9d9c0..46c1958 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -43,8 +43,8 @@ static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int ca - - static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, - const char *rsc_id, const char *operation); --static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, -- xmlNode * msg, xmlNode * request); -+static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, -+ const char *operation, xmlNode *msg); - - void send_direct_ack(const char *to_host, const char *to_sys, - lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id); -@@ -1858,7 +1858,7 @@ do_lrm_invoke(long long action, - crm_rsc_delete, user_name); - - } else { -- do_lrm_rsc_op(lrm_state, rsc, operation, input->xml, input->msg); -+ do_lrm_rsc_op(lrm_state, rsc, operation, input->xml); - } - - lrmd_free_rsc_info(rsc); -@@ -2170,8 +2170,8 @@ record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t - } - - static void --do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const 
char *operation, xmlNode * msg, -- xmlNode * request) -+do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, -+ const char *operation, xmlNode *msg) - { - int call_id = 0; - char *op_id = NULL; --- -1.8.3.1 - - -From 356b417274918b7da6cdd9c72c036c923160b318 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 6 Dec 2019 12:15:05 -0600 -Subject: [PATCH 4/4] Refactor: scheduler: combine two "if" statements - -... for readability, and ease of adding another block later ---- - lib/pacemaker/pcmk_sched_graph.c | 120 +++++++++++++++++++-------------------- - 1 file changed, 60 insertions(+), 60 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c -index e5a8a01..a6967fe 100644 ---- a/lib/pacemaker/pcmk_sched_graph.c -+++ b/lib/pacemaker/pcmk_sched_graph.c -@@ -1088,71 +1088,71 @@ action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set) - return action_xml; - } - -- /* List affected resource */ -- if (action->rsc) { -- if (is_set(action->flags, pe_action_pseudo) == FALSE) { -- int lpc = 0; -- -- xmlNode *rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml)); -- -- const char *attr_list[] = { -- XML_AGENT_ATTR_CLASS, -- XML_AGENT_ATTR_PROVIDER, -- XML_ATTR_TYPE -- }; -- -- if (is_set(action->rsc->flags, pe_rsc_orphan) && action->rsc->clone_name) { -- /* Do not use the 'instance free' name here as that -- * might interfere with the instance we plan to keep. -- * Ie. if there are more than two named /anonymous/ -- * instances on a given node, we need to make sure the -- * command goes to the right one. -- * -- * Keep this block, even when everyone is using -- * 'instance free' anonymous clone names - it means -- * we'll do the right thing if anyone toggles the -- * unique flag to 'off' -- */ -- crm_debug("Using orphan clone name %s instead of %s", action->rsc->id, -- action->rsc->clone_name); -- crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name); -- crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); -+ if (action->rsc && is_not_set(action->flags, pe_action_pseudo)) { -+ int lpc = 0; -+ xmlNode *rsc_xml = NULL; -+ const char *attr_list[] = { -+ XML_AGENT_ATTR_CLASS, -+ XML_AGENT_ATTR_PROVIDER, -+ XML_ATTR_TYPE -+ }; -+ -+ // List affected resource -+ -+ rsc_xml = create_xml_node(action_xml, -+ crm_element_name(action->rsc->xml)); -+ if (is_set(action->rsc->flags, pe_rsc_orphan) -+ && action->rsc->clone_name) { -+ /* Do not use the 'instance free' name here as that -+ * might interfere with the instance we plan to keep. -+ * Ie. if there are more than two named /anonymous/ -+ * instances on a given node, we need to make sure the -+ * command goes to the right one. -+ * -+ * Keep this block, even when everyone is using -+ * 'instance free' anonymous clone names - it means -+ * we'll do the right thing if anyone toggles the -+ * unique flag to 'off' -+ */ -+ crm_debug("Using orphan clone name %s instead of %s", action->rsc->id, -+ action->rsc->clone_name); -+ crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name); -+ crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); - -- } else if (is_not_set(action->rsc->flags, pe_rsc_unique)) { -- const char *xml_id = ID(action->rsc->xml); -- -- crm_debug("Using anonymous clone name %s for %s (aka. 
%s)", xml_id, action->rsc->id, -- action->rsc->clone_name); -- -- /* ID is what we'd like client to use -- * ID_LONG is what they might know it as instead -- * -- * ID_LONG is only strictly needed /here/ during the -- * transition period until all nodes in the cluster -- * are running the new software /and/ have rebooted -- * once (meaning that they've only ever spoken to a DC -- * supporting this feature). -- * -- * If anyone toggles the unique flag to 'on', the -- * 'instance free' name will correspond to an orphan -- * and fall into the clause above instead -- */ -- crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id); -- if (action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) { -- crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name); -- } else { -- crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); -- } -+ } else if (is_not_set(action->rsc->flags, pe_rsc_unique)) { -+ const char *xml_id = ID(action->rsc->xml); -+ -+ crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id, -+ action->rsc->clone_name); - -+ /* ID is what we'd like client to use -+ * ID_LONG is what they might know it as instead -+ * -+ * ID_LONG is only strictly needed /here/ during the -+ * transition period until all nodes in the cluster -+ * are running the new software /and/ have rebooted -+ * once (meaning that they've only ever spoken to a DC -+ * supporting this feature). -+ * -+ * If anyone toggles the unique flag to 'on', the -+ * 'instance free' name will correspond to an orphan -+ * and fall into the clause above instead -+ */ -+ crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id); -+ if (action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) { -+ crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name); - } else { -- CRM_ASSERT(action->rsc->clone_name == NULL); -- crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id); -+ crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); - } - -- for (lpc = 0; lpc < DIMOF(attr_list); lpc++) { -- crm_xml_add(rsc_xml, attr_list[lpc], -- g_hash_table_lookup(action->rsc->meta, attr_list[lpc])); -- } -+ } else { -+ CRM_ASSERT(action->rsc->clone_name == NULL); -+ crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id); -+ } -+ -+ for (lpc = 0; lpc < DIMOF(attr_list); lpc++) { -+ crm_xml_add(rsc_xml, attr_list[lpc], -+ g_hash_table_lookup(action->rsc->meta, attr_list[lpc])); - } - } - --- -1.8.3.1 - diff --git a/SOURCES/002-demote.patch b/SOURCES/002-demote.patch new file mode 100644 index 0000000..5da2515 --- /dev/null +++ b/SOURCES/002-demote.patch @@ -0,0 +1,8664 @@ +From f1f71b3f3c342987db0058e7db0030417f3f83fa Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 08:22:00 -0500 +Subject: [PATCH 01/20] Refactor: scheduler: functionize comparing on-fail + values + +The action_fail_response enum values used for the "on-fail" operation +meta-attribute were initially intended to be in order of severity. +However as new values were added, they were added to the end (out of severity +order) to preserve API backward compatibility. + +This resulted in a convoluted comparison of values that will only get worse as +more values are added. + +This commit adds a comparison function to isolate that complexity. 
+--- + include/crm/pengine/common.h | 32 ++++++++++++------ + lib/pengine/unpack.c | 80 +++++++++++++++++++++++++++++++++++++++++--- + 2 files changed, 97 insertions(+), 15 deletions(-) + +diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h +index 3a770b7..2737b2e 100644 +--- a/include/crm/pengine/common.h ++++ b/include/crm/pengine/common.h +@@ -22,18 +22,29 @@ extern "C" { + extern gboolean was_processing_error; + extern gboolean was_processing_warning; + +-/* order is significant here +- * items listed in order of accending severeness +- * more severe actions take precedent over lower ones ++/* The order is (partially) significant here; the values from action_fail_ignore ++ * through action_fail_fence are in order of increasing severity. ++ * ++ * @COMPAT The values should be ordered and numbered per the "TODO" comments ++ * below, so all values are in order of severity and there is room for ++ * future additions, but that would break API compatibility. ++ * @TODO For now, we just use a function to compare the values specially, but ++ * at the next compatibility break, we should arrange things properly. + */ + enum action_fail_response { +- action_fail_ignore, +- action_fail_recover, +- action_fail_migrate, /* recover by moving it somewhere else */ +- action_fail_block, +- action_fail_stop, +- action_fail_standby, +- action_fail_fence, ++ action_fail_ignore, // @TODO = 10 ++ // @TODO action_fail_demote = 20, ++ action_fail_recover, // @TODO = 30 ++ // @TODO action_fail_reset_remote = 40, ++ // @TODO action_fail_restart_container = 50, ++ action_fail_migrate, // @TODO = 60 ++ action_fail_block, // @TODO = 70 ++ action_fail_stop, // @TODO = 80 ++ action_fail_standby, // @TODO = 90 ++ action_fail_fence, // @TODO = 100 ++ ++ // @COMPAT Values below here are out of order for API compatibility ++ + action_fail_restart_container, + + /* This is reserved for internal use for remote node connection resources. +@@ -44,6 +55,7 @@ enum action_fail_response { + */ + action_fail_reset_remote, + ++ action_fail_demote, + }; + + /* the "done" action must be the "pre" action +1 */ +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index 3c6606b..f688881 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -2770,6 +2770,78 @@ last_change_str(xmlNode *xml_op) + return ((when_s && *when_s)? when_s : "unknown time"); + } + ++/*! ++ * \internal ++ * \brief Compare two on-fail values ++ * ++ * \param[in] first One on-fail value to compare ++ * \param[in] second The other on-fail value to compare ++ * ++ * \return A negative number if second is more severe than first, zero if they ++ * are equal, or a positive number if first is more severe than second. ++ * \note This is only needed until the action_fail_response values can be ++ * renumbered at the next API compatibility break. 
++ */ ++static int ++cmp_on_fail(enum action_fail_response first, enum action_fail_response second) ++{ ++ switch (first) { ++ case action_fail_reset_remote: ++ switch (second) { ++ case action_fail_ignore: ++ case action_fail_recover: ++ return 1; ++ case action_fail_reset_remote: ++ return 0; ++ default: ++ return -1; ++ } ++ break; ++ ++ case action_fail_restart_container: ++ switch (second) { ++ case action_fail_ignore: ++ case action_fail_recover: ++ case action_fail_reset_remote: ++ return 1; ++ case action_fail_restart_container: ++ return 0; ++ default: ++ return -1; ++ } ++ break; ++ ++ default: ++ break; ++ } ++ switch (second) { ++ case action_fail_reset_remote: ++ switch (first) { ++ case action_fail_ignore: ++ case action_fail_recover: ++ return -1; ++ default: ++ return 1; ++ } ++ break; ++ ++ case action_fail_restart_container: ++ switch (first) { ++ case action_fail_ignore: ++ case action_fail_recover: ++ case action_fail_reset_remote: ++ return -1; ++ default: ++ return 1; ++ } ++ break; ++ ++ default: ++ break; ++ } ++ return first - second; ++} ++ + static void + unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure, + enum action_fail_response * on_fail, pe_working_set_t * data_set) +@@ -2829,10 +2901,7 @@ unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * x + } + + action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); +- if ((action->on_fail <= action_fail_fence && *on_fail < action->on_fail) || +- (action->on_fail == action_fail_reset_remote && *on_fail <= action_fail_recover) || +- (action->on_fail == action_fail_restart_container && *on_fail <= action_fail_recover) || +- (*on_fail == action_fail_restart_container && action->on_fail >= action_fail_migrate)) { ++ if (cmp_on_fail(*on_fail, action->on_fail) < 0) { + pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail), + fail2text(action->on_fail), action->uuid, key); + *on_fail = action->on_fail; +@@ -3675,7 +3744,8 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, + + record_failed_op(xml_op, node, rsc, data_set); + +- if (failure_strategy == action_fail_restart_container && *on_fail <= action_fail_recover) { ++ if ((failure_strategy == action_fail_restart_container) ++ && cmp_on_fail(*on_fail, action_fail_recover) <= 0) { + *on_fail = failure_strategy; + } + +-- +1.8.3.1 + + +From ef246ff05d7459f9672b10ac1873e3191a3b46e9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 08:27:47 -0500 +Subject: [PATCH 02/20] Fix: scheduler: disallow on-fail=stop for stop + operations + +because it would loop infinitely as long as the stop continued to fail +--- + lib/pengine/utils.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 20a8db5..3fb7e62 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -716,16 +716,25 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + return action; + } + ++static bool ++valid_stop_on_fail(const char *value) ++{ ++ return safe_str_neq(value, "standby") ++ && safe_str_neq(value, "stop"); ++} ++ + static const char * + unpack_operation_on_fail(pe_action_t * action) + { + + const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); + +- if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) { ++ if (safe_str_eq(action->task, CRMD_ACTION_STOP) ++ && !valid_stop_on_fail(value)) { ++ + 
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop " +- "action to default value because 'standby' is not " +- "allowed for stop", action->rsc->id); ++ "action to default value because '%s' is not " ++ "allowed for stop", action->rsc->id, value); + return NULL; + } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { + /* demote on_fail defaults to master monitor value if present */ +-- +1.8.3.1 + + +From 8dceba792ffe65cd77c3aae430067638dbba63f9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 08:50:33 -0500 +Subject: [PATCH 03/20] Refactor: scheduler: use more appropriate types in a + couple places + +--- + lib/pengine/unpack.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index f688881..6a350e5 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -2244,7 +2244,7 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d + xmlNode *rsc_op = NULL; + xmlNode *last_failure = NULL; + +- enum action_fail_response on_fail = FALSE; ++ enum action_fail_response on_fail = action_fail_ignore; + enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; + + crm_trace("[%s] Processing %s on %s", +@@ -2287,7 +2287,6 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d + + /* process operations */ + saved_role = rsc->role; +- on_fail = action_fail_ignore; + rsc->role = RSC_ROLE_UNKNOWN; + sorted_op_list = g_list_sort(op_list, sort_op_by_callid); + +@@ -3376,7 +3375,7 @@ int pe__target_rc_from_xml(xmlNode *xml_op) + static enum action_fail_response + get_action_on_fail(pe_resource_t *rsc, const char *key, const char *task, pe_working_set_t * data_set) + { +- int result = action_fail_recover; ++ enum action_fail_response result = action_fail_recover; + pe_action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); + + result = action->on_fail; +-- +1.8.3.1 + + +From a4d6a20a990d1461184f888e21aa61cddff8996d Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 12:05:57 -0500 +Subject: [PATCH 04/20] Low: libpacemaker: don't force stop when skipping + reload of failed resource + +Normal failure recovery will apply, which will stop if needed. + +(The stop was forced as of 2558d76f.) +--- + lib/pacemaker/pcmk_sched_native.c | 16 +++++++++++++--- + 1 file changed, 13 insertions(+), 3 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index bd8a0b5..ff2fb92 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -3362,9 +3362,19 @@ ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set) + pe_rsc_trace(rsc, "%s: unmanaged", rsc->id); + return; + +- } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) { +- pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); +- stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */ ++ } else if (is_set(rsc->flags, pe_rsc_failed)) { ++ /* We don't need to specify any particular actions here, normal failure ++ * recovery will apply. ++ */ ++ pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id); ++ return; ++ ++ } else if (is_set(rsc->flags, pe_rsc_start_pending)) { ++ /* If a resource's configuration changed while a start was pending, ++ * force a full restart. 
++ */ ++ pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id); ++ stop_action(rsc, node, FALSE); + return; + + } else if (node == NULL) { +-- +1.8.3.1 + + +From f2d244bc4306297d5960c0ba54e0a85a68e864ee Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 12:16:33 -0500 +Subject: [PATCH 05/20] Test: scheduler: test forcing a restart instead of + reload when start is pending + +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/params-3.dot | 28 ++++++ + cts/scheduler/params-3.exp | 208 +++++++++++++++++++++++++++++++++++++++++ + cts/scheduler/params-3.scores | 21 +++++ + cts/scheduler/params-3.summary | 45 +++++++++ + cts/scheduler/params-3.xml | 154 ++++++++++++++++++++++++++++++ + 6 files changed, 457 insertions(+) + create mode 100644 cts/scheduler/params-3.dot + create mode 100644 cts/scheduler/params-3.exp + create mode 100644 cts/scheduler/params-3.scores + create mode 100644 cts/scheduler/params-3.summary + create mode 100644 cts/scheduler/params-3.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 346ada2..ae8247e 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -84,6 +84,7 @@ TESTS = [ + [ "params-0", "Params: No change" ], + [ "params-1", "Params: Changed" ], + [ "params-2", "Params: Resource definition" ], ++ [ "params-3", "Params: Restart instead of reload if start pending" ], + [ "params-4", "Params: Reload" ], + [ "params-5", "Params: Restart based on probe digest" ], + [ "novell-251689", "Resource definition change + target_role=stopped" ], +diff --git a/cts/scheduler/params-3.dot b/cts/scheduler/params-3.dot +new file mode 100644 +index 0000000..d681ee5 +--- /dev/null ++++ b/cts/scheduler/params-3.dot +@@ -0,0 +1,28 @@ ++ digraph "g" { ++"Cancel rsc_c001n02_monitor_5000 c001n02" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_monitor_0 c001n01" -> "DcIPaddr_start_0 c001n02" [ style = bold] ++"DcIPaddr_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_monitor_0 c001n03" -> "DcIPaddr_start_0 c001n02" [ style = bold] ++"DcIPaddr_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_monitor_0 c001n08" -> "DcIPaddr_start_0 c001n02" [ style = bold] ++"DcIPaddr_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_monitor_5000 c001n02" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_start_0 c001n02" -> "DcIPaddr_monitor_5000 c001n02" [ style = bold] ++"DcIPaddr_start_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"DcIPaddr_stop_0 c001n02" -> "DcIPaddr_start_0 c001n02" [ style = bold] ++"DcIPaddr_stop_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n01_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n01_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n01_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n02_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n02_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n02_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n02_monitor_6000 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n03_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n03_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n03_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n08_monitor_0 c001n01" [ style=bold color="green" 
fontcolor="black"] ++"rsc_c001n08_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n08_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] ++"rsc_c001n08_monitor_5000 c001n08" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/params-3.exp b/cts/scheduler/params-3.exp +new file mode 100644 +index 0000000..5cccdec +--- /dev/null ++++ b/cts/scheduler/params-3.exp +@@ -0,0 +1,208 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/params-3.scores b/cts/scheduler/params-3.scores +new file mode 100644 +index 0000000..00417ea +--- /dev/null ++++ b/cts/scheduler/params-3.scores +@@ -0,0 +1,21 @@ ++Allocation scores: ++pcmk__native_allocate: DcIPaddr allocation score on c001n01: -INFINITY ++pcmk__native_allocate: DcIPaddr allocation score on c001n02: 0 ++pcmk__native_allocate: DcIPaddr allocation score on c001n03: -INFINITY ++pcmk__native_allocate: DcIPaddr allocation score on c001n08: -INFINITY ++pcmk__native_allocate: rsc_c001n01 allocation score on c001n01: 100 ++pcmk__native_allocate: rsc_c001n01 allocation score on c001n02: 0 ++pcmk__native_allocate: rsc_c001n01 allocation score on c001n03: 0 ++pcmk__native_allocate: rsc_c001n01 allocation score on c001n08: 0 ++pcmk__native_allocate: rsc_c001n02 allocation score on c001n01: 0 ++pcmk__native_allocate: rsc_c001n02 allocation score on c001n02: 100 ++pcmk__native_allocate: rsc_c001n02 allocation score on c001n03: 0 ++pcmk__native_allocate: rsc_c001n02 allocation score on c001n08: 0 ++pcmk__native_allocate: rsc_c001n03 allocation score on c001n01: 0 ++pcmk__native_allocate: rsc_c001n03 allocation score on c001n02: 0 ++pcmk__native_allocate: rsc_c001n03 allocation score on c001n03: 100 ++pcmk__native_allocate: rsc_c001n03 allocation score on c001n08: 0 ++pcmk__native_allocate: rsc_c001n08 allocation score on c001n01: 0 ++pcmk__native_allocate: rsc_c001n08 allocation score on c001n02: 0 ++pcmk__native_allocate: rsc_c001n08 allocation score on c001n03: 0 ++pcmk__native_allocate: rsc_c001n08 allocation score on c001n08: 100 +diff --git a/cts/scheduler/params-3.summary b/cts/scheduler/params-3.summary +new file mode 100644 +index 0000000..257f8ba +--- /dev/null ++++ b/cts/scheduler/params-3.summary +@@ -0,0 +1,45 @@ ++ ++Current cluster status: ++Online: [ c001n01 c001n02 c001n03 c001n08 ] ++ ++ DcIPaddr (ocf::heartbeat:IPaddr): Starting c001n02 ++ rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 ++ rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 ++ rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 ++ rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 ++ ++Transition Summary: ++ * Restart DcIPaddr ( c001n02 ) ++ ++Executing cluster transition: ++ * Resource action: DcIPaddr monitor on c001n08 ++ * Resource action: DcIPaddr monitor on c001n03 ++ * Resource action: DcIPaddr monitor on c001n01 ++ * Resource action: DcIPaddr stop on c001n02 ++ * Resource action: rsc_c001n08 monitor on 
c001n03 ++ * Resource action: rsc_c001n08 monitor on c001n02 ++ * Resource action: rsc_c001n08 monitor on c001n01 ++ * Resource action: rsc_c001n08 monitor=5000 on c001n08 ++ * Resource action: rsc_c001n02 monitor=6000 on c001n02 ++ * Resource action: rsc_c001n02 monitor on c001n08 ++ * Resource action: rsc_c001n02 monitor on c001n03 ++ * Resource action: rsc_c001n02 monitor on c001n01 ++ * Resource action: rsc_c001n02 cancel=5000 on c001n02 ++ * Resource action: rsc_c001n03 monitor on c001n08 ++ * Resource action: rsc_c001n03 monitor on c001n02 ++ * Resource action: rsc_c001n03 monitor on c001n01 ++ * Resource action: rsc_c001n01 monitor on c001n08 ++ * Resource action: rsc_c001n01 monitor on c001n03 ++ * Resource action: rsc_c001n01 monitor on c001n02 ++ * Resource action: DcIPaddr start on c001n02 ++ * Resource action: DcIPaddr monitor=5000 on c001n02 ++ ++Revised cluster status: ++Online: [ c001n01 c001n02 c001n03 c001n08 ] ++ ++ DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 ++ rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 ++ rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 ++ rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 ++ rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 ++ +diff --git a/cts/scheduler/params-3.xml b/cts/scheduler/params-3.xml +new file mode 100644 +index 0000000..ee6e157 +--- /dev/null ++++ b/cts/scheduler/params-3.xml +@@ -0,0 +1,154 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From ff6aebecf8b40b882bddbd0d78e3f8702f97147e Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 13 Apr 2020 12:22:35 -0500 +Subject: [PATCH 06/20] Doc: libpacemaker: improve comments when logging + actions + +... 
with slight refactoring for consistency +--- + lib/pacemaker/pcmk_sched_native.c | 41 ++++++++++++++++++++++----------------- + 1 file changed, 23 insertions(+), 18 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index ff2fb92..f14e690 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -2348,8 +2348,6 @@ native_expand(pe_resource_t * rsc, pe_working_set_t * data_set) + } \ + } while(0) + +-static int rsc_width = 5; +-static int detail_width = 5; + static void + LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal) + { +@@ -2360,6 +2358,9 @@ LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t * + bool same_role = FALSE; + bool need_role = FALSE; + ++ static int rsc_width = 5; ++ static int detail_width = 5; ++ + CRM_ASSERT(action); + CRM_ASSERT(destination != NULL || origin != NULL); + +@@ -2384,36 +2385,40 @@ LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t * + same_role = TRUE; + } + +- if(need_role && origin == NULL) { +- /* Promoting from Stopped */ ++ if (need_role && (origin == NULL)) { ++ /* Starting and promoting a promotable clone instance */ + details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname); + +- } else if(need_role && destination == NULL) { +- /* Demoting a Master or Stopping a Slave */ ++ } else if (origin == NULL) { ++ /* Starting a resource */ ++ details = crm_strdup_printf("%s", destination->details->uname); ++ ++ } else if (need_role && (destination == NULL)) { ++ /* Stopping a promotable clone instance */ + details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); + +- } else if(origin == NULL || destination == NULL) { +- /* Starting or stopping a resource */ +- details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname); ++ } else if (destination == NULL) { ++ /* Stopping a resource */ ++ details = crm_strdup_printf("%s", origin->details->uname); + +- } else if(need_role && same_role && same_host) { +- /* Recovering or restarting a promotable clone resource */ ++ } else if (need_role && same_role && same_host) { ++ /* Recovering, restarting or re-promoting a promotable clone instance */ + details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); + +- } else if(same_role && same_host) { ++ } else if (same_role && same_host) { + /* Recovering or Restarting a normal resource */ + details = crm_strdup_printf("%s", origin->details->uname); + +- } else if(same_role && need_role) { +- /* Moving a promotable clone resource */ ++ } else if (need_role && same_role) { ++ /* Moving a promotable clone instance */ + details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role)); + +- } else if(same_role) { ++ } else if (same_role) { + /* Moving a normal resource */ + details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname); + +- } else if(same_host) { +- /* Promoting or demoting a promotable clone resource */ ++ } else if (same_host) { ++ /* Promoting or demoting a promotable clone instance */ + details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname); + + } else { +@@ -2560,7 +2565,7 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * 
data_set, gboolean terminal) + pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), + next->details->uname); + +- } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) { ++ } else if (is_not_set(start->flags, pe_action_runnable)) { + LogAction("Stop", rsc, current, NULL, stop, + (stop && stop->reason)? stop : start, terminal); + STOP_SANITY_ASSERT(__LINE__); +-- +1.8.3.1 + + +From 98c3b649fa065b7e7a59029cc2f887bc462d170a Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 13 Apr 2020 12:23:22 -0500 +Subject: [PATCH 07/20] Log: libpacemaker: check for re-promotes specifically + +If a promotable clone instance is being demoted and promoted on its current +node, without also stopping and starting, it previously would be logged as +"Leave" indicating unchanged, because the current and next role are the same. + +Now, check for this situation specifically, and log it as "Re-promote". + +Currently, the scheduler is not capable of generating this situation, but +upcoming changes will. +--- + lib/pacemaker/pcmk_sched_native.c | 12 ++++++++++-- + 1 file changed, 10 insertions(+), 2 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index f14e690..89952bf 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -2561,9 +2561,17 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) + } else if (is_set(rsc->flags, pe_rsc_reload)) { + LogAction("Reload", rsc, current, next, start, NULL, terminal); + ++ + } else if (start == NULL || is_set(start->flags, pe_action_optional)) { +- pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), +- next->details->uname); ++ if ((demote != NULL) && (promote != NULL) ++ && is_not_set(demote->flags, pe_action_optional) ++ && is_not_set(promote->flags, pe_action_optional)) { ++ LogAction("Re-promote", rsc, current, next, promote, demote, ++ terminal); ++ } else { ++ pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, ++ role2text(rsc->role), next->details->uname); ++ } + + } else if (is_not_set(start->flags, pe_action_runnable)) { + LogAction("Stop", rsc, current, NULL, stop, +-- +1.8.3.1 + + +From fd55a6660574c0bca517fd519377340712fb443a Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 13 Apr 2020 12:51:03 -0500 +Subject: [PATCH 08/20] Doc: libpacemaker: improve comments for resource state + and action matrices + +Also, make them static, for linker efficiency. +--- + lib/pacemaker/pcmk_sched_native.c | 39 ++++++++++++++++++++++++--------------- + 1 file changed, 24 insertions(+), 15 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index 89952bf..b9bca80 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -41,27 +41,36 @@ gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, + gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); + gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); + +-/* *INDENT-OFF* */ +-enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { +-/* Current State */ +-/* Next State: Unknown Stopped Started Slave Master */ ++/* This array says what the *next* role should be when transitioning from one ++ * role to another. For example going from Stopped to Master, the next role is ++ * RSC_ROLE_SLAVE, because the resource must be started before being promoted. 
++ * The current state then becomes Started, which is fed into this array again, ++ * giving a next role of RSC_ROLE_MASTER. ++ */ ++static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { ++ /* Current state Next state*/ ++ /* Unknown Stopped Started Slave Master */ + /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, + /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, + /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, +- /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, +- /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, ++ /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, ++ /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, + }; + +-gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(pe_resource_t*,pe_node_t*,gboolean,pe_working_set_t*) = { +-/* Current State */ +-/* Next State: Unknown Stopped Started Slave Master */ +- /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, +- /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, +- /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, +- /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, +- /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, }, ++typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next, ++ gboolean optional, ++ pe_working_set_t *data_set); ++ ++// This array picks the function needed to transition from one role to another ++static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { ++ /* Current state Next state */ ++ /* Unknown Stopped Started Slave Master */ ++ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, ++ /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, ++ /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, ++ /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, ++ /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp , }, + }; +-/* *INDENT-ON* */ + + static gboolean + native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set) +-- +1.8.3.1 + + +From 2f1e2df1f5ec67591cddf14f9dda1c52919dd53a Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 26 May 2020 17:50:48 -0500 +Subject: [PATCH 09/20] Feature: xml: add on-fail="demote" option to resources + schema + +We don't need an XML schema version bump because it was already bumped since +the last release, for the rsc_expression/op_expression feature. 
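In configuration terms, the new value slots into the existing on-fail enumeration for operations. A hypothetical CIB fragment (resource and operation IDs, and the interval, are invented for illustration) shows where it is meant to be used; per patch 10/20 below, "demote" is only accepted for promote actions and for recurring monitors with role="Master":

<primitive id="stateful" class="ocf" provider="pacemaker" type="Stateful">
  <operations>
    <!-- hypothetical example: with on-fail="demote", a failed
         promoted-role monitor demotes the instance rather than
         forcing a full stop/start recovery -->
    <op id="stateful-monitor-master" name="monitor" interval="10s"
        role="Master" on-fail="demote"/>
  </operations>
</primitive>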
+--- + xml/resources-3.4.rng | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/xml/resources-3.4.rng b/xml/resources-3.4.rng +index fbb4b65..887dc1c 100644 +--- a/xml/resources-3.4.rng ++++ b/xml/resources-3.4.rng +@@ -388,6 +388,7 @@ + + ignore + block ++ demote + stop + restart + standby +-- +1.8.3.1 + + +From 874f75e0faad91c634860221d727e51e95d97f19 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 08:29:37 -0500 +Subject: [PATCH 10/20] Feature: scheduler: new on-fail="demote" recovery + policy for promoted resources + +--- + include/crm/pengine/pe_types.h | 1 + + lib/pacemaker/pcmk_sched_native.c | 25 +++++++++++++++---- + lib/pengine/common.c | 3 +++ + lib/pengine/unpack.c | 51 ++++++++++++++++++++++++++++++++++++--- + lib/pengine/utils.c | 35 +++++++++++++++++++++++---- + 5 files changed, 102 insertions(+), 13 deletions(-) + +diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h +index ba88491..ed5eb12 100644 +--- a/include/crm/pengine/pe_types.h ++++ b/include/crm/pengine/pe_types.h +@@ -246,6 +246,7 @@ struct pe_node_s { + # define pe_rsc_allocating 0x00000200ULL + # define pe_rsc_merging 0x00000400ULL + ++# define pe_rsc_stop 0x00001000ULL + # define pe_rsc_reload 0x00002000ULL + # define pe_rsc_allow_remote_remotes 0x00004000ULL + +diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c +index b9bca80..4e3bd7c 100644 +--- a/lib/pacemaker/pcmk_sched_native.c ++++ b/lib/pacemaker/pcmk_sched_native.c +@@ -1205,6 +1205,7 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) + pe_node_t *chosen = NULL; + pe_node_t *current = NULL; + gboolean need_stop = FALSE; ++ bool need_promote = FALSE; + gboolean is_moving = FALSE; + gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE; + +@@ -1309,8 +1310,15 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) + need_stop = TRUE; + + } else if (is_set(rsc->flags, pe_rsc_failed)) { +- pe_rsc_trace(rsc, "Recovering %s", rsc->id); +- need_stop = TRUE; ++ if (is_set(rsc->flags, pe_rsc_stop)) { ++ need_stop = TRUE; ++ pe_rsc_trace(rsc, "Recovering %s", rsc->id); ++ } else { ++ pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id); ++ if (rsc->next_role == RSC_ROLE_MASTER) { ++ need_promote = TRUE; ++ } ++ } + + } else if (is_set(rsc->flags, pe_rsc_block)) { + pe_rsc_trace(rsc, "Block %s", rsc->id); +@@ -1344,10 +1352,16 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) + + + while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) { ++ bool required = need_stop; ++ + next_role = rsc_state_matrix[role][rsc->role]; ++ if ((next_role == RSC_ROLE_MASTER) && need_promote) { ++ required = true; ++ } + pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), +- rsc->id, need_stop ? " required" : ""); +- if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) { ++ rsc->id, (required? 
" required" : "")); ++ if (rsc_action_matrix[role][next_role](rsc, chosen, !required, ++ data_set) == FALSE) { + break; + } + role = next_role; +@@ -2631,7 +2645,8 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) + + free(key); + +- } else if (stop && is_set(rsc->flags, pe_rsc_failed)) { ++ } else if (stop && is_set(rsc->flags, pe_rsc_failed) ++ && is_set(rsc->flags, pe_rsc_stop)) { + /* 'stop' may be NULL if the failure was ignored */ + LogAction("Recover", rsc, current, next, stop, start, terminal); + STOP_SANITY_ASSERT(__LINE__); +diff --git a/lib/pengine/common.c b/lib/pengine/common.c +index ded6df8..f4f2106 100644 +--- a/lib/pengine/common.c ++++ b/lib/pengine/common.c +@@ -326,6 +326,9 @@ fail2text(enum action_fail_response fail) + case action_fail_ignore: + result = "ignore"; + break; ++ case action_fail_demote: ++ result = "demote"; ++ break; + case action_fail_block: + result = "block"; + break; +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index 6a350e5..a219805 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -108,6 +108,7 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, + */ + node->details->remote_requires_reset = TRUE; + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + } + } + +@@ -117,6 +118,7 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, + "and guest resource no longer exists", + node->details->uname, reason); + set_bit(node->details->remote_rsc->flags, pe_rsc_failed); ++ set_bit(node->details->remote_rsc->flags, pe_rsc_stop); + + } else if (pe__is_remote_node(node)) { + pe_resource_t *rsc = node->details->remote_rsc; +@@ -1914,6 +1916,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + */ + if (pe__is_guest_node(node)) { + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + should_fence = TRUE; + + } else if (is_set(data_set->flags, pe_flag_stonith_enabled)) { +@@ -1956,6 +1959,11 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + /* nothing to do */ + break; + ++ case action_fail_demote: ++ set_bit(rsc->flags, pe_rsc_failed); ++ demote_action(rsc, node, FALSE); ++ break; ++ + case action_fail_fence: + /* treat it as if it is still running + * but also mark the node as unclean +@@ -1992,12 +2000,14 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + case action_fail_recover: + if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + stop_action(rsc, node, FALSE); + } + break; + + case action_fail_restart_container: + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + + if (rsc->container && pe_rsc_is_bundled(rsc)) { + /* A bundle's remote connection can run on a different node than +@@ -2016,6 +2026,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + + case action_fail_reset_remote: + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + if (is_set(data_set->flags, pe_flag_stonith_enabled)) { + tmpnode = NULL; + if (rsc->is_remote_node) { +@@ -2071,8 +2082,17 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, + } + + native_add_running(rsc, node, data_set); +- if (on_fail != action_fail_ignore) { +- set_bit(rsc->flags, pe_rsc_failed); ++ switch (on_fail) { ++ case action_fail_ignore: ++ break; ++ case action_fail_demote: ++ case action_fail_block: ++ set_bit(rsc->flags, pe_rsc_failed); ++ break; ++ default: ++ set_bit(rsc->flags, 
pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); ++ break; + } + + } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) { +@@ -2595,6 +2615,7 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, + } else { + /* Consider it failed here - forces a restart, prevents migration */ + set_bit(rsc->flags, pe_rsc_failed); ++ set_bit(rsc->flags, pe_rsc_stop); + clear_bit(rsc->flags, pe_rsc_allow_migrate); + } + } +@@ -2785,9 +2806,21 @@ static int + cmp_on_fail(enum action_fail_response first, enum action_fail_response second) + { + switch (first) { ++ case action_fail_demote: ++ switch (second) { ++ case action_fail_ignore: ++ return 1; ++ case action_fail_demote: ++ return 0; ++ default: ++ return -1; ++ } ++ break; ++ + case action_fail_reset_remote: + switch (second) { + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + return 1; + case action_fail_reset_remote: +@@ -2800,6 +2833,7 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second) + case action_fail_restart_container: + switch (second) { + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + case action_fail_reset_remote: + return 1; +@@ -2814,9 +2848,13 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second) + break; + } + switch (second) { ++ case action_fail_demote: ++ return (first == action_fail_ignore)? -1 : 1; ++ + case action_fail_reset_remote: + switch (first) { + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + return -1; + default: +@@ -2827,6 +2865,7 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second) + case action_fail_restart_container: + switch (first) { + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + case action_fail_reset_remote: + return -1; +@@ -3426,7 +3465,11 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c + clear_past_failure = TRUE; + + } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) { +- /* Demote from Master does not clear an error */ ++ ++ if (*on_fail == action_fail_demote) { ++ // Demote clears an error only if on-fail=demote ++ clear_past_failure = TRUE; ++ } + rsc->role = RSC_ROLE_SLAVE; + + } else if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { +@@ -3454,6 +3497,7 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c + + case action_fail_block: + case action_fail_ignore: ++ case action_fail_demote: + case action_fail_recover: + case action_fail_restart_container: + *on_fail = action_fail_ignore; +@@ -3714,6 +3758,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, + * that, ensure the remote connection is considered failed. 
+ */ + set_bit(node->details->remote_rsc->flags, pe_rsc_failed); ++ set_bit(node->details->remote_rsc->flags, pe_rsc_stop); + } + + // fall through +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 3fb7e62..fee9efb 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -720,6 +720,7 @@ static bool + valid_stop_on_fail(const char *value) + { + return safe_str_neq(value, "standby") ++ && safe_str_neq(value, "demote") + && safe_str_neq(value, "stop"); + } + +@@ -727,6 +728,11 @@ static const char * + unpack_operation_on_fail(pe_action_t * action) + { + ++ const char *name = NULL; ++ const char *role = NULL; ++ const char *on_fail = NULL; ++ const char *interval_spec = NULL; ++ const char *enabled = NULL; + const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); + + if (safe_str_eq(action->task, CRMD_ACTION_STOP) +@@ -736,14 +742,10 @@ unpack_operation_on_fail(pe_action_t * action) + "action to default value because '%s' is not " + "allowed for stop", action->rsc->id, value); + return NULL; ++ + } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { + /* demote on_fail defaults to master monitor value if present */ + xmlNode *operation = NULL; +- const char *name = NULL; +- const char *role = NULL; +- const char *on_fail = NULL; +- const char *interval_spec = NULL; +- const char *enabled = NULL; + + CRM_CHECK(action->rsc != NULL, return NULL); + +@@ -766,12 +768,31 @@ unpack_operation_on_fail(pe_action_t * action) + continue; + } else if (crm_parse_interval_spec(interval_spec) == 0) { + continue; ++ } else if (safe_str_eq(on_fail, "demote")) { ++ continue; + } + + value = on_fail; + } + } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) { + value = "ignore"; ++ ++ } else if (safe_str_eq(value, "demote")) { ++ name = crm_element_value(action->op_entry, "name"); ++ role = crm_element_value(action->op_entry, "role"); ++ on_fail = crm_element_value(action->op_entry, XML_OP_ATTR_ON_FAIL); ++ interval_spec = crm_element_value(action->op_entry, ++ XML_LRM_ATTR_INTERVAL); ++ ++ if (safe_str_neq(name, CRMD_ACTION_PROMOTE) ++ && (safe_str_neq(name, CRMD_ACTION_STATUS) ++ || safe_str_neq(role, "Master") ++ || (crm_parse_interval_spec(interval_spec) == 0))) { ++ pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s " ++ "action to default value because 'demote' is not " ++ "allowed for it", action->rsc->id, name); ++ return NULL; ++ } + } + + return value; +@@ -1170,6 +1191,10 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai + value = NULL; + } + ++ } else if (safe_str_eq(value, "demote")) { ++ action->on_fail = action_fail_demote; ++ value = "demote instance"; ++ + } else { + pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); + value = NULL; +-- +1.8.3.1 + + +From d29433ea57796de000f4fea8c60f8da1d903108b Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 16 Jun 2020 16:03:14 -0500 +Subject: [PATCH 11/20] Test: scheduler: add regression tests for + on-fail="demote" + +--- + cts/cts-scheduler.in | 4 + + cts/scheduler/on_fail_demote1.dot | 64 ++ + cts/scheduler/on_fail_demote1.exp | 360 +++++++ + cts/scheduler/on_fail_demote1.scores | 470 +++++++++ + cts/scheduler/on_fail_demote1.summary | 86 ++ + cts/scheduler/on_fail_demote1.xml | 616 +++++++++++ + cts/scheduler/on_fail_demote2.dot | 22 + + cts/scheduler/on_fail_demote2.exp | 125 +++ + cts/scheduler/on_fail_demote2.scores | 127 +++ + cts/scheduler/on_fail_demote2.summary | 41 + + cts/scheduler/on_fail_demote2.xml | 221 
++++ + cts/scheduler/on_fail_demote3.dot | 12 + + cts/scheduler/on_fail_demote3.exp | 63 ++ + cts/scheduler/on_fail_demote3.scores | 127 +++ + cts/scheduler/on_fail_demote3.summary | 34 + + cts/scheduler/on_fail_demote3.xml | 221 ++++ + cts/scheduler/on_fail_demote4.dot | 383 +++++++ + cts/scheduler/on_fail_demote4.exp | 1818 +++++++++++++++++++++++++++++++++ + cts/scheduler/on_fail_demote4.scores | 470 +++++++++ + cts/scheduler/on_fail_demote4.summary | 187 ++++ + cts/scheduler/on_fail_demote4.xml | 625 ++++++++++++ + 21 files changed, 6076 insertions(+) + create mode 100644 cts/scheduler/on_fail_demote1.dot + create mode 100644 cts/scheduler/on_fail_demote1.exp + create mode 100644 cts/scheduler/on_fail_demote1.scores + create mode 100644 cts/scheduler/on_fail_demote1.summary + create mode 100644 cts/scheduler/on_fail_demote1.xml + create mode 100644 cts/scheduler/on_fail_demote2.dot + create mode 100644 cts/scheduler/on_fail_demote2.exp + create mode 100644 cts/scheduler/on_fail_demote2.scores + create mode 100644 cts/scheduler/on_fail_demote2.summary + create mode 100644 cts/scheduler/on_fail_demote2.xml + create mode 100644 cts/scheduler/on_fail_demote3.dot + create mode 100644 cts/scheduler/on_fail_demote3.exp + create mode 100644 cts/scheduler/on_fail_demote3.scores + create mode 100644 cts/scheduler/on_fail_demote3.summary + create mode 100644 cts/scheduler/on_fail_demote3.xml + create mode 100644 cts/scheduler/on_fail_demote4.dot + create mode 100644 cts/scheduler/on_fail_demote4.exp + create mode 100644 cts/scheduler/on_fail_demote4.scores + create mode 100644 cts/scheduler/on_fail_demote4.summary + create mode 100644 cts/scheduler/on_fail_demote4.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index ae8247e..0e68e73 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -478,6 +478,10 @@ TESTS = [ + [ "master-score-startup", "Use permanent master scores without LRM history" ], + [ "failed-demote-recovery", "Recover resource in slave role after demote fails" ], + [ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ], ++ [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ], ++ [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], ++ [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], ++ [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], + ], + [ + [ "history-1", "Correctly parse stateful-1 resource state" ], +diff --git a/cts/scheduler/on_fail_demote1.dot b/cts/scheduler/on_fail_demote1.dot +new file mode 100644 +index 0000000..d11c1c1 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.dot +@@ -0,0 +1,64 @@ ++ digraph "g" { ++"bundled_demote_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" -> "stateful-bundle-master_demoted_0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"bundled_promote_0 stateful-bundle-0" -> "stateful-bundle-master_promoted_0" [ style = bold] ++"bundled_promote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"lxc-ms-master_demote_0" -> "lxc-ms-master_demoted_0" [ style = bold] ++"lxc-ms-master_demote_0" -> "lxc-ms_demote_0 lxc2" [ style = bold] ++"lxc-ms-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_demoted_0" -> "lxc-ms-master_promote_0" [ 
style = bold] ++"lxc-ms-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_promote_0" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms-master_demoted_0" [ style = bold] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms_demote_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"lxc-ms_promote_0 lxc2" -> "lxc-ms-master_promoted_0" [ style = bold] ++"lxc-ms_promote_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-4" [ style = bold] ++"rsc1-clone_promote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-4" -> "rsc1_promote_0 rhel7-4" [ style = bold] ++"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1_promote_0 rhel7-4" -> "rsc1-clone_promoted_0" [ style = bold] ++"rsc1_promote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc2-master_demote_0" -> "rsc2-master_demoted_0" [ style = bold] ++"rsc2-master_demote_0" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] ++"rsc2-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_demoted_0" -> "rsc2-master_promote_0" [ style = bold] ++"rsc2-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_promote_0" -> "rsc2_promote_0 remote-rhel7-2" [ style = bold] ++"rsc2-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2_demote_0 remote-rhel7-2" -> "rsc2-master_demoted_0" [ style = bold] ++"rsc2_demote_0 remote-rhel7-2" -> "rsc2_promote_0 remote-rhel7-2" [ style = bold] ++"rsc2_demote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="black"] ++"rsc2_promote_0 remote-rhel7-2" -> "rsc2-master_promoted_0" [ style = bold] ++"rsc2_promote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-master_demote_0" -> "bundled_demote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_demote_0" -> "stateful-bundle-master_demoted_0" [ style = bold] ++"stateful-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle_demoted_0" [ style = bold] ++"stateful-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_promote_0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_promoted_0" -> "stateful-bundle_promoted_0" [ style = bold] ++"stateful-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_demote_0" -> "stateful-bundle-master_demote_0" [ style = bold] ++"stateful-bundle_demote_0" -> 
"stateful-bundle_demoted_0" [ style = bold] ++"stateful-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_demoted_0" -> "stateful-bundle_promote_0" [ style = bold] ++"stateful-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_promote_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] ++} +diff --git a/cts/scheduler/on_fail_demote1.exp b/cts/scheduler/on_fail_demote1.exp +new file mode 100644 +index 0000000..ebe1dd5 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.exp +@@ -0,0 +1,360 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/on_fail_demote1.scores b/cts/scheduler/on_fail_demote1.scores +new file mode 100644 +index 0000000..7df582f +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.scores +@@ -0,0 +1,470 @@ ++Allocation scores: ++Using the original execution date of: 2020-06-16 19:23:21Z ++bundled:0 promotion score on stateful-bundle-0: 10 ++bundled:1 promotion score on stateful-bundle-1: 5 ++bundled:2 promotion score on stateful-bundle-2: 5 ++lxc-ms:0 promotion score on lxc2: INFINITY ++lxc-ms:1 promotion score on lxc1: INFINITY ++pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501 ++pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501 ++pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501 ++pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: 
stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY ++pcmk__clone_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY ++pcmk__clone_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY ++pcmk__clone_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms:0 allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms:1 allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation 
score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on remote-rhel7-2: 6 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on lxc2: 6 ++pcmk__clone_allocate: rsc1:5 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on lxc1: 6 ++pcmk__clone_allocate: rsc1:6 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-1: 0 
++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2-master allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on remote-rhel7-2: 11 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on lxc2: 6 ++pcmk__clone_allocate: rsc2:5 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on lxc1: 6 ++pcmk__clone_allocate: rsc2:6 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:6 
allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on lxc1: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on lxc2: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on remote-rhel7-2: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-1: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-3: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-4: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-5: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-0: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-1: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-2: 0 ++pcmk__native_allocate: Fencing allocation score on lxc1: -INFINITY ++pcmk__native_allocate: Fencing allocation score on lxc2: -INFINITY ++pcmk__native_allocate: Fencing allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY ++pcmk__native_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY ++pcmk__native_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY ++pcmk__native_allocate: container1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: container1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: container1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: container1 allocation score on rhel7-3: INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: container1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: container2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: container2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: container2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: container2 allocation score on rhel7-3: INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: container2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on lxc2: INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-4: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on lxc1: INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on lxc2: INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-3: 
0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:2 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc1:3 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:3 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:4 allocation 
score on lxc2: 0 ++pcmk__native_allocate: rsc1:4 allocation score on remote-rhel7-2: 6 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:5 allocation score on lxc2: 6 ++pcmk__native_allocate: rsc1:5 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on lxc1: 6 ++pcmk__native_allocate: rsc1:6 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:0 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:0 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:1 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:2 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc2:3 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:3 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on remote-rhel7-2: 11 ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: 
-INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:5 allocation score on lxc2: 6 ++pcmk__native_allocate: rsc2:5 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on lxc1: 6 ++pcmk__native_allocate: rsc2:6 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on remote-rhel7-2: 0 ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-5: 10000 ++pcmk__native_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-1: 10000 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-4: 10000 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: -10000 ++pcmk__native_allocate: 
stateful-bundle-docker-1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: -INFINITY ++rsc1:0 promotion score on rhel7-4: 10 ++rsc1:1 promotion score on rhel7-3: 5 ++rsc1:2 promotion score on rhel7-5: 5 ++rsc1:3 promotion score on rhel7-1: 5 ++rsc1:4 promotion score on remote-rhel7-2: 5 ++rsc1:5 promotion score on lxc2: 5 ++rsc1:6 promotion score on lxc1: 5 ++rsc2:0 promotion score on rhel7-4: 10 ++rsc2:1 promotion score on rhel7-3: 5 ++rsc2:2 promotion score on rhel7-5: 5 ++rsc2:3 promotion score on rhel7-1: 5 ++rsc2:4 promotion score on remote-rhel7-2: 110 ++rsc2:5 promotion score on lxc2: 5 ++rsc2:6 promotion score on lxc1: 5 +diff --git 
a/cts/scheduler/on_fail_demote1.summary b/cts/scheduler/on_fail_demote1.summary +new file mode 100644 +index 0000000..b173582 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.summary +@@ -0,0 +1,86 @@ ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Current cluster status: ++Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++RemoteOnline: [ remote-rhel7-2 ] ++GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-4 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4 ++ Slaves: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ rsc2 (ocf::pacemaker:Stateful): FAILED Master remote-rhel7-2 ++ Slaves: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++ remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1 ++ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ Clone Set: lxc-ms-master [lxc-ms] (promotable) ++ lxc-ms (ocf::pacemaker:Stateful): FAILED Master lxc2 ++ Slaves: [ lxc1 ] ++ Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++ Container bundle set: stateful-bundle [pcmktest:http] ++ stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): FAILED Master rhel7-5 ++ stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 ++ stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-4 ++ ++Transition Summary: ++ * Re-promote rsc1:0 ( Master rhel7-4 ) ++ * Re-promote rsc2:4 ( Master remote-rhel7-2 ) ++ * Re-promote lxc-ms:0 ( Master lxc2 ) ++ * Re-promote bundled:0 ( Master stateful-bundle-0 ) ++ ++Executing cluster transition: ++ * Pseudo action: rsc1-clone_demote_0 ++ * Pseudo action: rsc2-master_demote_0 ++ * Pseudo action: lxc-ms-master_demote_0 ++ * Pseudo action: stateful-bundle_demote_0 ++ * Resource action: rsc1 demote on rhel7-4 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Pseudo action: rsc1-clone_promote_0 ++ * Resource action: rsc2 demote on remote-rhel7-2 ++ * Pseudo action: rsc2-master_demoted_0 ++ * Pseudo action: rsc2-master_promote_0 ++ * Resource action: lxc-ms demote on lxc2 ++ * Pseudo action: lxc-ms-master_demoted_0 ++ * Pseudo action: lxc-ms-master_promote_0 ++ * Pseudo action: stateful-bundle-master_demote_0 ++ * Resource action: rsc1 promote on rhel7-4 ++ * Pseudo action: rsc1-clone_promoted_0 ++ * Resource action: rsc2 promote on remote-rhel7-2 ++ * Pseudo action: rsc2-master_promoted_0 ++ * Resource action: lxc-ms promote on lxc2 ++ * Pseudo action: lxc-ms-master_promoted_0 ++ * Resource action: bundled demote on stateful-bundle-0 ++ * Pseudo action: stateful-bundle-master_demoted_0 ++ * Pseudo action: stateful-bundle_demoted_0 ++ * Pseudo action: stateful-bundle_promote_0 ++ * Pseudo action: stateful-bundle-master_promote_0 ++ * Resource action: bundled promote on stateful-bundle-0 ++ * Pseudo action: stateful-bundle-master_promoted_0 ++ * Pseudo action: stateful-bundle_promoted_0 ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++RemoteOnline: [ remote-rhel7-2 ] ++GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ] ++ ++ Fencing (stonith:fence_xvm): 
Started rhel7-4 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Masters: [ rhel7-4 ] ++ Slaves: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ remote-rhel7-2 ] ++ Slaves: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] ++ remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1 ++ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ Clone Set: lxc-ms-master [lxc-ms] (promotable) ++ Masters: [ lxc2 ] ++ Slaves: [ lxc1 ] ++ Container bundle set: stateful-bundle [pcmktest:http] ++ stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): Master rhel7-5 ++ stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 ++ stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-4 ++ +diff --git a/cts/scheduler/on_fail_demote1.xml b/cts/scheduler/on_fail_demote1.xml +new file mode 100644 +index 0000000..9f3ff20 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote1.xml +@@ -0,0 +1,616 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/on_fail_demote2.dot b/cts/scheduler/on_fail_demote2.dot +new file mode 100644 +index 0000000..06193cb +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.dot +@@ -0,0 +1,22 @@ ++ digraph "g" { ++"Cancel rsc1_monitor_10000 rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"Cancel rsc1_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"Cancel rsc1_monitor_11000 rhel7-3" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"Cancel rsc1_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" 
-> "rsc1_demote_0 rhel7-4" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"rsc1-clone_promote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-4" -> "rsc1_monitor_11000 rhel7-4" [ style = bold] ++"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_11000 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1_promote_0 rhel7-3" -> "rsc1-clone_promoted_0" [ style = bold] ++"rsc1_promote_0 rhel7-3" -> "rsc1_monitor_10000 rhel7-3" [ style = bold] ++"rsc1_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/on_fail_demote2.exp b/cts/scheduler/on_fail_demote2.exp +new file mode 100644 +index 0000000..492e86f +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.exp +@@ -0,0 +1,125 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/on_fail_demote2.scores b/cts/scheduler/on_fail_demote2.scores +new file mode 100644 +index 0000000..25aea90 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.scores +@@ -0,0 +1,127 @@ ++Allocation scores: ++Using the original execution date of: 2020-06-16 19:23:21Z ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on 
rhel7-1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-2: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc1:3 
allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY ++rsc1:0 promotion score on rhel7-4: -INFINITY ++rsc1:1 promotion score on rhel7-3: 5 ++rsc1:2 promotion score on rhel7-5: 5 ++rsc1:3 promotion score on rhel7-1: 5 ++rsc1:4 promotion score on rhel7-2: 5 ++rsc2:0 promotion score on rhel7-4: 10 ++rsc2:1 promotion score on rhel7-3: 5 ++rsc2:2 promotion score on rhel7-5: 5 ++rsc2:3 promotion score on rhel7-1: 5 ++rsc2:4 promotion score on rhel7-2: 5 +diff --git a/cts/scheduler/on_fail_demote2.summary b/cts/scheduler/on_fail_demote2.summary +new file mode 100644 +index 0000000..795a11d +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.summary +@@ -0,0 +1,41 @@ ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Current cluster status: ++Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-1 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4 ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ rhel7-4 ] ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ ++Transition Summary: ++ * Demote rsc1:0 ( Master -> Slave rhel7-4 ) ++ * Promote rsc1:1 ( Slave -> Master rhel7-3 ) ++ ++Executing cluster transition: ++ * Resource action: rsc1 cancel=10000 on rhel7-4 ++ * Resource action: rsc1 cancel=11000 
on rhel7-3 ++ * Pseudo action: rsc1-clone_demote_0 ++ * Resource action: rsc1 demote on rhel7-4 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Pseudo action: rsc1-clone_promote_0 ++ * Resource action: rsc1 monitor=11000 on rhel7-4 ++ * Resource action: rsc1 promote on rhel7-3 ++ * Pseudo action: rsc1-clone_promoted_0 ++ * Resource action: rsc1 monitor=10000 on rhel7-3 ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-1 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Masters: [ rhel7-3 ] ++ Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ rhel7-4 ] ++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] ++ +diff --git a/cts/scheduler/on_fail_demote2.xml b/cts/scheduler/on_fail_demote2.xml +new file mode 100644 +index 0000000..ae91633 +--- /dev/null ++++ b/cts/scheduler/on_fail_demote2.xml +@@ -0,0 +1,221 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/on_fail_demote3.dot b/cts/scheduler/on_fail_demote3.dot +new file mode 100644 +index 0000000..e78325b +--- /dev/null ++++ b/cts/scheduler/on_fail_demote3.dot +@@ -0,0 +1,12 @@ ++ digraph "g" { ++"Cancel rsc1_monitor_10000 rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"Cancel rsc1_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-4" -> "rsc1_monitor_11000 rhel7-4" [ style = bold] ++"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_11000 rhel7-4" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/on_fail_demote3.exp b/cts/scheduler/on_fail_demote3.exp +new file mode 100644 +index 0000000..ed6bd6d +--- /dev/null ++++ b/cts/scheduler/on_fail_demote3.exp +@@ -0,0 +1,63 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/cts/scheduler/on_fail_demote3.scores b/cts/scheduler/on_fail_demote3.scores +new file mode 100644 +index 0000000..a85639a +--- /dev/null ++++ b/cts/scheduler/on_fail_demote3.scores +@@ -0,0 +1,127 @@ ++Allocation scores: ++Using the original execution date of: 2020-06-16 19:23:21Z ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone 
allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 
++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-2: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11 ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: 
-INFINITY
++pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY
++rsc1:0 promotion score on rhel7-4: -INFINITY
++rsc1:1 promotion score on rhel7-3: -INFINITY
++rsc1:2 promotion score on rhel7-5: -INFINITY
++rsc1:3 promotion score on rhel7-1: -INFINITY
++rsc1:4 promotion score on rhel7-2: -INFINITY
++rsc2:0 promotion score on rhel7-4: 10
++rsc2:1 promotion score on rhel7-3: 5
++rsc2:2 promotion score on rhel7-5: 5
++rsc2:3 promotion score on rhel7-1: 5
++rsc2:4 promotion score on rhel7-2: 5
+diff --git a/cts/scheduler/on_fail_demote3.summary b/cts/scheduler/on_fail_demote3.summary
+new file mode 100644
+index 0000000..f1173fd
+--- /dev/null
++++ b/cts/scheduler/on_fail_demote3.summary
+@@ -0,0 +1,34 @@
++Using the original execution date of: 2020-06-16 19:23:21Z
++
++Current cluster status:
++Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
++
++ Fencing (stonith:fence_xvm): Started rhel7-1
++ Clone Set: rsc1-clone [rsc1] (promotable)
++ rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4
++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
++ Clone Set: rsc2-master [rsc2] (promotable)
++ Masters: [ rhel7-4 ]
++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
++
++Transition Summary:
++ * Demote rsc1:0 ( Master -> Slave rhel7-4 )
++
++Executing cluster transition:
++ * Resource action: rsc1 cancel=10000 on rhel7-4
++ * Pseudo action: rsc1-clone_demote_0
++ * Resource action: rsc1 demote on rhel7-4
++ * Pseudo action: rsc1-clone_demoted_0
++ * Resource action: rsc1 monitor=11000 on rhel7-4
++Using the original execution date of: 2020-06-16 19:23:21Z
++
++Revised cluster status:
++Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
++
++ Fencing (stonith:fence_xvm): Started rhel7-1
++ Clone Set: rsc1-clone [rsc1] (promotable)
++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
++ Clone Set: rsc2-master [rsc2] (promotable)
++ Masters: [ rhel7-4 ]
++ Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
++
+diff --git a/cts/scheduler/on_fail_demote3.xml b/cts/scheduler/on_fail_demote3.xml
+new file mode 100644
+index 0000000..a7b6806
+--- /dev/null
++++ b/cts/scheduler/on_fail_demote3.xml
+@@ -0,0 +1,221 @@
[... 221 added lines of XML omitted: markup stripped in extraction ...]
+diff --git a/cts/scheduler/on_fail_demote4.dot b/cts/scheduler/on_fail_demote4.dot
+new file mode 100644
+index 0000000..4715cd3
+--- /dev/null
++++ b/cts/scheduler/on_fail_demote4.dot
+@@ -0,0 +1,383 @@
++ digraph "g" {
++"Cancel rsc1_monitor_11000 rhel7-3" -> "rsc1_promote_0 rhel7-3" [ style = bold]
++"Cancel rsc1_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"]
++"Cancel rsc2_monitor_11000 rhel7-3" -> "rsc2_promote_0 rhel7-3" [ style = bold]
++"Cancel rsc2_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"]
++"Fencing_monitor_120000 rhel7-5" [ style=bold color="green" fontcolor="black"]
++"Fencing_start_0 rhel7-5" -> "Fencing_monitor_120000 rhel7-5" [ style = bold]
++"Fencing_start_0 rhel7-5" [
style=bold color="green" fontcolor="black"] ++"Fencing_stop_0 rhel7-4" -> "Fencing_start_0 rhel7-5" [ style = bold] ++"Fencing_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"bundled_demote_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" -> "stateful-bundle-master_demoted_0" [ style = bold] ++"bundled_demote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="orange"] ++"bundled_monitor_10000 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"bundled_monitor_11000 stateful-bundle-2" [ style=bold color="green" fontcolor="black"] ++"bundled_promote_0 stateful-bundle-0" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] ++"bundled_promote_0 stateful-bundle-0" -> "stateful-bundle-master_promoted_0" [ style = bold] ++"bundled_promote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"bundled_start_0 stateful-bundle-0" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] ++"bundled_start_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"bundled_start_0 stateful-bundle-0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"bundled_start_0 stateful-bundle-0" -> "stateful-bundle-master_running_0" [ style = bold] ++"bundled_start_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] ++"bundled_start_0 stateful-bundle-2" -> "bundled_monitor_11000 stateful-bundle-2" [ style = bold] ++"bundled_start_0 stateful-bundle-2" -> "stateful-bundle-master_running_0" [ style = bold] ++"bundled_start_0 stateful-bundle-2" [ style=bold color="green" fontcolor="black"] ++"bundled_stop_0 stateful-bundle-0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"bundled_stop_0 stateful-bundle-0" -> "stateful-bundle-master_stopped_0" [ style = bold] ++"bundled_stop_0 stateful-bundle-0" [ style=bold color="green" fontcolor="orange"] ++"bundled_stop_0 stateful-bundle-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"bundled_stop_0 stateful-bundle-2" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] ++"bundled_stop_0 stateful-bundle-2" -> "stateful-bundle-master_stopped_0" [ style = bold] ++"bundled_stop_0 stateful-bundle-2" [ style=bold color="green" fontcolor="orange"] ++"container2_monitor_20000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"container2_start_0 rhel7-3" -> "container2_monitor_20000 rhel7-3" [ style = bold] ++"container2_start_0 rhel7-3" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"container2_start_0 rhel7-3" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"container2_start_0 rhel7-3" -> "lxc2_start_0 rhel7-3" [ style = bold] ++"container2_start_0 rhel7-3" -> "rsc1_start_0 lxc2" [ style = bold] ++"container2_start_0 rhel7-3" -> "rsc2_start_0 lxc2" [ style = bold] ++"container2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"container2_stop_0 rhel7-3" -> "container2_start_0 rhel7-3" [ style = bold] ++"container2_stop_0 rhel7-3" -> "stonith 'reboot' lxc2" [ style = bold] ++"container2_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"lxc-ms-master_demote_0" -> "lxc-ms-master_demoted_0" [ style = bold] ++"lxc-ms-master_demote_0" -> "lxc-ms_demote_0 lxc2" [ style = bold] ++"lxc-ms-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_demoted_0" -> "lxc-ms-master_promote_0" [ style = bold] ++"lxc-ms-master_demoted_0" -> 
"lxc-ms-master_start_0" [ style = bold] ++"lxc-ms-master_demoted_0" -> "lxc-ms-master_stop_0" [ style = bold] ++"lxc-ms-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_promote_0" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_running_0" -> "lxc-ms-master_promote_0" [ style = bold] ++"lxc-ms-master_running_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_start_0" -> "lxc-ms-master_running_0" [ style = bold] ++"lxc-ms-master_start_0" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"lxc-ms-master_start_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_stop_0" -> "lxc-ms-master_stopped_0" [ style = bold] ++"lxc-ms-master_stop_0" -> "lxc-ms_stop_0 lxc2" [ style = bold] ++"lxc-ms-master_stop_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms-master_stopped_0" -> "lxc-ms-master_promote_0" [ style = bold] ++"lxc-ms-master_stopped_0" -> "lxc-ms-master_start_0" [ style = bold] ++"lxc-ms-master_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms-master_demoted_0" [ style = bold] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms_demote_0 lxc2" -> "lxc-ms_stop_0 lxc2" [ style = bold] ++"lxc-ms_demote_0 lxc2" [ style=bold color="green" fontcolor="orange"] ++"lxc-ms_monitor_10000 lxc2" [ style=bold color="green" fontcolor="black"] ++"lxc-ms_promote_0 lxc2" -> "lxc-ms-master_promoted_0" [ style = bold] ++"lxc-ms_promote_0 lxc2" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] ++"lxc-ms_promote_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"lxc-ms_start_0 lxc2" -> "lxc-ms-master_running_0" [ style = bold] ++"lxc-ms_start_0 lxc2" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] ++"lxc-ms_start_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc-ms_start_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"lxc-ms_stop_0 lxc2" -> "lxc-ms-master_stopped_0" [ style = bold] ++"lxc-ms_stop_0 lxc2" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"lxc-ms_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] ++"lxc2_monitor_30000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"lxc2_start_0 rhel7-3" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "lxc2_monitor_30000 rhel7-3" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "rsc1_monitor_11000 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "rsc1_start_0 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "rsc2_monitor_11000 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" -> "rsc2_start_0 lxc2" [ style = bold] ++"lxc2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"lxc2_stop_0 rhel7-3" -> "container2_stop_0 rhel7-3" [ style = bold] ++"lxc2_stop_0 rhel7-3" -> "lxc2_start_0 rhel7-3" [ style = bold] ++"lxc2_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"remote-rhel7-2_monitor_60000 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"remote-rhel7-2_start_0 rhel7-1" -> "remote-rhel7-2_monitor_60000 rhel7-1" [ style = bold] ++"remote-rhel7-2_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"remote-rhel7-2_stop_0 rhel7-1" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] ++"remote-rhel7-2_stop_0 rhel7-1" [ style=bold color="green" 
fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_demoted_0" -> "rsc1-clone_start_0" [ style = bold] ++"rsc1-clone_demoted_0" -> "rsc1-clone_stop_0" [ style = bold] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"rsc1-clone_promote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_running_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_running_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_start_0" -> "rsc1-clone_running_0" [ style = bold] ++"rsc1-clone_start_0" -> "rsc1_start_0 lxc2" [ style = bold] ++"rsc1-clone_start_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_stop_0" -> "rsc1-clone_stopped_0" [ style = bold] ++"rsc1-clone_stop_0" -> "rsc1_stop_0 lxc2" [ style = bold] ++"rsc1-clone_stop_0" -> "rsc1_stop_0 remote-rhel7-2" [ style = bold] ++"rsc1-clone_stop_0" -> "rsc1_stop_0 rhel7-4" [ style = bold] ++"rsc1-clone_stop_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_stopped_0" -> "rsc1-clone_promote_0" [ style = bold] ++"rsc1-clone_stopped_0" -> "rsc1-clone_start_0" [ style = bold] ++"rsc1-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-4" -> "rsc1_stop_0 rhel7-4" [ style = bold] ++"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"rsc1_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_11000 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc1_promote_0 rhel7-3" -> "rsc1-clone_promoted_0" [ style = bold] ++"rsc1_promote_0 rhel7-3" -> "rsc1_monitor_10000 rhel7-3" [ style = bold] ++"rsc1_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc1_start_0 lxc2" -> "rsc1-clone_running_0" [ style = bold] ++"rsc1_start_0 lxc2" -> "rsc1_monitor_11000 lxc2" [ style = bold] ++"rsc1_start_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc1_stop_0 lxc2" -> "rsc1-clone_stopped_0" [ style = bold] ++"rsc1_stop_0 lxc2" -> "rsc1_start_0 lxc2" [ style = bold] ++"rsc1_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] ++"rsc1_stop_0 remote-rhel7-2" -> "remote-rhel7-2_stop_0 rhel7-1" [ style = bold] ++"rsc1_stop_0 remote-rhel7-2" -> "rsc1-clone_stopped_0" [ style = bold] ++"rsc1_stop_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] ++"rsc1_stop_0 rhel7-4" -> "rsc1-clone_stopped_0" [ style = bold] ++"rsc1_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_demote_0" -> "rsc2-master_demoted_0" [ style = bold] ++"rsc2-master_demote_0" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] ++"rsc2-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_demoted_0" -> "rsc2-master_promote_0" [ style = bold] ++"rsc2-master_demoted_0" -> "rsc2-master_start_0" [ style = bold] ++"rsc2-master_demoted_0" -> "rsc2-master_stop_0" [ style = bold] ++"rsc2-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_promote_0" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"rsc2-master_promote_0" [ style=bold color="green" fontcolor="orange"] 
++"rsc2-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_running_0" -> "rsc2-master_promote_0" [ style = bold] ++"rsc2-master_running_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_start_0" -> "rsc2-master_running_0" [ style = bold] ++"rsc2-master_start_0" -> "rsc2_start_0 lxc2" [ style = bold] ++"rsc2-master_start_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_stop_0" -> "rsc2-master_stopped_0" [ style = bold] ++"rsc2-master_stop_0" -> "rsc2_stop_0 lxc2" [ style = bold] ++"rsc2-master_stop_0" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] ++"rsc2-master_stop_0" -> "rsc2_stop_0 rhel7-4" [ style = bold] ++"rsc2-master_stop_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2-master_stopped_0" -> "rsc2-master_promote_0" [ style = bold] ++"rsc2-master_stopped_0" -> "rsc2-master_start_0" [ style = bold] ++"rsc2-master_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"rsc2_demote_0 remote-rhel7-2" -> "rsc2-master_demoted_0" [ style = bold] ++"rsc2_demote_0 remote-rhel7-2" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] ++"rsc2_demote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] ++"rsc2_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc2_monitor_11000 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc2_promote_0 rhel7-3" -> "rsc2-master_promoted_0" [ style = bold] ++"rsc2_promote_0 rhel7-3" -> "rsc2_monitor_10000 rhel7-3" [ style = bold] ++"rsc2_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"rsc2_start_0 lxc2" -> "rsc2-master_running_0" [ style = bold] ++"rsc2_start_0 lxc2" -> "rsc2_monitor_11000 lxc2" [ style = bold] ++"rsc2_start_0 lxc2" [ style=bold color="green" fontcolor="black"] ++"rsc2_stop_0 lxc2" -> "rsc2-master_stopped_0" [ style = bold] ++"rsc2_stop_0 lxc2" -> "rsc2_start_0 lxc2" [ style = bold] ++"rsc2_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] ++"rsc2_stop_0 remote-rhel7-2" -> "remote-rhel7-2_stop_0 rhel7-1" [ style = bold] ++"rsc2_stop_0 remote-rhel7-2" -> "rsc2-master_stopped_0" [ style = bold] ++"rsc2_stop_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] ++"rsc2_stop_0 rhel7-4" -> "rsc2-master_stopped_0" [ style = bold] ++"rsc2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-0_monitor_30000 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-0_start_0 rhel7-5" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] ++"stateful-bundle-0_start_0 rhel7-5" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-0_start_0 rhel7-5" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-0_start_0 rhel7-5" -> "stateful-bundle-0_monitor_30000 rhel7-5" [ style = bold] ++"stateful-bundle-0_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-0_stop_0 rhel7-5" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] ++"stateful-bundle-0_stop_0 rhel7-5" -> "stateful-bundle-docker-0_stop_0 rhel7-5" [ style = bold] ++"stateful-bundle-0_stop_0 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-2_monitor_30000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-2_start_0 rhel7-3" -> "bundled_monitor_11000 stateful-bundle-2" [ style = bold] ++"stateful-bundle-2_start_0 rhel7-3" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle-2_start_0 rhel7-3" -> "stateful-bundle-2_monitor_30000 rhel7-3" [ style = bold] 
++"stateful-bundle-2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-2_stop_0 rhel7-4" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-2_stop_0 rhel7-4" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] ++"stateful-bundle-2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-docker-0_monitor_60000 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "stateful-bundle-docker-0_monitor_60000 rhel7-5" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" -> "stateful-bundle_running_0" [ style = bold] ++"stateful-bundle-docker-0_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stateful-bundle_stopped_0" [ style = bold] ++"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stonith 'reboot' stateful-bundle-0" [ style = bold] ++"stateful-bundle-docker-0_stop_0 rhel7-5" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-2_monitor_60000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-2_start_0 rhel7-3" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle-docker-2_monitor_60000 rhel7-3" [ style = bold] ++"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle_running_0" [ style = bold] ++"stateful-bundle-docker-2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style = bold] ++"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle_stopped_0" [ style = bold] ++"stateful-bundle-docker-2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-ip-192.168.122.133_monitor_60000 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" -> "stateful-bundle-ip-192.168.122.133_monitor_60000 rhel7-3" [ style = bold] ++"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] ++"stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] ++"stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_demote_0" -> "bundled_demote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_demote_0" -> "stateful-bundle-master_demoted_0" [ style = bold] ++"stateful-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_start_0" [ 
style = bold] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_stop_0" [ style = bold] ++"stateful-bundle-master_demoted_0" -> "stateful-bundle_demoted_0" [ style = bold] ++"stateful-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_promote_0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_promoted_0" -> "stateful-bundle_promoted_0" [ style = bold] ++"stateful-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_running_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle-master_running_0" -> "stateful-bundle_running_0" [ style = bold] ++"stateful-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_start_0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_start_0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle-master_start_0" -> "stateful-bundle-master_running_0" [ style = bold] ++"stateful-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_stop_0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle-master_stop_0" -> "bundled_stop_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle-master_stop_0" -> "stateful-bundle-master_stopped_0" [ style = bold] ++"stateful-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle-master_stopped_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle-master_stopped_0" -> "stateful-bundle-master_start_0" [ style = bold] ++"stateful-bundle-master_stopped_0" -> "stateful-bundle_stopped_0" [ style = bold] ++"stateful-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_demote_0" -> "stateful-bundle-master_demote_0" [ style = bold] ++"stateful-bundle_demote_0" -> "stateful-bundle_demoted_0" [ style = bold] ++"stateful-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_demoted_0" -> "stateful-bundle_promote_0" [ style = bold] ++"stateful-bundle_demoted_0" -> "stateful-bundle_start_0" [ style = bold] ++"stateful-bundle_demoted_0" -> "stateful-bundle_stop_0" [ style = bold] ++"stateful-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_promote_0" -> "stateful-bundle-master_promote_0" [ style = bold] ++"stateful-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_running_0" -> "stateful-bundle_promote_0" [ style = bold] ++"stateful-bundle_running_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_start_0" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stateful-bundle_start_0" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stateful-bundle_start_0" -> "stateful-bundle-master_start_0" [ style = bold] ++"stateful-bundle_start_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_stop_0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] ++"stateful-bundle_stop_0" -> "bundled_stop_0 stateful-bundle-2" [ style = bold] ++"stateful-bundle_stop_0" -> "stateful-bundle-docker-0_stop_0 rhel7-5" [ style = bold] ++"stateful-bundle_stop_0" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] ++"stateful-bundle_stop_0" -> 
"stateful-bundle-master_stop_0" [ style = bold] ++"stateful-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] ++"stateful-bundle_stopped_0" -> "stateful-bundle_promote_0" [ style = bold] ++"stateful-bundle_stopped_0" -> "stateful-bundle_start_0" [ style = bold] ++"stateful-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"stonith 'reboot' lxc2" -> "Fencing_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' lxc2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' lxc2" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' lxc2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stonith 'reboot' lxc2" -> "container2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms-master_stop_0" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms_demote_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc-ms_stop_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "lxc2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc1-clone_stop_0" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc1_start_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc1_stop_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc2-master_stop_0" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc2_start_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "rsc2_stop_0 lxc2" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' lxc2" [ style=bold color="green" fontcolor="orange"] ++"stonith 'reboot' remote-rhel7-2" -> "Fencing_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "container2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "lxc2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc1-clone_stop_0" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc1_start_0 lxc2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc1_stop_0 remote-rhel7-2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc2-master_stop_0" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> 
"rsc2_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc2_start_0 lxc2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" -> "stonith 'reboot' rhel7-4" [ style = bold] ++"stonith 'reboot' remote-rhel7-2" [ style=bold color="green" fontcolor="black"] ++"stonith 'reboot' rhel7-4" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "container2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1-clone_stop_0" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1_start_0 lxc2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc1_stop_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc2-master_stop_0" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc2_start_0 lxc2" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "rsc2_stop_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style = bold] ++"stonith 'reboot' rhel7-4" -> "stonith 'reboot' stateful-bundle-2" [ style = bold] ++"stonith 'reboot' rhel7-4" [ style=bold color="green" fontcolor="black"] ++"stonith 'reboot' stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "container2_start_0 rhel7-3" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "lxc-ms_promote_0 lxc2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "lxc-ms_start_0 lxc2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "rsc1_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "rsc1_start_0 lxc2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "rsc2_promote_0 rhel7-3" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "rsc2_start_0 lxc2" [ style = bold] ++"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] ++"stonith 
'reboot' stateful-bundle-0" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-master_stop_0" [ style = bold]
++"stonith 'reboot' stateful-bundle-0" [ style=bold color="green" fontcolor="orange"]
++"stonith 'reboot' stateful-bundle-2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "bundled_start_0 stateful-bundle-0" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "container2_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "lxc-ms_promote_0 lxc2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "lxc-ms_start_0 lxc2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "rsc1_promote_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "rsc1_start_0 lxc2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "rsc2_promote_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "rsc2_start_0 lxc2" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-master_stop_0" [ style = bold]
++"stonith 'reboot' stateful-bundle-2" [ style=bold color="green" fontcolor="orange"]
++}
+diff --git a/cts/scheduler/on_fail_demote4.exp b/cts/scheduler/on_fail_demote4.exp
+new file mode 100644
+index 0000000..0789a12
+--- /dev/null
++++ b/cts/scheduler/on_fail_demote4.exp
+@@ -0,0 +1,1818 @@
[... 1818 added lines of XML omitted: markup stripped in extraction ...]
+diff --git a/cts/scheduler/on_fail_demote4.scores b/cts/scheduler/on_fail_demote4.scores
+new file mode 100644
+index 0000000..cde3fec
+--- /dev/null
++++ b/cts/scheduler/on_fail_demote4.scores
+@@ -0,0 +1,470 @@
++Allocation scores:
++Using the original execution date of: 2020-06-16 19:23:21Z
++bundled:0 promotion score on stateful-bundle-0: 10
++bundled:1 promotion score on stateful-bundle-1: 5
++bundled:2 promotion score on stateful-bundle-2: 5
++lxc-ms:0 promotion score on lxc2: INFINITY
++lxc-ms:1 promotion score on lxc1: INFINITY
++pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501
++pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501
++pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501
++pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0
++pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0
++pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0
++pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0
++pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0
++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0
++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0
++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0
++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0
++pcmk__bundle_allocate:
stateful-bundle-docker-0 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0 ++pcmk__bundle_allocate: stateful-bundle-master 
allocation score on rhel7-1: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0 ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY ++pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY ++pcmk__clone_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY ++pcmk__clone_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY ++pcmk__clone_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms-master allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms:0 allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on lxc1: INFINITY ++pcmk__clone_allocate: lxc-ms:1 allocation score on lxc2: INFINITY ++pcmk__clone_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 1 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on 
rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on remote-rhel7-2: 1 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on lxc2: 6 ++pcmk__clone_allocate: rsc1:5 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:5 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on lxc1: 6 ++pcmk__clone_allocate: rsc1:6 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:6 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2-master allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 1 ++pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: 
rsc2:2 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on remote-rhel7-2: 1 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on lxc1: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on lxc2: 6 ++pcmk__clone_allocate: rsc2:5 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:5 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on lxc1: 6 ++pcmk__clone_allocate: rsc2:6 allocation score on lxc2: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on remote-rhel7-2: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc2:6 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on lxc1: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on lxc2: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on remote-rhel7-2: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-1: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-3: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-4: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-5: -INFINITY ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-0: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-1: 0 ++pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-2: 0 ++pcmk__native_allocate: Fencing allocation score on lxc1: -INFINITY ++pcmk__native_allocate: Fencing allocation score on lxc2: -INFINITY ++pcmk__native_allocate: Fencing allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: bundled:0 allocation score on 
stateful-bundle-0: INFINITY ++pcmk__native_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY ++pcmk__native_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY ++pcmk__native_allocate: container1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: container1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: container1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: container1 allocation score on rhel7-3: INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: container1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: container2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: container2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: container2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: container2 allocation score on rhel7-3: INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: container2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on lxc2: INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on lxc1: INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on lxc2: INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: lxc1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc1 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: lxc2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: lxc2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: 
remote-rhel7-2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:2 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc1:3 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc1:3 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:3 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on lxc2: 6 ++pcmk__native_allocate: rsc1:5 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:5 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on lxc1: 6 ++pcmk__native_allocate: rsc1:6 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc1:6 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:6 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on lxc1: -INFINITY 
++pcmk__native_allocate: rsc2:0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc2:2 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 ++pcmk__native_allocate: rsc2:3 allocation score on lxc1: 0 ++pcmk__native_allocate: rsc2:3 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:3 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on lxc2: 6 ++pcmk__native_allocate: rsc2:5 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:5 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on lxc1: 6 ++pcmk__native_allocate: rsc2:6 allocation score on lxc2: 0 ++pcmk__native_allocate: rsc2:6 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc2:6 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY 
++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-5: 10000 ++pcmk__native_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-1: 10000 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-3: 10000 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc1: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc2: -10000 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: 
stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: -INFINITY ++rsc1:0 promotion score on none: 0 ++rsc1:1 promotion score on rhel7-3: 5 ++rsc1:2 promotion score on rhel7-5: 5 ++rsc1:3 promotion score on rhel7-1: 5 ++rsc1:4 promotion score on none: 0 ++rsc1:5 promotion score on lxc2: 5 ++rsc1:6 promotion score on lxc1: 5 ++rsc2:0 promotion score on none: 0 ++rsc2:1 promotion score on rhel7-3: 5 ++rsc2:2 promotion score on rhel7-5: 5 ++rsc2:3 promotion score on rhel7-1: 5 ++rsc2:4 promotion score on none: 0 ++rsc2:5 promotion score on lxc2: 5 ++rsc2:6 promotion score on lxc1: 5 +diff --git a/cts/scheduler/on_fail_demote4.summary b/cts/scheduler/on_fail_demote4.summary +new file mode 100644 +index 0000000..20520ff +--- /dev/null ++++ b/cts/scheduler/on_fail_demote4.summary +@@ -0,0 +1,187 @@ ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Current cluster status: ++RemoteNode remote-rhel7-2: UNCLEAN (offline) ++Node rhel7-4 (4): UNCLEAN (offline) ++Online: [ rhel7-1 rhel7-3 rhel7-5 ] ++GuestOnline: [ lxc1:container1 stateful-bundle-1:stateful-bundle-docker-1 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-4 (UNCLEAN) ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ rsc1 (ocf::pacemaker:Stateful): Master rhel7-4 (UNCLEAN) ++ rsc1 (ocf::pacemaker:Stateful): Slave remote-rhel7-2 (UNCLEAN) ++ Slaves: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ rsc2 (ocf::pacemaker:Stateful): Slave rhel7-4 (UNCLEAN) ++ rsc2 (ocf::pacemaker:Stateful): Master remote-rhel7-2 (UNCLEAN) ++ Slaves: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] ++ remote-rhel7-2 (ocf::pacemaker:remote): FAILED rhel7-1 ++ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ container2 (ocf::heartbeat:VirtualDomain): FAILED rhel7-3 ++ Clone Set: lxc-ms-master [lxc-ms] (promotable) ++ Slaves: [ lxc1 ] ++ Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 
rhel7-4 rhel7-5 ] ++ Container bundle set: stateful-bundle [pcmktest:http] ++ stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): FAILED Master rhel7-5 ++ stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 ++ stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): FAILED rhel7-4 (UNCLEAN) ++ ++Transition Summary: ++ * Fence (reboot) stateful-bundle-2 (resource: stateful-bundle-docker-2) 'guest is unclean' ++ * Fence (reboot) stateful-bundle-0 (resource: stateful-bundle-docker-0) 'guest is unclean' ++ * Fence (reboot) lxc2 (resource: container2) 'guest is unclean' ++ * Fence (reboot) remote-rhel7-2 'remote connection is unrecoverable' ++ * Fence (reboot) rhel7-4 'peer is no longer part of the cluster' ++ * Move Fencing ( rhel7-4 -> rhel7-5 ) ++ * Stop rsc1:0 ( Master rhel7-4 ) due to node availability ++ * Promote rsc1:1 ( Slave -> Master rhel7-3 ) ++ * Stop rsc1:4 ( Slave remote-rhel7-2 ) due to node availability ++ * Recover rsc1:5 ( Slave lxc2 ) ++ * Stop rsc2:0 ( Slave rhel7-4 ) due to node availability ++ * Promote rsc2:1 ( Slave -> Master rhel7-3 ) ++ * Stop rsc2:4 ( Master remote-rhel7-2 ) due to node availability ++ * Recover rsc2:5 ( Slave lxc2 ) ++ * Recover remote-rhel7-2 ( rhel7-1 ) ++ * Recover container2 ( rhel7-3 ) ++ * Recover lxc-ms:0 ( Master lxc2 ) ++ * Recover stateful-bundle-docker-0 ( rhel7-5 ) ++ * Restart stateful-bundle-0 ( rhel7-5 ) due to required stateful-bundle-docker-0 start ++ * Recover bundled:0 ( Master stateful-bundle-0 ) ++ * Move stateful-bundle-ip-192.168.122.133 ( rhel7-4 -> rhel7-3 ) ++ * Recover stateful-bundle-docker-2 ( rhel7-4 -> rhel7-3 ) ++ * Move stateful-bundle-2 ( rhel7-4 -> rhel7-3 ) ++ * Recover bundled:2 ( Slave stateful-bundle-2 ) ++ * Restart lxc2 ( rhel7-3 ) due to required container2 start ++ ++Executing cluster transition: ++ * Pseudo action: Fencing_stop_0 ++ * Resource action: rsc1 cancel=11000 on rhel7-3 ++ * Pseudo action: rsc1-clone_demote_0 ++ * Resource action: rsc2 cancel=11000 on rhel7-3 ++ * Pseudo action: rsc2-master_demote_0 ++ * Pseudo action: lxc-ms-master_demote_0 ++ * Resource action: stateful-bundle-0 stop on rhel7-5 ++ * Pseudo action: stateful-bundle-2_stop_0 ++ * Resource action: lxc2 stop on rhel7-3 ++ * Pseudo action: stateful-bundle_demote_0 ++ * Fencing remote-rhel7-2 (reboot) ++ * Fencing rhel7-4 (reboot) ++ * Pseudo action: rsc1_demote_0 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Pseudo action: rsc2_demote_0 ++ * Pseudo action: rsc2-master_demoted_0 ++ * Resource action: container2 stop on rhel7-3 ++ * Pseudo action: stateful-bundle-master_demote_0 ++ * Pseudo action: stonith-stateful-bundle-2-reboot on stateful-bundle-2 ++ * Pseudo action: stonith-lxc2-reboot on lxc2 ++ * Resource action: Fencing start on rhel7-5 ++ * Pseudo action: rsc1-clone_stop_0 ++ * Pseudo action: rsc2-master_stop_0 ++ * Pseudo action: lxc-ms_demote_0 ++ * Pseudo action: lxc-ms-master_demoted_0 ++ * Pseudo action: lxc-ms-master_stop_0 ++ * Pseudo action: bundled_demote_0 ++ * Pseudo action: stateful-bundle-master_demoted_0 ++ * Pseudo action: stateful-bundle_demoted_0 ++ * Pseudo action: stateful-bundle_stop_0 ++ * Resource action: Fencing monitor=120000 on rhel7-5 ++ * Pseudo action: rsc1_stop_0 ++ * Pseudo action: rsc1_stop_0 ++ * Pseudo action: rsc1_stop_0 ++ * Pseudo action: rsc1-clone_stopped_0 ++ * Pseudo action: rsc1-clone_start_0 ++ * Pseudo action: rsc2_stop_0 ++ * Pseudo action: rsc2_stop_0 ++ * Pseudo action: rsc2_stop_0 ++ * Pseudo action: rsc2-master_stopped_0 ++ * Pseudo 
action: rsc2-master_start_0 ++ * Resource action: remote-rhel7-2 stop on rhel7-1 ++ * Pseudo action: lxc-ms_stop_0 ++ * Pseudo action: lxc-ms-master_stopped_0 ++ * Pseudo action: lxc-ms-master_start_0 ++ * Resource action: stateful-bundle-docker-0 stop on rhel7-5 ++ * Pseudo action: stateful-bundle-docker-2_stop_0 ++ * Pseudo action: stonith-stateful-bundle-0-reboot on stateful-bundle-0 ++ * Resource action: remote-rhel7-2 start on rhel7-1 ++ * Resource action: remote-rhel7-2 monitor=60000 on rhel7-1 ++ * Resource action: container2 start on rhel7-3 ++ * Resource action: container2 monitor=20000 on rhel7-3 ++ * Pseudo action: stateful-bundle-master_stop_0 ++ * Pseudo action: stateful-bundle-ip-192.168.122.133_stop_0 ++ * Resource action: lxc2 start on rhel7-3 ++ * Resource action: lxc2 monitor=30000 on rhel7-3 ++ * Resource action: rsc1 start on lxc2 ++ * Pseudo action: rsc1-clone_running_0 ++ * Resource action: rsc2 start on lxc2 ++ * Pseudo action: rsc2-master_running_0 ++ * Resource action: lxc-ms start on lxc2 ++ * Pseudo action: lxc-ms-master_running_0 ++ * Pseudo action: bundled_stop_0 ++ * Resource action: stateful-bundle-ip-192.168.122.133 start on rhel7-3 ++ * Resource action: rsc1 monitor=11000 on lxc2 ++ * Pseudo action: rsc1-clone_promote_0 ++ * Resource action: rsc2 monitor=11000 on lxc2 ++ * Pseudo action: rsc2-master_promote_0 ++ * Pseudo action: lxc-ms-master_promote_0 ++ * Pseudo action: bundled_stop_0 ++ * Pseudo action: stateful-bundle-master_stopped_0 ++ * Resource action: stateful-bundle-ip-192.168.122.133 monitor=60000 on rhel7-3 ++ * Pseudo action: stateful-bundle_stopped_0 ++ * Pseudo action: stateful-bundle_start_0 ++ * Resource action: rsc1 promote on rhel7-3 ++ * Pseudo action: rsc1-clone_promoted_0 ++ * Resource action: rsc2 promote on rhel7-3 ++ * Pseudo action: rsc2-master_promoted_0 ++ * Resource action: lxc-ms promote on lxc2 ++ * Pseudo action: lxc-ms-master_promoted_0 ++ * Pseudo action: stateful-bundle-master_start_0 ++ * Resource action: stateful-bundle-docker-0 start on rhel7-5 ++ * Resource action: stateful-bundle-docker-0 monitor=60000 on rhel7-5 ++ * Resource action: stateful-bundle-0 start on rhel7-5 ++ * Resource action: stateful-bundle-0 monitor=30000 on rhel7-5 ++ * Resource action: stateful-bundle-docker-2 start on rhel7-3 ++ * Resource action: stateful-bundle-2 start on rhel7-3 ++ * Resource action: rsc1 monitor=10000 on rhel7-3 ++ * Resource action: rsc2 monitor=10000 on rhel7-3 ++ * Resource action: lxc-ms monitor=10000 on lxc2 ++ * Resource action: bundled start on stateful-bundle-0 ++ * Resource action: bundled start on stateful-bundle-2 ++ * Pseudo action: stateful-bundle-master_running_0 ++ * Resource action: stateful-bundle-docker-2 monitor=60000 on rhel7-3 ++ * Resource action: stateful-bundle-2 monitor=30000 on rhel7-3 ++ * Pseudo action: stateful-bundle_running_0 ++ * Resource action: bundled monitor=11000 on stateful-bundle-2 ++ * Pseudo action: stateful-bundle_promote_0 ++ * Pseudo action: stateful-bundle-master_promote_0 ++ * Resource action: bundled promote on stateful-bundle-0 ++ * Pseudo action: stateful-bundle-master_promoted_0 ++ * Pseudo action: stateful-bundle_promoted_0 ++ * Resource action: bundled monitor=10000 on stateful-bundle-0 ++Using the original execution date of: 2020-06-16 19:23:21Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-3 rhel7-5 ] ++OFFLINE: [ rhel7-4 ] ++RemoteOnline: [ remote-rhel7-2 ] ++GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 
stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-5 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Masters: [ rhel7-3 ] ++ Slaves: [ lxc1 lxc2 rhel7-1 rhel7-5 ] ++ Stopped: [ remote-rhel7-2 rhel7-4 ] ++ Clone Set: rsc2-master [rsc2] (promotable) ++ Masters: [ rhel7-3 ] ++ Slaves: [ lxc1 lxc2 rhel7-1 rhel7-5 ] ++ Stopped: [ remote-rhel7-2 rhel7-4 ] ++ remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1 ++ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 ++ Clone Set: lxc-ms-master [lxc-ms] (promotable) ++ Masters: [ lxc2 ] ++ Slaves: [ lxc1 ] ++ Container bundle set: stateful-bundle [pcmktest:http] ++ stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): Master rhel7-5 ++ stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 ++ stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-3 ++ +diff --git a/cts/scheduler/on_fail_demote4.xml b/cts/scheduler/on_fail_demote4.xml +new file mode 100644 +index 0000000..eb4c4cc +--- /dev/null ++++ b/cts/scheduler/on_fail_demote4.xml +@@ -0,0 +1,625 @@ ++ [... 625 lines of scheduler test CIB XML not preserved ...] +-- +1.8.3.1 + + +From 204961e95d9de140d998d71a0e53b5b9baa5d39e Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 26 May 2020 18:04:32 -0500 +Subject: [PATCH 12/20] Doc: Pacemaker Explained: document new on-fail="demote" + option + +--- + doc/Pacemaker_Explained/en-US/Ch-Resources.txt | 36 ++++++++++++++++++++++++++ + 1 file changed, 36 insertions(+) + +diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt
b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +index d8e7115..9df9243 100644 +--- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt ++++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +@@ -676,6 +676,10 @@ a|The action to take if this action ever fails. Allowed values: + * +ignore:+ Pretend the resource did not fail. + * +block:+ Don't perform any further operations on the resource. + * +stop:+ Stop the resource and do not start it elsewhere. ++* +demote:+ Demote the resource, without a full restart. This is valid only for ++ +promote+ actions, and for +monitor+ actions with both a nonzero +interval+ ++ and +role+ set to +Master+; for any other action, a configuration error will ++ be logged, and the default behavior will be used. + * +restart:+ Stop the resource and start it again (possibly on a different node). + * +fence:+ STONITH the node on which the resource failed. + * +standby:+ Move _all_ resources away from the node on which the resource failed. +@@ -714,6 +718,38 @@ indexterm:[Action,Property,on-fail] + + |========================================================= + ++[NOTE] ++==== ++When +on-fail+ is set to +demote+, recovery from failure by a successful demote ++causes the cluster to recalculate whether and where a new instance should be ++promoted. The node with the failure is eligible, so if master scores have not ++changed, it will be promoted again. ++ ++There is no direct equivalent of +migration-threshold+ for the master role, but ++the same effect can be achieved with a location constraint using a ++<> with a node attribute expression for the resource's fail ++count. ++ ++For example, to immediately ban the master role from a node with any failed ++promote or master monitor: ++[source,XML] ++---- ++<rsc_location id="loc1" rsc="my_primitive-clone"> ++ <rule id="rule1" score="-INFINITY" role="Master" boolean-op="or"> ++ <expression id="expr1" attribute="fail-count-my_primitive#promote_0" operation="gte" value="1"/> ++ <expression id="expr2" attribute="fail-count-my_primitive#monitor_10000" operation="gte" value="1"/> ++ </rule> ++</rsc_location> ++---- ++ ++This example assumes that there is a promotable clone of the +my_primitive+ ++resource (note that the primitive name, not the clone name, is used in the ++rule), and that there is a recurring 10-second-interval monitor configured for ++the master role (fail count attributes specify the interval in milliseconds). ++==== ++ + [[s-resource-monitoring]] + === Monitoring Resources for Failure === + +-- +1.8.3.1 + + +From d4b9117e72b178bb6f4458cd89bee13060f78dcb Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 26 May 2020 18:10:33 -0500 +Subject: [PATCH 13/20] Doc: Pacemaker Explained: correct on-fail default + +--- + doc/Pacemaker_Explained/en-US/Ch-Resources.txt | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +index 9df9243..88892db 100644 +--- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt ++++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +@@ -669,8 +669,13 @@ XML attributes take precedence over +nvpair+ elements if both are specified. + indexterm:[Action,Property,timeout] + + |on-fail +-|restart '(except for +stop+ operations, which default to' fence 'when +- STONITH is enabled and' block 'otherwise)' ++a|Varies by action: ++ ++* +stop+: +fence+ if +stonith-enabled+ is true or +block+ otherwise ++* +demote+: +on-fail+ of the +monitor+ action with +role+ set to +Master+, if ++ present, enabled, and configured to a value other than +demote+, or +restart+ ++ otherwise ++* all other actions: +restart+ + a|The action to take if this action ever fails. Allowed values: + + * +ignore:+ Pretend the resource did not fail.
+-- +1.8.3.1 + + +From 0b683445318c783ecef8d6f023b35a6c056ee321 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 15:05:56 -0500 +Subject: [PATCH 14/20] Refactor: scheduler: functionize checking quorum policy + in effect + +... for readability and ease of future changes +--- + lib/pengine/utils.c | 18 ++++++++++++++---- + 1 file changed, 14 insertions(+), 4 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index fee9efb..5d6b836 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -481,6 +481,17 @@ sort_rsc_priority(gconstpointer a, gconstpointer b) + return 0; + } + ++static enum pe_quorum_policy ++effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) ++{ ++ enum pe_quorum_policy policy = data_set->no_quorum_policy; ++ ++ if (is_set(data_set->flags, pe_flag_have_quorum)) { ++ policy = no_quorum_ignore; ++ } ++ return policy; ++} ++ + pe_action_t * + custom_action(pe_resource_t * rsc, char *key, const char *task, + pe_node_t * on_node, gboolean optional, gboolean save_action, +@@ -593,6 +604,7 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + + if (rsc != NULL) { + enum action_tasks a_task = text2task(action->task); ++ enum pe_quorum_policy quorum_policy = effective_quorum_policy(rsc, data_set); + int warn_level = LOG_TRACE; + + if (save_action) { +@@ -675,13 +687,11 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + crm_trace("Action %s requires only stonith", action->uuid); + action->runnable = TRUE; + #endif +- } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE +- && data_set->no_quorum_policy == no_quorum_stop) { ++ } else if (quorum_policy == no_quorum_stop) { + pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE); + crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid); + +- } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE +- && data_set->no_quorum_policy == no_quorum_freeze) { ++ } else if (quorum_policy == no_quorum_freeze) { + pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role)); + if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { + pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE); +-- +1.8.3.1 + + +From b1ae359382f15e28e90d9144ca7b1d5f04820c10 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 15:06:32 -0500 +Subject: [PATCH 15/20] Feature: scheduler: support "demote" choice for + no-quorum-policy option + +If quorum is lost, promotable resources in the master role will be demoted but +left running, and all other resources will be stopped. 
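As a sketch of how a cluster would opt into the behavior described above (illustrative only, not part of the patch set; the nvpair ID is invented), the option is set among the CIB's cluster options:

[source,XML]
----
<crm_config>
  <cluster_property_set id="cib-bootstrap-options">
    <!-- On quorum loss: demote promotable resources, stop all other resources -->
    <nvpair id="opt-no-quorum-policy" name="no-quorum-policy" value="demote"/>
  </cluster_property_set>
</crm_config>
----

The utils.c hunk below implements the promotable-resource carve-out: instances already in the master or slave role have their next role capped at slave and are otherwise treated as no_quorum_ignore, while all other resources fall back to no_quorum_stop.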
+--- + daemons/controld/controld_control.c | 2 +- + include/crm/pengine/pe_types.h | 3 ++- + lib/common/options.c | 1 + + lib/pengine/common.c | 2 +- + lib/pengine/pe_output.c | 14 ++++++++++++++ + lib/pengine/unpack.c | 7 +++++++ + lib/pengine/utils.c | 14 ++++++++++++++ + 7 files changed, 40 insertions(+), 3 deletions(-) + +diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c +index 7d29205..059eb7b 100644 +--- a/daemons/controld/controld_control.c ++++ b/daemons/controld/controld_control.c +@@ -626,7 +626,7 @@ static pcmk__cluster_option_t crmd_opts[] = { + + // Already documented in libpe_status (other values must be kept identical) + { +- "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", ++ "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, demote, suicide", + "stop", pcmk__valid_quorum, NULL, NULL + }, + { +diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h +index ed5eb12..f3cb4ef 100644 +--- a/include/crm/pengine/pe_types.h ++++ b/include/crm/pengine/pe_types.h +@@ -61,7 +61,8 @@ enum pe_quorum_policy { + no_quorum_freeze, + no_quorum_stop, + no_quorum_ignore, +- no_quorum_suicide ++ no_quorum_suicide, ++ no_quorum_demote + }; + + enum node_type { +diff --git a/lib/common/options.c b/lib/common/options.c +index 9399642..9e041c9 100644 +--- a/lib/common/options.c ++++ b/lib/common/options.c +@@ -407,6 +407,7 @@ pcmk__valid_quorum(const char *value) + return safe_str_eq(value, "stop") + || safe_str_eq(value, "freeze") + || safe_str_eq(value, "ignore") ++ || safe_str_eq(value, "demote") + || safe_str_eq(value, "suicide"); + } + +diff --git a/lib/pengine/common.c b/lib/pengine/common.c +index f4f2106..37f287b 100644 +--- a/lib/pengine/common.c ++++ b/lib/pengine/common.c +@@ -54,7 +54,7 @@ static pcmk__cluster_option_t pe_opts[] = { + * long description + */ + { +- "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", ++ "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, demote, suicide", + "stop", pcmk__valid_quorum, + "What to do when the cluster does not have quorum", + NULL +diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c +index 75bf0d5..ad469ab 100644 +--- a/lib/pengine/pe_output.c ++++ b/lib/pengine/pe_output.c +@@ -729,6 +729,11 @@ pe__cluster_options_html(pcmk__output_t *out, va_list args) { + out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); + break; + ++ case no_quorum_demote: ++ out->list_item(out, NULL, "No quorum policy: Demote promotable " ++ "resources and stop all other resources"); ++ break; ++ + case no_quorum_ignore: + out->list_item(out, NULL, "No quorum policy: Ignore"); + break; +@@ -785,6 +790,11 @@ pe__cluster_options_text(pcmk__output_t *out, va_list args) { + out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); + break; + ++ case no_quorum_demote: ++ out->list_item(out, NULL, "No quorum policy: Demote promotable " ++ "resources and stop all other resources"); ++ break; ++ + case no_quorum_ignore: + out->list_item(out, NULL, "No quorum policy: Ignore"); + break; +@@ -817,6 +827,10 @@ pe__cluster_options_xml(pcmk__output_t *out, va_list args) { + xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "stop"); + break; + ++ case no_quorum_demote: ++ xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "demote"); ++ break; ++ + case no_quorum_ignore: + xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "ignore"); + break; +diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c +index 
a219805..a480680 100644 +--- a/lib/pengine/unpack.c ++++ b/lib/pengine/unpack.c +@@ -268,6 +268,9 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) + } else if (safe_str_eq(value, "freeze")) { + data_set->no_quorum_policy = no_quorum_freeze; + ++ } else if (safe_str_eq(value, "demote")) { ++ data_set->no_quorum_policy = no_quorum_demote; ++ + } else if (safe_str_eq(value, "suicide")) { + if (is_set(data_set->flags, pe_flag_stonith_enabled)) { + int do_panic = 0; +@@ -297,6 +300,10 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) + case no_quorum_stop: + crm_debug("On loss of quorum: Stop ALL resources"); + break; ++ case no_quorum_demote: ++ crm_debug("On loss of quorum: " ++ "Demote promotable resources and stop other resources"); ++ break; + case no_quorum_suicide: + crm_notice("On loss of quorum: Fence all remaining nodes"); + break; +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 5d6b836..f8b631a 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -488,6 +488,20 @@ effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) + + if (is_set(data_set->flags, pe_flag_have_quorum)) { + policy = no_quorum_ignore; ++ ++ } else if (data_set->no_quorum_policy == no_quorum_demote) { ++ switch (rsc->role) { ++ case RSC_ROLE_MASTER: ++ case RSC_ROLE_SLAVE: ++ if (rsc->next_role > RSC_ROLE_SLAVE) { ++ rsc->next_role = RSC_ROLE_SLAVE; ++ } ++ policy = no_quorum_ignore; ++ break; ++ default: ++ policy = no_quorum_stop; ++ break; ++ } + } + return policy; + } +-- +1.8.3.1 + + +From 5d809e136f2927259ad570e409e3bbb68f7ce7b4 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 17 Jun 2020 12:29:50 -0500 +Subject: [PATCH 16/20] Test: scheduler: add regression test for + no-quorum-policy="demote" + +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/no_quorum_demote.dot | 22 ++++ + cts/scheduler/no_quorum_demote.exp | 81 ++++++++++++ + cts/scheduler/no_quorum_demote.scores | 72 +++++++++++ + cts/scheduler/no_quorum_demote.summary | 38 ++++++ + cts/scheduler/no_quorum_demote.xml | 224 +++++++++++++++++++++++++++++++++ + 6 files changed, 438 insertions(+) + create mode 100644 cts/scheduler/no_quorum_demote.dot + create mode 100644 cts/scheduler/no_quorum_demote.exp + create mode 100644 cts/scheduler/no_quorum_demote.scores + create mode 100644 cts/scheduler/no_quorum_demote.summary + create mode 100644 cts/scheduler/no_quorum_demote.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index 0e68e73..9e34379 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -482,6 +482,7 @@ TESTS = [ + [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], + [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], + [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], ++ [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ], + ], + [ + [ "history-1", "Correctly parse stateful-1 resource state" ], +diff --git a/cts/scheduler/no_quorum_demote.dot b/cts/scheduler/no_quorum_demote.dot +new file mode 100644 +index 0000000..ea5b30c +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.dot +@@ -0,0 +1,22 @@ ++ digraph "g" { ++"Cancel rsc1_monitor_10000 rhel7-1" -> "rsc1_demote_0 rhel7-1" [ style = bold] ++"Cancel rsc1_monitor_10000 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"Fencing_monitor_120000 rhel7-1" [ style=dashed color="red" fontcolor="black"] 
++"Fencing_start_0 rhel7-1" -> "Fencing_monitor_120000 rhel7-1" [ style = dashed] ++"Fencing_start_0 rhel7-1" [ style=dashed color="red" fontcolor="black"] ++"Fencing_stop_0 rhel7-1" -> "Fencing_start_0 rhel7-1" [ style = dashed] ++"Fencing_stop_0 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-1" [ style = bold] ++"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"rsc1_demote_0 rhel7-1" -> "rsc1-clone_demoted_0" [ style = bold] ++"rsc1_demote_0 rhel7-1" -> "rsc1_monitor_11000 rhel7-1" [ style = bold] ++"rsc1_demote_0 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"rsc1_monitor_11000 rhel7-1" [ style=bold color="green" fontcolor="black"] ++"rsc2_monitor_10000 rhel7-2" [ style=dashed color="red" fontcolor="black"] ++"rsc2_start_0 rhel7-2" -> "rsc2_monitor_10000 rhel7-2" [ style = dashed] ++"rsc2_start_0 rhel7-2" [ style=dashed color="red" fontcolor="black"] ++"rsc2_stop_0 rhel7-2" -> "rsc2_start_0 rhel7-2" [ style = dashed] ++"rsc2_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/cts/scheduler/no_quorum_demote.exp b/cts/scheduler/no_quorum_demote.exp +new file mode 100644 +index 0000000..245574c +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.exp +@@ -0,0 +1,81 @@ ++ [... 81 lines of transition graph XML not preserved ...] +diff --git a/cts/scheduler/no_quorum_demote.scores b/cts/scheduler/no_quorum_demote.scores +new file mode 100644 +index 0000000..dddc57b +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.scores +@@ -0,0 +1,72 @@ ++Allocation scores: ++Using the original execution date of: 2020-06-17 17:26:35Z ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 11 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 6 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 10 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 5 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 10 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 5 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5:
0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 10 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 5 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 ++pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-2: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 ++pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: 11 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 6 ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: -INFINITY ++pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY ++pcmk__native_allocate: rsc2 allocation score on rhel7-1: 0 ++pcmk__native_allocate: rsc2 allocation score on rhel7-2: 0 ++pcmk__native_allocate: rsc2 allocation score on rhel7-3: 0 ++pcmk__native_allocate: rsc2 allocation score on rhel7-4: 0 ++pcmk__native_allocate: rsc2 allocation score on rhel7-5: 0 ++rsc1:0 promotion score on rhel7-1: 10 ++rsc1:1 promotion score on rhel7-2: 5 ++rsc1:2 promotion score on none: 0 ++rsc1:3 promotion score on none: 0 ++rsc1:4 promotion score on none: 0 +diff --git a/cts/scheduler/no_quorum_demote.summary b/cts/scheduler/no_quorum_demote.summary +new file mode 100644 +index 0000000..9b69ca1 +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.summary +@@ -0,0 +1,38 @@ ++Using the original execution date of: 2020-06-17 17:26:35Z ++ ++Current cluster status: ++Online: [ rhel7-1 rhel7-2 ] ++OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Started rhel7-1 ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Masters: [ rhel7-1 ] ++ Slaves: [ rhel7-2 ] ++ Stopped: [ rhel7-3 rhel7-4 rhel7-5 ] ++ rsc2 (ocf::pacemaker:Dummy): Started rhel7-2 ++ ++Transition Summary: ++ * Stop Fencing ( rhel7-1 ) due to no 
quorum ++ * Demote rsc1:0 ( Master -> Slave rhel7-1 ) ++ * Stop rsc2 ( rhel7-2 ) due to no quorum ++ ++Executing cluster transition: ++ * Resource action: Fencing stop on rhel7-1 ++ * Resource action: rsc1 cancel=10000 on rhel7-1 ++ * Pseudo action: rsc1-clone_demote_0 ++ * Resource action: rsc2 stop on rhel7-2 ++ * Resource action: rsc1 demote on rhel7-1 ++ * Pseudo action: rsc1-clone_demoted_0 ++ * Resource action: rsc1 monitor=11000 on rhel7-1 ++Using the original execution date of: 2020-06-17 17:26:35Z ++ ++Revised cluster status: ++Online: [ rhel7-1 rhel7-2 ] ++OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ] ++ ++ Fencing (stonith:fence_xvm): Stopped ++ Clone Set: rsc1-clone [rsc1] (promotable) ++ Slaves: [ rhel7-1 rhel7-2 ] ++ Stopped: [ rhel7-3 rhel7-4 rhel7-5 ] ++ rsc2 (ocf::pacemaker:Dummy): Stopped ++ +diff --git a/cts/scheduler/no_quorum_demote.xml b/cts/scheduler/no_quorum_demote.xml +new file mode 100644 +index 0000000..8497f0a +--- /dev/null ++++ b/cts/scheduler/no_quorum_demote.xml +@@ -0,0 +1,224 @@ ++ [... 224 lines of scheduler test CIB XML not preserved ...] +-- +1.8.3.1 + + +From 015b5c012ce41a8035260522f67127135937baa2 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 28 May 2020 12:13:20 -0500 +Subject: [PATCH 17/20] Doc: Pacemaker Explained: document + no-quorum-policy=demote + +--- + doc/Pacemaker_Explained/en-US/Ch-Options.txt | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/doc/Pacemaker_Explained/en-US/Ch-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Options.txt +index faefe7c..b158f00 100644 +--- a/doc/Pacemaker_Explained/en-US/Ch-Options.txt ++++ b/doc/Pacemaker_Explained/en-US/Ch-Options.txt +@@ -181,6 +181,8 @@ What to do when the cluster does not have quorum. Allowed values: + * +ignore:+ continue all resource management + * +freeze:+ continue resource management, but don't recover resources from nodes not in the affected partition + * +stop:+ stop all resources in the affected cluster partition ++* +demote:+ demote promotable resources and stop all other resources in the ++ affected cluster partition + * +suicide:+ fence all nodes in the affected cluster partition + + | batch-limit | 0 | +-- +1.8.3.1 + + +From 01c5ec67e0a6ee1395d771f8fbaf619a44ab2ca2 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 2 Jun 2020 19:23:11 -0500 +Subject: [PATCH 18/20] Low: scheduler: match initial no-quorum-policy struct + value to actual default + +It doesn't matter in practice since the actual default is parsed from the +option definition via pe_pref(), but it's confusing to have them different.
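To make the shadowing concrete, here is a hedged sketch (the helper name effective_default_policy is invented for illustration; pe_pref() and safe_str_eq() are the real helpers used elsewhere in this patch set):

[source,C]
----
/* Illustration only: whatever set_working_set_defaults() puts in
 * data_set->no_quorum_policy, unpack_config() re-reads the option via
 * pe_pref(), whose default is the documented "stop" -- so the struct
 * initializer is only visible between those two calls.
 */
static enum pe_quorum_policy
effective_default_policy(pe_working_set_t *data_set)
{
    const char *value = pe_pref(data_set->config_hash, "no-quorum-policy");

    if (safe_str_eq(value, "demote")) {
        return no_quorum_demote;
    } else if (safe_str_eq(value, "freeze")) {
        return no_quorum_freeze;
    } else if (safe_str_eq(value, "ignore")) {
        return no_quorum_ignore;
    }
    /* "suicide" additionally requires the fencing checks in unpack_config() */
    return no_quorum_stop;
}
----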
+--- + lib/pengine/status.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/pengine/status.c b/lib/pengine/status.c +index 8dc5095..ca34639 100644 +--- a/lib/pengine/status.c ++++ b/lib/pengine/status.c +@@ -360,7 +360,7 @@ set_working_set_defaults(pe_working_set_t * data_set) + + data_set->order_id = 1; + data_set->action_id = 1; +- data_set->no_quorum_policy = no_quorum_freeze; ++ data_set->no_quorum_policy = no_quorum_stop; + + data_set->flags = 0x0ULL; + set_bit(data_set->flags, pe_flag_stop_rsc_orphans); +-- +1.8.3.1 + + +From 7eec572dbba3ade059e5206a2ba496f9da3a68bc Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 5 Jun 2020 10:02:05 -0500 +Subject: [PATCH 19/20] Build: libcrmcommon: bump CRM feature set + +... for op_expression/rsc_expression rules, on-fail=demote, and +no-quorum-policy=demote +--- + include/crm/crm.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/crm/crm.h b/include/crm/crm.h +index d2ffb61..dc2adc1 100644 +--- a/include/crm/crm.h ++++ b/include/crm/crm.h +@@ -51,7 +51,7 @@ extern "C" { + * >=3.0.13: Fail counts include operation name and interval + * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED + */ +-# define CRM_FEATURE_SET "3.3.0" ++# define CRM_FEATURE_SET "3.4.0" + + # define EOS '\0' + # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) +-- +1.8.3.1 + + +From c4429d86ef00bb1749adc476f9c6874e3f5d95b9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 16 Jun 2020 14:38:35 -0500 +Subject: [PATCH 20/20] Log: scheduler: downgrade "active on" messages to trace + +... now that they're logged more often via pcmk__rsc_is_filtered() +--- + lib/pengine/native.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/lib/pengine/native.c b/lib/pengine/native.c +index f0d83d7..20658a0 100644 +--- a/lib/pengine/native.c ++++ b/lib/pengine/native.c +@@ -359,22 +359,22 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c + gboolean + native_active(pe_resource_t * rsc, gboolean all) + { +- GListPtr gIter = rsc->running_on; +- +- for (; gIter != NULL; gIter = gIter->next) { ++ for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { + pe_node_t *a_node = (pe_node_t *) gIter->data; + + if (a_node->details->unclean) { +- crm_debug("Resource %s: node %s is unclean", rsc->id, a_node->details->uname); ++ pe_rsc_trace(rsc, "Resource %s: node %s is unclean", ++ rsc->id, a_node->details->uname); + return TRUE; + } else if (a_node->details->online == FALSE) { +- crm_debug("Resource %s: node %s is offline", rsc->id, a_node->details->uname); ++ pe_rsc_trace(rsc, "Resource %s: node %s is offline", ++ rsc->id, a_node->details->uname); + } else { +- crm_debug("Resource %s active on %s", rsc->id, a_node->details->uname); ++ pe_rsc_trace(rsc, "Resource %s active on %s", ++ rsc->id, a_node->details->uname); + return TRUE; + } + } +- + return FALSE; + } + +-- +1.8.3.1 + diff --git a/SOURCES/002-status-deletion.patch b/SOURCES/002-status-deletion.patch deleted file mode 100644 index 1a31cdc..0000000 --- a/SOURCES/002-status-deletion.patch +++ /dev/null @@ -1,2064 +0,0 @@ -From 9e4addbcb67ea8e36ba853f1e401d8a6cb6a0aa3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 20 Dec 2019 11:34:06 -0600 -Subject: [PATCH 1/8] Refactor: scheduler: reduce code duplication when - displaying resources - -Refactor native_output_string() to use GString, for readability and -maintainability. 
Refactor common_print() to use it, to reduce duplication and -ensure displays are consistent. - -This makes a couple small changes in how things are shown: - -* If pe_print_dev is enabled (a debugging flag not actually used by anything), - the additional resource fields are shown with the resource flags rather than - their own parenthesized list. - -* The new output model is now consistent with the legacy print model in - displaying resource flags with commas (not spaces) between them. ---- - include/crm/pengine/common.h | 24 +-- - lib/pengine/native.c | 410 +++++++++++++++++-------------------------- - 2 files changed, 168 insertions(+), 266 deletions(-) - -diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h -index e497f9c..48c2b66 100644 ---- a/include/crm/pengine/common.h -+++ b/include/crm/pengine/common.h -@@ -1,22 +1,12 @@ --/* -- * Copyright 2004-2018 the Pacemaker project contributors -+/* -+ * Copyright 2004-2019 the Pacemaker project contributors - * - * The version control history for this file may have further details. -- * -- * This program is free software; you can redistribute it and/or -- * modify it under the terms of the GNU Lesser General Public -- * License as published by the Free Software Foundation; either -- * version 2 of the License, or (at your option) any later version. -- * -- * This software is distributed in the hope that it will be useful, -- * but WITHOUT ANY WARRANTY; without even the implied warranty of -- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- * General Public License for more details. -- * -- * You should have received a copy of the GNU Lesser General Public -- * License along with this library; if not, write to the Free Software -- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ * -+ * This source code is licensed under the GNU Lesser General Public License -+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. - */ -+ - #ifndef PE_COMMON__H - # define PE_COMMON__H - -@@ -104,7 +94,7 @@ enum pe_print_options { - pe_print_html = 0x0002, - pe_print_ncurses = 0x0004, - pe_print_printf = 0x0008, -- pe_print_dev = 0x0010, -+ pe_print_dev = 0x0010, // Debugging (@COMPAT probably not useful) - pe_print_details = 0x0020, - pe_print_max_details = 0x0040, - pe_print_rsconly = 0x0080, -diff --git a/lib/pengine/native.c b/lib/pengine/native.c -index fdb98e0..8fd98bc 100644 ---- a/lib/pengine/native.c -+++ b/lib/pengine/native.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -490,165 +490,172 @@ native_print_xml(resource_t * rsc, const char *pre_text, long options, void *pri - } - } - --/* making this inline rather than a macro prevents a coverity "unreachable" -- * warning on the first usage -- */ --static inline const char * --comma_if(int i) -+// Append a flag to resource description string's flags list -+static bool -+add_output_flag(GString *s, const char *flag_desc, bool have_flags) - { -- return i? ", " : ""; -+ g_string_append(s, (have_flags? 
", " : " (")); -+ g_string_append(s, flag_desc); -+ return true; - } - --static char * --flags_string(pe_resource_t *rsc, pe_node_t *node, long options, -- const char *target_role) -+// Append a node name to resource description string's node list -+static bool -+add_output_node(GString *s, const char *node, bool have_nodes) - { -- char *flags[6] = { NULL, }; -- char *result = NULL; -- int ndx = 0; -+ g_string_append(s, (have_nodes? " " : " [ ")); -+ g_string_append(s, node); -+ return true; -+} -+ -+/*! -+ * \internal -+ * \brief Create a string description of a resource -+ * -+ * \param[in] rsc Resource to describe -+ * \param[in] name Desired identifier for the resource -+ * \param[in] node If not NULL, node that resource is "on" -+ * \param[in] options Bitmask of pe_print_* -+ * \param[in] target_role Resource's target role -+ * \param[in] show_nodes Whether to display nodes when multiply active -+ * -+ * \return Newly allocated string description of resource -+ * \note Caller must free the result with g_free(). -+ */ -+static gchar * -+native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, -+ long options, const char *target_role, bool show_nodes) -+{ -+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -+ const char *provider = NULL; -+ const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); -+ char *retval = NULL; -+ GString *outstr = NULL; -+ bool have_flags = false; -+ -+ CRM_CHECK(name != NULL, name = "unknown"); -+ CRM_CHECK(kind != NULL, kind = "unknown"); -+ CRM_CHECK(class != NULL, class = "unknown"); -+ -+ if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { -+ provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); -+ } - -- if (node && node->details->online == FALSE && node->details->unclean) { -- flags[ndx++] = strdup("UNCLEAN"); -+ if (is_set(options, pe_print_rsconly) -+ || pcmk__list_of_multiple(rsc->running_on)) { -+ node = NULL; - } - -+ // We need a string of at least this size -+ outstr = g_string_sized_new(strlen(name) + strlen(class) + strlen(kind) -+ + (provider? (strlen(provider) + 2) : 0) -+ + (node? strlen(node->details->uname) + 1 : 0) -+ + 11); -+ -+ // Resource name and agent -+ g_string_printf(outstr, "%s\t(%s%s%s:%s):\t", name, class, -+ /* @COMPAT This should be a single ':' (see CLBZ#5395) but -+ * to avoid breaking anything relying on it, we're keeping -+ * it like this until the next minor version bump. -+ */ -+ (provider? "::" : ""), (provider? 
provider : ""), kind); -+ -+ // State on node -+ if (is_set(rsc->flags, pe_rsc_orphan)) { -+ g_string_append(outstr, " ORPHANED"); -+ } -+ if (is_set(rsc->flags, pe_rsc_failed)) { -+ enum rsc_role_e role = native_displayable_role(rsc); -+ -+ if (role > RSC_ROLE_SLAVE) { -+ g_string_append_printf(outstr, " FAILED %s", role2text(role)); -+ } else { -+ g_string_append(outstr, " FAILED"); -+ } -+ } else { -+ g_string_append(outstr, native_displayable_state(rsc, options)); -+ } -+ if (node) { -+ g_string_append_printf(outstr, " %s", node->details->uname); -+ } -+ -+ // Flags, as: ( [...]) -+ if (node && !(node->details->online) && node->details->unclean) { -+ have_flags = add_output_flag(outstr, "UNCLEAN", have_flags); -+ } - if (is_set(options, pe_print_pending)) { - const char *pending_task = native_pending_task(rsc); - - if (pending_task) { -- flags[ndx++] = strdup(pending_task); -+ have_flags = add_output_flag(outstr, pending_task, have_flags); - } - } -- - if (target_role) { - enum rsc_role_e target_role_e = text2role(target_role); - -- /* Ignore target role Started, as it is the default anyways -- * (and would also allow a Master to be Master). -- * Show if target role limits our abilities. */ -+ /* Only show target role if it limits our abilities (i.e. ignore -+ * Started, as it is the default anyways, and doesn't prevent the -+ * resource from becoming Master). -+ */ - if (target_role_e == RSC_ROLE_STOPPED) { -- flags[ndx++] = strdup("disabled"); -+ have_flags = add_output_flag(outstr, "disabled", have_flags); - - } else if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable) - && target_role_e == RSC_ROLE_SLAVE) { -- flags[ndx++] = crm_strdup_printf("target-role:%s", target_role); -+ have_flags = add_output_flag(outstr, "target-role:", have_flags); -+ g_string_append(outstr, target_role); - } - } -- - if (is_set(rsc->flags, pe_rsc_block)) { -- flags[ndx++] = strdup("blocked"); -- -+ have_flags = add_output_flag(outstr, "blocked", have_flags); - } else if (is_not_set(rsc->flags, pe_rsc_managed)) { -- flags[ndx++] = strdup("unmanaged"); -+ have_flags = add_output_flag(outstr, "unmanaged", have_flags); - } -- - if (is_set(rsc->flags, pe_rsc_failure_ignored)) { -- flags[ndx++] = strdup("failure ignored"); -+ have_flags = add_output_flag(outstr, "failure ignored", have_flags); - } -- -- if (ndx > 0) { -- char *total = g_strjoinv(" ", flags); -- -- result = crm_strdup_printf(" (%s)", total); -- g_free(total); -- } -- -- while (--ndx >= 0) { -- free(flags[ndx]); -- } -- return result; --} -- --static char * --native_output_string(resource_t *rsc, const char *name, node_t *node, long options, -- const char *target_role) { -- const char *desc = NULL; -- const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -- const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); -- enum rsc_role_e role = native_displayable_role(rsc); -- -- char *retval = NULL; -- -- char *unames = NULL; -- char *provider = NULL; -- const char *orphan = NULL; -- char *role_s = NULL; -- char *node_s = NULL; -- char *print_dev_s = NULL; -- char *flags_s = NULL; -- -- CRM_ASSERT(kind != NULL); -- -- if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { -- provider = crm_strdup_printf("::%s", crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER)); -+ if (is_set(options, pe_print_dev)) { -+ if (is_set(options, pe_rsc_provisional)) { -+ have_flags = add_output_flag(outstr, "provisional", have_flags); -+ } -+ if (is_not_set(options, pe_rsc_runnable)) { -+ have_flags = add_output_flag(outstr, 
"non-startable", have_flags); -+ } -+ have_flags = add_output_flag(outstr, "variant:", have_flags); -+ g_string_append_printf(outstr, "%s priority:%f", -+ crm_element_name(rsc->xml), -+ (double) (rsc->priority)); - } -- -- if (is_set(rsc->flags, pe_rsc_orphan)) { -- orphan = " ORPHANED"; -+ if (have_flags) { -+ g_string_append(outstr, ")"); - } - -- if (role > RSC_ROLE_SLAVE && is_set(rsc->flags, pe_rsc_failed)) { -- role_s = crm_strdup_printf(" FAILED %s", role2text(role)); -- } else if (is_set(rsc->flags, pe_rsc_failed)) { -- role_s = crm_strdup_printf(" FAILED"); -- } else { -- role_s = crm_strdup_printf(" %s", native_displayable_state(rsc, options)); -- } -+ // User-supplied description -+ if (is_set(options, pe_print_rsconly) -+ || pcmk__list_of_multiple(rsc->running_on)) { -+ const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC); - -- if (node) { -- node_s = crm_strdup_printf(" %s", node->details->uname); -+ if (desc) { -+ g_string_append_printf(outstr, " %s", desc); -+ } - } - -- if (is_set(options, pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { -- desc = crm_element_value(rsc->xml, XML_ATTR_DESC); -- } -+ if (show_nodes && is_not_set(options, pe_print_rsconly) -+ && pcmk__list_of_multiple(rsc->running_on)) { -+ bool have_nodes = false; - -- if (is_not_set(options, pe_print_rsconly) && g_list_length(rsc->running_on) > 1) { -- GListPtr gIter = rsc->running_on; -- gchar **arr = calloc(g_list_length(rsc->running_on)+1, sizeof(gchar *)); -- int i = 0; -- char *total = NULL; -+ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { -+ pe_node_t *n = (pe_node_t *) iter->data; - -- for (; gIter != NULL; gIter = gIter->next) { -- node_t *n = (node_t *) gIter->data; -- arr[i] = (gchar *) strdup(n->details->uname); -- i++; -+ have_nodes = add_output_node(outstr, n->details->uname, have_nodes); -+ } -+ if (have_nodes) { -+ g_string_append(outstr, " ]"); - } -- -- total = g_strjoinv(" ", arr); -- unames = crm_strdup_printf(" [ %s ]", total); -- -- g_free(total); -- g_strfreev(arr); - } - -- if (is_set(options, pe_print_dev)) { -- print_dev_s = crm_strdup_printf(" (%s%svariant=%s, priority=%f)", -- is_set(rsc->flags, pe_rsc_provisional) ? "provisional, " : "", -- is_set(rsc->flags, pe_rsc_runnable) ? "" : "non-startable, ", -- crm_element_name(rsc->xml), (double)rsc->priority); -- } -- -- flags_s = flags_string(rsc, node, options, target_role); -- -- retval = crm_strdup_printf("%s\t(%s%s:%s):\t%s%s%s%s%s%s%s%s", -- name, class, -- provider ? provider : "", -- kind, -- orphan ? orphan : "", -- role_s, -- node_s ? node_s : "", -- print_dev_s ? print_dev_s : "", -- flags_s ? flags_s : "", -- desc ? " " : "", desc ? desc : "", -- unames ? 
unames : ""); -- -- free(provider); -- free(role_s); -- free(node_s); -- free(unames); -- free(print_dev_s); -- free(flags_s); -- -+ retval = outstr->str; -+ g_string_free(outstr, FALSE); - return retval; - } - -@@ -656,7 +663,6 @@ void - pe__common_output_html(pcmk__output_t *out, resource_t * rsc, - const char *name, node_t *node, long options) - { -- char *s = NULL; - const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); - const char *target_role = NULL; - -@@ -675,10 +681,6 @@ pe__common_output_html(pcmk__output_t *out, resource_t * rsc, - target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); - } - -- if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { -- node = NULL; -- } -- - if (is_not_set(rsc->flags, pe_rsc_managed)) { - cl = "rsc-managed"; - -@@ -698,10 +700,14 @@ pe__common_output_html(pcmk__output_t *out, resource_t * rsc, - cl = "rsc-ok"; - } - -- s = native_output_string(rsc, name, node, options, target_role); -- list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL); -- pcmk_create_html_node(list_node, "span", NULL, cl, s); -- free(s); -+ { -+ gchar *s = native_output_string(rsc, name, node, options, target_role, -+ true); -+ -+ list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL); -+ pcmk_create_html_node(list_node, "span", NULL, cl, s); -+ g_free(s); -+ } - - if (is_set(options, pe_print_details)) { - GHashTableIter iter; -@@ -744,7 +750,6 @@ void - pe__common_output_text(pcmk__output_t *out, resource_t * rsc, - const char *name, node_t *node, long options) - { -- char *s = NULL; - const char *target_role = NULL; - - CRM_ASSERT(rsc->variant == pe_native); -@@ -758,13 +763,13 @@ pe__common_output_text(pcmk__output_t *out, resource_t * rsc, - target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); - } - -- if (is_set(options, pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { -- node = NULL; -- } -+ { -+ gchar *s = native_output_string(rsc, name, node, options, target_role, -+ true); - -- s = native_output_string(rsc, name, node, options, target_role); -- out->list_item(out, NULL, "%s", s); -- free(s); -+ out->list_item(out, NULL, "%s", s); -+ g_free(s); -+ } - - if (is_set(options, pe_print_details)) { - GHashTableIter iter; -@@ -806,22 +811,14 @@ pe__common_output_text(pcmk__output_t *out, resource_t * rsc, - void - common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data) - { -- const char *desc = NULL; -- const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -- const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); - const char *target_role = NULL; -- enum rsc_role_e role = native_displayable_role(rsc); -- -- int offset = 0; -- int flagOffset = 0; -- char buffer[LINE_MAX]; -- char flagBuffer[LINE_MAX]; - - CRM_ASSERT(rsc->variant == pe_native); -- CRM_ASSERT(kind != NULL); - - if (rsc->meta) { -- const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); -+ const char *is_internal = g_hash_table_lookup(rsc->meta, -+ XML_RSC_ATTR_INTERNAL_RSC); -+ - if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) { - crm_trace("skipping print of internal resource %s", rsc->id); - return; -@@ -829,17 +826,13 @@ common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *n - target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); - } - -- if (pre_text == NULL && (options & pe_print_printf)) { -- pre_text = " "; -- } -- - 
if (options & pe_print_xml) { - native_print_xml(rsc, pre_text, options, print_data); - return; - } - -- if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { -- node = NULL; -+ if ((pre_text == NULL) && (options & pe_print_printf)) { -+ pre_text = " "; - } - - if (options & pe_print_html) { -@@ -849,10 +842,10 @@ common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *n - } else if (is_set(rsc->flags, pe_rsc_failed)) { - status_print(""); - -- } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) { -+ } else if (rsc->running_on == NULL) { - status_print(""); - -- } else if (g_list_length(rsc->running_on) > 1) { -+ } else if (pcmk__list_of_multiple(rsc->running_on)) { - status_print(""); - - } else if (is_set(rsc->flags, pe_rsc_failure_ignored)) { -@@ -863,106 +856,29 @@ common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *n - } - } - -- if(pre_text) { -- offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", pre_text); -- } -- offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", name); -- offset += snprintf(buffer + offset, LINE_MAX - offset, "\t(%s", class); -- if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { -- const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); -- offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov); -- } -- offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s):\t", kind); -- if(is_set(rsc->flags, pe_rsc_orphan)) { -- offset += snprintf(buffer + offset, LINE_MAX - offset, " ORPHANED "); -- } -- if(role > RSC_ROLE_SLAVE && is_set(rsc->flags, pe_rsc_failed)) { -- offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED %s", role2text(role)); -- } else if(is_set(rsc->flags, pe_rsc_failed)) { -- offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED"); -- } else { -- const char *rsc_state = native_displayable_state(rsc, options); -- -- offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_state); -- } -- -- if(node) { -- offset += snprintf(buffer + offset, LINE_MAX - offset, " %s", node->details->uname); -- -- if (node->details->online == FALSE && node->details->unclean) { -- flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, -- "%sUNCLEAN", comma_if(flagOffset)); -- } -- } -- -- if (options & pe_print_pending) { -- const char *pending_task = native_pending_task(rsc); -- -- if (pending_task) { -- flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, -- "%s%s", comma_if(flagOffset), pending_task); -- } -- } -- -- if (target_role) { -- enum rsc_role_e target_role_e = text2role(target_role); -- -- /* Ignore target role Started, as it is the default anyways -- * (and would also allow a Master to be Master). -- * Show if target role limits our abilities. 
*/ -- if (target_role_e == RSC_ROLE_STOPPED) { -- flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, -- "%sdisabled", comma_if(flagOffset)); -- -- } else if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable) -- && target_role_e == RSC_ROLE_SLAVE) { -- flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, -- "%starget-role:%s", comma_if(flagOffset), target_role); -- } -- } -- -- if (is_set(rsc->flags, pe_rsc_block)) { -- flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, -- "%sblocked", comma_if(flagOffset)); -- -- } else if (is_not_set(rsc->flags, pe_rsc_managed)) { -- flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, -- "%sunmanaged", comma_if(flagOffset)); -- } -- -- if(is_set(rsc->flags, pe_rsc_failure_ignored)) { -- flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, -- "%sfailure ignored", comma_if(flagOffset)); -- } -- -- if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { -- desc = crm_element_value(rsc->xml, XML_ATTR_DESC); -- } -- -- CRM_LOG_ASSERT(offset > 0); -- if(flagOffset > 0) { -- status_print("%s (%s)%s%s", buffer, flagBuffer, desc?" ":"", desc?desc:""); -- } else { -- status_print("%s%s%s", buffer, desc?" ":"", desc?desc:""); -+ { -+ gchar *resource_s = native_output_string(rsc, name, node, options, -+ target_role, false); -+ status_print("%s%s", (pre_text? pre_text : ""), resource_s); -+ g_free(resource_s); - } - - #if CURSES_ENABLED -- if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { -- /* Done */ -- -- } else if (options & pe_print_ncurses) { -+ if (is_set(options, pe_print_ncurses) -+ && is_not_set(options, pe_print_rsconly) -+ && !pcmk__list_of_multiple(rsc->running_on)) { - /* coverity[negative_returns] False positive */ - move(-1, 0); - } - #endif - -- if (options & pe_print_html) { -+ if (is_set(options, pe_print_html)) { - status_print(" "); - } - -- if ((options & pe_print_rsconly)) { -+ if (is_not_set(options, pe_print_rsconly) -+ && pcmk__list_of_multiple(rsc->running_on)) { - -- } else if (g_list_length(rsc->running_on) > 1) { - GListPtr gIter = rsc->running_on; - int counter = 0; - -@@ -1025,10 +941,6 @@ common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *n - GHashTableIter iter; - node_t *n = NULL; - -- status_print("%s\t(%s%svariant=%s, priority=%f)", pre_text, -- is_set(rsc->flags, pe_rsc_provisional) ? "provisional, " : "", -- is_set(rsc->flags, pe_rsc_runnable) ? "" : "non-startable, ", -- crm_element_name(rsc->xml), (double)rsc->priority); - status_print("%s\tAllowed Nodes", pre_text); - g_hash_table_iter_init(&iter, rsc->allowed_nodes); - while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { --- -1.8.3.1 - - -From 41e911be8ea9151b3f0758c2c22c0e69b8b78d93 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 19 Dec 2019 17:18:41 -0600 -Subject: [PATCH 2/8] Log: scheduler: drop redundant trace messages - -We logged "applying placement constraints" three times. 
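
(Stepping back to PATCH 1/8 above for a moment: its add_output_flag() helper is the heart of the new flag formatting and is easiest to see in isolation. The sketch below copies the helper as it appears in the patch; the main() driver and the sample resource line are invented for demonstration, and only glib is needed to build it.)

    #include <glib.h>
    #include <stdbool.h>
    #include <stdio.h>

    // Append a flag, opening the " (" list on first use (as in PATCH 1/8)
    static bool
    add_output_flag(GString *s, const char *flag_desc, bool have_flags)
    {
        g_string_append(s, (have_flags? ", " : " ("));
        g_string_append(s, flag_desc);
        return true;
    }

    int
    main(void)
    {
        GString *out = g_string_new("rsc1\t(ocf::pacemaker:Dummy):\tStarted");
        bool have_flags = false;

        have_flags = add_output_flag(out, "unmanaged", have_flags);
        have_flags = add_output_flag(out, "failure ignored", have_flags);
        if (have_flags) {
            g_string_append(out, ")");
        }
        // Prints: rsc1 (ocf::pacemaker:Dummy): Started (unmanaged, failure ignored)
        printf("%s\n", out->str);
        g_string_free(out, TRUE);
        return 0;
    }

Opening the " (" list lazily on the first flag is what lets callers print nothing at all when no flags apply, instead of special-casing an empty list.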
---- - lib/pacemaker/pcmk_sched_allocate.c | 17 ++++------------- - 1 file changed, 4 insertions(+), 13 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c -index ca43c71..dde8b69 100644 ---- a/lib/pacemaker/pcmk_sched_allocate.c -+++ b/lib/pacemaker/pcmk_sched_allocate.c -@@ -623,21 +623,15 @@ check_actions(pe_working_set_t * data_set) - } - } - --static gboolean -+static void - apply_placement_constraints(pe_working_set_t * data_set) - { -- GListPtr gIter = NULL; -- -- crm_trace("Applying constraints..."); -- -- for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { -+ for (GList *gIter = data_set->placement_constraints; -+ gIter != NULL; gIter = gIter->next) { - pe__location_t *cons = gIter->data; - - cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons); - } -- -- return TRUE; -- - } - - static gboolean -@@ -994,10 +988,7 @@ stage2(pe_working_set_t * data_set) - { - GListPtr gIter = NULL; - -- crm_trace("Applying placement constraints"); -- -- gIter = data_set->nodes; -- for (; gIter != NULL; gIter = gIter->next) { -+ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { - node_t *node = (node_t *) gIter->data; - - if (node == NULL) { --- -1.8.3.1 - - -From 7fe136e19b5018d609beb8bad4e34234739572c9 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Sat, 7 Dec 2019 12:13:11 -0600 -Subject: [PATCH 3/8] Refactor: libcrmcommon: convenience functions for list - length comparisons - -... for efficiency and readability ---- - include/crm/common/internal.h | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - -diff --git a/include/crm/common/internal.h b/include/crm/common/internal.h -index da2c7d7..484c836 100644 ---- a/include/crm/common/internal.h -+++ b/include/crm/common/internal.h -@@ -126,6 +126,20 @@ crm_getpid_s() - return crm_strdup_printf("%lu", (unsigned long) getpid()); - } - -+// More efficient than g_list_length(list) == 1 -+static inline bool -+pcmk__list_of_1(GList *list) -+{ -+ return list && (list->next == NULL); -+} -+ -+// More efficient than g_list_length(list) > 1 -+static inline bool -+pcmk__list_of_multiple(GList *list) -+{ -+ return list && (list->next != NULL); -+} -+ - /* convenience functions for failure-related node attributes */ - - #define CRM_FAIL_COUNT_PREFIX "fail-count" --- -1.8.3.1 - - -From 9ff4f6bca540576f0a3333c959e8014ed168353f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 16 Dec 2019 14:13:30 -0600 -Subject: [PATCH 4/8] Refactor: libcrmcommon: add convenience macros for - plurals - -I've avoided making s_if_plural() an official API due to its hackiness, but -it really is the best solution for now. Promote it to pcmk__plural_s(), along -with a companion macro pcmk__plural_alt() for more complicated plurals. ---- - include/crm/common/internal.h | 23 +++++++++++++++++++++++ - 1 file changed, 23 insertions(+) - -diff --git a/include/crm/common/internal.h b/include/crm/common/internal.h -index 484c836..ee560c9 100644 ---- a/include/crm/common/internal.h -+++ b/include/crm/common/internal.h -@@ -107,6 +107,29 @@ bool crm_compress_string(const char *data, int length, int max, char **result, - unsigned int *result_len); - gint crm_alpha_sort(gconstpointer a, gconstpointer b); - -+/* Correctly displaying singular or plural is complicated; consider "1 node has" -+ * vs. "2 nodes have". A flexible solution is to pluralize entire strings, e.g. 
-+ * -+ * if (a == 1) { -+ * crm_info("singular message"): -+ * } else { -+ * crm_info("plural message"); -+ * } -+ * -+ * though even that's not sufficient for all languages besides English (if we -+ * ever desire to do translations of output and log messages). But the following -+ * convenience macros are "good enough" and more concise for many cases. -+ */ -+ -+/* Example: -+ * crm_info("Found %d %s", nentries, -+ * pcmk__plural_alt(nentries, "entry", "entries")); -+ */ -+#define pcmk__plural_alt(i, s1, s2) (((i) == 1)? (s1) : (s2)) -+ -+// Example: crm_info("Found %d node%s", nnodes, pcmk__plural_s(nnodes)); -+#define pcmk__plural_s(i) pcmk__plural_alt(i, "", "s") -+ - static inline char * - crm_concat(const char *prefix, const char *suffix, char join) - { --- -1.8.3.1 - - -From 0378db5030400202e59b2bae0dabd65d00a3e9c8 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 12 Dec 2019 20:50:50 -0600 -Subject: [PATCH 5/8] Log: controller: improve join messages - ---- - daemons/controld/controld_fsa.c | 81 ++++---- - daemons/controld/controld_join_dc.c | 383 +++++++++++++++++++++--------------- - 2 files changed, 268 insertions(+), 196 deletions(-) - -diff --git a/daemons/controld/controld_fsa.c b/daemons/controld/controld_fsa.c -index 6760224..b985fa9 100644 ---- a/daemons/controld/controld_fsa.c -+++ b/daemons/controld/controld_fsa.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -460,12 +460,53 @@ log_fsa_input(fsa_data_t * stored_msg) - } - } - -+static void -+check_join_counts(fsa_data_t *msg_data) -+{ -+ int count; -+ guint npeers; -+ -+ count = crmd_join_phase_count(crm_join_finalized); -+ if (count > 0) { -+ crm_err("%d cluster node%s failed to confirm join", -+ count, pcmk__plural_s(count)); -+ crmd_join_phase_log(LOG_NOTICE); -+ return; -+ } -+ -+ npeers = crm_active_peers(); -+ count = crmd_join_phase_count(crm_join_confirmed); -+ if (count == npeers) { -+ if (npeers == 1) { -+ crm_debug("Sole active cluster node is fully joined"); -+ } else { -+ crm_debug("All %d active cluster nodes are fully joined", count); -+ } -+ -+ } else if (count > npeers) { -+ crm_err("New election needed because more nodes confirmed join " -+ "than are in membership (%d > %u)", count, npeers); -+ register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL); -+ -+ } else if (saved_ccm_membership_id != crm_peer_seq) { -+ crm_info("New join needed because membership changed (%llu -> %llu)", -+ saved_ccm_membership_id, crm_peer_seq); -+ register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL); -+ -+ } else { -+ crm_warn("Only %d of %u active cluster nodes fully joined " -+ "(%d did not respond to offer)", -+ count, npeers, crmd_join_phase_count(crm_join_welcomed)); -+ } -+} -+ - long long - do_state_transition(long long actions, - enum crmd_fsa_state cur_state, - enum crmd_fsa_state next_state, fsa_data_t * msg_data) - { - int level = LOG_INFO; -+ int count = 0; - long long tmp = actions; - gboolean clear_recovery_bit = TRUE; - -@@ -563,13 +604,14 @@ do_state_transition(long long actions, - crm_warn("Progressed to state %s after %s", - fsa_state2string(next_state), fsa_cause2string(cause)); - } -- if (crmd_join_phase_count(crm_join_welcomed) > 0) { -- crm_warn("%u cluster nodes failed to respond" -- " to the join offer.", crmd_join_phase_count(crm_join_welcomed)); -+ count = crmd_join_phase_count(crm_join_welcomed); -+ if (count > 
0) { -+ crm_warn("%d cluster node%s failed to respond to join offer", -+ count, pcmk__plural_s(count)); - crmd_join_phase_log(LOG_NOTICE); - - } else { -- crm_debug("All %d cluster nodes responded to the join offer.", -+ crm_debug("All cluster nodes (%d) responded to join offer", - crmd_join_phase_count(crm_join_integrated)); - } - break; -@@ -581,34 +623,7 @@ do_state_transition(long long actions, - crm_info("Progressed to state %s after %s", - fsa_state2string(next_state), fsa_cause2string(cause)); - } -- -- if (crmd_join_phase_count(crm_join_finalized) > 0) { -- crm_err("%u cluster nodes failed to confirm their join.", -- crmd_join_phase_count(crm_join_finalized)); -- crmd_join_phase_log(LOG_NOTICE); -- -- } else if (crmd_join_phase_count(crm_join_confirmed) -- == crm_active_peers()) { -- crm_debug("All %u cluster nodes are" -- " eligible to run resources.", crm_active_peers()); -- -- } else if (crmd_join_phase_count(crm_join_confirmed) > crm_active_peers()) { -- crm_err("We have more confirmed nodes than our membership does: %d vs. %d", -- crmd_join_phase_count(crm_join_confirmed), crm_active_peers()); -- register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL); -- -- } else if (saved_ccm_membership_id != crm_peer_seq) { -- crm_info("Membership changed: %llu -> %llu - join restart", -- saved_ccm_membership_id, crm_peer_seq); -- register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL); -- -- } else { -- crm_warn("Only %u of %u cluster " -- "nodes are eligible to run resources - continue %d", -- crmd_join_phase_count(crm_join_confirmed), -- crm_active_peers(), crmd_join_phase_count(crm_join_welcomed)); -- } --/* initialize_join(FALSE); */ -+ check_join_counts(msg_data); - break; - - case S_STOPPING: -diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c -index 988aaa6..54324b2 100644 ---- a/daemons/controld/controld_join_dc.c -+++ b/daemons/controld/controld_join_dc.c -@@ -26,7 +26,11 @@ void finalize_join_for(gpointer key, gpointer value, gpointer user_data); - void finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data); - gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source); - -+/* Numeric counter used to identify join rounds (an unsigned int would be -+ * appropriate, except we get and set it in XML as int) -+ */ - static int current_join_id = 0; -+ - unsigned long long saved_ccm_membership_id = 0; - - void -@@ -34,12 +38,7 @@ crm_update_peer_join(const char *source, crm_node_t * node, enum crm_join_phase - { - enum crm_join_phase last = 0; - -- if(node == NULL) { -- crm_err("Could not update join because node not specified" -- CRM_XS " join-%u source=%s phase=%s", -- current_join_id, source, crm_join_phase_str(phase)); -- return; -- } -+ CRM_CHECK(node != NULL, return); - - /* Remote nodes do not participate in joins */ - if (is_set(node->flags, crm_remote_node)) { -@@ -49,21 +48,23 @@ crm_update_peer_join(const char *source, crm_node_t * node, enum crm_join_phase - last = node->join; - - if(phase == last) { -- crm_trace("%s: Node %s[%u] - join-%u phase still %s", -- source, node->uname, node->id, current_join_id, -- crm_join_phase_str(last)); -+ crm_trace("Node %s join-%d phase is still %s " -+ CRM_XS " nodeid=%u source=%s", -+ node->uname, current_join_id, crm_join_phase_str(last), -+ node->id, source); - - } else if ((phase <= crm_join_none) || (phase == (last + 1))) { - node->join = phase; -- crm_info("%s: Node %s[%u] - join-%u phase %s -> %s", -- source, node->uname, node->id, 
current_join_id, -- crm_join_phase_str(last), crm_join_phase_str(phase)); -+ crm_trace("Node %s join-%d phase is now %s (was %s) " -+ CRM_XS " nodeid=%u source=%s", -+ node->uname, current_join_id, crm_join_phase_str(phase), -+ crm_join_phase_str(last), node->id, source); - - } else { -- crm_err("Could not update join for node %s because phase transition invalid " -- CRM_XS " join-%u source=%s node_id=%u last=%s new=%s", -- node->uname, current_join_id, source, node->id, -- crm_join_phase_str(last), crm_join_phase_str(phase)); -+ crm_warn("Rejecting join-%d phase update for node %s because " -+ "can't go from %s to %s " CRM_XS " nodeid=%u source=%s", -+ current_join_id, node->uname, crm_join_phase_str(last), -+ crm_join_phase_str(phase), node->id, source); - } - } - -@@ -73,9 +74,7 @@ initialize_join(gboolean before) - GHashTableIter iter; - crm_node_t *peer = NULL; - -- /* clear out/reset a bunch of stuff */ -- crm_debug("join-%d: Initializing join data (flag=%s)", -- current_join_id, before ? "true" : "false"); -+ crm_debug("Starting new join round join-%d", current_join_id); - - g_hash_table_iter_init(&iter, crm_peer_cache); - while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) { -@@ -128,7 +127,9 @@ join_make_offer(gpointer key, gpointer value, gpointer user_data) - - CRM_ASSERT(member != NULL); - if (crm_is_peer_active(member) == FALSE) { -- crm_info("Not making an offer to %s: not active (%s)", member->uname, member->state); -+ crm_info("Not making join-%d offer to inactive node %s", -+ current_join_id, -+ (member->uname? member->uname : "with unknown name")); - if(member->expected == NULL && safe_str_eq(member->state, CRM_NODE_LOST)) { - /* You would think this unsafe, but in fact this plus an - * active resource is what causes it to be fenced. 
-@@ -145,17 +146,21 @@ join_make_offer(gpointer key, gpointer value, gpointer user_data) - } - - if (member->uname == NULL) { -- crm_info("No recipient for welcome message.(Node uuid:%s)", member->uuid); -+ crm_info("Not making join-%d offer to node uuid %s with unknown name", -+ current_join_id, member->uuid); - return; - } - - if (saved_ccm_membership_id != crm_peer_seq) { - saved_ccm_membership_id = crm_peer_seq; -- crm_info("Making join offers based on membership %llu", crm_peer_seq); -+ crm_info("Making join-%d offers based on membership event %llu", -+ current_join_id, crm_peer_seq); - } - - if(user_data && member->join > crm_join_none) { -- crm_info("Skipping %s: already known %d", member->uname, member->join); -+ crm_info("Not making join-%d offer to already known node %s (%s)", -+ current_join_id, member->uname, -+ crm_join_phase_str(member->join)); - return; - } - -@@ -166,14 +171,11 @@ join_make_offer(gpointer key, gpointer value, gpointer user_data) - // Advertise our feature set so the joining node can bail if not compatible - crm_xml_add(offer, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); - -- /* send the welcome */ -- crm_info("join-%d: Sending offer to %s", current_join_id, member->uname); -- -+ crm_info("Sending join-%d offer to %s", current_join_id, member->uname); - send_cluster_message(member, crm_msg_crmd, offer, TRUE); - free_xml(offer); - - crm_update_peer_join(__FUNCTION__, member, crm_join_welcomed); -- /* crm_update_peer_expected(__FUNCTION__, member, CRMD_JOINSTATE_PENDING); */ - } - - /* A_DC_JOIN_OFFER_ALL */ -@@ -183,6 +185,8 @@ do_dc_join_offer_all(long long action, - enum crmd_fsa_state cur_state, - enum crmd_fsa_input current_input, fsa_data_t * msg_data) - { -+ int count; -+ - /* Reset everyone's status back to down or in_ccm in the CIB. - * Any nodes that are active in the CIB but not in the cluster membership - * will be seen as offline by the scheduler anyway. 
-@@ -197,9 +201,11 @@ do_dc_join_offer_all(long long action, - } - g_hash_table_foreach(crm_peer_cache, join_make_offer, NULL); - -+ count = crmd_join_phase_count(crm_join_welcomed); -+ crm_info("Waiting on join-%d requests from %d outstanding node%s", -+ current_join_id, count, pcmk__plural_s(count)); -+ - // Don't waste time by invoking the scheduler yet -- crm_info("join-%d: Waiting on %d outstanding join acks", -- current_join_id, crmd_join_phase_count(crm_join_welcomed)); - } - - /* A_DC_JOIN_OFFER_ONE */ -@@ -211,50 +217,40 @@ do_dc_join_offer_one(long long action, - { - crm_node_t *member; - ha_msg_input_t *welcome = NULL; -- -- const char *op = NULL; -+ int count; - const char *join_to = NULL; - -- if (msg_data->data) { -- welcome = fsa_typed_data(fsa_dt_ha_msg); -- -- } else { -- crm_info("An unknown node joined - (re-)offer to any unconfirmed nodes"); -+ if (msg_data->data == NULL) { -+ crm_info("Making join-%d offers to any unconfirmed nodes " -+ "because an unknown node joined", current_join_id); - g_hash_table_foreach(crm_peer_cache, join_make_offer, &member); - check_join_state(cur_state, __FUNCTION__); - return; - } - -+ welcome = fsa_typed_data(fsa_dt_ha_msg); - if (welcome == NULL) { -- crm_err("Attempt to send welcome message without a message to reply to!"); -+ // fsa_typed_data() already logged an error - return; - } - - join_to = crm_element_value(welcome->msg, F_CRM_HOST_FROM); - if (join_to == NULL) { -- crm_err("Attempt to send welcome message without a host to reply to!"); -+ crm_err("Can't make join-%d offer to unknown node", current_join_id); - return; - } -- - member = crm_get_peer(0, join_to); -- op = crm_element_value(welcome->msg, F_CRM_TASK); -- if (join_to != NULL && (cur_state == S_INTEGRATION || cur_state == S_FINALIZE_JOIN)) { -- /* note: it _is_ possible that a node will have been -- * sick or starting up when the original offer was made. -- * however, it will either re-announce itself in due course -- * _or_ we can re-store the original offer on the client. -- */ -- crm_trace("(Re-)offering membership to %s...", join_to); -- } - -- crm_info("join-%d: Processing %s request from %s in state %s", -- current_join_id, op, join_to, fsa_state2string(cur_state)); -+ /* It is possible that a node will have been sick or starting up when the -+ * original offer was made. However, it will either re-announce itself in -+ * due course, or we can re-store the original offer on the client. -+ */ - - crm_update_peer_join(__FUNCTION__, member, crm_join_none); - join_make_offer(NULL, member, NULL); - -- /* always offer to the DC (ourselves) -- * this ensures the correct value for max_generation_from -+ /* If the offer isn't to the local node, make an offer to the local node as -+ * well, to ensure the correct value for max_generation_from. 
- */ - if (strcmp(join_to, fsa_our_uname) != 0) { - member = crm_get_peer(0, fsa_our_uname); -@@ -266,9 +262,11 @@ do_dc_join_offer_one(long long action, - */ - abort_transition(INFINITY, tg_restart, "Node join", NULL); - -+ count = crmd_join_phase_count(crm_join_welcomed); -+ crm_info("Waiting on join-%d requests from %d outstanding node%s", -+ current_join_id, count, pcmk__plural_s(count)); -+ - // Don't waste time by invoking the scheduler yet -- crm_debug("Waiting on %d outstanding join acks for join-%d", -- crmd_join_phase_count(crm_join_welcomed), current_join_id); - } - - static int -@@ -301,22 +299,31 @@ do_dc_join_filter_offer(long long action, - - int cmp = 0; - int join_id = -1; -+ int count = 0; - gboolean ack_nack_bool = TRUE; -- const char *ack_nack = CRMD_JOINSTATE_MEMBER; - ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); - - const char *join_from = crm_element_value(join_ack->msg, F_CRM_HOST_FROM); - const char *ref = crm_element_value(join_ack->msg, F_CRM_REFERENCE); - const char *join_version = crm_element_value(join_ack->msg, - XML_ATTR_CRM_VERSION); -+ crm_node_t *join_node = NULL; - -- crm_node_t *join_node = crm_get_peer(0, join_from); -- -- crm_debug("Processing req from %s", join_from); -+ if (join_from == NULL) { -+ crm_err("Ignoring invalid join request without node name"); -+ return; -+ } -+ join_node = crm_get_peer(0, join_from); - -- generation = join_ack->xml; - crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id); -+ if (join_id != current_join_id) { -+ crm_debug("Ignoring join-%d request from %s because we are on join-%d", -+ join_id, join_from, current_join_id); -+ check_join_state(cur_state, __FUNCTION__); -+ return; -+ } - -+ generation = join_ack->xml; - if (max_generation_xml != NULL && generation != NULL) { - int lpc = 0; - -@@ -331,68 +338,71 @@ do_dc_join_filter_offer(long long action, - } - } - -- if (join_id != current_join_id) { -- crm_debug("Invalid response from %s: join-%d vs. join-%d", -- join_from, join_id, current_join_id); -- check_join_state(cur_state, __FUNCTION__); -- return; -+ if (ref == NULL) { -+ ref = "none"; // for logging only -+ } - -- } else if (join_node == NULL || crm_is_peer_active(join_node) == FALSE) { -- crm_err("Node %s is not a member", join_from); -+ if (crm_is_peer_active(join_node) == FALSE) { -+ crm_err("Rejecting join-%d request from inactive node %s " -+ CRM_XS " ref=%s", join_id, join_from, ref); - ack_nack_bool = FALSE; - - } else if (generation == NULL) { -- crm_err("Generation was NULL"); -+ crm_err("Rejecting invalid join-%d request from node %s " -+ "missing CIB generation " CRM_XS " ref=%s", -+ join_id, join_from, ref); - ack_nack_bool = FALSE; - - } else if ((join_version == NULL) - || !feature_set_compatible(CRM_FEATURE_SET, join_version)) { -- crm_err("Node %s feature set (%s) is incompatible with ours (%s)", -- join_from, (join_version? join_version : "pre-3.1.0"), -- CRM_FEATURE_SET); -+ crm_err("Rejecting join-%d request from node %s because feature set %s" -+ " is incompatible with ours (%s) " CRM_XS " ref=%s", -+ join_id, join_from, (join_version? 
join_version : "pre-3.1.0"), -+ CRM_FEATURE_SET, ref); - ack_nack_bool = FALSE; - - } else if (max_generation_xml == NULL) { -+ crm_debug("Accepting join-%d request from %s " -+ "(with first CIB generation) " CRM_XS " ref=%s", -+ join_id, join_from, ref); - max_generation_xml = copy_xml(generation); - max_generation_from = strdup(join_from); - - } else if (cmp < 0 || (cmp == 0 && safe_str_eq(join_from, fsa_our_uname))) { -- crm_debug("%s has a better generation number than" -- " the current max %s", join_from, max_generation_from); -- if (max_generation_xml) { -- crm_log_xml_debug(max_generation_xml, "Max generation"); -- } -- crm_log_xml_debug(generation, "Their generation"); -+ crm_debug("Accepting join-%d request from %s (with better " -+ "CIB generation than current best from %s) " CRM_XS " ref=%s", -+ join_id, join_from, max_generation_from, ref); -+ crm_log_xml_debug(max_generation_xml, "Old max generation"); -+ crm_log_xml_debug(generation, "New max generation"); - - free(max_generation_from); - free_xml(max_generation_xml); - - max_generation_from = strdup(join_from); - max_generation_xml = copy_xml(join_ack->xml); -+ -+ } else { -+ crm_debug("Accepting join-%d request from %s " CRM_XS " ref=%s", -+ join_id, join_from, ref); - } - - if (ack_nack_bool == FALSE) { -- /* NACK this client */ -- ack_nack = CRMD_JOINSTATE_NACK; - crm_update_peer_join(__FUNCTION__, join_node, crm_join_nack); -- crm_err("Rejecting cluster join request from %s " CRM_XS -- " NACK join-%d ref=%s", join_from, join_id, ref); -- -+ crm_update_peer_expected(__FUNCTION__, join_node, CRMD_JOINSTATE_NACK); - } else { -- crm_debug("join-%d: Welcoming node %s (ref %s)", join_id, join_from, ref); - crm_update_peer_join(__FUNCTION__, join_node, crm_join_integrated); -+ crm_update_peer_expected(__FUNCTION__, join_node, CRMD_JOINSTATE_MEMBER); - } - -- crm_update_peer_expected(__FUNCTION__, join_node, ack_nack); -- -- crm_debug("%u nodes have been integrated into join-%d", -- crmd_join_phase_count(crm_join_integrated), join_id); -- -+ count = crmd_join_phase_count(crm_join_integrated); -+ crm_debug("%d node%s currently integrated in join-%d", -+ count, pcmk__plural_s(count), join_id); - - if (check_join_state(cur_state, __FUNCTION__) == FALSE) { - // Don't waste time by invoking the scheduler yet -- crm_debug("join-%d: Still waiting on %d outstanding offers", -- join_id, crmd_join_phase_count(crm_join_welcomed)); -+ count = crmd_join_phase_count(crm_join_welcomed); -+ crm_debug("Waiting on join-%d requests from %d outstanding node%s", -+ join_id, count, pcmk__plural_s(count)); - } - } - -@@ -405,21 +415,24 @@ do_dc_join_finalize(long long action, - { - char *sync_from = NULL; - int rc = pcmk_ok; -+ int count_welcomed = crmd_join_phase_count(crm_join_welcomed); -+ int count_integrated = crmd_join_phase_count(crm_join_integrated); - - /* This we can do straight away and avoid clients timing us out - * while we compute the latest CIB - */ -- crm_debug("Finalizing join-%d for %d clients", -- current_join_id, crmd_join_phase_count(crm_join_integrated)); -- -- crmd_join_phase_log(LOG_INFO); -- if (crmd_join_phase_count(crm_join_welcomed) != 0) { -- crm_info("Waiting for %d more nodes", crmd_join_phase_count(crm_join_welcomed)); -+ if (count_welcomed != 0) { -+ crm_debug("Waiting on join-%d requests from %d outstanding node%s " -+ "before finalizing join", current_join_id, count_welcomed, -+ pcmk__plural_s(count_welcomed)); -+ crmd_join_phase_log(LOG_DEBUG); - /* crmd_fsa_stall(FALSE); Needed? 
*/ - return; - -- } else if (crmd_join_phase_count(crm_join_integrated) == 0) { -- /* Nothing to do */ -+ } else if (count_integrated == 0) { -+ crm_debug("Finalization not needed for join-%d at the current time", -+ current_join_id); -+ crmd_join_phase_log(LOG_DEBUG); - check_join_state(fsa_state, __FUNCTION__); - return; - } -@@ -430,8 +443,9 @@ do_dc_join_finalize(long long action, - } - - if (is_set(fsa_input_register, R_IN_TRANSITION)) { -- crm_warn("Delaying response to cluster join offer while transition in progress " -- CRM_XS " join-%d", current_join_id); -+ crm_warn("Delaying join-%d finalization while transition in progress", -+ current_join_id); -+ crmd_join_phase_log(LOG_DEBUG); - crmd_fsa_stall(FALSE); - return; - } -@@ -440,18 +454,20 @@ do_dc_join_finalize(long long action, - /* ask for the agreed best CIB */ - sync_from = strdup(max_generation_from); - set_bit(fsa_input_register, R_CIB_ASKED); -- crm_notice("Syncing the Cluster Information Base from %s to rest of cluster " -- CRM_XS " join-%d", sync_from, current_join_id); -- crm_log_xml_notice(max_generation_xml, "Requested version"); -+ crm_notice("Finalizing join-%d for %d node%s (sync'ing CIB from %s)", -+ current_join_id, count_integrated, -+ pcmk__plural_s(count_integrated), sync_from); -+ crm_log_xml_notice(max_generation_xml, "Requested CIB version"); - - } else { - /* Send _our_ CIB out to everyone */ - sync_from = strdup(fsa_our_uname); -- crm_info("join-%d: Syncing our CIB to the rest of the cluster", -- current_join_id); -- crm_log_xml_debug(max_generation_xml, "Requested version"); -+ crm_debug("Finalizing join-%d for %d node%s (sync'ing from local CIB)", -+ current_join_id, count_integrated, -+ pcmk__plural_s(count_integrated)); -+ crm_log_xml_debug(max_generation_xml, "Requested CIB version"); - } -- -+ crmd_join_phase_log(LOG_DEBUG); - - rc = fsa_cib_conn->cmds->sync_from(fsa_cib_conn, sync_from, NULL, cib_quorum_override); - fsa_register_cib_callback(rc, FALSE, sync_from, finalize_sync_callback); -@@ -463,26 +479,33 @@ finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, voi - CRM_LOG_ASSERT(-EPERM != rc); - clear_bit(fsa_input_register, R_CIB_ASKED); - if (rc != pcmk_ok) { -- do_crm_log((rc == -pcmk_err_old_data ? LOG_WARNING : LOG_ERR), -- "Sync from %s failed: %s", (char *)user_data, pcmk_strerror(rc)); -+ do_crm_log(((rc == -pcmk_err_old_data)? 
LOG_WARNING : LOG_ERR), -+ "Could not sync CIB from %s in join-%d: %s", -+ (char *) user_data, current_join_id, pcmk_strerror(rc)); - - /* restart the whole join process */ - register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION_DC, NULL, NULL, __FUNCTION__); - -- } else if (AM_I_DC && fsa_state == S_FINALIZE_JOIN) { -+ } else if (!AM_I_DC) { -+ crm_debug("Sync'ed CIB for join-%d but no longer DC", current_join_id); -+ -+ } else if (fsa_state != S_FINALIZE_JOIN) { -+ crm_debug("Sync'ed CIB for join-%d but no longer in S_FINALIZE_JOIN (%s)", -+ current_join_id, fsa_state2string(fsa_state)); -+ -+ } else { - set_bit(fsa_input_register, R_HAVE_CIB); - clear_bit(fsa_input_register, R_CIB_ASKED); - - /* make sure dc_uuid is re-set to us */ - if (check_join_state(fsa_state, __FUNCTION__) == FALSE) { -- crm_debug("Notifying %d clients of join-%d results", -- crmd_join_phase_count(crm_join_integrated), current_join_id); -+ int count_integrated = crmd_join_phase_count(crm_join_integrated); -+ -+ crm_debug("Notifying %d node%s of join-%d results", -+ count_integrated, pcmk__plural_s(count_integrated), -+ current_join_id); - g_hash_table_foreach(crm_peer_cache, finalize_join_for, NULL); - } -- -- } else { -- crm_debug("No longer the DC in S_FINALIZE_JOIN: %s in %s", -- AM_I_DC ? "DC" : "controller", fsa_state2string(fsa_state)); - } - } - -@@ -492,11 +515,14 @@ join_update_complete_callback(xmlNode * msg, int call_id, int rc, xmlNode * outp - fsa_data_t *msg_data = NULL; - - if (rc == pcmk_ok) { -- crm_debug("Join update %d complete", call_id); -+ crm_debug("join-%d node history update (via CIB call %d) complete", -+ current_join_id, call_id); - check_join_state(fsa_state, __FUNCTION__); - - } else { -- crm_err("Join update %d failed", call_id); -+ crm_err("join-%d node history update (via CIB call %d) failed: %s " -+ "(next transition may determine resource status incorrectly)", -+ current_join_id, call_id, pcmk_strerror(rc)); - crm_log_xml_debug(msg, "failed"); - register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); - } -@@ -515,61 +541,75 @@ do_dc_join_ack(long long action, - - const char *op = crm_element_value(join_ack->msg, F_CRM_TASK); - const char *join_from = crm_element_value(join_ack->msg, F_CRM_HOST_FROM); -- crm_node_t *peer = crm_get_peer(0, join_from); -+ crm_node_t *peer = NULL; - -- if (safe_str_neq(op, CRM_OP_JOIN_CONFIRM) || peer == NULL) { -- crm_debug("Ignoring op=%s message from %s", op, join_from); -+ // Sanity checks -+ if (join_from == NULL) { -+ crm_warn("Ignoring message received without node identification"); -+ return; -+ } -+ if (op == NULL) { -+ crm_warn("Ignoring message received from %s without task", join_from); - return; - } - -- crm_trace("Processing ack from %s", join_from); -- crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id); -+ if (strcmp(op, CRM_OP_JOIN_CONFIRM)) { -+ crm_debug("Ignoring '%s' message from %s while waiting for '%s'", -+ op, join_from, CRM_OP_JOIN_CONFIRM); -+ return; -+ } - -+ if (crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id) != 0) { -+ crm_warn("Ignoring join confirmation from %s without valid join ID", -+ join_from); -+ return; -+ } -+ -+ peer = crm_get_peer(0, join_from); - if (peer->join != crm_join_finalized) { -- crm_info("Join not in progress: ignoring join-%d from %s (phase = %d)", -- join_id, join_from, peer->join); -+ crm_info("Ignoring out-of-sequence join-%d confirmation from %s " -+ "(currently %s not %s)", -+ join_id, join_from, crm_join_phase_str(peer->join), -+ crm_join_phase_str(crm_join_finalized)); - 
return; -+ } - -- } else if (join_id != current_join_id) { -- crm_err("Invalid response from %s: join-%d vs. join-%d", -- join_from, join_id, current_join_id); -+ if (join_id != current_join_id) { -+ crm_err("Rejecting join-%d confirmation from %s " -+ "because currently on join-%d", -+ join_id, join_from, current_join_id); - crm_update_peer_join(__FUNCTION__, peer, crm_join_nack); - return; - } - - crm_update_peer_join(__FUNCTION__, peer, crm_join_confirmed); - -- crm_info("join-%d: Updating node state to %s for %s", -- join_id, CRMD_JOINSTATE_MEMBER, join_from); -- -- /* update CIB with the current LRM status from the node -- * We don't need to notify the TE of these updates, a transition will -- * be started in due time -+ /* Update CIB with node's current executor state. A new transition will be -+ * triggered later, when the CIB notifies us of the change. - */ - erase_status_tag(join_from, XML_CIB_TAG_LRM, cib_scope_local); -- - if (safe_str_eq(join_from, fsa_our_uname)) { - xmlNode *now_dc_lrmd_state = do_lrm_query(TRUE, fsa_our_uname); - - if (now_dc_lrmd_state != NULL) { -- crm_debug("Local executor state updated from query"); - fsa_cib_update(XML_CIB_TAG_STATUS, now_dc_lrmd_state, - cib_scope_local | cib_quorum_override | cib_can_create, call_id, NULL); - free_xml(now_dc_lrmd_state); -+ crm_debug("Updating local node history for join-%d " -+ "from query result (via CIB call %d)", join_id, call_id); - } else { -- crm_warn("Local executor state updated from join acknowledgement because query failed"); - fsa_cib_update(XML_CIB_TAG_STATUS, join_ack->xml, - cib_scope_local | cib_quorum_override | cib_can_create, call_id, NULL); -+ crm_warn("Updating local node history from join-%d confirmation " -+ "because query failed (via CIB call %d)", join_id, call_id); - } - } else { -- crm_debug("Executor state for %s updated from join acknowledgement", -- join_from); - fsa_cib_update(XML_CIB_TAG_STATUS, join_ack->xml, - cib_scope_local | cib_quorum_override | cib_can_create, call_id, NULL); -+ crm_debug("Updating node history for %s from join-%d confirmation " -+ "(via CIB call %d)", join_from, join_id, call_id); - } -- - fsa_register_cib_callback(call_id, FALSE, NULL, join_update_complete_callback); -- crm_debug("join-%d: Registered callback for CIB status update %d", join_id, call_id); - } - - void -@@ -581,17 +621,16 @@ finalize_join_for(gpointer key, gpointer value, gpointer user_data) - const char *join_to = join_node->uname; - - if(join_node->join != crm_join_integrated) { -- crm_trace("Skipping %s in state %d", join_to, join_node->join); -+ crm_trace("Not updating non-integrated node %s (%s) for join-%d", -+ join_to, crm_join_phase_str(join_node->join), -+ current_join_id); - return; - } - -- /* make sure a node entry exists for the new node */ -- crm_trace("Creating node entry for %s", join_to); -- -+ crm_trace("Updating node state for %s", join_to); - tmp1 = create_xml_node(NULL, XML_CIB_TAG_NODE); - set_uuid(tmp1, XML_ATTR_UUID, join_node); - crm_xml_add(tmp1, XML_ATTR_UNAME, join_to); -- - fsa_cib_anon_update(XML_CIB_TAG_NODES, tmp1); - free_xml(tmp1); - -@@ -610,11 +649,10 @@ finalize_join_for(gpointer key, gpointer value, gpointer user_data) - return; - } - -- /* send the ack/nack to the node */ -- acknak = create_dc_message(CRM_OP_JOIN_ACKNAK, join_to); -- -- crm_debug("join-%d: ACK'ing join request from %s", -+ // Acknowledge node's join request -+ crm_debug("Acknowledging join-%d request from %s", - current_join_id, join_to); -+ acknak = create_dc_message(CRM_OP_JOIN_ACKNAK, 
join_to); - crm_xml_add(acknak, CRM_OP_JOIN_ACKNAK, XML_BOOLEAN_TRUE); - crm_update_peer_join(__FUNCTION__, join_node, crm_join_finalized); - crm_update_peer_expected(__FUNCTION__, join_node, CRMD_JOINSTATE_MEMBER); -@@ -629,11 +667,11 @@ check_join_state(enum crmd_fsa_state cur_state, const char *source) - { - static unsigned long long highest_seq = 0; - -- crm_debug("Invoked by %s in state: %s", source, fsa_state2string(cur_state)); -- - if (saved_ccm_membership_id != crm_peer_seq) { -- crm_debug("%s: Membership changed since join started: %llu -> %llu (%llu)", -- source, saved_ccm_membership_id, crm_peer_seq, highest_seq); -+ crm_debug("join-%d: Membership changed from %llu to %llu " -+ CRM_XS " highest=%llu state=%s for=%s", -+ current_join_id, saved_ccm_membership_id, crm_peer_seq, highest_seq, -+ fsa_state2string(cur_state), source); - if(highest_seq < crm_peer_seq) { - /* Don't spam the FSA with duplicates */ - highest_seq = crm_peer_seq; -@@ -642,34 +680,53 @@ check_join_state(enum crmd_fsa_state cur_state, const char *source) - - } else if (cur_state == S_INTEGRATION) { - if (crmd_join_phase_count(crm_join_welcomed) == 0) { -- crm_debug("join-%d: Integration of %d peers complete: %s", -- current_join_id, crmd_join_phase_count(crm_join_integrated), source); -+ int count = crmd_join_phase_count(crm_join_integrated); -+ -+ crm_debug("join-%d: Integration of %d peer%s complete " -+ CRM_XS " state=%s for=%s", -+ current_join_id, count, pcmk__plural_s(count), -+ fsa_state2string(cur_state), source); - register_fsa_input_before(C_FSA_INTERNAL, I_INTEGRATED, NULL); - return TRUE; - } - - } else if (cur_state == S_FINALIZE_JOIN) { - if (is_set(fsa_input_register, R_HAVE_CIB) == FALSE) { -- crm_debug("join-%d: Delaying I_FINALIZED until we have the CIB", current_join_id); -+ crm_debug("join-%d: Delaying finalization until we have CIB " -+ CRM_XS " state=%s for=%s", -+ current_join_id, fsa_state2string(cur_state), source); - return TRUE; - - } else if (crmd_join_phase_count(crm_join_welcomed) != 0) { -- crm_debug("join-%d: Still waiting on %d welcomed nodes", -- current_join_id, crmd_join_phase_count(crm_join_welcomed)); -+ int count = crmd_join_phase_count(crm_join_welcomed); -+ -+ crm_debug("join-%d: Still waiting on %d welcomed node%s " -+ CRM_XS " state=%s for=%s", -+ current_join_id, count, pcmk__plural_s(count), -+ fsa_state2string(cur_state), source); - crmd_join_phase_log(LOG_DEBUG); - - } else if (crmd_join_phase_count(crm_join_integrated) != 0) { -- crm_debug("join-%d: Still waiting on %d integrated nodes", -- current_join_id, crmd_join_phase_count(crm_join_integrated)); -+ int count = crmd_join_phase_count(crm_join_integrated); -+ -+ crm_debug("join-%d: Still waiting on %d integrated node%s " -+ CRM_XS " state=%s for=%s", -+ current_join_id, count, pcmk__plural_s(count), -+ fsa_state2string(cur_state), source); - crmd_join_phase_log(LOG_DEBUG); - - } else if (crmd_join_phase_count(crm_join_finalized) != 0) { -- crm_debug("join-%d: Still waiting on %d finalized nodes", -- current_join_id, crmd_join_phase_count(crm_join_finalized)); -+ int count = crmd_join_phase_count(crm_join_finalized); -+ -+ crm_debug("join-%d: Still waiting on %d finalized node%s " -+ CRM_XS " state=%s for=%s", -+ current_join_id, count, pcmk__plural_s(count), -+ fsa_state2string(cur_state), source); - crmd_join_phase_log(LOG_DEBUG); - - } else { -- crm_debug("join-%d complete: %s", current_join_id, source); -+ crm_debug("join-%d: Complete " CRM_XS " state=%s for=%s", -+ current_join_id, 
fsa_state2string(cur_state), source); - register_fsa_input_later(C_FSA_INTERNAL, I_FINALIZED, NULL); - return TRUE; - } --- -1.8.3.1 - - -From 034b27734d05e8aeddb586f2daaede8314f9516f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Dec 2019 10:39:34 -0600 -Subject: [PATCH 6/8] Log: controller: improve CIB status deletion messages - ---- - daemons/controld/controld_utils.c | 25 +++++++++++++++++-------- - 1 file changed, 17 insertions(+), 8 deletions(-) - -diff --git a/daemons/controld/controld_utils.c b/daemons/controld/controld_utils.c -index 3acd488..bb8ace9 100644 ---- a/daemons/controld/controld_utils.c -+++ b/daemons/controld/controld_utils.c -@@ -751,14 +751,18 @@ update_dc(xmlNode * msg) - return TRUE; - } - --#define STATUS_PATH_MAX 512 - static void - erase_xpath_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) - { - char *xpath = user_data; - -- do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE, -- "Deletion of \"%s\": %s (rc=%d)", xpath, pcmk_strerror(rc), rc); -+ if (rc == 0) { -+ crm_debug("Deletion of '%s' from CIB (via CIB call %d) succeeded", -+ xpath, call_id); -+ } else { -+ crm_warn("Deletion of '%s' from CIB (via CIB call %d) failed: %s " -+ CRM_XS " rc=%d", xpath, call_id, pcmk_strerror(rc), rc); -+ } - } - - #define XPATH_STATUS_TAG "//node_state[@uname='%s']/%s" -@@ -766,14 +770,19 @@ erase_xpath_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void - void - erase_status_tag(const char *uname, const char *tag, int options) - { -- if (fsa_cib_conn && uname) { -+ CRM_CHECK(uname != NULL, return); -+ -+ if (fsa_cib_conn == NULL) { -+ crm_warn("Unable to delete CIB '%s' section for node %s: " -+ "no CIB connection", tag, uname); -+ } else { - int call_id; - char *xpath = crm_strdup_printf(XPATH_STATUS_TAG, uname, tag); - -- crm_info("Deleting %s status entries for %s " CRM_XS " xpath=%s", -- tag, uname, xpath); -- call_id = fsa_cib_conn->cmds->remove(fsa_cib_conn, xpath, NULL, -- cib_quorum_override | cib_xpath | options); -+ options |= cib_quorum_override|cib_xpath; -+ call_id = fsa_cib_conn->cmds->remove(fsa_cib_conn, xpath, NULL, options); -+ crm_info("Deleting CIB '%s' section for node %s (via CIB call %d) " -+ CRM_XS " xpath=%s", tag, uname, call_id, xpath); - fsa_register_cib_callback(call_id, FALSE, xpath, erase_xpath_callback); - // CIB library handles freeing xpath - } --- -1.8.3.1 - - -From 73510818bc9905dcc130893198590b10c0067425 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Dec 2019 10:36:56 -0600 -Subject: [PATCH 7/8] Refactor: controller: move erase_status_tag() to - controld_based.c - ---- - daemons/controld/controld_based.c | 38 ++++++++++++++++++++++++++++++++++++++ - daemons/controld/controld_utils.c | 37 ------------------------------------- - 2 files changed, 38 insertions(+), 37 deletions(-) - -diff --git a/daemons/controld/controld_based.c b/daemons/controld/controld_based.c -index e6a4612..1db5650 100644 ---- a/daemons/controld/controld_based.c -+++ b/daemons/controld/controld_based.c -@@ -168,3 +168,41 @@ controld_action_is_recordable(const char *action) - } - return TRUE; - } -+ -+static void -+erase_xpath_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, -+ void *user_data) -+{ -+ char *xpath = user_data; -+ -+ if (rc == 0) { -+ crm_debug("Deletion of '%s' from CIB (via CIB call %d) succeeded", -+ xpath, call_id); -+ } else { -+ crm_warn("Deletion of '%s' from CIB (via CIB call %d) failed: %s " -+ CRM_XS " rc=%d", xpath, call_id, pcmk_strerror(rc), rc); -+ } -+} 
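/* Editor's aside -- a minimal, self-contained sketch of the ownership
 * contract used just above: the caller heap-allocates a string, hands it
 * to the callback registration as user_data, and the callback machinery
 * (not the caller) frees it once the reply has been delivered ("CIB
 * library handles freeing xpath"). All names below are illustrative
 * stand-ins, not Pacemaker's actual CIB API. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*reply_cb_t)(int call_id, int rc, void *user_data);

static struct { int call_id; reply_cb_t cb; void *user_data; } pending;

/* Register a callback; ownership of user_data transfers to the registry */
static void register_callback(int call_id, reply_cb_t cb, void *user_data)
{
    pending.call_id = call_id;
    pending.cb = cb;
    pending.user_data = user_data;
}

/* Deliver the reply, then free user_data on the caller's behalf */
static void dispatch_reply(int call_id, int rc)
{
    if (pending.call_id == call_id) {
        pending.cb(call_id, rc, pending.user_data);
        free(pending.user_data);    /* registry frees it; caller must not */
        pending.user_data = NULL;
    }
}

static void delete_done(int call_id, int rc, void *user_data)
{
    printf("Deletion of '%s' (call %d): %s\n", (char *) user_data, call_id,
           (rc == 0)? "succeeded" : "failed");
}

int main(void)
{
    char *xpath = strdup("//node_state[@uname='node1']/lrm");

    register_callback(42, delete_done, xpath);  /* xpath now owned above */
    dispatch_reply(42, 0);                      /* callback runs, xpath freed */
    return 0;
}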
-+ -+#define XPATH_STATUS_TAG "//node_state[@uname='%s']/%s" -+ -+void -+erase_status_tag(const char *uname, const char *tag, int options) -+{ -+ CRM_CHECK(uname != NULL, return); -+ -+ if (fsa_cib_conn == NULL) { -+ crm_warn("Unable to delete CIB '%s' section for node %s: " -+ "no CIB connection", tag, uname); -+ } else { -+ int call_id; -+ char *xpath = crm_strdup_printf(XPATH_STATUS_TAG, uname, tag); -+ -+ options |= cib_quorum_override|cib_xpath; -+ call_id = fsa_cib_conn->cmds->remove(fsa_cib_conn, xpath, NULL, options); -+ crm_info("Deleting CIB '%s' section for node %s (via CIB call %d) " -+ CRM_XS " xpath=%s", tag, uname, call_id, xpath); -+ fsa_register_cib_callback(call_id, FALSE, xpath, erase_xpath_callback); -+ // CIB library handles freeing xpath -+ } -+} -diff --git a/daemons/controld/controld_utils.c b/daemons/controld/controld_utils.c -index bb8ace9..4ed6aeb 100644 ---- a/daemons/controld/controld_utils.c -+++ b/daemons/controld/controld_utils.c -@@ -751,43 +751,6 @@ update_dc(xmlNode * msg) - return TRUE; - } - --static void --erase_xpath_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) --{ -- char *xpath = user_data; -- -- if (rc == 0) { -- crm_debug("Deletion of '%s' from CIB (via CIB call %d) succeeded", -- xpath, call_id); -- } else { -- crm_warn("Deletion of '%s' from CIB (via CIB call %d) failed: %s " -- CRM_XS " rc=%d", xpath, call_id, pcmk_strerror(rc), rc); -- } --} -- --#define XPATH_STATUS_TAG "//node_state[@uname='%s']/%s" -- --void --erase_status_tag(const char *uname, const char *tag, int options) --{ -- CRM_CHECK(uname != NULL, return); -- -- if (fsa_cib_conn == NULL) { -- crm_warn("Unable to delete CIB '%s' section for node %s: " -- "no CIB connection", tag, uname); -- } else { -- int call_id; -- char *xpath = crm_strdup_printf(XPATH_STATUS_TAG, uname, tag); -- -- options |= cib_quorum_override|cib_xpath; -- call_id = fsa_cib_conn->cmds->remove(fsa_cib_conn, xpath, NULL, options); -- crm_info("Deleting CIB '%s' section for node %s (via CIB call %d) " -- CRM_XS " xpath=%s", tag, uname, call_id, xpath); -- fsa_register_cib_callback(call_id, FALSE, xpath, erase_xpath_callback); -- // CIB library handles freeing xpath -- } --} -- - void crmd_peer_down(crm_node_t *peer, bool full) - { - if(full && peer->state == NULL) { --- -1.8.3.1 - - -From c4cc759e733db894957d039f65572cc21704224f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Dec 2019 11:16:25 -0600 -Subject: [PATCH 8/8] Refactor: controller: improve efficiency when deleting - node state - -Rename erase_status_xpath() to controld_delete_node_state() to follow current -naming practice. - -Instead of passing it a node_state subsection name, pass a new enum value -indicating what to erase (resource history, transient node attributes, or -both). This allows us to improve the log messages further, as well as improving -efficiency when both need to be cleared. 
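As a quick illustration of the shape of this change: the patch replaces a
free-form node_state tag argument with an enum selecting which subsection to
erase, and derives both the XPath and the log description from that enum in
one place. The sketch below is a compilable stand-alone reduction of that
mapping; the XPath templates are taken from the hunks that follow, but with
literal tag names inlined in place of Pacemaker's XML_* constants, and the
surrounding program scaffolding is purely illustrative.

#include <stdio.h>

/* XPath templates as added by the patch (each takes the node name once) */
#define XPATH_NODE_STATE "//node_state[@uname='%s']"
#define XPATH_NODE_LRM   XPATH_NODE_STATE "/lrm"
#define XPATH_NODE_ATTRS XPATH_NODE_STATE "/transient_attributes"
#define XPATH_NODE_ALL   XPATH_NODE_STATE "/*"

enum controld_section_e {
    controld_section_lrm,
    controld_section_attrs,
    controld_section_all,
};

/* One XPath delete per request, even when both subsections must go away --
 * this is the efficiency win the commit message refers to */
static void show_request(const char *uname, enum controld_section_e section)
{
    char xpath[256];

    switch (section) {
        case controld_section_lrm:
            snprintf(xpath, sizeof(xpath), XPATH_NODE_LRM, uname);
            printf("resource history for node %s: %s\n", uname, xpath);
            break;
        case controld_section_attrs:
            snprintf(xpath, sizeof(xpath), XPATH_NODE_ATTRS, uname);
            printf("transient attributes for node %s: %s\n", uname, xpath);
            break;
        case controld_section_all:
            snprintf(xpath, sizeof(xpath), XPATH_NODE_ALL, uname);
            printf("all state for node %s: %s\n", uname, xpath);
            break;
    }
}

int main(void)
{
    show_request("node1", controld_section_all);  /* one call clears both */
    return 0;
}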
---- - daemons/controld/controld_based.c | 69 +++++++++++++++++++++++++++-------- - daemons/controld/controld_callbacks.c | 8 +++- - daemons/controld/controld_execd.c | 3 +- - daemons/controld/controld_fencing.c | 5 +-- - daemons/controld/controld_join_dc.c | 3 +- - daemons/controld/controld_remote_ra.c | 24 ++++++------ - daemons/controld/controld_utils.h | 11 +++++- - 7 files changed, 87 insertions(+), 36 deletions(-) - -diff --git a/daemons/controld/controld_based.c b/daemons/controld/controld_based.c -index 1db5650..008a02d 100644 ---- a/daemons/controld/controld_based.c -+++ b/daemons/controld/controld_based.c -@@ -170,39 +170,76 @@ controld_action_is_recordable(const char *action) - } - - static void --erase_xpath_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, -- void *user_data) -+cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, -+ void *user_data) - { -- char *xpath = user_data; -+ char *desc = user_data; - - if (rc == 0) { -- crm_debug("Deletion of '%s' from CIB (via CIB call %d) succeeded", -- xpath, call_id); -+ crm_debug("Deletion of %s (via CIB call %d) succeeded", desc, call_id); - } else { -- crm_warn("Deletion of '%s' from CIB (via CIB call %d) failed: %s " -- CRM_XS " rc=%d", xpath, call_id, pcmk_strerror(rc), rc); -+ crm_warn("Deletion of %s (via CIB call %d) failed: %s " CRM_XS " rc=%d", -+ desc, call_id, pcmk_strerror(rc), rc); - } - } - --#define XPATH_STATUS_TAG "//node_state[@uname='%s']/%s" -+// Searches for various portions of node_state to delete - -+// Match a particular node's node_state (takes node name 1x) -+#define XPATH_NODE_STATE "//" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" -+ -+// Node's lrm section (name 1x) -+#define XPATH_NODE_LRM XPATH_NODE_STATE "/" XML_CIB_TAG_LRM -+ -+// Node's transient_attributes section (name 1x) -+#define XPATH_NODE_ATTRS XPATH_NODE_STATE "/" XML_TAG_TRANSIENT_NODEATTRS -+ -+// Everything under node_state (name 1x) -+#define XPATH_NODE_ALL XPATH_NODE_STATE "/*" -+ -+/*! 
-+ * \internal -+ * \brief Delete subsection of a node's CIB node_state -+ * -+ * \param[in] uname Desired node -+ * \param[in] section Subsection of node_state to delete -+ * \param[in] options CIB call options to use -+ */ - void --erase_status_tag(const char *uname, const char *tag, int options) -+controld_delete_node_state(const char *uname, enum controld_section_e section, -+ int options) - { -+ char *xpath = NULL; -+ char *desc = NULL; -+ - CRM_CHECK(uname != NULL, return); -+ switch (section) { -+ case controld_section_lrm: -+ xpath = crm_strdup_printf(XPATH_NODE_LRM, uname); -+ desc = crm_strdup_printf("resource history for node %s", uname); -+ break; -+ case controld_section_attrs: -+ xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname); -+ desc = crm_strdup_printf("transient attributes for node %s", uname); -+ break; -+ case controld_section_all: -+ xpath = crm_strdup_printf(XPATH_NODE_ALL, uname); -+ desc = crm_strdup_printf("all state for node %s", uname); -+ break; -+ } - - if (fsa_cib_conn == NULL) { -- crm_warn("Unable to delete CIB '%s' section for node %s: " -- "no CIB connection", tag, uname); -+ crm_warn("Unable to delete %s: no CIB connection", desc); -+ free(desc); - } else { - int call_id; -- char *xpath = crm_strdup_printf(XPATH_STATUS_TAG, uname, tag); - - options |= cib_quorum_override|cib_xpath; - call_id = fsa_cib_conn->cmds->remove(fsa_cib_conn, xpath, NULL, options); -- crm_info("Deleting CIB '%s' section for node %s (via CIB call %d) " -- CRM_XS " xpath=%s", tag, uname, call_id, xpath); -- fsa_register_cib_callback(call_id, FALSE, xpath, erase_xpath_callback); -- // CIB library handles freeing xpath -+ crm_info("Deleting %s (via CIB call %d) " CRM_XS " xpath=%s", -+ desc, call_id, xpath); -+ fsa_register_cib_callback(call_id, FALSE, desc, cib_delete_callback); -+ // CIB library handles freeing desc - } -+ free(xpath); - } -diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c -index 5cbd392..f7e3db2 100644 ---- a/daemons/controld/controld_callbacks.c -+++ b/daemons/controld/controld_callbacks.c -@@ -200,14 +200,18 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d - * transient attributes intact until it rejoins. 
- */ - if (compare_version(fsa_our_dc_version, "3.0.9") > 0) { -- erase_status_tag(node->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local); -+ controld_delete_node_state(node->uname, -+ controld_section_attrs, -+ cib_scope_local); - } - - } else if(AM_I_DC) { - if (appeared) { - te_trigger_stonith_history_sync(FALSE); - } else { -- erase_status_tag(node->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local); -+ controld_delete_node_state(node->uname, -+ controld_section_attrs, -+ cib_scope_local); - } - } - break; -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 46c1958..b7deeae 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -1411,7 +1411,8 @@ force_reprobe(lrm_state_t *lrm_state, const char *from_sys, - } - - /* Now delete the copy in the CIB */ -- erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local); -+ controld_delete_node_state(lrm_state->node_name, controld_section_lrm, -+ cib_scope_local); - - /* Finally, _delete_ the value in pacemaker-attrd -- setting it to FALSE - * would result in the scheduler sending us back here again -diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c -index d9b1e1e..9897cf3 100644 ---- a/daemons/controld/controld_fencing.c -+++ b/daemons/controld/controld_fencing.c -@@ -229,9 +229,8 @@ send_stonith_update(crm_action_t *action, const char *target, const char *uuid) - /* Make sure it sticks */ - /* fsa_cib_conn->cmds->bump_epoch(fsa_cib_conn, cib_quorum_override|cib_scope_local); */ - -- erase_status_tag(peer->uname, XML_CIB_TAG_LRM, cib_scope_local); -- erase_status_tag(peer->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local); -- -+ controld_delete_node_state(peer->uname, controld_section_all, -+ cib_scope_local); - free_xml(node_state); - return; - } -diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c -index 54324b2..ac6b430 100644 ---- a/daemons/controld/controld_join_dc.c -+++ b/daemons/controld/controld_join_dc.c -@@ -587,7 +587,8 @@ do_dc_join_ack(long long action, - /* Update CIB with node's current executor state. A new transition will be - * triggered later, when the CIB notifies us of the change. - */ -- erase_status_tag(join_from, XML_CIB_TAG_LRM, cib_scope_local); -+ controld_delete_node_state(join_from, controld_section_lrm, -+ cib_scope_local); - if (safe_str_eq(join_from, fsa_our_uname)) { - xmlNode *now_dc_lrmd_state = do_lrm_query(TRUE, fsa_our_uname); - -diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c -index 4fbae45..2d3dfa7 100644 ---- a/daemons/controld/controld_remote_ra.c -+++ b/daemons/controld/controld_remote_ra.c -@@ -181,13 +181,13 @@ remote_node_up(const char *node_name) - CRM_CHECK(node_name != NULL, return); - crm_info("Announcing pacemaker_remote node %s", node_name); - -- /* Clear node's operation history. The node's transient attributes should -- * and normally will be cleared when the node leaves, but since remote node -- * state has a number of corner cases, clear them here as well, to be sure. -+ /* Clear node's entire state (resource history and transient attributes). -+ * The transient attributes should and normally will be cleared when the -+ * node leaves, but since remote node state has a number of corner cases, -+ * clear them here as well, to be sure. 
- */ - call_opt = crmd_cib_smart_opt(); -- erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt); -- erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt); -+ controld_delete_node_state(node_name, controld_section_all, call_opt); - - /* Clear node's probed attribute */ - update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE); -@@ -252,15 +252,15 @@ remote_node_down(const char *node_name, const enum down_opts opts) - /* Purge node from attrd's memory */ - update_attrd_remote_node_removed(node_name, NULL); - -- /* Purge node's transient attributes */ -- erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt); -- -- /* Normally, the LRM operation history should be kept until the node comes -- * back up. However, after a successful fence, we want to clear it, so we -- * don't think resources are still running on the node. -+ /* Normally, only node attributes should be erased, and the resource history -+ * should be kept until the node comes back up. However, after a successful -+ * fence, we want to clear the history as well, so we don't think resources -+ * are still running on the node. - */ - if (opts == DOWN_ERASE_LRM) { -- erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt); -+ controld_delete_node_state(node_name, controld_section_all, call_opt); -+ } else { -+ controld_delete_node_state(node_name, controld_section_attrs, call_opt); - } - - /* Ensure node is in the remote peer cache with lost state */ -diff --git a/daemons/controld/controld_utils.h b/daemons/controld/controld_utils.h -index cf04f13..f902361 100644 ---- a/daemons/controld/controld_utils.h -+++ b/daemons/controld/controld_utils.h -@@ -70,7 +70,6 @@ xmlNode *create_node_state_update(crm_node_t *node, int flags, - xmlNode *parent, const char *source); - void populate_cib_nodes(enum node_update_flags flags, const char *source); - void crm_update_quorum(gboolean quorum, gboolean force_update); --void erase_status_tag(const char *uname, const char *tag, int options); - void controld_close_attrd_ipc(void); - void update_attrd(const char *host, const char *name, const char *value, const char *user_name, gboolean is_remote_node); - void update_attrd_remote_node_removed(const char *host, const char *user_name); -@@ -87,6 +86,16 @@ unsigned int cib_op_timeout(void); - bool feature_set_compatible(const char *dc_version, const char *join_version); - bool controld_action_is_recordable(const char *action); - -+// Subsections of node_state -+enum controld_section_e { -+ controld_section_lrm, -+ controld_section_attrs, -+ controld_section_all, -+}; -+ -+void controld_delete_node_state(const char *uname, -+ enum controld_section_e section, int options); -+ - const char *get_node_id(xmlNode *lrm_rsc_op); - - /* Convenience macro for registering a CIB callback --- -1.8.3.1 - diff --git a/SOURCES/003-return-codes.patch b/SOURCES/003-return-codes.patch deleted file mode 100644 index e4448af..0000000 --- a/SOURCES/003-return-codes.patch +++ /dev/null @@ -1,908 +0,0 @@ -From 55ebd895ba2c64713c3db2590ffe22c15b8563e3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Dec 2019 16:05:05 -0600 -Subject: [PATCH] Refactor: libcrmcommon: introduce new set of return codes - -Since we plan to introduce a high-level public API, it's a good time to -introduce some best practices. - -Most Pacemaker API functions currently return an integer return code, such that -its absolute value is either a system error number or a custom pcmk_err_* -number. 
This is less than ideal because system error numbers are constrained -only to the positive int range, so there's the possibility (though not noticed -in the wild) that system errors and custom errors could collide. - -The new method being introduced here still uses an integer return code, -but negative values are from a new enumeration, and positive values are -system error numbers. 0 still represents success. - -It is expected that the new method will be used with new functions, and -existing internal functions will be gradually refactored to use it as well. -Existing public API functions can be addressed at the next backward -compatibility break (2.1.0). ---- - include/crm/common/results.h | 59 ++++- - lib/common/results.c | 536 ++++++++++++++++++++++++++++++------------- - tools/crm_error.c | 100 +++++--- - 3 files changed, 493 insertions(+), 202 deletions(-) - -diff --git a/include/crm/common/results.h b/include/crm/common/results.h -index 7a32110..b29a016 100644 ---- a/include/crm/common/results.h -+++ b/include/crm/common/results.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2012-2019 the Pacemaker project contributors -+ * Copyright 2012-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -49,11 +49,21 @@ extern "C" { - /* - * Function return codes - * -+ * Most Pacemaker API functions return an integer return code. There are two -+ * alternative interpretations. The legacy interpration is that the absolute -+ * value of the return code is either a system error number or a custom -+ * pcmk_err_* number. This is less than ideal because system error numbers are -+ * constrained only to the positive int range, so there's the possibility -+ * (though not noticed in the wild) that system errors and custom errors could -+ * collide. The new intepretation is that negative values are from the pcmk_rc_e -+ * enum, and positive values are system error numbers. Both use 0 for success. -+ * - * For system error codes, see: - * - /usr/include/asm-generic/errno.h - * - /usr/include/asm-generic/errno-base.h - */ - -+// Legacy custom return codes for Pacemaker API functions (deprecated) - # define pcmk_ok 0 - # define PCMK_ERROR_OFFSET 190 /* Replacements on non-linux systems, see include/portability.h */ - # define PCMK_CUSTOM_OFFSET 200 /* Purely custom codes */ -@@ -75,6 +85,48 @@ extern "C" { - # define pcmk_err_bad_nvpair 216 - # define pcmk_err_unknown_format 217 - -+/*! -+ * \enum pcmk_rc_e -+ * \brief Return codes for Pacemaker API functions -+ * -+ * Any Pacemaker API function documented as returning a "standard Pacemaker -+ * return code" will return pcmk_rc_ok (0) on success, and one of this -+ * enumeration's other (negative) values or a (positive) system error number -+ * otherwise. The custom codes are at -1001 and lower, so that the caller may -+ * use -1 through -1000 for their own custom values if desired. While generally -+ * referred to as "errors", nonzero values simply indicate a result, which might -+ * or might not be an error depending on the calling context. -+ */ -+enum pcmk_rc_e { -+ /* When adding new values, use consecutively lower numbers, update the array -+ * in lib/common/results.c, and test with crm_error. 
-+ */ -+ pcmk_rc_no_quorum = -1017, -+ pcmk_rc_schema_validation = -1016, -+ pcmk_rc_schema_unchanged = -1015, -+ pcmk_rc_transform_failed = -1014, -+ pcmk_rc_old_data = -1013, -+ pcmk_rc_diff_failed = -1012, -+ pcmk_rc_diff_resync = -1011, -+ pcmk_rc_cib_modified = -1010, -+ pcmk_rc_cib_backup = -1009, -+ pcmk_rc_cib_save = -1008, -+ pcmk_rc_cib_corrupt = -1007, -+ pcmk_rc_multiple = -1006, -+ pcmk_rc_node_unknown = -1005, -+ pcmk_rc_already = -1004, -+ pcmk_rc_bad_nvpair = -1003, -+ pcmk_rc_unknown_format = -1002, -+ // Developers: Use a more specific code than pcmk_rc_error whenever possible -+ pcmk_rc_error = -1001, -+ -+ // Values -1 through -1000 reserved for caller use -+ -+ pcmk_rc_ok = 0 -+ -+ // Positive values reserved for system error numbers -+}; -+ - /* - * Exit status codes - * -@@ -150,6 +202,11 @@ typedef enum crm_exit_e { - CRM_EX_MAX = 255, // ensure crm_exit_t can hold this - } crm_exit_t; - -+const char *pcmk_rc_name(int rc); -+const char *pcmk_rc_str(int rc); -+crm_exit_t pcmk_rc2exitc(int rc); -+int pcmk_rc2legacy(int rc); -+int pcmk_legacy2rc(int legacy_rc); - const char *pcmk_strerror(int rc); - const char *pcmk_errorname(int rc); - const char *bz2_strerror(int rc); -diff --git a/lib/common/results.c b/lib/common/results.c -index b80191c..189648f 100644 ---- a/lib/common/results.c -+++ b/lib/common/results.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -22,148 +22,14 @@ - #include - #include - -+// @COMPAT Legacy function return codes -+ -+//! \deprecated Use standard return codes and pcmk_rc_name() instead - const char * - pcmk_errorname(int rc) - { -- int error = abs(rc); -- -- switch (error) { -- case E2BIG: return "E2BIG"; -- case EACCES: return "EACCES"; -- case EADDRINUSE: return "EADDRINUSE"; -- case EADDRNOTAVAIL: return "EADDRNOTAVAIL"; -- case EAFNOSUPPORT: return "EAFNOSUPPORT"; -- case EAGAIN: return "EAGAIN"; -- case EALREADY: return "EALREADY"; -- case EBADF: return "EBADF"; -- case EBADMSG: return "EBADMSG"; -- case EBUSY: return "EBUSY"; -- case ECANCELED: return "ECANCELED"; -- case ECHILD: return "ECHILD"; -- case ECOMM: return "ECOMM"; -- case ECONNABORTED: return "ECONNABORTED"; -- case ECONNREFUSED: return "ECONNREFUSED"; -- case ECONNRESET: return "ECONNRESET"; -- /* case EDEADLK: return "EDEADLK"; */ -- case EDESTADDRREQ: return "EDESTADDRREQ"; -- case EDOM: return "EDOM"; -- case EDQUOT: return "EDQUOT"; -- case EEXIST: return "EEXIST"; -- case EFAULT: return "EFAULT"; -- case EFBIG: return "EFBIG"; -- case EHOSTDOWN: return "EHOSTDOWN"; -- case EHOSTUNREACH: return "EHOSTUNREACH"; -- case EIDRM: return "EIDRM"; -- case EILSEQ: return "EILSEQ"; -- case EINPROGRESS: return "EINPROGRESS"; -- case EINTR: return "EINTR"; -- case EINVAL: return "EINVAL"; -- case EIO: return "EIO"; -- case EISCONN: return "EISCONN"; -- case EISDIR: return "EISDIR"; -- case ELIBACC: return "ELIBACC"; -- case ELOOP: return "ELOOP"; -- case EMFILE: return "EMFILE"; -- case EMLINK: return "EMLINK"; -- case EMSGSIZE: return "EMSGSIZE"; --#ifdef EMULTIHOP // Not available on OpenBSD -- case EMULTIHOP: return "EMULTIHOP"; --#endif -- case ENAMETOOLONG: return "ENAMETOOLONG"; -- case ENETDOWN: return "ENETDOWN"; -- case ENETRESET: return "ENETRESET"; -- case ENETUNREACH: return "ENETUNREACH"; -- case ENFILE: return "ENFILE"; -- case ENOBUFS: return "ENOBUFS"; -- case ENODATA: return "ENODATA"; 
-- case ENODEV: return "ENODEV"; -- case ENOENT: return "ENOENT"; -- case ENOEXEC: return "ENOEXEC"; -- case ENOKEY: return "ENOKEY"; -- case ENOLCK: return "ENOLCK"; --#ifdef ENOLINK // Not available on OpenBSD -- case ENOLINK: return "ENOLINK"; --#endif -- case ENOMEM: return "ENOMEM"; -- case ENOMSG: return "ENOMSG"; -- case ENOPROTOOPT: return "ENOPROTOOPT"; -- case ENOSPC: return "ENOSPC"; -- case ENOSR: return "ENOSR"; -- case ENOSTR: return "ENOSTR"; -- case ENOSYS: return "ENOSYS"; -- case ENOTBLK: return "ENOTBLK"; -- case ENOTCONN: return "ENOTCONN"; -- case ENOTDIR: return "ENOTDIR"; -- case ENOTEMPTY: return "ENOTEMPTY"; -- case ENOTSOCK: return "ENOTSOCK"; -- /* case ENOTSUP: return "ENOTSUP"; */ -- case ENOTTY: return "ENOTTY"; -- case ENOTUNIQ: return "ENOTUNIQ"; -- case ENXIO: return "ENXIO"; -- case EOPNOTSUPP: return "EOPNOTSUPP"; -- case EOVERFLOW: return "EOVERFLOW"; -- case EPERM: return "EPERM"; -- case EPFNOSUPPORT: return "EPFNOSUPPORT"; -- case EPIPE: return "EPIPE"; -- case EPROTO: return "EPROTO"; -- case EPROTONOSUPPORT: return "EPROTONOSUPPORT"; -- case EPROTOTYPE: return "EPROTOTYPE"; -- case ERANGE: return "ERANGE"; -- case EREMOTE: return "EREMOTE"; -- case EREMOTEIO: return "EREMOTEIO"; -- -- case EROFS: return "EROFS"; -- case ESHUTDOWN: return "ESHUTDOWN"; -- case ESPIPE: return "ESPIPE"; -- case ESOCKTNOSUPPORT: return "ESOCKTNOSUPPORT"; -- case ESRCH: return "ESRCH"; -- case ESTALE: return "ESTALE"; -- case ETIME: return "ETIME"; -- case ETIMEDOUT: return "ETIMEDOUT"; -- case ETXTBSY: return "ETXTBSY"; -- case EUNATCH: return "EUNATCH"; -- case EUSERS: return "EUSERS"; -- /* case EWOULDBLOCK: return "EWOULDBLOCK"; */ -- case EXDEV: return "EXDEV"; -- --#ifdef EBADE -- /* Not available on OSX */ -- case EBADE: return "EBADE"; -- case EBADFD: return "EBADFD"; -- case EBADSLT: return "EBADSLT"; -- case EDEADLOCK: return "EDEADLOCK"; -- case EBADR: return "EBADR"; -- case EBADRQC: return "EBADRQC"; -- case ECHRNG: return "ECHRNG"; --#ifdef EISNAM /* Not available on Illumos/Solaris */ -- case EISNAM: return "EISNAM"; -- case EKEYEXPIRED: return "EKEYEXPIRED"; -- case EKEYREJECTED: return "EKEYREJECTED"; -- case EKEYREVOKED: return "EKEYREVOKED"; --#endif -- case EL2HLT: return "EL2HLT"; -- case EL2NSYNC: return "EL2NSYNC"; -- case EL3HLT: return "EL3HLT"; -- case EL3RST: return "EL3RST"; -- case ELIBBAD: return "ELIBBAD"; -- case ELIBMAX: return "ELIBMAX"; -- case ELIBSCN: return "ELIBSCN"; -- case ELIBEXEC: return "ELIBEXEC"; --#ifdef ENOMEDIUM /* Not available on Illumos/Solaris */ -- case ENOMEDIUM: return "ENOMEDIUM"; -- case EMEDIUMTYPE: return "EMEDIUMTYPE"; --#endif -- case ENONET: return "ENONET"; -- case ENOPKG: return "ENOPKG"; -- case EREMCHG: return "EREMCHG"; -- case ERESTART: return "ERESTART"; -- case ESTRPIPE: return "ESTRPIPE"; --#ifdef EUCLEAN /* Not available on Illumos/Solaris */ -- case EUCLEAN: return "EUCLEAN"; --#endif -- case EXFULL: return "EXFULL"; --#endif -- -+ rc = abs(rc); -+ switch (rc) { - case pcmk_err_generic: return "pcmk_err_generic"; - case pcmk_err_no_quorum: return "pcmk_err_no_quorum"; - case pcmk_err_schema_validation: return "pcmk_err_schema_validation"; -@@ -180,24 +46,26 @@ pcmk_errorname(int rc) - case pcmk_err_already: return "pcmk_err_already"; - case pcmk_err_bad_nvpair: return "pcmk_err_bad_nvpair"; - case pcmk_err_unknown_format: return "pcmk_err_unknown_format"; -+ default: return pcmk_rc_name(rc); // system errno - } -- return "Unknown"; - } - -+//! 
\deprecated Use standard return codes and pcmk_rc_str() instead - const char * - pcmk_strerror(int rc) - { -- int error = abs(rc); -- -- if (error == 0) { -+ if (rc == 0) { - return "OK"; -+ } - -- // Of course error > 0 ... unless someone passed INT_MIN as rc -- } else if ((error > 0) && (error < PCMK_ERROR_OFFSET)) { -- return strerror(error); -+ rc = abs(rc); -+ -+ // Of course rc > 0 ... unless someone passed INT_MIN as rc -+ if ((rc > 0) && (rc < PCMK_ERROR_OFFSET)) { -+ return strerror(rc); - } - -- switch (error) { -+ switch (rc) { - case pcmk_err_generic: - return "Generic Pacemaker error"; - case pcmk_err_no_quorum: -@@ -253,11 +121,313 @@ pcmk_strerror(int rc) - case ENOKEY: - return "Required key not available"; - } -- - crm_err("Unknown error code: %d", rc); - return "Unknown error"; - } - -+// Standard Pacemaker API return codes -+ -+/* This array is used only for nonzero values of pcmk_rc_e. Its values must be -+ * kept in the exact reverse order of the enum value numbering (i.e. add new -+ * values to the end of the array). -+ */ -+static struct pcmk__rc_info { -+ const char *name; -+ const char *desc; -+ int legacy_rc; -+} pcmk__rcs[] = { -+ { "pcmk_rc_error", -+ "Error", -+ -pcmk_err_generic, -+ }, -+ { "pcmk_rc_unknown_format", -+ "Unknown output format", -+ -pcmk_err_unknown_format, -+ }, -+ { "pcmk_rc_bad_nvpair", -+ "Bad name/value pair given", -+ -pcmk_err_bad_nvpair, -+ }, -+ { "pcmk_rc_already", -+ "Already in requested state", -+ -pcmk_err_already, -+ }, -+ { "pcmk_rc_node_unknown", -+ "Node not found", -+ -pcmk_err_node_unknown, -+ }, -+ { "pcmk_rc_multiple", -+ "Resource active on multiple nodes", -+ -pcmk_err_multiple, -+ }, -+ { "pcmk_rc_cib_corrupt", -+ "Could not parse on-disk configuration", -+ -pcmk_err_cib_corrupt, -+ }, -+ { "pcmk_rc_cib_save", -+ "Could not save new configuration to disk", -+ -pcmk_err_cib_save, -+ }, -+ { "pcmk_rc_cib_backup", -+ "Could not archive previous configuration", -+ -pcmk_err_cib_backup, -+ }, -+ { "pcmk_rc_cib_modified", -+ "On-disk configuration was manually modified", -+ -pcmk_err_cib_modified, -+ }, -+ { "pcmk_rc_diff_resync", -+ "Application of update diff failed, requesting full refresh", -+ -pcmk_err_diff_resync, -+ }, -+ { "pcmk_rc_diff_failed", -+ "Application of update diff failed", -+ -pcmk_err_diff_failed, -+ }, -+ { "pcmk_rc_old_data", -+ "Update was older than existing configuration", -+ -pcmk_err_old_data, -+ }, -+ { "pcmk_rc_transform_failed", -+ "Schema transform failed", -+ -pcmk_err_transform_failed, -+ }, -+ { "pcmk_rc_schema_unchanged", -+ "Schema is already the latest available", -+ -pcmk_err_schema_unchanged, -+ }, -+ { "pcmk_rc_schema_validation", -+ "Update does not conform to the configured schema", -+ -pcmk_err_schema_validation, -+ }, -+ { "pcmk_rc_no_quorum", -+ "Operation requires quorum", -+ -pcmk_err_no_quorum, -+ }, -+}; -+ -+#define PCMK__N_RC (sizeof(pcmk__rcs) / sizeof(struct pcmk__rc_info)) -+ -+/*! 
-+ * \brief Get a return code constant name as a string -+ * -+ * \param[in] rc Integer return code to convert -+ * -+ * \return String of constant name corresponding to rc -+ */ -+const char * -+pcmk_rc_name(int rc) -+{ -+ if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) { -+ return pcmk__rcs[pcmk_rc_error - rc].name; -+ } -+ switch (rc) { -+ case pcmk_rc_ok: return "pcmk_rc_ok"; -+ case E2BIG: return "E2BIG"; -+ case EACCES: return "EACCES"; -+ case EADDRINUSE: return "EADDRINUSE"; -+ case EADDRNOTAVAIL: return "EADDRNOTAVAIL"; -+ case EAFNOSUPPORT: return "EAFNOSUPPORT"; -+ case EAGAIN: return "EAGAIN"; -+ case EALREADY: return "EALREADY"; -+ case EBADF: return "EBADF"; -+ case EBADMSG: return "EBADMSG"; -+ case EBUSY: return "EBUSY"; -+ case ECANCELED: return "ECANCELED"; -+ case ECHILD: return "ECHILD"; -+ case ECOMM: return "ECOMM"; -+ case ECONNABORTED: return "ECONNABORTED"; -+ case ECONNREFUSED: return "ECONNREFUSED"; -+ case ECONNRESET: return "ECONNRESET"; -+ /* case EDEADLK: return "EDEADLK"; */ -+ case EDESTADDRREQ: return "EDESTADDRREQ"; -+ case EDOM: return "EDOM"; -+ case EDQUOT: return "EDQUOT"; -+ case EEXIST: return "EEXIST"; -+ case EFAULT: return "EFAULT"; -+ case EFBIG: return "EFBIG"; -+ case EHOSTDOWN: return "EHOSTDOWN"; -+ case EHOSTUNREACH: return "EHOSTUNREACH"; -+ case EIDRM: return "EIDRM"; -+ case EILSEQ: return "EILSEQ"; -+ case EINPROGRESS: return "EINPROGRESS"; -+ case EINTR: return "EINTR"; -+ case EINVAL: return "EINVAL"; -+ case EIO: return "EIO"; -+ case EISCONN: return "EISCONN"; -+ case EISDIR: return "EISDIR"; -+ case ELIBACC: return "ELIBACC"; -+ case ELOOP: return "ELOOP"; -+ case EMFILE: return "EMFILE"; -+ case EMLINK: return "EMLINK"; -+ case EMSGSIZE: return "EMSGSIZE"; -+#ifdef EMULTIHOP // Not available on OpenBSD -+ case EMULTIHOP: return "EMULTIHOP"; -+#endif -+ case ENAMETOOLONG: return "ENAMETOOLONG"; -+ case ENETDOWN: return "ENETDOWN"; -+ case ENETRESET: return "ENETRESET"; -+ case ENETUNREACH: return "ENETUNREACH"; -+ case ENFILE: return "ENFILE"; -+ case ENOBUFS: return "ENOBUFS"; -+ case ENODATA: return "ENODATA"; -+ case ENODEV: return "ENODEV"; -+ case ENOENT: return "ENOENT"; -+ case ENOEXEC: return "ENOEXEC"; -+ case ENOKEY: return "ENOKEY"; -+ case ENOLCK: return "ENOLCK"; -+#ifdef ENOLINK // Not available on OpenBSD -+ case ENOLINK: return "ENOLINK"; -+#endif -+ case ENOMEM: return "ENOMEM"; -+ case ENOMSG: return "ENOMSG"; -+ case ENOPROTOOPT: return "ENOPROTOOPT"; -+ case ENOSPC: return "ENOSPC"; -+ case ENOSR: return "ENOSR"; -+ case ENOSTR: return "ENOSTR"; -+ case ENOSYS: return "ENOSYS"; -+ case ENOTBLK: return "ENOTBLK"; -+ case ENOTCONN: return "ENOTCONN"; -+ case ENOTDIR: return "ENOTDIR"; -+ case ENOTEMPTY: return "ENOTEMPTY"; -+ case ENOTSOCK: return "ENOTSOCK"; -+#if ENOTSUP != EOPNOTSUPP -+ case ENOTSUP: return "ENOTSUP"; -+#endif -+ case ENOTTY: return "ENOTTY"; -+ case ENOTUNIQ: return "ENOTUNIQ"; -+ case ENXIO: return "ENXIO"; -+ case EOPNOTSUPP: return "EOPNOTSUPP"; -+ case EOVERFLOW: return "EOVERFLOW"; -+ case EPERM: return "EPERM"; -+ case EPFNOSUPPORT: return "EPFNOSUPPORT"; -+ case EPIPE: return "EPIPE"; -+ case EPROTO: return "EPROTO"; -+ case EPROTONOSUPPORT: return "EPROTONOSUPPORT"; -+ case EPROTOTYPE: return "EPROTOTYPE"; -+ case ERANGE: return "ERANGE"; -+ case EREMOTE: return "EREMOTE"; -+ case EREMOTEIO: return "EREMOTEIO"; -+ case EROFS: return "EROFS"; -+ case ESHUTDOWN: return "ESHUTDOWN"; -+ case ESPIPE: return "ESPIPE"; -+ case ESOCKTNOSUPPORT: return "ESOCKTNOSUPPORT"; -+ 
case ESRCH: return "ESRCH"; -+ case ESTALE: return "ESTALE"; -+ case ETIME: return "ETIME"; -+ case ETIMEDOUT: return "ETIMEDOUT"; -+ case ETXTBSY: return "ETXTBSY"; -+ case EUNATCH: return "EUNATCH"; -+ case EUSERS: return "EUSERS"; -+ /* case EWOULDBLOCK: return "EWOULDBLOCK"; */ -+ case EXDEV: return "EXDEV"; -+ -+#ifdef EBADE // Not available on OS X -+ case EBADE: return "EBADE"; -+ case EBADFD: return "EBADFD"; -+ case EBADSLT: return "EBADSLT"; -+ case EDEADLOCK: return "EDEADLOCK"; -+ case EBADR: return "EBADR"; -+ case EBADRQC: return "EBADRQC"; -+ case ECHRNG: return "ECHRNG"; -+#ifdef EISNAM // Not available on OS X, Illumos, Solaris -+ case EISNAM: return "EISNAM"; -+ case EKEYEXPIRED: return "EKEYEXPIRED"; -+ case EKEYREJECTED: return "EKEYREJECTED"; -+ case EKEYREVOKED: return "EKEYREVOKED"; -+#endif -+ case EL2HLT: return "EL2HLT"; -+ case EL2NSYNC: return "EL2NSYNC"; -+ case EL3HLT: return "EL3HLT"; -+ case EL3RST: return "EL3RST"; -+ case ELIBBAD: return "ELIBBAD"; -+ case ELIBMAX: return "ELIBMAX"; -+ case ELIBSCN: return "ELIBSCN"; -+ case ELIBEXEC: return "ELIBEXEC"; -+#ifdef ENOMEDIUM // Not available on OS X, Illumos, Solaris -+ case ENOMEDIUM: return "ENOMEDIUM"; -+ case EMEDIUMTYPE: return "EMEDIUMTYPE"; -+#endif -+ case ENONET: return "ENONET"; -+ case ENOPKG: return "ENOPKG"; -+ case EREMCHG: return "EREMCHG"; -+ case ERESTART: return "ERESTART"; -+ case ESTRPIPE: return "ESTRPIPE"; -+#ifdef EUCLEAN // Not available on OS X, Illumos, Solaris -+ case EUCLEAN: return "EUCLEAN"; -+#endif -+ case EXFULL: return "EXFULL"; -+#endif // EBADE -+ default: return "Unknown"; -+ } -+} -+ -+/*! -+ * \brief Get a user-friendly description of a return code -+ * -+ * \param[in] rc Integer return code to convert -+ * -+ * \return String description of rc -+ */ -+const char * -+pcmk_rc_str(int rc) -+{ -+ if (rc == pcmk_rc_ok) { -+ return "OK"; -+ } -+ if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) { -+ return pcmk__rcs[pcmk_rc_error - rc].desc; -+ } -+ if (rc < 0) { -+ return "Unknown error"; -+ } -+ return strerror(rc); -+} -+ -+// This returns negative values for errors -+//! \deprecated Use standard return codes instead -+int -+pcmk_rc2legacy(int rc) -+{ -+ if (rc >= 0) { -+ return -rc; // OK or system errno -+ } -+ if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) { -+ return pcmk__rcs[pcmk_rc_error - rc].legacy_rc; -+ } -+ return -pcmk_err_generic; -+} -+ -+//! 
\deprecated Use standard return codes instead -+int -+pcmk_legacy2rc(int legacy_rc) -+{ -+ legacy_rc = abs(legacy_rc); -+ switch (legacy_rc) { -+ case pcmk_err_no_quorum: return pcmk_rc_no_quorum; -+ case pcmk_err_schema_validation: return pcmk_rc_schema_validation; -+ case pcmk_err_schema_unchanged: return pcmk_rc_schema_unchanged; -+ case pcmk_err_transform_failed: return pcmk_rc_transform_failed; -+ case pcmk_err_old_data: return pcmk_rc_old_data; -+ case pcmk_err_diff_failed: return pcmk_rc_diff_failed; -+ case pcmk_err_diff_resync: return pcmk_rc_diff_resync; -+ case pcmk_err_cib_modified: return pcmk_rc_cib_modified; -+ case pcmk_err_cib_backup: return pcmk_rc_cib_backup; -+ case pcmk_err_cib_save: return pcmk_rc_cib_save; -+ case pcmk_err_cib_corrupt: return pcmk_rc_cib_corrupt; -+ case pcmk_err_multiple: return pcmk_rc_multiple; -+ case pcmk_err_node_unknown: return pcmk_rc_node_unknown; -+ case pcmk_err_already: return pcmk_rc_already; -+ case pcmk_err_bad_nvpair: return pcmk_rc_bad_nvpair; -+ case pcmk_err_unknown_format: return pcmk_rc_unknown_format; -+ case pcmk_err_generic: return pcmk_rc_error; -+ case pcmk_ok: return pcmk_rc_ok; -+ default: return legacy_rc; // system errno -+ } -+} -+ -+// Exit status codes -+ - const char * - crm_exit_name(crm_exit_t exit_code) - { -@@ -347,26 +517,17 @@ crm_exit_str(crm_exit_t exit_code) - case CRM_EX_TIMEOUT: return "Timeout occurred"; - case CRM_EX_MAX: return "Error occurred"; - } -- if (exit_code > 128) { -+ if ((exit_code > 128) && (exit_code < CRM_EX_MAX)) { - return "Interrupted by signal"; - } - return "Unknown exit status"; - } - --/*! -- * \brief Map an errno to a similar exit status -- * -- * \param[in] errno Error number to map -- * -- * \return Exit status corresponding to errno -- */ -+//! \deprecated Use standard return codes and pcmk_rc2exitc() instead - crm_exit_t - crm_errno2exit(int rc) - { - rc = abs(rc); // Convenience for functions that return -errno -- if (rc == EOPNOTSUPP) { -- rc = ENOTSUP; // Values are same on Linux, can't use both in case -- } - switch (rc) { - case pcmk_ok: - return CRM_EX_OK; -@@ -384,6 +545,48 @@ crm_errno2exit(int rc) - case pcmk_err_bad_nvpair: - return CRM_EX_INVALID_PARAM; - -+ case pcmk_err_already: -+ return CRM_EX_EXISTS; -+ -+ case pcmk_err_multiple: -+ return CRM_EX_MULTIPLE; -+ -+ case pcmk_err_node_unknown: -+ case pcmk_err_unknown_format: -+ return CRM_EX_NOSUCH; -+ -+ default: -+ return pcmk_rc2exitc(rc); // system errno -+ } -+} -+ -+/*! 
-+ * \brief Map a function return code to the most similar exit code -+ * -+ * \param[in] rc Function return code -+ * -+ * \return Most similar exit code -+ */ -+crm_exit_t -+pcmk_rc2exitc(int rc) -+{ -+ switch (rc) { -+ case pcmk_rc_ok: -+ return CRM_EX_OK; -+ -+ case pcmk_rc_no_quorum: -+ return CRM_EX_QUORUM; -+ -+ case pcmk_rc_old_data: -+ return CRM_EX_OLD; -+ -+ case pcmk_rc_schema_validation: -+ case pcmk_rc_transform_failed: -+ return CRM_EX_CONFIG; -+ -+ case pcmk_rc_bad_nvpair: -+ return CRM_EX_INVALID_PARAM; -+ - case EACCES: - return CRM_EX_INSUFFICIENT_PRIV; - -@@ -414,22 +617,25 @@ crm_errno2exit(int rc) - return CRM_EX_DISCONNECT; - - case EEXIST: -- case pcmk_err_already: -+ case pcmk_rc_already: - return CRM_EX_EXISTS; - - case EIO: - return CRM_EX_IOERR; - - case ENOTSUP: -+#if EOPNOTSUPP != ENOTSUP -+ case EOPNOTSUPP: -+#endif - return CRM_EX_UNIMPLEMENT_FEATURE; - - case ENOTUNIQ: -- case pcmk_err_multiple: -+ case pcmk_rc_multiple: - return CRM_EX_MULTIPLE; - - case ENXIO: -- case pcmk_err_node_unknown: -- case pcmk_err_unknown_format: -+ case pcmk_rc_node_unknown: -+ case pcmk_rc_unknown_format: - return CRM_EX_NOSUCH; - - case ETIME: -@@ -441,6 +647,8 @@ crm_errno2exit(int rc) - } - } - -+// Other functions -+ - const char * - bz2_strerror(int rc) - { -diff --git a/tools/crm_error.c b/tools/crm_error.c -index f6dc73c..0dcae05 100644 ---- a/tools/crm_error.c -+++ b/tools/crm_error.c -@@ -1,21 +1,10 @@ --/* -- * Copyright 2012-2018 the Pacemaker project contributors -+/* -+ * Copyright 2012-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. -- * -- * This program is free software; you can redistribute it and/or -- * modify it under the terms of the GNU General Public -- * License as published by the Free Software Foundation; either -- * version 2 of the License, or (at your option) any later version. -- * -- * This software is distributed in the hope that it will be useful, -- * but WITHOUT ANY WARRANTY; without even the implied warranty of -- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- * General Public License for more details. -- * -- * You should have received a copy of the GNU General Public -- * License along with this library; if not, write to the Free Software -- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ * -+ * This source code is licensed under the GNU General Public License version 2 -+ * or later (GPLv2+) WITHOUT ANY WARRANTY. 
- */ - - #include -@@ -33,12 +22,31 @@ static struct crm_option long_options[] = { - "\n\t\t\tUseful for looking for sources of the error in source code"}, - - {"list", 0, 0, 'l', "\tShow all known errors."}, -- {"exit", 0, 0, 'X', "\tInterpret as exit code rather than function return value"}, -+ {"exit", 0, 0, 'X', "\tInterpret as exit code rather than legacy function return value"}, -+ {"rc", 0, 0, 'r', "\tInterpret as return code rather than legacy function return value"}, - - {0, 0, 0, 0} - }; - /* *INDENT-ON* */ - -+static bool as_exit_code = false; -+static bool as_rc = false; -+ -+static void -+get_strings(int rc, const char **name, const char **str) -+{ -+ if (as_exit_code) { -+ *str = crm_exit_str((crm_exit_t) rc); -+ *name = crm_exit_name(rc); -+ } else if (as_rc) { -+ *str = pcmk_rc_str(rc); -+ *name = pcmk_rc_name(rc); -+ } else { -+ *str = pcmk_strerror(rc); -+ *name = pcmk_errorname(rc); -+ } -+} -+ - int - main(int argc, char **argv) - { -@@ -49,10 +57,12 @@ main(int argc, char **argv) - - bool do_list = FALSE; - bool with_name = FALSE; -- bool as_exit_code = FALSE; -+ -+ const char *name = NULL; -+ const char *desc = NULL; - - crm_log_cli_init("crm_error"); -- crm_set_options(NULL, "[options] -- rc", long_options, -+ crm_set_options(NULL, "[options] -- [...]", long_options, - "Tool for displaying the textual name or description of a reported error code"); - - while (flag >= 0) { -@@ -73,6 +83,9 @@ main(int argc, char **argv) - case 'l': - do_list = TRUE; - break; -+ case 'r': -+ as_rc = true; -+ break; - case 'X': - as_exit_code = TRUE; - break; -@@ -83,30 +96,43 @@ main(int argc, char **argv) - } - - if(do_list) { -- for (rc = 0; rc < 256; rc++) { -- const char *name = as_exit_code? crm_exit_name(rc) : pcmk_errorname(rc); -- const char *desc = as_exit_code? crm_exit_str(rc) : pcmk_strerror(rc); -+ int start, end, width; -+ -+ // 256 is a hacky magic number that "should" be enough -+ if (as_rc) { -+ start = pcmk_rc_error - 256; -+ end = PCMK_CUSTOM_OFFSET; -+ width = 4; -+ } else { -+ start = 0; -+ end = 256; -+ width = 3; -+ } -+ -+ for (rc = start; rc < end; rc++) { -+ if (rc == (pcmk_rc_error + 1)) { -+ // Values in between are reserved for callers, no use iterating -+ rc = pcmk_rc_ok; -+ } -+ get_strings(rc, &name, &desc); - if (!name || !strcmp(name, "Unknown") || !strcmp(name, "CRM_EX_UNKNOWN")) { -- /* Unknown */ -+ // Undefined - } else if(with_name) { -- printf("%.3d: %-26s %s\n", rc, name, desc); -+ printf("% .*d: %-26s %s\n", width, rc, name, desc); - } else { -- printf("%.3d: %s\n", rc, desc); -+ printf("% .*d: %s\n", width, rc, desc); - } - } -- return CRM_EX_OK; -- } - -- for (lpc = optind; lpc < argc; lpc++) { -- const char *str, *name; -- -- rc = crm_atoi(argv[lpc], NULL); -- str = as_exit_code? crm_exit_str(rc) : pcmk_strerror(rc); -- if(with_name) { -- name = as_exit_code? 
crm_exit_name(rc) : pcmk_errorname(rc); -- printf("%s - %s\n", name, str); -- } else { -- printf("%s\n", str); -+ } else { -+ for (lpc = optind; lpc < argc; lpc++) { -+ rc = crm_atoi(argv[lpc], NULL); -+ get_strings(rc, &name, &desc); -+ if (with_name) { -+ printf("%s - %s\n", name, desc); -+ } else { -+ printf("%s\n", desc); -+ } - } - } - return CRM_EX_OK; --- -1.8.3.1 - diff --git a/SOURCES/003-trace.patch b/SOURCES/003-trace.patch new file mode 100644 index 0000000..e56e644 --- /dev/null +++ b/SOURCES/003-trace.patch @@ -0,0 +1,30 @@ +From 47c3e06b098c7e148c54675588d03b4d2bea40b5 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 22 Jun 2020 16:20:01 -0400 +Subject: [PATCH] Fix: libpacemaker: Don't allow a potential NULL in a format + string. + +This is only tripping up F32 s390x builds, but I don't suppose there's +any reason it couldn't come up elsewhere later. +--- + lib/pacemaker/pcmk_sched_constraints.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c +index 9c3a88d..d8c3e69 100644 +--- a/lib/pacemaker/pcmk_sched_constraints.c ++++ b/lib/pacemaker/pcmk_sched_constraints.c +@@ -1595,8 +1595,8 @@ custom_action_order(pe_resource_t * lh_rsc, char *lh_action_task, pe_action_t * + order = calloc(1, sizeof(pe__ordering_t)); + + crm_trace("Creating[%d] %s %s %s - %s %s %s", data_set->order_id, +- lh_rsc?lh_rsc->id:"NA", lh_action_task, lh_action?lh_action->uuid:"NA", +- rh_rsc?rh_rsc->id:"NA", rh_action_task, rh_action?rh_action->uuid:"NA"); ++ lh_rsc?lh_rsc->id:"NA", lh_action_task?lh_action_task:"NA", lh_action?lh_action->uuid:"NA", ++ rh_rsc?rh_rsc->id:"NA", rh_action_task?rh_action_task:"NA", rh_action?rh_action->uuid:"NA"); + + /* CRM_ASSERT(data_set->order_id != 291); */ + +-- +1.8.3.1 + diff --git a/SOURCES/004-test.patch b/SOURCES/004-test.patch new file mode 100644 index 0000000..e17850b --- /dev/null +++ b/SOURCES/004-test.patch @@ -0,0 +1,27 @@ +From 7ed7675615ada7d0be5654e0dcb26de60cf5b5e9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 22 Jun 2020 20:03:56 -0500 +Subject: [PATCH] Test: scheduler: explicitly disable concurrent-fencing in + on_fail_demote4 + +... so the expected output is the same regardless of what default the build was +compiled with +--- + cts/scheduler/on_fail_demote4.xml | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/cts/scheduler/on_fail_demote4.xml b/cts/scheduler/on_fail_demote4.xml +index eb4c4cc..1082266 100644 +--- a/cts/scheduler/on_fail_demote4.xml ++++ b/cts/scheduler/on_fail_demote4.xml +@@ -8,6 +8,7 @@ + + + ++ + + + +-- +1.8.3.1 + diff --git a/SOURCES/004-unused.patch b/SOURCES/004-unused.patch deleted file mode 100644 index e732b42..0000000 --- a/SOURCES/004-unused.patch +++ /dev/null @@ -1,159 +0,0 @@ -From 6df10102c02f93890c1994136b3ce6a60b33a05e Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 6 Jan 2020 11:01:38 -0600 -Subject: [PATCH] Refactor: controller: remove unused function arguments - -... 
and rename affected functions ---- - daemons/controld/controld_execd.c | 2 +- - daemons/controld/controld_fsa.c | 1 - - daemons/controld/controld_fsa.h | 4 ++-- - daemons/controld/controld_join_client.c | 4 ++-- - daemons/controld/controld_join_dc.c | 32 ++++++++++++++------------------ - 5 files changed, 19 insertions(+), 24 deletions(-) - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index f068413..16751b9 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -839,7 +839,7 @@ do_lrm_query_internal(lrm_state_t *lrm_state, int update_flags) - } - - xmlNode * --do_lrm_query(gboolean is_replace, const char *node_name) -+controld_query_executor_state(const char *node_name) - { - lrm_state_t *lrm_state = lrm_state_find(node_name); - -diff --git a/daemons/controld/controld_fsa.c b/daemons/controld/controld_fsa.c -index bd732bc..db2b3f3 100644 ---- a/daemons/controld/controld_fsa.c -+++ b/daemons/controld/controld_fsa.c -@@ -41,7 +41,6 @@ enum crmd_fsa_state fsa_state = S_STARTING; - - extern uint highest_born_on; - extern uint num_join_invites; --extern void initialize_join(gboolean before); - - #define DOT_PREFIX "actions:trace: " - #define do_dot_log(fmt, args...) crm_trace( fmt, ##args) -diff --git a/daemons/controld/controld_fsa.h b/daemons/controld/controld_fsa.h -index 06794cb..8aaaadf 100644 ---- a/daemons/controld/controld_fsa.h -+++ b/daemons/controld/controld_fsa.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -484,7 +484,7 @@ extern gboolean ever_had_quorum; - // These should be moved elsewhere - void do_update_cib_nodes(gboolean overwrite, const char *caller); - int crmd_cib_smart_opt(void); --xmlNode *do_lrm_query(gboolean, const char *node_name); -+xmlNode *controld_query_executor_state(const char *node_name); - - const char *fsa_input2string(enum crmd_fsa_input input); - const char *fsa_state2string(enum crmd_fsa_state state); -diff --git a/daemons/controld/controld_join_client.c b/daemons/controld/controld_join_client.c -index 4ac0d2a..383ee29 100644 ---- a/daemons/controld/controld_join_client.c -+++ b/daemons/controld/controld_join_client.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -264,7 +264,7 @@ do_cl_join_finalize_respond(long long action, - update_dc_expected(input->msg); - - /* send our status section to the DC */ -- tmp1 = do_lrm_query(TRUE, fsa_our_uname); -+ tmp1 = controld_query_executor_state(fsa_our_uname); - if (tmp1 != NULL) { - xmlNode *reply = create_request(CRM_OP_JOIN_CONFIRM, tmp1, fsa_our_dc, - CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL); -diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c -index ac6b430..885b2a9 100644 ---- a/daemons/controld/controld_join_dc.c -+++ b/daemons/controld/controld_join_dc.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -21,7 +21,6 @@ char *max_epoch = NULL; - char *max_generation_from = NULL; - xmlNode *max_generation_xml = NULL; - --void initialize_join(gboolean before); - void finalize_join_for(gpointer key, gpointer value, gpointer user_data); - void finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data); - gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source); -@@ -68,8 +67,8 @@ crm_update_peer_join(const char *source, crm_node_t * node, enum crm_join_phase - } - } - --void --initialize_join(gboolean before) -+static void -+start_join_round() - { - GHashTableIter iter; - crm_node_t *peer = NULL; -@@ -80,19 +79,16 @@ initialize_join(gboolean before) - while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) { - crm_update_peer_join(__FUNCTION__, peer, crm_join_none); - } -- -- if (before) { -- if (max_generation_from != NULL) { -- free(max_generation_from); -- max_generation_from = NULL; -- } -- if (max_generation_xml != NULL) { -- free_xml(max_generation_xml); -- max_generation_xml = NULL; -- } -- clear_bit(fsa_input_register, R_HAVE_CIB); -- clear_bit(fsa_input_register, R_CIB_ASKED); -+ if (max_generation_from != NULL) { -+ free(max_generation_from); -+ max_generation_from = NULL; -+ } -+ if (max_generation_xml != NULL) { -+ free_xml(max_generation_xml); -+ max_generation_xml = NULL; - } -+ clear_bit(fsa_input_register, R_HAVE_CIB); -+ clear_bit(fsa_input_register, R_CIB_ASKED); - } - - /*! -@@ -192,7 +188,7 @@ do_dc_join_offer_all(long long action, - * will be seen as offline by the scheduler anyway. - */ - current_join_id++; -- initialize_join(TRUE); -+ start_join_round(); - /* do_update_cib_nodes(TRUE, __FUNCTION__); */ - - update_dc(NULL); -@@ -590,7 +586,7 @@ do_dc_join_ack(long long action, - controld_delete_node_state(join_from, controld_section_lrm, - cib_scope_local); - if (safe_str_eq(join_from, fsa_our_uname)) { -- xmlNode *now_dc_lrmd_state = do_lrm_query(TRUE, fsa_our_uname); -+ xmlNode *now_dc_lrmd_state = controld_query_executor_state(fsa_our_uname); - - if (now_dc_lrmd_state != NULL) { - fsa_cib_update(XML_CIB_TAG_STATUS, now_dc_lrmd_state, --- -1.8.3.1 - diff --git a/SOURCES/005-shutdown-lock.patch b/SOURCES/005-shutdown-lock.patch deleted file mode 100644 index 9a4fe46..0000000 --- a/SOURCES/005-shutdown-lock.patch +++ /dev/null @@ -1,207 +0,0 @@ -From 4bdda97ff76d0e682f4f58bc632cd2cbd417c423 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 14 Jan 2020 12:52:21 -0600 -Subject: [PATCH 01/18] Log: controller: improve messages when deleting CIB - resource history - -This also moves delete_rsc_status() to controld_based.c and renames it. 
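For orientation, the hunk below distinguishes three reporting paths depending
on the CIB call options: a synchronous dry-run ("would succeed"), a
synchronous real call ("succeeded"), and an asynchronous call that defers to
a registered callback. A minimal stand-alone sketch of that decision tree
follows; the flag bit values and the is_set() macro are illustrative
assumptions, not Pacemaker's actual definitions.

#include <stdio.h>

/* Illustrative flag bits standing in for Pacemaker's CIB call options */
enum {
    cib_sync_call = (1 << 0),
    cib_dryrun    = (1 << 1),
};

#define is_set(flags, bit) (((flags) & (bit)) == (bit))

/* Mirror of the result-reporting branches in the hunk below: synchronous
 * calls report success (or would-succeed, under dry-run) immediately,
 * asynchronous ones defer to a registered callback */
static void report(const char *desc, int call_options, int call_id)
{
    if (is_set(call_options, cib_sync_call)) {
        if (is_set(call_options, cib_dryrun)) {
            printf("Deletion of %s would succeed\n", desc);
        } else {
            printf("Deletion of %s succeeded\n", desc);
        }
    } else {
        printf("Clearing %s (via CIB call %d)\n", desc, call_id);
        /* the real code registers cib_delete_callback() here */
    }
}

int main(void)
{
    report("resource history for rsc1 on node1", cib_sync_call|cib_dryrun, 0);
    report("resource history for rsc1 on node1", 0, 7);
    return 0;
}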
---- - daemons/controld/controld_based.c | 71 +++++++++++++++++++++++++++++++++++++++ - daemons/controld/controld_execd.c | 47 +++++--------------------- - daemons/controld/controld_utils.h | 4 ++- - 3 files changed, 83 insertions(+), 39 deletions(-) - -diff --git a/daemons/controld/controld_based.c b/daemons/controld/controld_based.c -index 42e321f..f3a7c4f 100644 ---- a/daemons/controld/controld_based.c -+++ b/daemons/controld/controld_based.c -@@ -243,3 +243,74 @@ controld_delete_node_state(const char *uname, enum controld_section_e section, - } - free(xpath); - } -+ -+// Takes node name and resource ID -+#define XPATH_RESOURCE_HISTORY "//" XML_CIB_TAG_STATE \ -+ "[@" XML_ATTR_UNAME "='%s'] /" \ -+ XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \ -+ "/" XML_LRM_TAG_RESOURCE \ -+ "[@" XML_ATTR_ID "='%s']" -+// @TODO could add "and @XML_CONFIG_ATTR_SHUTDOWN_LOCK" to limit to locks -+ -+/*! -+ * \internal -+ * \brief Clear resource history from CIB for a given resource and node -+ * -+ * \param[in] rsc_id ID of resource to be cleared -+ * \param[in] node Node whose resource history should be cleared -+ * \param[in] user_name ACL user name to use -+ * \param[in] call_options CIB call options -+ * -+ * \return Standard Pacemaker return code -+ */ -+int -+controld_delete_resource_history(const char *rsc_id, const char *node, -+ const char *user_name, int call_options) -+{ -+ char *desc = NULL; -+ char *xpath = NULL; -+ int rc = pcmk_rc_ok; -+ -+ CRM_CHECK((rsc_id != NULL) && (node != NULL), return EINVAL); -+ -+ desc = crm_strdup_printf("resource history for %s on %s", rsc_id, node); -+ if (fsa_cib_conn == NULL) { -+ crm_err("Unable to clear %s: no CIB connection", desc); -+ free(desc); -+ return ENOTCONN; -+ } -+ -+ // Ask CIB to delete the entry -+ xpath = crm_strdup_printf(XPATH_RESOURCE_HISTORY, node, rsc_id); -+ rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, xpath, NULL, -+ NULL, call_options|cib_xpath, user_name); -+ -+ if (rc < 0) { -+ rc = pcmk_legacy2rc(rc); -+ crm_err("Could not delete resource status of %s on %s%s%s: %s " -+ CRM_XS " rc=%d", rsc_id, node, -+ (user_name? " for user " : ""), (user_name? 
user_name : ""), -+ pcmk_rc_str(rc), rc); -+ free(desc); -+ free(xpath); -+ return rc; -+ } -+ -+ if (is_set(call_options, cib_sync_call)) { -+ if (is_set(call_options, cib_dryrun)) { -+ crm_debug("Deletion of %s would succeed", desc); -+ } else { -+ crm_debug("Deletion of %s succeeded", desc); -+ } -+ free(desc); -+ -+ } else { -+ crm_info("Clearing %s (via CIB call %d) " CRM_XS " xpath=%s", -+ desc, rc, xpath); -+ fsa_register_cib_callback(rc, FALSE, desc, cib_delete_callback); -+ // CIB library handles freeing desc -+ } -+ -+ free(xpath); -+ return pcmk_rc_ok; -+} -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 16751b9..212739e 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -36,8 +36,6 @@ struct delete_event_s { - static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); - static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); - static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); --static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, -- const char *user_name); - - static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, - const char *rsc_id, const char *operation); -@@ -169,7 +167,8 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_ - - if (op->rsc_deleted) { - crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); -- delete_rsc_status(lrm_state, op->rsc_id, cib_quorum_override, NULL); -+ controld_delete_resource_history(op->rsc_id, lrm_state->node_name, -+ NULL, crmd_cib_smart_opt()); - return; - } - -@@ -917,31 +916,6 @@ lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) - return FALSE; - } - --/* -- * Remove the rsc from the CIB -- * -- * Avoids refreshing the entire LRM section of this host -- */ --#define RSC_TEMPLATE "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']" -- --static int --delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, -- const char *user_name) --{ -- char *rsc_xpath = NULL; -- int rc = pcmk_ok; -- -- CRM_CHECK(rsc_id != NULL, return -ENXIO); -- -- rsc_xpath = crm_strdup_printf(RSC_TEMPLATE, lrm_state->node_name, rsc_id); -- -- rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, rsc_xpath, -- NULL, NULL, call_options | cib_xpath, user_name); -- -- free(rsc_xpath); -- return rc; --} -- - static void - delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, - GHashTableIter * rsc_gIter, int rc, const char *user_name) -@@ -958,7 +932,8 @@ delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rs - else - g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); - crm_debug("sync: Sending delete op for %s", rsc_id_copy); -- delete_rsc_status(lrm_state, rsc_id_copy, cib_quorum_override, user_name); -+ controld_delete_resource_history(rsc_id_copy, lrm_state->node_name, -+ user_name, crmd_cib_smart_opt()); - - g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy); - free(rsc_id_copy); -@@ -1694,21 +1669,17 @@ do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, - gboolean unregister = TRUE; - - #if ENABLE_ACL -- int cib_rc = delete_rsc_status(lrm_state, rsc->id, -- cib_dryrun|cib_sync_call, user_name); -+ int cib_rc = controld_delete_resource_history(rsc->id, lrm_state->node_name, -+ user_name, -+ 
cib_dryrun|cib_sync_call); - -- if (cib_rc != pcmk_ok) { -+ if (cib_rc != pcmk_rc_ok) { - lrmd_event_data_t *op = NULL; - -- crm_err("Could not delete resource status of %s for %s (user %s) on %s: %s" -- CRM_XS " rc=%d", -- rsc->id, from_sys, (user_name? user_name : "unknown"), -- from_host, pcmk_strerror(cib_rc), cib_rc); -- - op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE); - op->op_status = PCMK_LRM_OP_ERROR; - -- if (cib_rc == -EACCES) { -+ if (cib_rc == EACCES) { - op->rc = PCMK_OCF_INSUFFICIENT_PRIV; - } else { - op->rc = PCMK_OCF_UNKNOWN_ERROR; -diff --git a/daemons/controld/controld_utils.h b/daemons/controld/controld_utils.h -index f902361..ca8cddb 100644 ---- a/daemons/controld/controld_utils.h -+++ b/daemons/controld/controld_utils.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -95,6 +95,8 @@ enum controld_section_e { - - void controld_delete_node_state(const char *uname, - enum controld_section_e section, int options); -+int controld_delete_resource_history(const char *rsc_id, const char *node, -+ const char *user_name, int call_options); - - const char *get_node_id(xmlNode *lrm_rsc_op); - --- -1.8.3.1 - diff --git a/SOURCES/005-sysconfig.patch b/SOURCES/005-sysconfig.patch new file mode 100644 index 0000000..4e49cab --- /dev/null +++ b/SOURCES/005-sysconfig.patch @@ -0,0 +1,32 @@ +From 85040eb19b9405464b01a7e67eb6769d2a03c611 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 19 Jun 2020 17:49:22 -0500 +Subject: [PATCH] Doc: sysconfig: remove outdated reference to wildcards in + PCMK_trace_files + +Wildcards stopped working when the log filtering implementation changed in +1.1.8 to support PCMK_trace_tags. It's not worth the effort to fix at this +point, so just update the comment in the sysconfig file. +--- + daemons/pacemakerd/pacemaker.sysconfig | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/daemons/pacemakerd/pacemaker.sysconfig b/daemons/pacemakerd/pacemaker.sysconfig +index c7745d8..e4a5c4d 100644 +--- a/daemons/pacemakerd/pacemaker.sysconfig ++++ b/daemons/pacemakerd/pacemaker.sysconfig +@@ -34,9 +34,8 @@ + # Log all messages from a comma-separated list of functions. + # PCMK_trace_functions=function1,function2,function3 + +-# Log all messages from a comma-separated list of files (no path). +-# Wildcards are supported, e.g. PCMK_trace_files=prefix*.c +-# PCMK_trace_files=file.c,other.h ++# Log all messages from a comma-separated list of file names (without path). ++# PCMK_trace_files=file1.c,file2.c + + # Log all messages matching comma-separated list of formats. + # PCMK_trace_formats="Sent delete %d" +-- +1.8.3.1 + diff --git a/SOURCES/006-shutdown-lock.patch b/SOURCES/006-shutdown-lock.patch deleted file mode 100644 index 357a2e8..0000000 --- a/SOURCES/006-shutdown-lock.patch +++ /dev/null @@ -1,252 +0,0 @@ -From 3d8a7dc405e98cd8fe637d3e283bc0468d50bc71 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 15 Jan 2020 17:56:44 -0600 -Subject: [PATCH 02/18] Refactor: controller: functionize parts of resource - deletion notification - -... 
for future reuse ---- - daemons/controld/controld_execd.c | 116 +++++++++++++++++++++++++------------- - daemons/controld/controld_lrm.h | 11 +++- - 2 files changed, 88 insertions(+), 39 deletions(-) - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 212739e..82f2bf1 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -42,9 +42,6 @@ static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op - static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, - const char *operation, xmlNode *msg); - --void send_direct_ack(const char *to_host, const char *to_sys, -- lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id); -- - static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, - int log_level); - static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op); -@@ -278,7 +275,7 @@ send_task_ok_ack(lrm_state_t *lrm_state, ha_msg_input_t *input, - - op->rc = PCMK_OCF_OK; - op->op_status = PCMK_LRM_OP_DONE; -- send_direct_ack(ack_host, ack_sys, rsc, op, rsc_id); -+ controld_ack_event_directly(ack_host, ack_sys, rsc, op, rsc_id); - lrmd_free_event(op); - } - -@@ -850,6 +847,57 @@ controld_query_executor_state(const char *node_name) - node_update_cluster|node_update_peer); - } - -+/*! -+ * \internal -+ * \brief Map standard Pacemaker return code to operation status and OCF code -+ * -+ * \param[out] event Executor event whose status and return code should be set -+ * \param[in] rc Standard Pacemaker return code -+ */ -+void -+controld_rc2event(lrmd_event_data_t *event, int rc) -+{ -+ switch (rc) { -+ case pcmk_rc_ok: -+ event->rc = PCMK_OCF_OK; -+ event->op_status = PCMK_LRM_OP_DONE; -+ break; -+ case EACCES: -+ event->rc = PCMK_OCF_INSUFFICIENT_PRIV; -+ event->op_status = PCMK_LRM_OP_ERROR; -+ break; -+ default: -+ event->rc = PCMK_OCF_UNKNOWN_ERROR; -+ event->op_status = PCMK_LRM_OP_ERROR; -+ break; -+ } -+} -+ -+/*! -+ * \internal -+ * \brief Trigger a new transition after CIB status was deleted -+ * -+ * If a CIB status delete was not expected (as part of the transition graph), -+ * trigger a new transition by updating the (arbitrary) "last-lrm-refresh" -+ * cluster property. -+ * -+ * \param[in] from_sys IPC name that requested the delete -+ * \param[in] rsc_id Resource whose status was deleted (for logging only) -+ */ -+void -+controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id) -+{ -+ if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { -+ char *now_s = crm_strdup_printf("%lld", (long long) time(NULL)); -+ -+ crm_debug("Triggering a refresh after %s cleaned %s", from_sys, rsc_id); -+ update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, -+ NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, -+ FALSE, NULL, NULL); -+ free(now_s); -+ } -+} -+ - static void - notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) - { -@@ -860,33 +908,11 @@ notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_ - crm_info("Notifying %s on %s that %s was%s deleted", - from_sys, (from_host? from_host : "localhost"), rsc_id, - ((rc == pcmk_ok)? 
"" : " not")); -- - op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE); -- -- if (rc == pcmk_ok) { -- op->op_status = PCMK_LRM_OP_DONE; -- op->rc = PCMK_OCF_OK; -- } else { -- op->op_status = PCMK_LRM_OP_ERROR; -- op->rc = PCMK_OCF_UNKNOWN_ERROR; -- } -- -- send_direct_ack(from_host, from_sys, NULL, op, rsc_id); -+ controld_rc2event(op, pcmk_legacy2rc(rc)); -+ controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id); - lrmd_free_event(op); -- -- if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { -- /* this isn't expected - trigger a new transition */ -- time_t now = time(NULL); -- char *now_s = crm_itoa(now); -- -- crm_debug("Triggering a refresh after %s deleted %s from the executor", -- from_sys, rsc_id); -- -- update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, -- "last-lrm-refresh", now_s, FALSE, NULL, NULL); -- -- free(now_s); -- } -+ controld_trigger_delete_refresh(from_sys, rsc_id); - } - - static gboolean -@@ -1495,7 +1521,7 @@ fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, - #if ENABLE_ACL - if (user_name && is_privileged(user_name) == FALSE) { - crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); -- send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); -+ controld_ack_event_directly(from_host, from_sys, NULL, op, ID(xml_rsc)); - lrmd_free_event(op); - return; - } -@@ -1514,7 +1540,7 @@ fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, - crm_log_xml_warn(xml, "bad input"); - } - -- send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); -+ controld_ack_event_directly(from_host, from_sys, NULL, op, ID(xml_rsc)); - lrmd_free_event(op); - } - -@@ -1684,7 +1710,7 @@ do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, - } else { - op->rc = PCMK_OCF_UNKNOWN_ERROR; - } -- send_direct_ack(from_host, from_sys, NULL, op, rsc->id); -+ controld_ack_event_directly(from_host, from_sys, NULL, op, rsc->id); - lrmd_free_event(op); - return; - } -@@ -2000,9 +2026,23 @@ construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, cons - return op; - } - -+/*! -+ * \internal -+ * \brief Send a (synthesized) event result -+ * -+ * Reply with a synthesized event result directly, as opposed to going through -+ * the executor. 
-+ * -+ * \param[in] to_host Host to send result to -+ * \param[in] to_sys IPC name to send result to (NULL for transition engine) -+ * \param[in] rsc Type information about resource the result is for -+ * \param[in] op Event with result to send -+ * \param[in] rsc_id ID of resource the result is for -+ */ - void --send_direct_ack(const char *to_host, const char *to_sys, -- lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id) -+controld_ack_event_directly(const char *to_host, const char *to_sys, -+ lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, -+ const char *rsc_id) - { - xmlNode *reply = NULL; - xmlNode *update, *iter; -@@ -2221,7 +2261,7 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, - - op->rc = PCMK_OCF_UNKNOWN_ERROR; - op->op_status = PCMK_LRM_OP_INVALID; -- send_direct_ack(NULL, NULL, rsc, op, rsc->id); -+ controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id); - lrmd_free_event(op); - free(op_id); - return; -@@ -2288,7 +2328,7 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, - decode_transition_key(op->user_data, NULL, NULL, NULL, &target_rc); - op->rc = target_rc; - op->op_status = PCMK_LRM_OP_DONE; -- send_direct_ack(NULL, NULL, rsc, op, rsc->id); -+ controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id); - } - - pending->params = op->params; -@@ -2388,7 +2428,7 @@ do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data - - } else { - crm_warn("Resource %s no longer exists in the executor", op->rsc_id); -- send_direct_ack(NULL, NULL, rsc, op, op->rsc_id); -+ controld_ack_event_directly(NULL, NULL, rsc, op, op->rsc_id); - goto cleanup; - } - -@@ -2660,7 +2700,7 @@ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, - } - - if (need_direct_ack) { -- send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); -+ controld_ack_event_directly(NULL, NULL, NULL, op, op->rsc_id); - } - - if(remove == FALSE) { -diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h -index 3ab7048..7acac2a 100644 ---- a/daemons/controld/controld_lrm.h -+++ b/daemons/controld/controld_lrm.h -@@ -1,11 +1,13 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
- */ -+#ifndef CONTROLD_LRM__H -+# define CONTROLD_LRM__H - - #include - #include -@@ -169,3 +171,10 @@ gboolean remote_ra_controlling_guest(lrm_state_t * lrm_state); - - void process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, - active_op_t *pending, xmlNode *action_xml); -+void controld_ack_event_directly(const char *to_host, const char *to_sys, -+ lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, -+ const char *rsc_id); -+void controld_rc2event(lrmd_event_data_t *event, int rc); -+void controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id); -+ -+#endif --- -1.8.3.1 - diff --git a/SOURCES/007-shutdown-lock.patch b/SOURCES/007-shutdown-lock.patch deleted file mode 100644 index 17e7588..0000000 --- a/SOURCES/007-shutdown-lock.patch +++ /dev/null @@ -1,60 +0,0 @@ -From f17c99492c7ab9e639b940a34d2a48b55937b605 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 14 Jan 2020 16:00:36 -0600 -Subject: [PATCH 03/18] Low: tools: improve crm_resource "why" messages - ---- - tools/crm_resource_runtime.c | 21 ++++++++++++--------- - 1 file changed, 12 insertions(+), 9 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 9ae24b6..61ceee7 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -878,7 +878,7 @@ cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name, - void - cli_resource_check(cib_t * cib_conn, resource_t *rsc) - { -- int need_nl = 0; -+ bool printed = false; - char *role_s = NULL; - char *managed = NULL; - resource_t *parent = uber_parent(rsc); -@@ -897,23 +897,26 @@ cli_resource_check(cib_t * cib_conn, resource_t *rsc) - // Treated as if unset - - } else if(role == RSC_ROLE_STOPPED) { -- printf("\n * The configuration specifies that '%s' should remain stopped\n", parent->id); -- need_nl++; -+ printf("\n * Configuration specifies '%s' should remain stopped\n", -+ parent->id); -+ printed = true; - - } else if (is_set(parent->flags, pe_rsc_promotable) - && (role == RSC_ROLE_SLAVE)) { -- printf("\n * The configuration specifies that '%s' should not be promoted\n", parent->id); -- need_nl++; -+ printf("\n * Configuration specifies '%s' should not be promoted\n", -+ parent->id); -+ printed = true; - } - } - -- if(managed && crm_is_true(managed) == FALSE) { -- printf("%s * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id); -- need_nl++; -+ if (managed && !crm_is_true(managed)) { -+ printf("%s * Configuration prevents cluster from stopping or starting unmanaged '%s'\n", -+ (printed? "" : "\n"), parent->id); -+ printed = true; - } - free(managed); - -- if(need_nl) { -+ if (printed) { - printf("\n"); - } - } --- -1.8.3.1 - diff --git a/SOURCES/008-shutdown-lock.patch b/SOURCES/008-shutdown-lock.patch deleted file mode 100644 index 0592013..0000000 --- a/SOURCES/008-shutdown-lock.patch +++ /dev/null @@ -1,122 +0,0 @@ -From 736f255c18d4c99f1956fbb5ad4ac5bfc15bb841 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 14 Jan 2020 16:23:25 -0600 -Subject: [PATCH 04/18] Low: tools: improve error checking for crm_resource - cleanup/fail commands - -Bail earlier for misconfigured resources, and return error (rather than hang) -for unknown or offline node. Also add timeout directly to controller request -rather than rely on the controller using the interval as default timeout. 
---- - tools/crm_resource_runtime.c | 54 +++++++++++++++++++++++++++----------------- - 1 file changed, 33 insertions(+), 21 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 61ceee7..2ea8bb3 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -468,8 +468,9 @@ send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, - int rc = -ECOMM; - xmlNode *cmd = NULL; - xmlNode *xml_rsc = NULL; -- const char *value = NULL; - const char *router_node = host_uname; -+ const char *rsc_class = NULL; -+ const char *rsc_type = NULL; - xmlNode *params = NULL; - xmlNode *msg_data = NULL; - resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); -@@ -481,27 +482,49 @@ send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, - } else if (rsc->variant != pe_native) { - CMD_ERR("We can only process primitive resources, not %s", rsc_id); - return -EINVAL; -+ } - -- } else if (host_uname == NULL) { -+ rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -+ rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE); -+ if ((rsc_class == NULL) || (rsc_type == NULL)) { -+ CMD_ERR("Resource %s does not have a class and type", rsc_id); -+ return -EINVAL; -+ } -+ -+ if (host_uname == NULL) { - CMD_ERR("Please specify a node name"); - return -EINVAL; -+ - } else { -- node_t *node = pe_find_node(data_set->nodes, host_uname); -+ pe_node_t *node = pe_find_node(data_set->nodes, host_uname); - -+ if (node == NULL) { -+ CMD_ERR("Node %s not found", host_uname); -+ return -pcmk_err_node_unknown; -+ } -+ -+ if (!(node->details->online)) { -+ CMD_ERR("Node %s is not online", host_uname); -+ return -ENOTCONN; -+ } - if (pe__is_guest_or_remote_node(node)) { - node = pe__current_node(node->details->remote_rsc); - if (node == NULL) { - CMD_ERR("No cluster connection to Pacemaker Remote node %s detected", - host_uname); -- return -ENXIO; -+ return -ENOTCONN; - } - router_node = node->details->uname; - } - } - -- key = generate_transition_key(0, getpid(), 0, "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx"); -- - msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); -+ -+ /* The controller logs the transition key from requests, so we need to have -+ * *something* for it. -+ */ -+ key = generate_transition_key(0, getpid(), 0, -+ "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx"); - crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key); - free(key); - -@@ -519,31 +542,20 @@ send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, - crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id); - } - -- value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_ATTR_TYPE); -- if (value == NULL) { -- CMD_ERR("%s has no type! Aborting...", rsc_id); -- return -ENXIO; -- } -- -- value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_CLASS); -- if (value == NULL) { -- CMD_ERR("%s has no class! 
Aborting...", rsc_id); -- return -ENXIO; -- } -- -+ crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, rsc_class); - crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_PROVIDER); -+ crm_xml_add(xml_rsc, XML_ATTR_TYPE, rsc_type); - - params = create_xml_node(msg_data, XML_TAG_ATTRS); - crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); - -- key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); -+ // The controller parses the timeout from the request -+ key = crm_meta_name(XML_ATTR_TIMEOUT); - crm_xml_add(params, key, "60000"); /* 1 minute */ - free(key); - - our_pid = crm_getpid_s(); - cmd = create_request(op, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid); -- --/* crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */ - free_xml(msg_data); - - if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) { --- -1.8.3.1 - diff --git a/SOURCES/009-shutdown-lock.patch b/SOURCES/009-shutdown-lock.patch deleted file mode 100644 index ff73598..0000000 --- a/SOURCES/009-shutdown-lock.patch +++ /dev/null @@ -1,139 +0,0 @@ -From 8a0e19a7702f61622d06b1c473fb3d9a5924c8f4 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 14 Jan 2020 18:07:18 -0600 -Subject: [PATCH 05/18] Refactor: liblrmd: new convenience function for - allocating lrmd_event_data_t - ---- - daemons/controld/controld_execd.c | 7 +------ - include/crm/lrmd.h | 2 ++ - lib/lrmd/lrmd_client.c | 34 +++++++++++++++++++++++++++++++++- - lib/pacemaker/pcmk_sched_transition.c | 7 +------ - lib/pacemaker/pcmk_trans_unpack.c | 9 +++------ - 5 files changed, 40 insertions(+), 19 deletions(-) - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 82f2bf1..17cc8d6 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -1878,15 +1878,10 @@ construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, cons - - CRM_ASSERT(rsc_id && operation); - -- op = calloc(1, sizeof(lrmd_event_data_t)); -- CRM_ASSERT(op != NULL); -- -+ op = lrmd_new_event(rsc_id, operation, 0); - op->type = lrmd_event_exec_complete; -- op->op_type = strdup(operation); - op->op_status = PCMK_LRM_OP_PENDING; - op->rc = -1; -- op->rsc_id = strdup(rsc_id); -- op->interval_ms = 0; - op->timeout = 0; - op->start_delay = 0; - -diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h -index cfa2925..3ad1f05 100644 ---- a/include/crm/lrmd.h -+++ b/include/crm/lrmd.h -@@ -248,6 +248,8 @@ typedef struct lrmd_event_data_s { - const char *exit_reason; - } lrmd_event_data_t; - -+lrmd_event_data_t *lrmd_new_event(const char *rsc_id, const char *task, -+ guint interval_ms); - lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t * event); - void lrmd_free_event(lrmd_event_data_t * event); - -diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c -index 2469c52..d16743d 100644 ---- a/lib/lrmd/lrmd_client.c -+++ b/lib/lrmd/lrmd_client.c -@@ -1,5 +1,7 @@ - /* -- * Copyright 2012-2018 David Vossel -+ * Copyright 2012-2020 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. -@@ -175,6 +177,36 @@ lrmd_key_value_freeall(lrmd_key_value_t * head) - } - } - -+/*! 
-+ * Create a new lrmd_event_data_t object
-+ *
-+ * \param[in] rsc_id ID of resource involved in event
-+ * \param[in] task Action name
-+ * \param[in] interval_ms Action interval
-+ *
-+ * \return Newly allocated and initialized lrmd_event_data_t
-+ * \note This function asserts on memory errors, so the return value is
-+ * guaranteed to be non-NULL. The caller is responsible for freeing the
-+ * result with lrmd_free_event().
-+ */
-+lrmd_event_data_t *
-+lrmd_new_event(const char *rsc_id, const char *task, guint interval_ms)
-+{
-+ lrmd_event_data_t *event = calloc(1, sizeof(lrmd_event_data_t));
-+
-+ CRM_ASSERT(event != NULL);
-+ if (rsc_id != NULL) {
-+ event->rsc_id = strdup(rsc_id);
-+ CRM_ASSERT(event->rsc_id != NULL);
-+ }
-+ if (task != NULL) {
-+ event->op_type = strdup(task);
-+ CRM_ASSERT(event->op_type != NULL);
-+ }
-+ event->interval_ms = interval_ms;
-+ return event;
-+}
-+
- lrmd_event_data_t *
- lrmd_copy_event(lrmd_event_data_t * event)
- {
-diff --git a/lib/pacemaker/pcmk_sched_transition.c b/lib/pacemaker/pcmk_sched_transition.c
-index c415b75..1698c85 100644
---- a/lib/pacemaker/pcmk_sched_transition.c
-+++ b/lib/pacemaker/pcmk_sched_transition.c
-@@ -131,12 +131,7 @@ create_op(xmlNode *cib_resource, const char *task, guint interval_ms,
- lrmd_event_data_t *op = NULL;
- xmlNode *xop = NULL;
-
-- op = calloc(1, sizeof(lrmd_event_data_t));
--
-- op->rsc_id = strdup(ID(cib_resource));
-- op->interval_ms = interval_ms;
-- op->op_type = strdup(task);
--
-+ op = lrmd_new_event(ID(cib_resource), task, interval_ms);
- op->rc = outcome;
- op->op_status = 0;
- op->params = NULL; /* TODO: Fill me in */
-diff --git a/lib/pacemaker/pcmk_trans_unpack.c b/lib/pacemaker/pcmk_trans_unpack.c
-index e57f386..3e53289 100644
---- a/lib/pacemaker/pcmk_trans_unpack.c
-+++ b/lib/pacemaker/pcmk_trans_unpack.c
-@@ -298,12 +298,9 @@ convert_graph_action(xmlNode * resource, crm_action_t * action, int status, int
- CRM_CHECK(action_resource != NULL, crm_log_xml_warn(action->xml, "Bad");
- return NULL);
-
-- op = calloc(1, sizeof(lrmd_event_data_t));
--
-- op->rsc_id = strdup(ID(action_resource));
-- op->interval_ms = action->interval_ms;
-- op->op_type = strdup(crm_element_value(action->xml, XML_LRM_ATTR_TASK));
--
-+ op = lrmd_new_event(ID(action_resource),
-+ crm_element_value(action->xml, XML_LRM_ATTR_TASK),
-+ action->interval_ms);
- op->rc = rc;
- op->op_status = status;
- op->t_run = time(NULL);
---
-1.8.3.1
-
diff --git a/SOURCES/010-shutdown-lock.patch b/SOURCES/010-shutdown-lock.patch
deleted file mode 100644
index 6304246..0000000
--- a/SOURCES/010-shutdown-lock.patch
+++ /dev/null
@@ -1,129 +0,0 @@
-From 50b0944c8add3f16b8190e75a6d06c3473c12a8f Mon Sep 17 00:00:00 2001
-From: Ken Gaillot
-Date: Thu, 21 Nov 2019 14:48:02 -0600
-Subject: [PATCH 06/18] Feature: scheduler: add shutdown lock cluster options
-
-This commit adds shutdown-lock and shutdown-lock-limit options (just the
-options, not the feature itself).
-
-shutdown-lock defaults to false, which preserves current behavior. The intended
-purpose of setting it to true is to *prevent* recovery of a node's resources
-elsewhere when the node is cleanly shut down, until the node rejoins. If
-shutdown-lock-limit is set to a nonzero time duration, the cluster will
-be allowed to recover the resources if the node has not rejoined within this
-time.
- -The use case is when rebooting a node (such as for software updates) is done by -cluster-unaware system administrators during scheduled maintenance windows, -resources prefer specific nodes, and resource recovery time is high. ---- - include/crm/msg_xml.h | 4 +++- - include/crm/pengine/pe_types.h | 2 ++ - lib/pengine/common.c | 24 +++++++++++++++++++++++- - lib/pengine/unpack.c | 10 ++++++++++ - 4 files changed, 38 insertions(+), 2 deletions(-) - -diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h -index d56e40c..d0cdf6c 100644 ---- a/include/crm/msg_xml.h -+++ b/include/crm/msg_xml.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -346,6 +346,8 @@ extern "C" { - # define XML_CONFIG_ATTR_FORCE_QUIT "shutdown-escalation" - # define XML_CONFIG_ATTR_RECHECK "cluster-recheck-interval" - # define XML_CONFIG_ATTR_FENCE_REACTION "fence-reaction" -+# define XML_CONFIG_ATTR_SHUTDOWN_LOCK "shutdown-lock" -+# define XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT "shutdown-lock-limit" - - # define XML_ALERT_ATTR_PATH "path" - # define XML_ALERT_ATTR_TIMEOUT "timeout" -diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h -index 23e1c46..8a735a3 100644 ---- a/include/crm/pengine/pe_types.h -+++ b/include/crm/pengine/pe_types.h -@@ -102,6 +102,7 @@ enum pe_find { - # define pe_flag_start_failure_fatal 0x00001000ULL - # define pe_flag_remove_after_stop 0x00002000ULL - # define pe_flag_startup_fencing 0x00004000ULL -+# define pe_flag_shutdown_lock 0x00008000ULL - - # define pe_flag_startup_probes 0x00010000ULL - # define pe_flag_have_status 0x00020000ULL -@@ -167,6 +168,7 @@ struct pe_working_set_s { - GList *stop_needed; // Containers that need stop actions - time_t recheck_by; // Hint to controller to re-run scheduler by this time - int ninstances; // Total number of resource instances -+ guint shutdown_lock;// How long (seconds) to lock resources to shutdown node - }; - - enum pe_check_parameters { -diff --git a/lib/pengine/common.c b/lib/pengine/common.c -index da39c99..e72a033 100644 ---- a/lib/pengine/common.c -+++ b/lib/pengine/common.c -@@ -1,5 +1,7 @@ - /* -- * Copyright 2004-2018 Andrew Beekhof -+ * Copyright 2004-2020 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. -@@ -85,6 +87,26 @@ static pe_cluster_option pe_opts[] = { - "When set to TRUE, the cluster will immediately ban a resource from a node if it fails to start there. When FALSE, the cluster will instead check the resource's fail count against its migration-threshold." }, - { "enable-startup-probes", NULL, "boolean", NULL, "true", &check_boolean, - "Should the cluster check for active resources during startup", NULL }, -+ { -+ XML_CONFIG_ATTR_SHUTDOWN_LOCK, -+ NULL, "boolean", NULL, "false", &check_boolean, -+ "Whether to lock resources to a cleanly shut down node", -+ "When true, resources active on a node when it is cleanly shut down " -+ "are kept \"locked\" to that node (not allowed to run elsewhere) " -+ "until they start again on that node after it rejoins (or for at " -+ "most shutdown-lock-limit, if set). Stonith resources and " -+ "Pacemaker Remote connections are never locked. 
Clone and bundle " -+ "instances and the master role of promotable clones are currently " -+ "never locked, though support could be added in a future release." -+ }, -+ { -+ XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, -+ NULL, "time", NULL, "0", &check_timer, -+ "Do not lock resources to a cleanly shut down node longer than this", -+ "If shutdown-lock is true and this is set to a nonzero time duration, " -+ "shutdown locks will expire after this much time has passed since " -+ "the shutdown was initiated, even if the node has not rejoined." -+ }, - - /* Stonith Options */ - { "stonith-enabled", NULL, "boolean", NULL, "true", &check_boolean, -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index c9fc672..8c0d72a 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -319,6 +319,16 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) - data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy"); - crm_trace("Placement strategy: %s", data_set->placement_strategy); - -+ set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock); -+ crm_trace("Resources will%s be locked to cleanly shut down nodes", -+ (is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not")); -+ if (is_set(data_set->flags, pe_flag_shutdown_lock)) { -+ value = pe_pref(data_set->config_hash, -+ XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT); -+ data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000; -+ crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock); -+ } -+ - return TRUE; - } - --- -1.8.3.1 - diff --git a/SOURCES/011-shutdown-lock.patch b/SOURCES/011-shutdown-lock.patch deleted file mode 100644 index e9f1f5c..0000000 --- a/SOURCES/011-shutdown-lock.patch +++ /dev/null @@ -1,144 +0,0 @@ -From f5d88938955f63935058b7cc2d706a12e6ea1121 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 6 Dec 2019 11:57:59 -0600 -Subject: [PATCH 07/18] Low: scheduler: respect shutdown locks when placing - active resources - -Use new pe_resource_t members to indicate that a resource is locked to a -particular node. - -For active resources (i.e. in the transition where the node is scheduled for -shutdown), these are connected by checking each lockable resource for whether -it is running on a single clean node that is shutting down. - -When applying constraints, place -INFINITY location constraints for locked -resources on all nodes other than the lock node. - -(Inactive resources -- i.e. in later transitions after the node is shut down -- -are not yet locked.) 
---- - include/crm/pengine/pe_types.h | 2 + - lib/pacemaker/pcmk_sched_allocate.c | 87 +++++++++++++++++++++++++++++++++++++ - 2 files changed, 89 insertions(+) - -diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h -index 8a735a3..123d8ef 100644 ---- a/include/crm/pengine/pe_types.h -+++ b/include/crm/pengine/pe_types.h -@@ -354,6 +354,8 @@ struct pe_resource_s { - GListPtr fillers; - - pe_node_t *pending_node; // Node on which pending_task is happening -+ pe_node_t *lock_node; // Resource is shutdown-locked to this node -+ time_t lock_time; // When shutdown lock started - - #if ENABLE_VERSIONED_ATTRS - xmlNode *versioned_parameters; -diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c -index fc2f4cf..0314f1b 100644 ---- a/lib/pacemaker/pcmk_sched_allocate.c -+++ b/lib/pacemaker/pcmk_sched_allocate.c -@@ -977,6 +977,87 @@ rsc_discover_filter(resource_t *rsc, node_t *node) - } - } - -+static time_t -+shutdown_time(pe_node_t *node, pe_working_set_t *data_set) -+{ -+ const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); -+ time_t result = 0; -+ -+ if (shutdown) { -+ errno = 0; -+ result = (time_t) crm_int_helper(shutdown, NULL); -+ if (errno != 0) { -+ result = 0; -+ } -+ } -+ return result? result : get_effective_time(data_set); -+} -+ -+static void -+apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set) -+{ -+ const char *class; -+ -+ // Only primitives and (uncloned) groups may be locked -+ if (rsc->variant == pe_group) { -+ for (GList *item = rsc->children; item != NULL; -+ item = item->next) { -+ apply_shutdown_lock((pe_resource_t *) item->data, data_set); -+ } -+ } else if (rsc->variant != pe_native) { -+ return; -+ } -+ -+ // Fence devices and remote connections can't be locked -+ class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -+ if ((class == NULL) || !strcmp(class, PCMK_RESOURCE_CLASS_STONITH) -+ || pe__resource_is_remote_conn(rsc, data_set)) { -+ return; -+ } -+ -+ // Only a resource active on exactly one node can be locked -+ if (pcmk__list_of_1(rsc->running_on)) { -+ pe_node_t *node = rsc->running_on->data; -+ -+ if (node->details->shutdown) { -+ if (node->details->unclean) { -+ pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown", -+ rsc->id, node->details->uname); -+ } else { -+ rsc->lock_node = node; -+ rsc->lock_time = shutdown_time(node, data_set); -+ } -+ } -+ } -+ -+ if (rsc->lock_node == NULL) { -+ // No lock needed -+ return; -+ } -+ -+ if (data_set->shutdown_lock > 0) { -+ time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock; -+ -+ pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)", -+ rsc->id, rsc->lock_node->details->uname, -+ (long long) lock_expiration); -+ pe__update_recheck_time(++lock_expiration, data_set); -+ } else { -+ pe_rsc_info(rsc, "Locking %s to %s due to shutdown", -+ rsc->id, rsc->lock_node->details->uname); -+ } -+ -+ // If resource is locked to one node, ban it from all other nodes -+ for (GList *item = data_set->nodes; item != NULL; item = item->next) { -+ pe_node_t *node = item->data; -+ -+ if (strcmp(node->details->uname, rsc->lock_node->details->uname)) { -+ resource_location(rsc, node, -CRM_SCORE_INFINITY, -+ XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set); -+ } -+ } -+} -+ - /* - * Count how many valid nodes we have (so we know the maximum number of - * colors we can resolve). 
-@@ -988,6 +1069,12 @@ stage2(pe_working_set_t * data_set) - { - GListPtr gIter = NULL; - -+ if (is_set(data_set->flags, pe_flag_shutdown_lock)) { -+ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { -+ apply_shutdown_lock((pe_resource_t *) gIter->data, data_set); -+ } -+ } -+ - for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { - node_t *node = (node_t *) gIter->data; - --- -1.8.3.1 - diff --git a/SOURCES/012-shutdown-lock.patch b/SOURCES/012-shutdown-lock.patch deleted file mode 100644 index c700d96..0000000 --- a/SOURCES/012-shutdown-lock.patch +++ /dev/null @@ -1,202 +0,0 @@ -From 16f57bb79de4f88c2def174e3bb7d8ef312674cd Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 6 Dec 2019 12:17:03 -0600 -Subject: [PATCH 08/18] Low: scheduler: respect shutdown locks when placing - inactive resources - -When shutdown-lock is enabled, and we're either scheduling a resource stop -on a node that's cleanly shutting down or scheduling any action for a -previously locked resource, add "shutdown-lock=" to the -graph action. The controller will be able to use this to know when to preserve -the lock (by adding the lock time to the resource state entry). - -When the scheduler unpacks a resource state entry with a lock, it will remember -the lock node and lock time, which will trigger existing code for applying -shutdown locks. ---- - lib/pacemaker/pcmk_sched_allocate.c | 17 ++++++++++++- - lib/pacemaker/pcmk_sched_graph.c | 30 ++++++++++++++++++++++- - lib/pengine/unpack.c | 49 +++++++++++++++++++++++++++++++++---- - 3 files changed, 89 insertions(+), 7 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c -index 0314f1b..884e1bd 100644 ---- a/lib/pacemaker/pcmk_sched_allocate.c -+++ b/lib/pacemaker/pcmk_sched_allocate.c -@@ -1015,8 +1015,23 @@ apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set) - return; - } - -+ if (rsc->lock_node != NULL) { -+ // The lock was obtained from resource history -+ -+ if (rsc->running_on != NULL) { -+ /* The resource was started elsewhere even though it is now -+ * considered locked. This shouldn't be possible, but as a -+ * failsafe, we don't want to disturb the resource now. -+ */ -+ pe_rsc_info(rsc, -+ "Cancelling shutdown lock because %s is already active", -+ rsc->id); -+ rsc->lock_node = NULL; -+ rsc->lock_time = 0; -+ } -+ - // Only a resource active on exactly one node can be locked -- if (pcmk__list_of_1(rsc->running_on)) { -+ } else if (pcmk__list_of_1(rsc->running_on)) { - pe_node_t *node = rsc->running_on->data; - - if (node->details->shutdown) { -diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c -index a6967fe..2861f3d 100644 ---- a/lib/pacemaker/pcmk_sched_graph.c -+++ b/lib/pacemaker/pcmk_sched_graph.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -988,6 +988,26 @@ add_downed_nodes(xmlNode *xml, const action_t *action, - } - } - -+static bool -+should_lock_action(pe_action_t *action) -+{ -+ // Only actions taking place on resource's lock node are locked -+ if ((action->rsc->lock_node == NULL) || (action->node == NULL) -+ || (action->node->details != action->rsc->lock_node->details)) { -+ return false; -+ } -+ -+ /* During shutdown, only stops are locked (otherwise, another action such as -+ * a demote would cause the controller to clear the lock) -+ */ -+ if (action->node->details->shutdown && action->task -+ && strcmp(action->task, RSC_STOP)) { -+ return false; -+ } -+ -+ return true; -+} -+ - static xmlNode * - action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set) - { -@@ -1097,6 +1117,14 @@ action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set) - XML_ATTR_TYPE - }; - -+ /* If a resource is locked to a node via shutdown-lock, mark its actions -+ * so the controller can preserve the lock when the action completes. -+ */ -+ if (should_lock_action(action)) { -+ crm_xml_add_ll(action_xml, XML_CONFIG_ATTR_SHUTDOWN_LOCK, -+ (long long) action->rsc->lock_time); -+ } -+ - // List affected resource - - rsc_xml = create_xml_node(action_xml, -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 8c0d72a..5139e60 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -1059,7 +1060,8 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - crm_trace("Checking node %s/%s/%s status %d/%d/%d", id, rsc->id, rsc->container->id, fence, rsc->role, RSC_ROLE_STARTED); - - } else if (!pe__is_guest_node(this_node) -- && rsc->role == RSC_ROLE_STARTED) { -+ && ((rsc->role == RSC_ROLE_STARTED) -+ || is_set(data_set->flags, pe_flag_shutdown_lock))) { - check = TRUE; - crm_trace("Checking node %s/%s status %d/%d/%d", id, rsc->id, fence, rsc->role, RSC_ROLE_STARTED); - } -@@ -1075,6 +1077,9 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - - } else if (fence) { - process = TRUE; -+ -+ } else if (is_set(data_set->flags, pe_flag_shutdown_lock)) { -+ process = TRUE; - } - - if(process) { -@@ -2198,6 +2203,28 @@ calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index) - } - } - -+// If resource history entry has shutdown lock, remember lock node and time -+static void -+unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node, -+ pe_working_set_t *data_set) -+{ -+ time_t lock_time = 0; // When lock started (i.e. 
node shutdown time) -+ -+ if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK, -+ &lock_time) == pcmk_ok) && (lock_time != 0)) { -+ -+ if ((data_set->shutdown_lock > 0) -+ && (get_effective_time(data_set) -+ > (lock_time + data_set->shutdown_lock))) { -+ pe_rsc_info(rsc, "Shutdown lock for %s on %s expired", -+ rsc->id, node->details->uname); -+ } else { -+ rsc->lock_node = node; -+ rsc->lock_time = lock_time; -+ } -+ } -+} -+ - static resource_t * - unpack_lrm_rsc_state(node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set) - { -@@ -2234,18 +2261,30 @@ unpack_lrm_rsc_state(node_t * node, xmlNode * rsc_entry, pe_working_set_t * data - } - } - -- if (op_list == NULL) { -- /* if there are no operations, there is nothing to do */ -- return NULL; -+ if (is_not_set(data_set->flags, pe_flag_shutdown_lock)) { -+ if (op_list == NULL) { -+ // If there are no operations, there is nothing to do -+ return NULL; -+ } - } - - /* find the resource */ - rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry); - if (rsc == NULL) { -- rsc = process_orphan_resource(rsc_entry, node, data_set); -+ if (op_list == NULL) { -+ // If there are no operations, there is nothing to do -+ return NULL; -+ } else { -+ rsc = process_orphan_resource(rsc_entry, node, data_set); -+ } - } - CRM_ASSERT(rsc != NULL); - -+ // Check whether the resource is "shutdown-locked" to this node -+ if (is_set(data_set->flags, pe_flag_shutdown_lock)) { -+ unpack_shutdown_lock(rsc_entry, rsc, node, data_set); -+ } -+ - /* process operations */ - saved_role = rsc->role; - on_fail = action_fail_ignore; --- -1.8.3.1 - diff --git a/SOURCES/013-shutdown-lock.patch b/SOURCES/013-shutdown-lock.patch deleted file mode 100644 index 4b9c91f..0000000 --- a/SOURCES/013-shutdown-lock.patch +++ /dev/null @@ -1,281 +0,0 @@ -From 223ab7251adcb8c6f6b96def138be58b1478c42b Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 22 Nov 2019 17:03:20 -0600 -Subject: [PATCH 09/18] Low: controller: mark shutdown-locked resources in - resource history - -When a graph action indicates that the resource should be shutdown-locked -to its node, remember the shutdown lock time in active_op_t so we can remember -that when the result comes back. When the result does come back, add -"shutdown-lock" to its lrm_resource entry in the CIB status section -- as -the timestamp if it's a successful stop or a probe finding the resource -inactive, or as 0 to clear the lock for any other operation. ---- - daemons/controld/controld_control.c | 9 ++++- - daemons/controld/controld_execd.c | 44 +++++++++++++++++++-- - daemons/controld/controld_lrm.h | 1 + - daemons/controld/controld_te_callbacks.c | 65 ++++++++++++++++++++++---------- - daemons/controld/controld_utils.h | 1 + - 5 files changed, 95 insertions(+), 25 deletions(-) - -diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c -index 6c7f97c..c918a1e 100644 ---- a/daemons/controld/controld_control.c -+++ b/daemons/controld/controld_control.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -35,6 +35,7 @@ gboolean fsa_has_quorum = FALSE; - crm_trigger_t *fsa_source = NULL; - crm_trigger_t *config_read = NULL; - bool no_quorum_suicide_escalation = FALSE; -+bool controld_shutdown_lock_enabled = false; - - /* A_HA_CONNECT */ - void -@@ -587,7 +588,10 @@ static pe_cluster_option crmd_opts[] = { - { "stonith-max-attempts",NULL,"integer",NULL,"10",&check_positive_number, - "How many times stonith can fail before it will no longer be attempted on a target" - }, -+ -+ // Already documented in libpe_status (other values must be kept identical) - { "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", "stop", &check_quorum, NULL, NULL }, -+ { XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL, "false", &check_boolean, NULL, NULL }, - }; - /* *INDENT-ON* */ - -@@ -698,6 +702,9 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void - value = crmd_pref(config_hash, "join-finalization-timeout"); - finalization_timer->period_ms = crm_parse_interval_spec(value); - -+ value = crmd_pref(config_hash, XML_CONFIG_ATTR_SHUTDOWN_LOCK); -+ controld_shutdown_lock_enabled = crm_is_true(value); -+ - free(fsa_cluster_name); - fsa_cluster_name = NULL; - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index 17cc8d6..c0436a2 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -44,7 +44,8 @@ static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, - - static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, - int log_level); --static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op); -+static int do_update_resource(const char *node_name, lrmd_rsc_info_t *rsc, -+ lrmd_event_data_t *op, time_t lock_time); - - static void - lrm_connection_destroy(void) -@@ -2171,7 +2172,7 @@ record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t - crm_debug("Recording pending op " CRM_OP_FMT " on %s in the CIB", - op->rsc_id, op->op_type, op->interval_ms, node_name); - -- do_update_resource(node_name, rsc, op); -+ do_update_resource(node_name, rsc, op, 0); - } - - static void -@@ -2313,6 +2314,10 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, - pending->rsc_id = strdup(rsc->id); - pending->start_time = time(NULL); - pending->user_data = op->user_data? strdup(op->user_data) : NULL; -+ if (crm_element_value_epoch(msg, XML_CONFIG_ATTR_SHUTDOWN_LOCK, -+ &(pending->lock_time)) != pcmk_ok) { -+ pending->lock_time = 0; -+ } - g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending); - - if ((op->interval_ms > 0) -@@ -2356,8 +2361,28 @@ cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *use - } - } - -+/* Only successful stops, and probes that found the resource inactive, get locks -+ * recorded in the history. This ensures the resource stays locked to the node -+ * until it is active there again after the node comes back up. 
-+ */ -+static bool -+should_preserve_lock(lrmd_event_data_t *op) -+{ -+ if (!controld_shutdown_lock_enabled) { -+ return false; -+ } -+ if (!strcmp(op->op_type, RSC_STOP) && (op->rc == PCMK_OCF_OK)) { -+ return true; -+ } -+ if (!strcmp(op->op_type, RSC_STATUS) && (op->rc == PCMK_OCF_NOT_RUNNING)) { -+ return true; -+ } -+ return false; -+} -+ - static int --do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) -+do_update_resource(const char *node_name, lrmd_rsc_info_t *rsc, -+ lrmd_event_data_t *op, time_t lock_time) - { - /* - -@@ -2412,6 +2437,16 @@ do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data - crm_xml_add(iter, XML_ATTR_TYPE, rsc->type); - crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->standard); - crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider); -+ if (lock_time != 0) { -+ /* Actions on a locked resource should either preserve the lock by -+ * recording it with the action result, or clear it. -+ */ -+ if (!should_preserve_lock(op)) { -+ lock_time = 0; -+ } -+ crm_xml_add_ll(iter, XML_CONFIG_ATTR_SHUTDOWN_LOCK, -+ (long long) lock_time); -+ } - - if (op->params) { - container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); -@@ -2616,7 +2651,8 @@ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, - if (controld_action_is_recordable(op->op_type)) { - if (node_name && rsc) { - // We should record the result, and happily, we can -- update_id = do_update_resource(node_name, rsc, op); -+ update_id = do_update_resource(node_name, rsc, op, -+ pending? pending->lock_time : 0); - need_direct_ack = FALSE; - - } else if (op->rsc_deleted) { -diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h -index 7acac2a..da0582c 100644 ---- a/daemons/controld/controld_lrm.h -+++ b/daemons/controld/controld_lrm.h -@@ -46,6 +46,7 @@ typedef struct active_op_s { - int call_id; - uint32_t flags; // bitmask of active_op_e - time_t start_time; -+ time_t lock_time; - char *rsc_id; - char *op_type; - char *op_key; -diff --git a/daemons/controld/controld_te_callbacks.c b/daemons/controld/controld_te_callbacks.c -index 25f0ab2..8506f26 100644 ---- a/daemons/controld/controld_te_callbacks.c -+++ b/daemons/controld/controld_te_callbacks.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -28,6 +28,17 @@ crm_trigger_t *transition_trigger = NULL; - /* #define RSC_OP_TEMPLATE "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_CIB_TAG_STATE"[@uname='%s']"//"XML_LRM_TAG_RSC_OP"[@id='%s]" */ - #define RSC_OP_TEMPLATE "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_LRM_TAG_RSC_OP"[@id='%s']" - -+// An explicit shutdown-lock of 0 means the lock has been cleared -+static bool -+shutdown_lock_cleared(xmlNode *lrm_resource) -+{ -+ time_t shutdown_lock = 0; -+ -+ return (crm_element_value_epoch(lrm_resource, XML_CONFIG_ATTR_SHUTDOWN_LOCK, -+ &shutdown_lock) == pcmk_ok) -+ && (shutdown_lock == 0); -+} -+ - static void - te_update_diff_v1(const char *event, xmlNode *diff) - { -@@ -106,33 +117,42 @@ te_update_diff_v1(const char *event, xmlNode *diff) - } - freeXpathObject(xpathObj); - -+ // Check for lrm_resource entries -+ xpathObj = xpath_search(diff, -+ "//" F_CIB_UPDATE_RESULT -+ "//" XML_TAG_DIFF_ADDED -+ "//" XML_LRM_TAG_RESOURCE); -+ max = numXpathResults(xpathObj); -+ - /* -- * Updates by, or in response to, TE actions will never contain updates -- * for more than one resource at a time, so such updates indicate an -- * LRM refresh. -- * -- * In that case, start a new transition rather than check each result -- * individually, which can result in _huge_ speedups in large clusters. -+ * Updates by, or in response to, graph actions will never affect more than -+ * one resource at a time, so such updates indicate an LRM refresh. In that -+ * case, start a new transition rather than check each result individually, -+ * which can result in _huge_ speedups in large clusters. - * - * Unfortunately, we can only do so when there are no pending actions. - * Otherwise, we could mistakenly throw away those results here, and - * the cluster will stall waiting for them and time out the operation. 
- */ -- if (transition_graph->pending == 0) { -- xpathObj = xpath_search(diff, -- "//" F_CIB_UPDATE_RESULT -- "//" XML_TAG_DIFF_ADDED -- "//" XML_LRM_TAG_RESOURCE); -- max = numXpathResults(xpathObj); -- if (max > 1) { -- crm_debug("Ignoring resource operation updates due to history refresh of %d resources", -- max); -- crm_log_xml_trace(diff, "lrm-refresh"); -- abort_transition(INFINITY, tg_restart, "History refresh", NULL); -- goto bail; -+ if ((transition_graph->pending == 0) && (max > 1)) { -+ crm_debug("Ignoring resource operation updates due to history refresh of %d resources", -+ max); -+ crm_log_xml_trace(diff, "lrm-refresh"); -+ abort_transition(INFINITY, tg_restart, "History refresh", NULL); -+ goto bail; -+ } -+ -+ if (max == 1) { -+ xmlNode *lrm_resource = getXpathResult(xpathObj, 0); -+ -+ if (shutdown_lock_cleared(lrm_resource)) { -+ // @TODO would be more efficient to abort once after transition done -+ abort_transition(INFINITY, tg_restart, "Shutdown lock cleared", -+ lrm_resource); -+ // Still process results, so we stop timers and update failcounts - } -- freeXpathObject(xpathObj); - } -+ freeXpathObject(xpathObj); - - /* Process operation updates */ - xpathObj = -@@ -205,6 +225,11 @@ process_lrm_resource_diff(xmlNode *lrm_resource, const char *node) - rsc_op = __xml_next(rsc_op)) { - process_graph_event(rsc_op, node); - } -+ if (shutdown_lock_cleared(lrm_resource)) { -+ // @TODO would be more efficient to abort once after transition done -+ abort_transition(INFINITY, tg_restart, "Shutdown lock cleared", -+ lrm_resource); -+ } - } - - static void -diff --git a/daemons/controld/controld_utils.h b/daemons/controld/controld_utils.h -index ca8cddb..8e31007 100644 ---- a/daemons/controld/controld_utils.h -+++ b/daemons/controld/controld_utils.h -@@ -41,6 +41,7 @@ fsa_cib_anon_update(const char *section, xmlNode *data) { - } - - extern gboolean fsa_has_quorum; -+extern bool controld_shutdown_lock_enabled; - extern int last_peer_update; - extern int last_resource_update; - --- -1.8.3.1 - diff --git a/SOURCES/014-shutdown-lock.patch b/SOURCES/014-shutdown-lock.patch deleted file mode 100644 index b464947..0000000 --- a/SOURCES/014-shutdown-lock.patch +++ /dev/null @@ -1,158 +0,0 @@ -From 8270e8aed46f6e672b94f00fe0bde07cd2b6ddd7 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Dec 2019 11:38:49 -0600 -Subject: [PATCH 10/18] Low: controller: don't clear shutdown locks when node - rejoins - -Add new controld_delete_node_state() values for clearing resource history -while preserving shutdown locks. This is accomplished by deleting all -unlocked lrm_resource entries and all lrm_rsc_op entries, instead of the entire -lrm subsection. 
---- - daemons/controld/controld_based.c | 22 +++++++++++++++++++++- - daemons/controld/controld_join_dc.c | 7 +++++-- - daemons/controld/controld_remote_ra.c | 16 ++++++++++------ - daemons/controld/controld_utils.h | 2 ++ - 4 files changed, 38 insertions(+), 9 deletions(-) - -diff --git a/daemons/controld/controld_based.c b/daemons/controld/controld_based.c -index f3a7c4f..0ffc1e8 100644 ---- a/daemons/controld/controld_based.c -+++ b/daemons/controld/controld_based.c -@@ -191,12 +191,21 @@ cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, - // Node's lrm section (name 1x) - #define XPATH_NODE_LRM XPATH_NODE_STATE "/" XML_CIB_TAG_LRM - -+// Node's lrm_rsc_op entries and lrm_resource entries without lock (name 2x) -+#define XPATH_NODE_LRM_UNLOCKED XPATH_NODE_STATE "//" XML_LRM_TAG_RSC_OP \ -+ "|" XPATH_NODE_STATE \ -+ "//" XML_LRM_TAG_RESOURCE \ -+ "[not(@" XML_CONFIG_ATTR_SHUTDOWN_LOCK ")]" -+ - // Node's transient_attributes section (name 1x) - #define XPATH_NODE_ATTRS XPATH_NODE_STATE "/" XML_TAG_TRANSIENT_NODEATTRS - - // Everything under node_state (name 1x) - #define XPATH_NODE_ALL XPATH_NODE_STATE "/*" - -+// Unlocked history + transient attributes (name 3x) -+#define XPATH_NODE_ALL_UNLOCKED XPATH_NODE_LRM_UNLOCKED "|" XPATH_NODE_ATTRS -+ - /*! - * \internal - * \brief Delete subsection of a node's CIB node_state -@@ -218,6 +227,11 @@ controld_delete_node_state(const char *uname, enum controld_section_e section, - xpath = crm_strdup_printf(XPATH_NODE_LRM, uname); - desc = crm_strdup_printf("resource history for node %s", uname); - break; -+ case controld_section_lrm_unlocked: -+ xpath = crm_strdup_printf(XPATH_NODE_LRM_UNLOCKED, uname, uname); -+ desc = crm_strdup_printf("resource history (other than shutdown " -+ "locks) for node %s", uname); -+ break; - case controld_section_attrs: - xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname); - desc = crm_strdup_printf("transient attributes for node %s", uname); -@@ -226,6 +240,12 @@ controld_delete_node_state(const char *uname, enum controld_section_e section, - xpath = crm_strdup_printf(XPATH_NODE_ALL, uname); - desc = crm_strdup_printf("all state for node %s", uname); - break; -+ case controld_section_all_unlocked: -+ xpath = crm_strdup_printf(XPATH_NODE_ALL_UNLOCKED, -+ uname, uname, uname); -+ desc = crm_strdup_printf("all state (other than shutdown locks) " -+ "for node %s", uname); -+ break; - } - - if (fsa_cib_conn == NULL) { -@@ -234,7 +254,7 @@ controld_delete_node_state(const char *uname, enum controld_section_e section, - } else { - int call_id; - -- options |= cib_quorum_override|cib_xpath; -+ options |= cib_quorum_override|cib_xpath|cib_multiple; - call_id = fsa_cib_conn->cmds->remove(fsa_cib_conn, xpath, NULL, options); - crm_info("Deleting %s (via CIB call %d) " CRM_XS " xpath=%s", - desc, call_id, xpath); -diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c -index 885b2a9..f0eb2a2 100644 ---- a/daemons/controld/controld_join_dc.c -+++ b/daemons/controld/controld_join_dc.c -@@ -534,6 +534,7 @@ do_dc_join_ack(long long action, - int join_id = -1; - int call_id = 0; - ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); -+ enum controld_section_e section = controld_section_lrm; - - const char *op = crm_element_value(join_ack->msg, F_CRM_TASK); - const char *join_from = crm_element_value(join_ack->msg, F_CRM_HOST_FROM); -@@ -583,8 +584,10 @@ do_dc_join_ack(long long action, - /* Update CIB with node's current executor state. 
A new transition will be - * triggered later, when the CIB notifies us of the change. - */ -- controld_delete_node_state(join_from, controld_section_lrm, -- cib_scope_local); -+ if (controld_shutdown_lock_enabled) { -+ section = controld_section_lrm_unlocked; -+ } -+ controld_delete_node_state(join_from, section, cib_scope_local); - if (safe_str_eq(join_from, fsa_our_uname)) { - xmlNode *now_dc_lrmd_state = controld_query_executor_state(fsa_our_uname); - -diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c -index 2d3dfa7..a81c354 100644 ---- a/daemons/controld/controld_remote_ra.c -+++ b/daemons/controld/controld_remote_ra.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2013-2019 the Pacemaker project contributors -+ * Copyright 2013-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -177,17 +177,21 @@ remote_node_up(const char *node_name) - int call_opt, call_id = 0; - xmlNode *update, *state; - crm_node_t *node; -+ enum controld_section_e section = controld_section_all; - - CRM_CHECK(node_name != NULL, return); - crm_info("Announcing pacemaker_remote node %s", node_name); - -- /* Clear node's entire state (resource history and transient attributes). -- * The transient attributes should and normally will be cleared when the -- * node leaves, but since remote node state has a number of corner cases, -- * clear them here as well, to be sure. -+ /* Clear node's entire state (resource history and transient attributes) -+ * other than shutdown locks. The transient attributes should and normally -+ * will be cleared when the node leaves, but since remote node state has a -+ * number of corner cases, clear them here as well, to be sure. - */ - call_opt = crmd_cib_smart_opt(); -- controld_delete_node_state(node_name, controld_section_all, call_opt); -+ if (controld_shutdown_lock_enabled) { -+ section = controld_section_all_unlocked; -+ } -+ controld_delete_node_state(node_name, section, call_opt); - - /* Clear node's probed attribute */ - update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE); -diff --git a/daemons/controld/controld_utils.h b/daemons/controld/controld_utils.h -index 8e31007..5549636 100644 ---- a/daemons/controld/controld_utils.h -+++ b/daemons/controld/controld_utils.h -@@ -90,8 +90,10 @@ bool controld_action_is_recordable(const char *action); - // Subsections of node_state - enum controld_section_e { - controld_section_lrm, -+ controld_section_lrm_unlocked, - controld_section_attrs, - controld_section_all, -+ controld_section_all_unlocked - }; - - void controld_delete_node_state(const char *uname, --- -1.8.3.1 - diff --git a/SOURCES/015-shutdown-lock.patch b/SOURCES/015-shutdown-lock.patch deleted file mode 100644 index 364b2aa..0000000 --- a/SOURCES/015-shutdown-lock.patch +++ /dev/null @@ -1,38 +0,0 @@ -From d70d90367c898bcb62fd6c7dd8d641ca56be04ae Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 20 Dec 2019 11:46:37 -0600 -Subject: [PATCH 11/18] Low: scheduler: display when a resource is - shutdown-locked to a node - -... 
so it shows up in logs and cluster status displays ---- - lib/pengine/native.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/lib/pengine/native.c b/lib/pengine/native.c -index b064115..5a6fd80 100644 ---- a/lib/pengine/native.c -+++ b/lib/pengine/native.c -@@ -541,6 +541,9 @@ native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, - provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); - } - -+ if ((node == NULL) && (rsc->lock_node != NULL)) { -+ node = rsc->lock_node; -+ } - if (is_set(options, pe_print_rsconly) - || pcmk__list_of_multiple(rsc->running_on)) { - node = NULL; -@@ -583,6 +586,9 @@ native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, - if (node && !(node->details->online) && node->details->unclean) { - have_flags = add_output_flag(outstr, "UNCLEAN", have_flags); - } -+ if (node && (node == rsc->lock_node)) { -+ have_flags = add_output_flag(outstr, "LOCKED", have_flags); -+ } - if (is_set(options, pe_print_pending)) { - const char *pending_task = native_pending_task(rsc); - --- -1.8.3.1 - diff --git a/SOURCES/016-shutdown-lock.patch b/SOURCES/016-shutdown-lock.patch deleted file mode 100644 index b8f8e5d..0000000 --- a/SOURCES/016-shutdown-lock.patch +++ /dev/null @@ -1,29 +0,0 @@ -From bc9c07951cb9c411324056b4d5322016153fee20 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 14 Jan 2020 16:01:16 -0600 -Subject: [PATCH 12/18] Low: tools: crm_resource resource checks should show - shutdown locks - ---- - tools/crm_resource_runtime.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 2ea8bb3..ed5fb03 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -928,6 +928,11 @@ cli_resource_check(cib_t * cib_conn, resource_t *rsc) - } - free(managed); - -+ if (rsc->lock_node) { -+ printf("%s * '%s' is locked to node %s due to shutdown\n", -+ (printed? "" : "\n"), parent->id, rsc->lock_node->details->uname); -+ } -+ - if (printed) { - printf("\n"); - } --- -1.8.3.1 - diff --git a/SOURCES/017-shutdown-lock.patch b/SOURCES/017-shutdown-lock.patch deleted file mode 100644 index 8dc7dd9..0000000 --- a/SOURCES/017-shutdown-lock.patch +++ /dev/null @@ -1,191 +0,0 @@ -From 45a6f0b051743c266c13f3ffd365baf3a9d730f6 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 14 Jan 2020 12:53:39 -0600 -Subject: [PATCH 13/18] Low: controller: allow CRM_OP_LRM_DELETE to clear CIB - only - -Normally, CRM_OP_LRM_DELETE is relayed to the affected node's controller, which -clears the resource from the executor and CIB as well the its own bookkeeping. - -Now, we want to be able to use it to clear shutdown locks for nodes that are -down. Let it take a new "mode" attribute, and if it is "cib", clear the -resource from the CIB locally without relaying the operation or doing anything -else. 
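To make the CIB-only mode concrete: for mode="cib" the controller's work
amounts to an XPath deletion of the resource's history underneath the node's
node_state entry. A rough shell illustration of the equivalent hand-run
operation (this is not the controller's actual code path, and the resource
and node names are invented):

    # Remove rsc1's lrm_resource history for offline node2 directly from the
    # CIB: roughly the effect that mode="cib" requests of the controller
    cibadmin --delete --xpath \
        "//node_state[@uname='node2']//lrm_resource[@id='rsc1']"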
---- - daemons/controld/controld_execd.c | 4 +- - daemons/controld/controld_messages.c | 97 ++++++++++++++++++++++++++++++++-- - daemons/controld/controld_te_actions.c | 7 +++ - include/crm_internal.h | 2 + - 4 files changed, 106 insertions(+), 4 deletions(-) - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index c0436a2..8d25fb8 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -1769,7 +1769,9 @@ do_lrm_invoke(long long action, - crm_trace("Executor %s command from %s", crm_op, from_sys); - - if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) { -- crm_rsc_delete = TRUE; // Only crm_resource uses this op -+ if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { -+ crm_rsc_delete = TRUE; // from crm_resource -+ } - operation = CRMD_ACTION_DELETE; - - } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) { -diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c -index 466c64c..689e4a0 100644 ---- a/daemons/controld/controld_messages.c -+++ b/daemons/controld/controld_messages.c -@@ -410,6 +410,14 @@ relay_message(xmlNode * msg, gboolean originated_locally) - - } else if (safe_str_eq(fsa_our_uname, host_to)) { - is_local = 1; -+ } else if (is_for_crm && safe_str_eq(task, CRM_OP_LRM_DELETE)) { -+ xmlNode *msg_data = get_message_xml(msg, F_CRM_DATA); -+ const char *mode = crm_element_value(msg_data, PCMK__XA_MODE); -+ -+ if (safe_str_eq(mode, XML_TAG_CIB)) { -+ // Local delete of an offline node's resource history -+ is_local = 1; -+ } - } - - if (is_for_dc || is_for_dcib || is_for_te) { -@@ -654,6 +662,86 @@ handle_failcount_op(xmlNode * stored_msg) - return I_NULL; - } - -+static enum crmd_fsa_input -+handle_lrm_delete(xmlNode *stored_msg) -+{ -+ const char *mode = NULL; -+ xmlNode *msg_data = get_message_xml(stored_msg, F_CRM_DATA); -+ -+ CRM_CHECK(msg_data != NULL, return I_NULL); -+ -+ /* CRM_OP_LRM_DELETE has two distinct modes. The default behavior is to -+ * relay the operation to the affected node, which will unregister the -+ * resource from the local executor, clear the resource's history from the -+ * CIB, and do some bookkeeping in the controller. -+ * -+ * However, if the affected node is offline, the client will specify -+ * mode="cib" which means the controller receiving the operation should -+ * clear the resource's history from the CIB and nothing else. This is used -+ * to clear shutdown locks. -+ */ -+ mode = crm_element_value(msg_data, PCMK__XA_MODE); -+ if ((mode == NULL) || strcmp(mode, XML_TAG_CIB)) { -+ // Relay to affected node -+ crm_xml_add(stored_msg, F_CRM_SYS_TO, CRM_SYSTEM_LRMD); -+ return I_ROUTER; -+ -+ } else { -+ // Delete CIB history locally (compare with do_lrm_delete()) -+ const char *from_sys = NULL; -+ const char *user_name = NULL; -+ const char *rsc_id = NULL; -+ const char *node = NULL; -+ xmlNode *rsc_xml = NULL; -+ int rc = pcmk_rc_ok; -+ -+ rsc_xml = first_named_child(msg_data, XML_CIB_TAG_RESOURCE); -+ CRM_CHECK(rsc_xml != NULL, return I_NULL); -+ -+ rsc_id = ID(rsc_xml); -+ from_sys = crm_element_value(stored_msg, F_CRM_SYS_FROM); -+ node = crm_element_value(msg_data, XML_LRM_ATTR_TARGET); -+#if ENABLE_ACL -+ user_name = crm_acl_get_set_user(stored_msg, F_CRM_USER, NULL); -+#endif -+ crm_debug("Handling " CRM_OP_LRM_DELETE " for %s on %s locally%s%s " -+ "(clearing CIB resource history only)", rsc_id, node, -+ (user_name? " for user " : ""), (user_name? 
user_name : "")); -+#if ENABLE_ACL -+ rc = controld_delete_resource_history(rsc_id, node, user_name, -+ cib_dryrun|cib_sync_call); -+#endif -+ if (rc == pcmk_rc_ok) { -+ rc = controld_delete_resource_history(rsc_id, node, user_name, -+ crmd_cib_smart_opt()); -+ } -+ -+ // Notify client if not from graph (compare with notify_deleted()) -+ if (from_sys && strcmp(from_sys, CRM_SYSTEM_TENGINE)) { -+ lrmd_event_data_t *op = NULL; -+ const char *from_host = crm_element_value(stored_msg, -+ F_CRM_HOST_FROM); -+ const char *transition = crm_element_value(msg_data, -+ XML_ATTR_TRANSITION_KEY); -+ -+ crm_info("Notifying %s on %s that %s was%s deleted", -+ from_sys, (from_host? from_host : "local node"), rsc_id, -+ ((rc == pcmk_rc_ok)? "" : " not")); -+ op = lrmd_new_event(rsc_id, CRMD_ACTION_DELETE, 0); -+ op->type = lrmd_event_exec_complete; -+ op->user_data = strdup(transition? transition : FAKE_TE_ID); -+ op->params = crm_str_table_new(); -+ g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), -+ strdup(CRM_FEATURE_SET)); -+ controld_rc2event(op, rc); -+ controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id); -+ lrmd_free_event(op); -+ controld_trigger_delete_refresh(from_sys, rsc_id); -+ } -+ return I_NULL; -+ } -+} -+ - /*! - * \brief Handle a CRM_OP_REMOTE_STATE message by updating remote peer cache - * -@@ -913,9 +1001,12 @@ handle_request(xmlNode * stored_msg, enum crmd_fsa_cause cause) - crm_debug("Raising I_JOIN_RESULT: join-%s", crm_element_value(stored_msg, F_CRM_JOIN_ID)); - return I_JOIN_RESULT; - -- } else if (strcmp(op, CRM_OP_LRM_DELETE) == 0 -- || strcmp(op, CRM_OP_LRM_FAIL) == 0 -- || strcmp(op, CRM_OP_LRM_REFRESH) == 0 || strcmp(op, CRM_OP_REPROBE) == 0) { -+ } else if (strcmp(op, CRM_OP_LRM_DELETE) == 0) { -+ return handle_lrm_delete(stored_msg); -+ -+ } else if ((strcmp(op, CRM_OP_LRM_FAIL) == 0) -+ || (strcmp(op, CRM_OP_LRM_REFRESH) == 0) -+ || (strcmp(op, CRM_OP_REPROBE) == 0)) { - - crm_xml_add(stored_msg, F_CRM_SYS_TO, CRM_SYSTEM_LRMD); - return I_ROUTER; -diff --git a/daemons/controld/controld_te_actions.c b/daemons/controld/controld_te_actions.c -index 948bd64..59e0b5a 100644 ---- a/daemons/controld/controld_te_actions.c -+++ b/daemons/controld/controld_te_actions.c -@@ -107,6 +107,13 @@ te_crm_command(crm_graph_t * graph, crm_action_t * action) - - if (!router_node) { - router_node = on_node; -+ if (safe_str_eq(task, CRM_OP_LRM_DELETE)) { -+ const char *mode = crm_element_value(action->xml, PCMK__XA_MODE); -+ -+ if (safe_str_eq(mode, XML_TAG_CIB)) { -+ router_node = fsa_our_uname; -+ } -+ } - } - - CRM_CHECK(on_node != NULL && strlen(on_node) != 0, -diff --git a/include/crm_internal.h b/include/crm_internal.h -index 1f25686..2fa53dd 100644 ---- a/include/crm_internal.h -+++ b/include/crm_internal.h -@@ -216,6 +216,8 @@ pid_t pcmk_locate_sbd(void); - # define ATTRD_OP_SYNC_RESPONSE "sync-response" - # define ATTRD_OP_CLEAR_FAILURE "clear-failure" - -+# define PCMK__XA_MODE "mode" -+ - # define PCMK_ENV_PHYSICAL_HOST "physical_host" - - --- -1.8.3.1 - diff --git a/SOURCES/018-shutdown-lock.patch b/SOURCES/018-shutdown-lock.patch deleted file mode 100644 index 99ad90e..0000000 --- a/SOURCES/018-shutdown-lock.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 457e231256feb0bdcf206209e03f0875f50d03b3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 14 Jan 2020 16:24:08 -0600 -Subject: [PATCH 14/18] Low: tools: for down nodes, crm_resource --refresh - should clear CIB only - -This provides a mechanism to manually clear shutdown locks. 
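Concretely, the manual clearing is driven from the command line by naming
both the resource and the (possibly offline) node, matching the guidance in
the documentation patch later in this series. A sketch with invented
resource and node names:

    # Enable the feature cluster-wide (the option documented in patch 022)
    crm_attribute --type crm_config --name shutdown-lock --update true

    # Clear one shutdown lock by hand; resource and node must both be named
    crm_resource --refresh --resource rsc1 --node node2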
---- - tools/crm_resource_runtime.c | 16 +++++++++++++--- - 1 file changed, 13 insertions(+), 3 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index ed5fb03..e89b572 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -473,6 +473,7 @@ send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, - const char *rsc_type = NULL; - xmlNode *params = NULL; - xmlNode *msg_data = NULL; -+ bool cib_only = false; - resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); - - if (rsc == NULL) { -@@ -504,10 +505,14 @@ send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, - } - - if (!(node->details->online)) { -- CMD_ERR("Node %s is not online", host_uname); -- return -ENOTCONN; -+ if (strcmp(op, CRM_OP_LRM_DELETE) == 0) { -+ cib_only = true; -+ } else { -+ CMD_ERR("Node %s is not online", host_uname); -+ return -ENOTCONN; -+ } - } -- if (pe__is_guest_or_remote_node(node)) { -+ if (!cib_only && pe__is_guest_or_remote_node(node)) { - node = pe__current_node(node->details->remote_rsc); - if (node == NULL) { - CMD_ERR("No cluster connection to Pacemaker Remote node %s detected", -@@ -533,6 +538,11 @@ send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, - crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); - } - -+ if (cib_only) { -+ // Indicate that only the CIB needs to be cleaned -+ crm_xml_add(msg_data, PCMK__XA_MODE, XML_TAG_CIB); -+ } -+ - xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE); - if (rsc->clone_name) { - crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name); --- -1.8.3.1 - diff --git a/SOURCES/019-shutdown-lock.patch b/SOURCES/019-shutdown-lock.patch deleted file mode 100644 index f94dc58..0000000 --- a/SOURCES/019-shutdown-lock.patch +++ /dev/null @@ -1,221 +0,0 @@ -From cf1e90ffe764f3639799206db9444ae32821386b Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 10 Jan 2020 18:18:07 -0600 -Subject: [PATCH 15/18] Low: scheduler: clear resource history when appropriate - -Tell the controller to clear resource history from the CIB when a resource has -a shutdown lock that expired or was cancelled because the resource is already -active elsewhere. ---- - include/crm/pengine/internal.h | 4 +++- - include/crm/pengine/pe_types.h | 4 +++- - lib/pacemaker/pcmk_sched_allocate.c | 1 + - lib/pacemaker/pcmk_sched_graph.c | 16 ++++++++++++++-- - lib/pacemaker/pcmk_sched_native.c | 6 ++++++ - lib/pengine/unpack.c | 1 + - lib/pengine/utils.c | 34 ++++++++++++++++++++++++++++++++-- - 7 files changed, 60 insertions(+), 6 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 119624d..bc2c70e 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -435,5 +435,7 @@ void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, - pe_working_set_t *data_set); - - bool pe__resource_is_disabled(pe_resource_t *rsc); -+pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node, -+ pe_working_set_t *data_set); - - #endif -diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h -index 123d8ef..572787b 100644 ---- a/include/crm/pengine/pe_types.h -+++ b/include/crm/pengine/pe_types.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -287,6 +287,8 @@ enum pe_action_flags { - pe_action_reschedule = 0x02000, - pe_action_tracking = 0x04000, - pe_action_dedup = 0x08000, //! Internal state tracking when creating graph -+ -+ pe_action_dc = 0x10000, //! Action may run on DC instead of target - }; - /* *INDENT-ON* */ - -diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c -index 884e1bd..195d055 100644 ---- a/lib/pacemaker/pcmk_sched_allocate.c -+++ b/lib/pacemaker/pcmk_sched_allocate.c -@@ -1026,6 +1026,7 @@ apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set) - pe_rsc_info(rsc, - "Cancelling shutdown lock because %s is already active", - rsc->id); -+ pe__clear_resource_history(rsc, rsc->lock_node, data_set); - rsc->lock_node = NULL; - rsc->lock_time = 0; - } -diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c -index 2861f3d..355ffca 100644 ---- a/lib/pacemaker/pcmk_sched_graph.c -+++ b/lib/pacemaker/pcmk_sched_graph.c -@@ -586,10 +586,11 @@ update_action(pe_action_t *then, pe_working_set_t *data_set) - - /* 'then' is required, so we must abandon 'first' - * (e.g. a required stop cancels any reload). -- * Only used with reload actions as 'first'. 
- */ - set_bit(other->action->flags, pe_action_optional); -- clear_bit(first->rsc->flags, pe_rsc_reload); -+ if (!strcmp(first->task, CRMD_ACTION_RELOAD)) { -+ clear_bit(first->rsc->flags, pe_rsc_reload); -+ } - } - - if (first->rsc && then->rsc && (first->rsc != then->rsc) -@@ -1039,6 +1040,11 @@ action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set) - } else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) { - action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); - -+ } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) { -+ // CIB-only clean-up for shutdown locks -+ action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); -+ crm_xml_add(action_xml, PCMK__XA_MODE, XML_TAG_CIB); -+ - /* } else if(safe_str_eq(action->task, RSC_PROBED)) { */ - /* action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */ - -@@ -1051,6 +1057,7 @@ action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set) - - } else { - action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); -+ - #if ENABLE_VERSIONED_ATTRS - rsc_details = pe_rsc_action_details(action); - #endif -@@ -1392,6 +1399,11 @@ should_dump_action(pe_action_t *action) - log_action(LOG_DEBUG, "Unallocated action", action, false); - return false; - -+ } else if (is_set(action->flags, pe_action_dc)) { -+ crm_trace("Action %s (%d) should be dumped: " -+ "can run on DC instead of %s", -+ action->uuid, action->id, action->node->details->uname); -+ - } else if (pe__is_guest_node(action->node) - && !action->node->details->remote_requires_reset) { - crm_trace("Action %s (%d) should be dumped: " -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index 9ebdd35..714a7a0 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -1403,6 +1403,12 @@ native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) - pe_order_runnable_left, data_set); - } - -+ // Don't clear resource history if probing on same node -+ custom_action_order(rsc, generate_op_key(rsc->id, CRM_OP_LRM_DELETE, 0), -+ NULL, rsc, generate_op_key(rsc->id, RSC_STATUS, 0), -+ NULL, pe_order_same_node|pe_order_then_cancels_first, -+ data_set); -+ - // Certain checks need allowed nodes - if (check_unfencing || check_utilization || rsc->container) { - allowed_nodes = allowed_nodes_as_list(rsc, data_set); -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 5139e60..87edc83 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -2218,6 +2218,7 @@ unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node, - > (lock_time + data_set->shutdown_lock))) { - pe_rsc_info(rsc, "Shutdown lock for %s on %s expired", - rsc->id, node->details->uname); -+ pe__clear_resource_history(rsc, node, data_set); - } else { - rsc->lock_node = node; - rsc->lock_time = lock_time; -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index 586d92c..b61455d 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -520,6 +520,11 @@ custom_action(resource_t * rsc, char *key, const char *task, - } - action->uuid = strdup(key); - -+ if (safe_str_eq(task, CRM_OP_LRM_DELETE)) { -+ // Resource history deletion for a node can be done on the DC -+ pe_set_action_bit(action, pe_action_dc); -+ } -+ - pe_set_action_bit(action, pe_action_runnable); - if (optional) { - pe_set_action_bit(action, pe_action_optional); -@@ -588,7 +593,8 @@ custom_action(resource_t * rsc, char *key, const char *task, - pe_set_action_bit(action, pe_action_optional); - /* 
action->runnable = FALSE; */ - -- } else if (action->node->details->online == FALSE -+ } else if (is_not_set(action->flags, pe_action_dc) -+ && !(action->node->details->online) - && (!pe__is_guest_node(action->node) - || action->node->details->remote_requires_reset)) { - pe_clear_action_bit(action, pe_action_runnable); -@@ -600,7 +606,8 @@ custom_action(resource_t * rsc, char *key, const char *task, - pe_fence_node(data_set, action->node, "resource actions are unrunnable"); - } - -- } else if (action->node->details->pending) { -+ } else if (is_not_set(action->flags, pe_action_dc) -+ && action->node->details->pending) { - pe_clear_action_bit(action, pe_action_runnable); - do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)", - action->uuid, action->node->details->uname); -@@ -714,6 +721,8 @@ unpack_operation_on_fail(action_t * action) - - value = on_fail; - } -+ } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) { -+ value = "ignore"; - } - - return value; -@@ -2595,3 +2604,24 @@ pe__resource_is_disabled(pe_resource_t *rsc) - } - return false; - } -+ -+/*! -+ * \internal -+ * \brief Create an action to clear a resource's history from CIB -+ * -+ * \param[in] rsc Resource to clear -+ * \param[in] node Node to clear history on -+ * -+ * \return New action to clear resource history -+ */ -+pe_action_t * -+pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node, -+ pe_working_set_t *data_set) -+{ -+ char *key = NULL; -+ -+ CRM_ASSERT(rsc && node); -+ key = generate_op_key(rsc->id, CRM_OP_LRM_DELETE, 0); -+ return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE, -+ data_set); -+} --- -1.8.3.1 - diff --git a/SOURCES/020-shutdown-lock.patch b/SOURCES/020-shutdown-lock.patch deleted file mode 100644 index f650b81..0000000 --- a/SOURCES/020-shutdown-lock.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 16bcad136dc004b7c7bb9f5044c7ef488c441701 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 21 Nov 2019 15:39:42 -0600 -Subject: [PATCH 16/18] Feature: controller: bump feature set for shutdown-lock - ---- - include/crm/crm.h | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/include/crm/crm.h b/include/crm/crm.h -index cbf72d3..d2ffb61 100644 ---- a/include/crm/crm.h -+++ b/include/crm/crm.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -51,7 +51,7 @@ extern "C" { - * >=3.0.13: Fail counts include operation name and interval - * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED - */ --# define CRM_FEATURE_SET "3.2.0" -+# define CRM_FEATURE_SET "3.3.0" - - # define EOS '\0' - # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) --- -1.8.3.1 - diff --git a/SOURCES/021-shutdown-lock.patch b/SOURCES/021-shutdown-lock.patch deleted file mode 100644 index cdd9dba..0000000 --- a/SOURCES/021-shutdown-lock.patch +++ /dev/null @@ -1,738 +0,0 @@ -From a9fdae8b3acd9a271d04f98f9c4e230bfa74efd3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 6 Jan 2020 16:19:12 -0600 -Subject: [PATCH 17/18] Test: scheduler: add regression tests for shutdown - locks - ---- - cts/cts-scheduler.in | 4 +- - cts/scheduler/shutdown-lock-expiration.dot | 11 ++ - cts/scheduler/shutdown-lock-expiration.exp | 68 +++++++++ - cts/scheduler/shutdown-lock-expiration.scores | 17 +++ - cts/scheduler/shutdown-lock-expiration.summary | 31 ++++ - cts/scheduler/shutdown-lock-expiration.xml | 187 +++++++++++++++++++++++++ - cts/scheduler/shutdown-lock.dot | 11 ++ - cts/scheduler/shutdown-lock.exp | 64 +++++++++ - cts/scheduler/shutdown-lock.scores | 17 +++ - cts/scheduler/shutdown-lock.summary | 31 ++++ - cts/scheduler/shutdown-lock.xml | 186 ++++++++++++++++++++++++ - 11 files changed, 626 insertions(+), 1 deletion(-) - create mode 100644 cts/scheduler/shutdown-lock-expiration.dot - create mode 100644 cts/scheduler/shutdown-lock-expiration.exp - create mode 100644 cts/scheduler/shutdown-lock-expiration.scores - create mode 100644 cts/scheduler/shutdown-lock-expiration.summary - create mode 100644 cts/scheduler/shutdown-lock-expiration.xml - create mode 100644 cts/scheduler/shutdown-lock.dot - create mode 100644 cts/scheduler/shutdown-lock.exp - create mode 100644 cts/scheduler/shutdown-lock.scores - create mode 100644 cts/scheduler/shutdown-lock.summary - create mode 100644 cts/scheduler/shutdown-lock.xml - -diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in -index 8fa16fb..f2957ba 100644 ---- a/cts/cts-scheduler.in -+++ b/cts/cts-scheduler.in -@@ -5,7 +5,7 @@ - # Pacemaker targets compatibility with Python 2.7 and 3.2+ - from __future__ import print_function, unicode_literals, absolute_import, division - --__copyright__ = "Copyright 2004-2019 the Pacemaker project contributors" -+__copyright__ = "Copyright 2004-2020 the Pacemaker project contributors" - __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - - import io -@@ -956,6 +956,8 @@ TESTS = [ - [ - [ "resource-discovery", "Exercises resource-discovery location constraint option" ], - [ "rsc-discovery-per-node", "Disable resource discovery per node" ], -+ [ "shutdown-lock", "Ensure shutdown lock works properly" ], -+ [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], - ], - - # @TODO: If pacemaker implements versioned attributes, uncomment these tests -diff --git a/cts/scheduler/shutdown-lock-expiration.dot b/cts/scheduler/shutdown-lock-expiration.dot -new file mode 100644 -index 0000000..ee99079 ---- /dev/null -+++ b/cts/scheduler/shutdown-lock-expiration.dot -@@ -0,0 +1,11 @@ -+ digraph "g" { -+"Fencing_monitor_120000 node3" [ style=bold color="green" fontcolor="black"] -+"Fencing_start_0 node3" -> "Fencing_monitor_120000 node3" [ style = bold] -+"Fencing_start_0 node3" [ style=bold color="green" fontcolor="black"] -+"Fencing_stop_0 node3" -> "Fencing_start_0 node3" [ style = bold] -+"Fencing_stop_0 
node3" [ style=bold color="green" fontcolor="black"] -+"rsc2_lrm_delete_0 node2" [ style=bold color="green" fontcolor="black"] -+"rsc2_monitor_10000 node4" [ style=bold color="green" fontcolor="black"] -+"rsc2_start_0 node4" -> "rsc2_monitor_10000 node4" [ style = bold] -+"rsc2_start_0 node4" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/shutdown-lock-expiration.exp b/cts/scheduler/shutdown-lock-expiration.exp -new file mode 100644 -index 0000000..465f12b ---- /dev/null -+++ b/cts/scheduler/shutdown-lock-expiration.exp -@@ -0,0 +1,68 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/shutdown-lock-expiration.scores b/cts/scheduler/shutdown-lock-expiration.scores -new file mode 100644 -index 0000000..e5d435d ---- /dev/null -+++ b/cts/scheduler/shutdown-lock-expiration.scores -@@ -0,0 +1,17 @@ -+Allocation scores: -+Using the original execution date of: 2020-01-06 22:11:40Z -+native_color: Fencing allocation score on node1: 0 -+native_color: Fencing allocation score on node2: 0 -+native_color: Fencing allocation score on node3: 0 -+native_color: Fencing allocation score on node4: 0 -+native_color: Fencing allocation score on node5: 0 -+native_color: rsc1 allocation score on node1: INFINITY -+native_color: rsc1 allocation score on node2: -INFINITY -+native_color: rsc1 allocation score on node3: -INFINITY -+native_color: rsc1 allocation score on node4: -INFINITY -+native_color: rsc1 allocation score on node5: -INFINITY -+native_color: rsc2 allocation score on node1: 0 -+native_color: rsc2 allocation score on node2: INFINITY -+native_color: rsc2 allocation score on node3: 0 -+native_color: rsc2 allocation score on node4: 0 -+native_color: rsc2 allocation score on node5: 0 -diff --git a/cts/scheduler/shutdown-lock-expiration.summary b/cts/scheduler/shutdown-lock-expiration.summary -new file mode 100644 -index 0000000..08c93aa ---- /dev/null -+++ b/cts/scheduler/shutdown-lock-expiration.summary -@@ -0,0 +1,31 @@ -+Using the original execution date of: 2020-01-06 22:11:40Z -+ -+Current cluster status: -+Online: [ node3 node4 node5 ] -+OFFLINE: [ node1 node2 ] -+ -+ Fencing (stonith:fence_xvm): Started node3 -+ rsc1 (ocf::pacemaker:Dummy): Stopped node1 (LOCKED) -+ rsc2 (ocf::pacemaker:Dummy): Stopped -+ -+Transition Summary: -+ * Restart Fencing ( node3 ) due to resource definition change -+ * Start rsc2 ( node4 ) -+ -+Executing cluster transition: -+ * Resource action: Fencing stop on node3 -+ * Resource action: Fencing start on node3 -+ * Resource action: Fencing monitor=120000 on node3 -+ * Resource action: rsc2 start on node4 -+ * Cluster action: lrm_delete for rsc2 on node2 -+ * Resource action: rsc2 monitor=10000 on node4 -+Using the original execution date of: 2020-01-06 22:11:40Z -+ -+Revised cluster status: -+Online: [ node3 node4 node5 ] -+OFFLINE: [ node1 node2 ] -+ -+ Fencing (stonith:fence_xvm): Started node3 -+ rsc1 (ocf::pacemaker:Dummy): Stopped node1 (LOCKED) -+ rsc2 (ocf::pacemaker:Dummy): Started node4 -+ -diff --git a/cts/scheduler/shutdown-lock-expiration.xml b/cts/scheduler/shutdown-lock-expiration.xml -new file mode 100644 -index 0000000..26f720e ---- /dev/null -+++ b/cts/scheduler/shutdown-lock-expiration.xml -@@ -0,0 +1,187 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/shutdown-lock.dot b/cts/scheduler/shutdown-lock.dot -new file mode 100644 -index 0000000..0a7d8c3 ---- /dev/null -+++ b/cts/scheduler/shutdown-lock.dot -@@ -0,0 +1,11 @@ -+ digraph "g" { -+"Fencing_monitor_120000 node3" [ style=bold color="green" fontcolor="black"] -+"Fencing_start_0 node3" -> "Fencing_monitor_120000 node3" [ style = bold] -+"Fencing_start_0 node3" [ style=bold color="green" fontcolor="black"] -+"Fencing_stop_0 node1" -> "Fencing_start_0 node3" [ style = bold] -+"Fencing_stop_0 node1" -> "do_shutdown node1" [ style = bold] -+"Fencing_stop_0 node1" [ style=bold color="green" fontcolor="black"] -+"do_shutdown node1" [ style=bold color="green" fontcolor="black"] -+"rsc1_stop_0 node1" -> "do_shutdown node1" [ style = bold] -+"rsc1_stop_0 node1" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/shutdown-lock.exp b/cts/scheduler/shutdown-lock.exp -new file mode 100644 -index 0000000..e8bf9d8 ---- /dev/null -+++ b/cts/scheduler/shutdown-lock.exp -@@ -0,0 +1,64 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/shutdown-lock.scores b/cts/scheduler/shutdown-lock.scores -new file mode 100644 -index 0000000..e09ebfb ---- /dev/null -+++ b/cts/scheduler/shutdown-lock.scores -@@ -0,0 +1,17 @@ -+Allocation scores: -+Using the original execution date of: 2020-01-06 21:59:11Z -+native_color: Fencing allocation score on node1: 0 -+native_color: Fencing allocation score on node2: 0 -+native_color: Fencing allocation score on node3: 0 -+native_color: Fencing allocation score on node4: 0 -+native_color: Fencing allocation score on node5: 0 -+native_color: rsc1 allocation score on node1: INFINITY -+native_color: rsc1 allocation score on node2: -INFINITY -+native_color: rsc1 allocation score on node3: -INFINITY -+native_color: rsc1 allocation score on node4: -INFINITY -+native_color: rsc1 allocation score on node5: -INFINITY -+native_color: rsc2 allocation score on node1: -INFINITY -+native_color: rsc2 allocation score on node2: INFINITY -+native_color: rsc2 allocation score on node3: -INFINITY -+native_color: rsc2 allocation score on node4: -INFINITY -+native_color: rsc2 allocation score on node5: -INFINITY -diff --git a/cts/scheduler/shutdown-lock.summary b/cts/scheduler/shutdown-lock.summary -new file mode 100644 -index 0000000..6ed56d1 ---- /dev/null -+++ b/cts/scheduler/shutdown-lock.summary -@@ -0,0 +1,31 @@ -+Using the original execution date of: 2020-01-06 21:59:11Z -+ -+Current cluster status: -+Online: [ node1 node3 node4 node5 ] -+OFFLINE: [ node2 ] -+ -+ Fencing (stonith:fence_xvm): Started node1 -+ rsc1 (ocf::pacemaker:Dummy): Started node1 -+ rsc2 (ocf::pacemaker:Dummy): Stopped node2 (LOCKED) -+ -+Transition Summary: -+ * Shutdown node1 -+ * Move Fencing ( node1 -> node3 ) -+ * Stop rsc1 ( node1 ) due to node availability -+ -+Executing cluster transition: -+ * Resource action: Fencing stop on node1 -+ * Resource action: rsc1 stop on node1 -+ * 
Cluster action: do_shutdown on node1 -+ * Resource action: Fencing start on node3 -+ * Resource action: Fencing monitor=120000 on node3 -+Using the original execution date of: 2020-01-06 21:59:11Z -+ -+Revised cluster status: -+Online: [ node1 node3 node4 node5 ] -+OFFLINE: [ node2 ] -+ -+ Fencing (stonith:fence_xvm): Started node3 -+ rsc1 (ocf::pacemaker:Dummy): Stopped -+ rsc2 (ocf::pacemaker:Dummy): Stopped node2 (LOCKED) -+ -diff --git a/cts/scheduler/shutdown-lock.xml b/cts/scheduler/shutdown-lock.xml -new file mode 100644 -index 0000000..ec6db30 ---- /dev/null -+++ b/cts/scheduler/shutdown-lock.xml -@@ -0,0 +1,186 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - diff --git a/SOURCES/022-shutdown-lock.patch b/SOURCES/022-shutdown-lock.patch deleted file mode 100644 index cfcef11..0000000 --- a/SOURCES/022-shutdown-lock.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 5656b7d486569702ea6f3fe695c2fba366c970ac Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 12 Dec 2019 09:26:00 -0600 -Subject: [PATCH 18/18] Doc: Pacemaker Explained: document shutdown lock - options - ---- - doc/Pacemaker_Explained/en-US/Ch-Options.txt | 27 +++++++++++++++++++++++++++ - 1 file changed, 27 insertions(+) - -diff --git a/doc/Pacemaker_Explained/en-US/Ch-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Options.txt -index f864987..35856aa 100644 ---- a/doc/Pacemaker_Explained/en-US/Ch-Options.txt -+++ b/doc/Pacemaker_Explained/en-US/Ch-Options.txt -@@ -389,6 +389,33 @@ rules with +date_spec+ are only guaranteed to be checked this often, and it - also serves as a fail-safe for certain classes of scheduler bugs. A value of 0 - disables this polling; positive values are a time interval. - -+| shutdown-lock | false | -+The default of false allows active resources to be recovered elsewhere when -+their node is cleanly shut down, which is what the vast majority of users will -+want. However, some users prefer to make resources highly available only for -+failures, with no recovery for clean shutdowns. If this option is true, -+resources active on a node when it is cleanly shut down are kept "locked" to -+that node (not allowed to run elsewhere) until they start again on that node -+after it rejoins (or for at most shutdown-lock-limit, if set). Stonith -+resources and Pacemaker Remote connections are never locked. Clone and bundle -+instances and the master role of promotable clones are currently never locked, -+though support could be added in a future release. Locks may be manually -+cleared using the `--refresh` option of `crm_resource` (both the resource and -+node must be specified; this works with remote nodes if their connection -+resource's target-role is set to Stopped, but not if Pacemaker Remote is -+stopped on the remote node without disabling the connection resource). 
-+indexterm:[shutdown-lock,Cluster Option] -+indexterm:[Cluster,Option,shutdown-lock] -+ -+| shutdown-lock-limit | 0 | -+If shutdown-lock is true, and this is set to a nonzero time duration, locked -+resources will be allowed to start after this much time has passed since the -+node shutdown was initiated, even if the node has not rejoined. (This works -+with remote nodes only if their connection resource's target-role is set to -+Stopped.) -+indexterm:[shutdown-lock-limit,Cluster Option] -+indexterm:[Cluster,Option,shutdown-lock-limit] -+ - | remove-after-stop | FALSE | - indexterm:[remove-after-stop,Cluster Option] - indexterm:[Cluster,Option,remove-after-stop] --- -1.8.3.1 - diff --git a/SOURCES/023-curses.patch b/SOURCES/023-curses.patch deleted file mode 100644 index c1d9a91..0000000 --- a/SOURCES/023-curses.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 426f06cc088d11d6db0c45b434e5ce6b69da78b4 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 2 Jan 2020 15:08:58 -0500 -Subject: [PATCH] Fix: tools: Fix definition of curses_indented_printf. - -The placeholder version that is built if curses is not enabled does not -have a type that matches the header file. Correct that. ---- - tools/crm_mon_curses.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c -index c0dbedb..ecd0584 100644 ---- a/tools/crm_mon_curses.c -+++ b/tools/crm_mon_curses.c -@@ -368,7 +368,7 @@ curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) { - - G_GNUC_PRINTF(2, 3) - void --curses_indented_printf(pcmk__output_t *out, const char *format, va_list args) { -+curses_indented_printf(pcmk__output_t *out, const char *format, ...) { - return; - } - --- -1.8.3.1 - diff --git a/SOURCES/024-crm_mon-cgi.patch b/SOURCES/024-crm_mon-cgi.patch deleted file mode 100644 index c6743eb..0000000 --- a/SOURCES/024-crm_mon-cgi.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 5b98dd71cef867a115a1b07fca2351ba430baf08 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 10 Jan 2020 09:54:59 -0500 -Subject: [PATCH] Fix: tools: Re-enable CGI output from crm_mon. - -The CGI header was not being written out because "false" was being -passed to the finish function. That was being passed because we didn't -want the HTML to be printed out without the refresh header. The fix is -just to s/false/true, and change the order so the extra header is added -first. 
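Assuming this build still carries crm_mon's deprecated web-CGI switch (the
mode this fix targets), a quick smoke test of the restored behavior would
look something like:

    # The first lines of output should again include the CGI
    # "Content-Type: text/html" header ahead of the page body
    crm_mon --web-cgi | head -n 3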
---- - tools/crm_mon.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index c1dcf29..4b28bef 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1854,10 +1854,9 @@ static void - handle_html_output(crm_exit_t exit_code) { - xmlNodePtr html = NULL; - -- out->finish(out, exit_code, false, (void **) &html); - pcmk__html_add_header(html, "meta", "http-equiv", "refresh", "content", - crm_itoa(options.reconnect_msec/1000), NULL); -- htmlDocDump(out->dest, html->doc); -+ out->finish(out, exit_code, true, (void **) &html); - } - - /* --- -1.8.3.1 - diff --git a/SOURCES/025-clear-attrs.patch b/SOURCES/025-clear-attrs.patch deleted file mode 100644 index 842656c..0000000 --- a/SOURCES/025-clear-attrs.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 01b463bd715d48dde5bf76ca3a2e78e31f0ffaa1 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 21 Jan 2020 17:25:57 -0600 -Subject: [PATCH] Fix: controller: clear leaving node's transient attributes - even if there is no DC - ---- - daemons/controld/controld_callbacks.c | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - -diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c -index f7e3db2..21f831a 100644 ---- a/daemons/controld/controld_callbacks.c -+++ b/daemons/controld/controld_callbacks.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -205,7 +205,11 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d - cib_scope_local); - } - -- } else if(AM_I_DC) { -+ } else if (AM_I_DC || (fsa_our_dc == NULL)) { -+ /* This only needs to be done once, so normally the DC should do -+ * it. However if there is no DC, every node must do it, since -+ * there is no other way to ensure some one node does it. -+ */ - if (appeared) { - te_trigger_stonith_history_sync(FALSE); - } else { --- -1.8.3.1 - diff --git a/SPECS/pacemaker.spec b/SPECS/pacemaker.spec index dab302c..707362a 100644 --- a/SPECS/pacemaker.spec +++ b/SPECS/pacemaker.spec @@ -1,4 +1,5 @@ -# Globals and defines to control package behavior (configure these as desired) +# User-configurable globals and defines to control package behavior +# (these should not test {with X} values, which are declared later) ## User and group to use for nonprivileged services %global uname hacluster @@ -21,12 +22,11 @@ ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) -%global pcmkversion 2.0.3 -%global specversion 5 +%global pcmkversion 2.0.4 +%global specversion 3 -## Upstream commit (or git tag, such as "Pacemaker-" plus the -## {pcmkversion} macro for an official release) to use for this package -%global commit 4b1f869f0f64ef0d248b6aa4781d38ecccf83318 +## Upstream commit (full commit ID, abbreviated commit ID, or tag) to build +%global commit 2deceaa3ae1fbadd844f5c5b47fd33129fa2c227 ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. 
%global commit_abbrev 7 @@ -39,24 +39,58 @@ %global nagios_hash 105ab8a +# Define conditionals so that "rpmbuild --with " and +# "rpmbuild --without " can enable and disable specific features + +## Add option to enable support for stonith/external fencing agents +%bcond_with stonithd + +## Add option to disable support for storing sensitive information outside CIB +%bcond_without cibsecrets + +## Add option to create binaries suitable for use with profiling tools +%bcond_with profiling + +## Add option to create binaries with coverage analysis +%bcond_with coverage + +## Add option to generate documentation (requires Publican, Asciidoc and Inkscape) +%bcond_with doc + +## Add option to prefix package version with "0." +## (so later "official" packages will be considered updates) +%bcond_with pre_release + +## Add option to ship Upstart job files +%bcond_with upstart_job + +## Add option to turn off hardening of libraries and daemon executables +%bcond_without hardening + +## Add option to disable links for legacy daemon names +%bcond_without legacy_links + + # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) -## Short version of git commit -%define shortcommit %(c=%{commit}; case ${c} in - Pacemaker-*%{rparen} echo ${c:10};; - *%{rparen} echo ${c:0:%{commit_abbrev}};; esac) +## Whether this is a tagged release (final or release candidate) +%define tag_release %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo 1 ;; + *%{rparen} echo 0 ;; esac) -## Whether this is a tagged release -%define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?) - -## Whether this is a release candidate (in case of a tagged release) -%define pre_release %([ "%{tag_release}" -eq 0 ] || { - case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;; - esac; }; echo $?) +## Portion of export/dist tarball name after "pacemaker-", and release version +%if 0%{tag_release} +%define archive_version %(c=%{commit}; echo ${c:10}) +%define archive_github_url %{commit}#/%{name}-%{archive_version}.tar.gz +%else +%define archive_version %(c=%{commit}; echo ${c:0:%{commit_abbrev}}) +%define archive_github_url %{archive_version}#/%{name}-%{archive_version}.tar.gz +%endif +# RHEL always uses a simple release number +%define pcmk_release %{specversion} ## Heuristic used to infer bleeding-edge deployments that are ## less likely to have working versions of the documentation tools @@ -181,35 +215,6 @@ %endif -# Define conditionals so that "rpmbuild --with " and -# "rpmbuild --without " can enable and disable specific features - -## Add option to enable support for stonith/external fencing agents -%bcond_with stonithd - -## Add option to create binaries suitable for use with profiling tools -%bcond_with profiling - -## Add option to create binaries with coverage analysis -%bcond_with coverage - -## Add option to generate documentation (requires Publican, Asciidoc and Inkscape) -%bcond_with doc - -## Add option to prefix package version with "0." 
-## (so later "official" packages will be considered updates) -%bcond_with pre_release - -## Add option to ship Upstart job files -%bcond_with upstart_job - -## Add option to turn off hardening of libraries and daemon executables -%bcond_without hardening - -## Add option to disable links for legacy daemon names -%bcond_without legacy_links - - # Keep sane profiling data if requested %if %{with profiling} @@ -219,24 +224,6 @@ %endif -# Define the release version -# (do not look at externally enforced pre-release flag for tagged releases -# as only -rc tags, captured with the second condition, implies that then) -%if (!%{tag_release} && %{with pre_release}) || 0%{pre_release} -%if 0%{pre_release} -%define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) -%else -%define pcmk_release 0.%{specversion}.%{shortcommit}.git -%endif -%else -%if 0%{tag_release} -%define pcmk_release %{specversion} -%else -# Never use the short commit in a RHEL release number -%define pcmk_release %{specversion} -%endif -%endif - Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} @@ -250,37 +237,23 @@ License: GPLv2+ and LGPLv2+ and BSD Url: http://www.clusterlabs.org Group: System Environment/Daemons -# Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL: -# https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz -Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz +# Example: https://codeload.github.com/ClusterLabs/pacemaker/tar.gz/e91769e +# will download pacemaker-e91769e.tar.gz +# +# The ending part starting with '#' is ignored by github but necessary for +# rpmbuild to know what the tar archive name is. (The downloaded file will be +# named correctly only for commit IDs, not tagged releases.) +# +# You can use "spectool -s 0 pacemaker.spec" (rpmdevtools) to show final URL. +Source0: https://codeload.github.com/%{github_owner}/%{name}/tar.gz/%{archive_github_url} Source1: nagios-agents-metadata-%{nagios_hash}.tar.gz # upstream commits -Patch1: 001-status-deletion.patch -Patch2: 002-status-deletion.patch -Patch3: 003-return-codes.patch -Patch4: 004-unused.patch -Patch5: 005-shutdown-lock.patch -Patch6: 006-shutdown-lock.patch -Patch7: 007-shutdown-lock.patch -Patch8: 008-shutdown-lock.patch -Patch9: 009-shutdown-lock.patch -Patch10: 010-shutdown-lock.patch -Patch11: 011-shutdown-lock.patch -Patch12: 012-shutdown-lock.patch -Patch13: 013-shutdown-lock.patch -Patch14: 014-shutdown-lock.patch -Patch15: 015-shutdown-lock.patch -Patch16: 016-shutdown-lock.patch -Patch17: 017-shutdown-lock.patch -Patch18: 018-shutdown-lock.patch -Patch19: 019-shutdown-lock.patch -Patch20: 020-shutdown-lock.patch -Patch21: 021-shutdown-lock.patch -Patch22: 022-shutdown-lock.patch -Patch23: 023-curses.patch -Patch24: 024-crm_mon-cgi.patch -Patch25: 025-clear-attrs.patch +Patch1: 001-rules.patch +Patch2: 002-demote.patch +Patch3: 003-trace.patch +Patch4: 004-test.patch +Patch5: 005-sysconfig.patch # downstream-only commits #Patch100: xxx.patch @@ -295,7 +268,11 @@ Requires: psmisc %endif %{?systemd_requires} +%if %{defined centos} +ExclusiveArch: aarch64 i686 ppc64le s390x x86_64 %{arm} +%else ExclusiveArch: aarch64 i686 ppc64le s390x x86_64 +%endif Requires: %{python_path} BuildRequires: %{python_name}-devel @@ -359,7 +336,8 @@ when related resources fail and can be configured to periodically check resource health. 
Available rpmbuild rebuild options: - --with(out) : coverage doc stonithd hardening pre_release profiling + --with(out) : cibsecrets coverage doc stonithd hardening pre_release + profiling %package cli License: GPLv2+ and LGPLv2+ @@ -533,7 +511,7 @@ The metadata files required for Pacemaker to execute the nagios plugin monitor resources. %prep -%autosetup -a 1 -n %{name}-%{commit} -S git_am -p 1 +%autosetup -a 1 -n %{name}-%{archive_version} -S git_am -p 1 %build @@ -565,6 +543,7 @@ export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %{!?with_legacy_links: --disable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ + %{?with_cibsecrets: --with-cibsecrets} \ %{!?with_doc: --with-brand=} \ %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ --with-initdir=%{_initrddir} \ @@ -585,6 +564,7 @@ sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool make %{_smp_mflags} V=1 %check +make %{_smp_mflags} check { cts/cts-scheduler --run load-stopped-loop \ && cts/cts-cli \ && touch .CHECKED @@ -764,7 +744,7 @@ getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u %{hacluster_id} - exit 0 %if %{defined ldconfig_scriptlets} -%ldconfig_scriptlets libs +%ldconfig_scriptlets -n %{pkgname_pcmk_libs} %ldconfig_scriptlets cluster-libs %else %post -n %{pkgname_pcmk_libs} -p /sbin/ldconfig @@ -837,6 +817,9 @@ exit 0 %{_sbindir}/attrd_updater %{_sbindir}/cibadmin +%if %{with cibsecrets} +%{_sbindir}/cibsecret +%endif %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount @@ -968,6 +951,43 @@ exit 0 %license %{nagios_name}-%{nagios_hash}/COPYING %changelog +* Thu Jun 25 2020 Ken Gaillot - 2.0.4-3 +- Allow resource and operation defaults per resource or operation type +- Rebase on upstream 2.0.4 final release +- Support on-fail="demote" and no-quorum-policy="demote" options +- Remove incorrect comment from sysconfig file +- Resolves: rhbz1628701 +- Resolves: rhbz1828488 +- Resolves: rhbz1837747 +- Resolves: rhbz1848789 + +* Wed Jun 10 2020 Ken Gaillot - 2.0.4-2 +- Improve cibsecret help and clean up code per static analysis +- Resolves: rhbz1793860 + +* Mon Jun 8 2020 Ken Gaillot - 2.0.4-1 +- Clear leaving node's attributes if there is no DC +- Add crm_mon --node option to limit display to particular node or tagged nodes +- Add crm_mon --include/--exclude options to select what sections are shown +- priority-fencing-delay option bases delay on where resources are active +- Pending DC fencing gets 'stuck' in status display +- crm_rule can now check rule expiration when "years" is specified +- crm_mon now formats error messages better +- Support for CIB secrets is enabled +- Rebase on latest upstream Pacemaker release +- Fix regression introduced in 8.2 so crm_node -n works on remote nodes +- Avoid infinite loop when topology is removed while unfencing is in progress +- Resolves: rhbz1300604 +- Resolves: rhbz1363907 +- Resolves: rhbz1784601 +- Resolves: rhbz1787751 +- Resolves: rhbz1790591 +- Resolves: rhbz1793653 +- Resolves: rhbz1793860 +- Resolves: rhbz1828488 +- Resolves: rhbz1830535 +- Resolves: rhbz1831775 + * Mon Jan 27 2020 Ken Gaillot - 2.0.3-5 - Clear leaving node's attributes if there is no DC - Resolves: rhbz1791841
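As a closing usage note, the optional features advertised under "Available
rpmbuild rebuild options" in the %description above are toggled at build
time. A plausible local rebuild (rpmbuild tree layout assumed; only Source0
carries a URL for spectool to fetch):

    # Resolve and download Source0 into the rpm source directory; this is the
    # URL resolution the "spectool -s 0 pacemaker.spec" hint refers to
    spectool -g -R pacemaker.spec

    # Rebuild with CIB secrets support enabled and documentation disabled
    rpmbuild -ba pacemaker.spec --with cibsecrets --without doc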