From 6c49ac42f68b09d9bd30df2e7d2d49ac79bd73d2 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Nov 09 2021 09:59:09 +0000 Subject: import pacemaker-2.1.0-8.el8 --- diff --git a/.gitignore b/.gitignore index f84347e..5b0aa26 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ SOURCES/nagios-agents-metadata-105ab8a.tar.gz -SOURCES/pacemaker-ba59be7.tar.gz +SOURCES/pacemaker-7c3f660.tar.gz diff --git a/.pacemaker.metadata b/.pacemaker.metadata index 073d982..de63f82 100644 --- a/.pacemaker.metadata +++ b/.pacemaker.metadata @@ -1,2 +1,2 @@ ea6c0a27fd0ae8ce02f84a11f08a0d79377041c3 SOURCES/nagios-agents-metadata-105ab8a.tar.gz -268769bcd0d6c2ea2d50db92aaea0f31637775d0 SOURCES/pacemaker-ba59be7.tar.gz +17aa11e179c3f9eacbacac5735d7f5b14a1ac010 SOURCES/pacemaker-7c3f660.tar.gz diff --git a/SOURCES/001-feature-set.patch b/SOURCES/001-feature-set.patch deleted file mode 100644 index cdacd27..0000000 --- a/SOURCES/001-feature-set.patch +++ /dev/null @@ -1,704 +0,0 @@ -From d81282f1ac5e1226fbf6cfa1bd239d317e106def Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 13 Oct 2020 10:08:21 +0200 -Subject: [PATCH 1/3] Feature: crmadmin: implement formatted output - ---- - include/crm/crm.h | 2 +- - tools/crmadmin.c | 312 ++++++++++++++++++++++++++++++++++++++++-------------- - 2 files changed, 232 insertions(+), 82 deletions(-) - -diff --git a/include/crm/crm.h b/include/crm/crm.h -index 389b6aa..4eca278 100644 ---- a/include/crm/crm.h -+++ b/include/crm/crm.h -@@ -51,7 +51,7 @@ extern "C" { - * >=3.0.13: Fail counts include operation name and interval - * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED - */ --# define CRM_FEATURE_SET "3.6.1" -+# define CRM_FEATURE_SET "3.6.2" - - # define EOS '\0' - # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) -diff --git a/tools/crmadmin.c b/tools/crmadmin.c -index 0b87d01..14078e6 100644 ---- a/tools/crmadmin.c -+++ b/tools/crmadmin.c -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -38,7 +39,6 @@ bool need_controld_api = true; - bool need_pacemakerd_api = false; - - bool do_work(pcmk_ipc_api_t *api); --void do_find_node_list(xmlNode *xml_node); - static char *ipc_name = NULL; - - gboolean admin_message_timeout(gpointer data); -@@ -55,13 +55,12 @@ static enum { - - static gboolean BE_VERBOSE = FALSE; - static gboolean BASH_EXPORT = FALSE; --static gboolean BE_SILENT = FALSE; - static char *dest_node = NULL; - static crm_exit_t exit_code = CRM_EX_OK; -+pcmk__output_t *out = NULL; - - - struct { -- gboolean quiet; - gboolean health; - gint timeout; - } options; -@@ -168,6 +167,191 @@ command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError - return TRUE; - } - -+PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *") -+static int -+health_text(pcmk__output_t *out, va_list args) -+{ -+ char *sys_from = va_arg(args, char *); -+ char *host_from = va_arg(args, char *); -+ char *fsa_state = va_arg(args, char *); -+ char *result = va_arg(args, char *); -+ -+ if (!out->is_quiet(out)) { -+ out->info(out, "Status of %s@%s: %s (%s)", crm_str(sys_from), -+ crm_str(host_from), crm_str(fsa_state), crm_str(result)); -+ } else if (fsa_state != NULL) { -+ out->info(out, "%s", fsa_state); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *") -+static int -+health_xml(pcmk__output_t *out, va_list args) -+{ -+ char *sys_from = va_arg(args, char *); -+ char *host_from = va_arg(args, char *); -+ char 
*fsa_state = va_arg(args, char *); -+ char *result = va_arg(args, char *); -+ -+ xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -+ xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(host_from)); -+ xmlSetProp(node, (pcmkXmlStr) "state", (pcmkXmlStr) crm_str(fsa_state)); -+ xmlSetProp(node, (pcmkXmlStr) "result", (pcmkXmlStr) crm_str(result)); -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *") -+static int -+pacemakerd_health_text(pcmk__output_t *out, va_list args) -+{ -+ char *sys_from = va_arg(args, char *); -+ char *state = va_arg(args, char *); -+ char *last_updated = va_arg(args, char *); -+ -+ if (!out->is_quiet(out)) { -+ out->info(out, "Status of %s: '%s' %s %s", crm_str(sys_from), -+ crm_str(state), (!pcmk__str_empty(last_updated))? -+ "last updated":"", crm_str(last_updated)); -+ } else { -+ out->info(out, "%s", crm_str(state)); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *") -+static int -+pacemakerd_health_xml(pcmk__output_t *out, va_list args) -+{ -+ char *sys_from = va_arg(args, char *); -+ char *state = va_arg(args, char *); -+ char *last_updated = va_arg(args, char *); -+ -+ -+ xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -+ xmlSetProp(node, (pcmkXmlStr) "state", (pcmkXmlStr) crm_str(state)); -+ xmlSetProp(node, (pcmkXmlStr) "last_updated", (pcmkXmlStr) crm_str(last_updated)); -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("dc", "char *") -+static int -+dc_text(pcmk__output_t *out, va_list args) -+{ -+ char *dc = va_arg(args, char *); -+ -+ if (!out->is_quiet(out)) { -+ out->info(out, "Designated Controller is: %s", crm_str(dc)); -+ } else if (dc != NULL) { -+ out->info(out, "%s", dc); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("dc", "char *") -+static int -+dc_xml(pcmk__output_t *out, va_list args) -+{ -+ char *dc = va_arg(args, char *); -+ -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "dc"); -+ xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(dc)); -+ -+ return pcmk_rc_ok; -+} -+ -+ -+PCMK__OUTPUT_ARGS("crmadmin-node-list", "xmlNode *") -+static int -+crmadmin_node_list(pcmk__output_t *out, va_list args) -+{ -+ xmlNode *xml_node = va_arg(args, xmlNode *); -+ int found = 0; -+ xmlNode *node = NULL; -+ xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -+ -+ out->begin_list(out, NULL, NULL, "Nodes"); -+ -+ for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -+ node = crm_next_same_xml(node)) { -+ const char *node_type = BASH_EXPORT ? NULL : -+ crm_element_value(node, XML_ATTR_TYPE); -+ out->message(out, "crmadmin-node", node_type, -+ crm_str(crm_element_value(node, XML_ATTR_UNAME)), -+ crm_str(crm_element_value(node, XML_ATTR_ID))); -+ -+ found++; -+ } -+ // @TODO List Pacemaker Remote nodes that don't have a entry -+ -+ out->end_list(out); -+ -+ if (found == 0) { -+ out->info(out, "No nodes configured"); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *") -+static int -+crmadmin_node_text(pcmk__output_t *out, va_list args) -+{ -+ char *type = va_arg(args, char *); -+ char *name = va_arg(args, char *); -+ char *id = va_arg(args, char *); -+ -+ if (BASH_EXPORT) { -+ out->info(out, "export %s=%s", crm_str(name), crm_str(id)); -+ } else { -+ out->info(out, "%s node: %s (%s)", type ? 
type : "member", -+ crm_str(name), crm_str(id)); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *") -+static int -+crmadmin_node_xml(pcmk__output_t *out, va_list args) -+{ -+ char *type = va_arg(args, char *); -+ char *name = va_arg(args, char *); -+ char *id = va_arg(args, char *); -+ -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "crmadmin-node"); -+ xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) (type ? type : "member")); -+ xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) crm_str(name)); -+ xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) crm_str(id)); -+ -+ return pcmk_rc_ok; -+} -+ -+static pcmk__message_entry_t fmt_functions[] = { -+ {"health", "default", health_text }, -+ {"health", "xml", health_xml }, -+ {"pacemakerd-health", "default", pacemakerd_health_text }, -+ {"pacemakerd-health", "xml", pacemakerd_health_xml }, -+ {"dc", "default", dc_text }, -+ {"dc", "xml", dc_xml }, -+ {"crmadmin-node-list", "default", crmadmin_node_list }, -+ {"crmadmin-node", "default", crmadmin_node_text }, -+ {"crmadmin-node", "xml", crmadmin_node_xml }, -+ -+ { NULL, NULL, NULL } -+}; -+ -+static pcmk__supported_format_t formats[] = { -+ PCMK__SUPPORTED_FORMAT_TEXT, -+ PCMK__SUPPORTED_FORMAT_XML, -+ { NULL, NULL, NULL } -+}; -+ - static void - quit_main_loop(crm_exit_t ec) - { -@@ -191,7 +375,7 @@ controller_event_cb(pcmk_ipc_api_t *controld_api, - switch (event_type) { - case pcmk_ipc_event_disconnect: - if (exit_code == CRM_EX_DISCONNECT) { // Unexpected -- fprintf(stderr, "error: Lost connection to controller\n"); -+ out->err(out, "error: Lost connection to controller"); - } - goto done; - break; -@@ -209,14 +393,14 @@ controller_event_cb(pcmk_ipc_api_t *controld_api, - } - - if (status != CRM_EX_OK) { -- fprintf(stderr, "error: Bad reply from controller: %s", -+ out->err(out, "error: Bad reply from controller: %s", - crm_exit_str(status)); - exit_code = status; - goto done; - } - - if (reply->reply_type != pcmk_controld_reply_ping) { -- fprintf(stderr, "error: Unknown reply type %d from controller\n", -+ out->err(out, "error: Unknown reply type %d from controller", - reply->reply_type); - goto done; - } -@@ -224,22 +408,16 @@ controller_event_cb(pcmk_ipc_api_t *controld_api, - // Parse desired information from reply - switch (command) { - case cmd_health: -- printf("Status of %s@%s: %s (%s)\n", -+ out->message(out, "health", - reply->data.ping.sys_from, - reply->host_from, - reply->data.ping.fsa_state, - reply->data.ping.result); -- if (BE_SILENT && (reply->data.ping.fsa_state != NULL)) { -- fprintf(stderr, "%s\n", reply->data.ping.fsa_state); -- } - exit_code = CRM_EX_OK; - break; - - case cmd_whois_dc: -- printf("Designated Controller is: %s\n", reply->host_from); -- if (BE_SILENT && (reply->host_from != NULL)) { -- fprintf(stderr, "%s\n", reply->host_from); -- } -+ out->message(out, "dc", reply->host_from); - exit_code = CRM_EX_OK; - break; - -@@ -263,7 +441,7 @@ pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, - switch (event_type) { - case pcmk_ipc_event_disconnect: - if (exit_code == CRM_EX_DISCONNECT) { // Unexpected -- fprintf(stderr, "error: Lost connection to pacemakerd\n"); -+ out->err(out, "error: Lost connection to pacemakerd"); - } - goto done; - break; -@@ -281,14 +459,14 @@ pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, - } - - if (status != CRM_EX_OK) { -- fprintf(stderr, "error: Bad reply from pacemakerd: %s", -+ out->err(out, "error: Bad reply from pacemakerd: %s", - crm_exit_str(status)); - exit_code 
= status; - goto done; - } - - if (reply->reply_type != pcmk_pacemakerd_reply_ping) { -- fprintf(stderr, "error: Unknown reply type %d from pacemakerd\n", -+ out->err(out, "error: Unknown reply type %d from pacemakerd", - reply->reply_type); - goto done; - } -@@ -305,21 +483,12 @@ pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, - crm_time_log_date | crm_time_log_timeofday | - crm_time_log_with_timezone); - -- printf("Status of %s: '%s' %s %s\n", -+ out->message(out, "pacemakerd-health", - reply->data.ping.sys_from, - (reply->data.ping.status == pcmk_rc_ok)? - pcmk_pacemakerd_api_daemon_state_enum2text( - reply->data.ping.state):"query failed", -- (reply->data.ping.status == pcmk_rc_ok)?"last updated":"", - (reply->data.ping.status == pcmk_rc_ok)?pinged_buf:""); -- if (BE_SILENT && -- (reply->data.ping.state != pcmk_pacemakerd_state_invalid)) { -- fprintf(stderr, "%s\n", -- (reply->data.ping.status == pcmk_rc_ok)? -- pcmk_pacemakerd_api_daemon_state_enum2text( -- reply->data.ping.state): -- "query failed"); -- } - exit_code = CRM_EX_OK; - free(pinged_buf); - } -@@ -354,7 +523,7 @@ list_nodes() - rc = the_cib->cmds->query(the_cib, NULL, &output, - cib_scope_local | cib_sync_call); - if (rc == pcmk_ok) { -- do_find_node_list(output); -+ out->message(out, "crmadmin-node-list", output); - free_xml(output); - } - the_cib->cmds->signoff(the_cib); -@@ -362,20 +531,20 @@ list_nodes() - } - - static GOptionContext * --build_arg_context(pcmk__common_args_t *args) { -+build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { - GOptionContext *context = NULL; - - const char *description = "Report bugs to users@clusterlabs.org"; - - GOptionEntry extra_prog_entries[] = { -- { "quiet", 'q', 0, G_OPTION_ARG_NONE, &options.quiet, -+ { "quiet", 'q', 0, G_OPTION_ARG_NONE, &(args->quiet), - "Display only the essential query information", - NULL }, - - { NULL } - }; - -- context = pcmk__build_arg_context(args, NULL, NULL, NULL); -+ context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); - g_option_context_set_description(context, description); - - /* Add the -q option, which cannot be part of the globally supported options -@@ -402,9 +571,11 @@ main(int argc, char **argv) - - GError *error = NULL; - GOptionContext *context = NULL; -+ GOptionGroup *output_group = NULL; - gchar **processed_args = NULL; - -- context = build_arg_context(args); -+ context = build_arg_context(args, &output_group); -+ pcmk__register_formats(output_group, formats); - - crm_log_cli_init("crmadmin"); - -@@ -421,9 +592,21 @@ main(int argc, char **argv) - crm_bump_log_level(argc, argv); - } - -+ rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); -+ if (rc != pcmk_rc_ok) { -+ fprintf(stderr, "Error creating output format %s: %s\n", -+ args->output_ty, pcmk_rc_str(rc)); -+ exit_code = CRM_EX_ERROR; -+ goto done; -+ } -+ -+ out->quiet = args->quiet; -+ -+ pcmk__register_messages(out, fmt_functions); -+ - if (args->version) { -- /* FIXME: When crmadmin is converted to use formatted output, this can go. 
*/ -- pcmk__cli_help('v', CRM_EX_USAGE); -+ out->version(out, false); -+ goto done; - } - - if (options.timeout) { -@@ -433,12 +616,8 @@ main(int argc, char **argv) - } - } - -- if (options.quiet) { -- BE_SILENT = TRUE; -- } -- - if (options.health) { -- fprintf(stderr, "Cluster-wide health option not supported\n"); -+ out->err(out, "Cluster-wide health option not supported"); - ++argerr; - } - -@@ -447,14 +626,14 @@ main(int argc, char **argv) - } - - if (command == cmd_none) { -- fprintf(stderr, "error: Must specify a command option\n\n"); -+ out->err(out, "error: Must specify a command option"); - ++argerr; - } - - if (argerr) { - char *help = g_option_context_get_help(context, TRUE, NULL); - -- fprintf(stderr, "%s", help); -+ out->err(out, "%s", help); - g_free(help); - exit_code = CRM_EX_USAGE; - goto done; -@@ -464,7 +643,7 @@ main(int argc, char **argv) - if (need_controld_api) { - rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); - if (controld_api == NULL) { -- fprintf(stderr, "error: Could not connect to controller: %s\n", -+ out->err(out, "error: Could not connect to controller: %s", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - goto done; -@@ -472,7 +651,7 @@ main(int argc, char **argv) - pcmk_register_ipc_callback(controld_api, controller_event_cb, NULL); - rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); - if (rc != pcmk_rc_ok) { -- fprintf(stderr, "error: Could not connect to controller: %s\n", -+ out->err(out, "error: Could not connect to controller: %s", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - goto done; -@@ -483,7 +662,7 @@ main(int argc, char **argv) - if (need_pacemakerd_api) { - rc = pcmk_new_ipc_api(&pacemakerd_api, pcmk_ipc_pacemakerd); - if (pacemakerd_api == NULL) { -- fprintf(stderr, "error: Could not connect to pacemakerd: %s\n", -+ out->err(out, "error: Could not connect to pacemakerd: %s", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - goto done; -@@ -491,7 +670,7 @@ main(int argc, char **argv) - pcmk_register_ipc_callback(pacemakerd_api, pacemakerd_event_cb, NULL); - rc = pcmk_connect_ipc(pacemakerd_api, pcmk_ipc_dispatch_main); - if (rc != pcmk_rc_ok) { -- fprintf(stderr, "error: Could not connect to pacemakerd: %s\n", -+ out->err(out, "error: Could not connect to pacemakerd: %s", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - goto done; -@@ -528,6 +707,10 @@ done: - g_strfreev(processed_args); - g_clear_error(&error); - pcmk__free_arg_context(context); -+ if (out != NULL) { -+ out->finish(out, exit_code, true, NULL); -+ pcmk__output_free(out); -+ } - return crm_exit(exit_code); - - } -@@ -567,7 +750,7 @@ do_work(pcmk_ipc_api_t *api) - break; - } - if (rc != pcmk_rc_ok) { -- fprintf(stderr, "error: Command failed: %s", pcmk_rc_str(rc)); -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - } - return need_reply; -@@ -576,43 +759,10 @@ do_work(pcmk_ipc_api_t *api) - gboolean - admin_message_timeout(gpointer data) - { -- fprintf(stderr, -- "error: No reply received from controller before timeout (%dms)\n", -+ out->err(out, -+ "error: No reply received from controller before timeout (%dms)", - message_timeout_ms); - message_timer_id = 0; - quit_main_loop(CRM_EX_TIMEOUT); - return FALSE; // Tells glib to remove source - } -- --void --do_find_node_list(xmlNode * xml_node) --{ -- int found = 0; -- xmlNode *node = NULL; -- xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -- -- for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -- 
node = crm_next_same_xml(node)) { -- -- if (BASH_EXPORT) { -- printf("export %s=%s\n", -- crm_element_value(node, XML_ATTR_UNAME), -- crm_element_value(node, XML_ATTR_ID)); -- } else { -- const char *node_type = crm_element_value(node, XML_ATTR_TYPE); -- -- if (node_type == NULL) { -- node_type = "member"; -- } -- printf("%s node: %s (%s)\n", node_type, -- crm_element_value(node, XML_ATTR_UNAME), -- crm_element_value(node, XML_ATTR_ID)); -- } -- found++; -- } -- // @TODO List Pacemaker Remote nodes that don't have a entry -- -- if (found == 0) { -- printf("No nodes configured\n"); -- } --} --- -1.8.3.1 - - -From 2b121066c8eead96e85dc3bf6ecc1c2674cbdf32 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 20 Oct 2020 16:19:03 +0200 -Subject: [PATCH 2/3] Refactor: crmadmin: use simple XML lists - ---- - tools/crmadmin.c | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - -diff --git a/tools/crmadmin.c b/tools/crmadmin.c -index 14078e6..b80a31a 100644 ---- a/tools/crmadmin.c -+++ b/tools/crmadmin.c -@@ -275,7 +275,7 @@ crmadmin_node_list(pcmk__output_t *out, va_list args) - xmlNode *node = NULL; - xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); - -- out->begin_list(out, NULL, NULL, "Nodes"); -+ out->begin_list(out, NULL, NULL, "nodes"); - - for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; - node = crm_next_same_xml(node)) { -@@ -324,7 +324,7 @@ crmadmin_node_xml(pcmk__output_t *out, va_list args) - char *name = va_arg(args, char *); - char *id = va_arg(args, char *); - -- xmlNodePtr node = pcmk__output_create_xml_node(out, "crmadmin-node"); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "node"); - xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) (type ? type : "member")); - xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) crm_str(name)); - xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) crm_str(id)); -@@ -604,6 +604,10 @@ main(int argc, char **argv) - - pcmk__register_messages(out, fmt_functions); - -+ if (!pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname())) { -+ goto done; -+ } -+ - if (args->version) { - out->version(out, false); - goto done; --- -1.8.3.1 - - -From 9b53a7eda078db736be5212aeb77daf8117e2f17 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 20 Oct 2020 16:21:12 +0200 -Subject: [PATCH 3/3] Feature: xml: add schema for new crmadmin output - ---- - xml/Makefile.am | 2 +- - xml/api/crmadmin-2.4.rng | 68 ++++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 69 insertions(+), 1 deletion(-) - create mode 100644 xml/api/crmadmin-2.4.rng - -diff --git a/xml/Makefile.am b/xml/Makefile.am -index c045522..892c811 100644 ---- a/xml/Makefile.am -+++ b/xml/Makefile.am -@@ -45,7 +45,7 @@ version_pairs_last = $(wordlist \ - ) - - # Names of API schemas that form the choices for pacemaker-result content --API_request_base = command-output crm_mon stonith_admin version -+API_request_base = command-output crm_mon crmadmin stonith_admin version - - # Names of CIB schemas that form the choices for cib/configuration content - CIB_cfg_base = options nodes resources constraints fencing acls tags alerts -diff --git a/xml/api/crmadmin-2.4.rng b/xml/api/crmadmin-2.4.rng -new file mode 100644 -index 0000000..34c9ca4 ---- /dev/null -+++ b/xml/api/crmadmin-2.4.rng -@@ -0,0 +1,68 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ unknown -+ member -+ 
remote -+ ping -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - diff --git a/SOURCES/001-ping-agent.patch b/SOURCES/001-ping-agent.patch new file mode 100644 index 0000000..89fe41a --- /dev/null +++ b/SOURCES/001-ping-agent.patch @@ -0,0 +1,225 @@ +From c6ee0973522268ed7b3241cf0ec2e06398444114 Mon Sep 17 00:00:00 2001 +From: Grace Chin +Date: Tue, 4 May 2021 12:02:17 -0400 +Subject: [PATCH 1/4] Remove deprecated attrd_options + +--- + extra/resources/ping | 11 +++-------- + 1 file changed, 3 insertions(+), 8 deletions(-) + +diff --git a/extra/resources/ping b/extra/resources/ping +index 3cf8dfe..2e93f22 100755 +--- a/extra/resources/ping ++++ b/extra/resources/ping +@@ -178,7 +178,7 @@ ping_stop() { + + rm -f "${OCF_RESKEY_pidfile}" + +- attrd_updater -D -n "$OCF_RESKEY_name" -d "$OCF_RESKEY_dampen" $attrd_options ++ attrd_updater -D -n "$OCF_RESKEY_name" -d "$OCF_RESKEY_dampen" + + return $OCF_SUCCESS + } +@@ -302,9 +302,9 @@ ping_update() { + + score=$(expr $active \* $OCF_RESKEY_multiplier) + if [ "$__OCF_ACTION" = "start" ] ; then +- attrd_updater -n "$OCF_RESKEY_name" -B "$score" -d "$OCF_RESKEY_dampen" $attrd_options ++ attrd_updater -n "$OCF_RESKEY_name" -B "$score" -d "$OCF_RESKEY_dampen" + else +- attrd_updater -n "$OCF_RESKEY_name" -v "$score" -d "$OCF_RESKEY_dampen" $attrd_options ++ attrd_updater -n "$OCF_RESKEY_name" -v "$score" -d "$OCF_RESKEY_dampen" + fi + rc=$? + case $rc in +@@ -396,11 +396,6 @@ case "${OCF_RESKEY_debug}" in + ;; + esac + +-attrd_options='-q' +-if [ "${OCF_RESKEY_debug}" = "true" ]; then +- attrd_options='' +-fi +- + case "$__OCF_ACTION" in + meta-data) meta_data + exit $OCF_SUCCESS +-- +1.8.3.1 + + +From 6d6c4691cf0970059689856c354daf9e098b4451 Mon Sep 17 00:00:00 2001 +From: Grace Chin +Date: Tue, 4 May 2021 14:50:37 -0400 +Subject: [PATCH 2/4] Replace debug values, true and false, with 0 and 1 + +--- + extra/resources/ping | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/extra/resources/ping b/extra/resources/ping +index 2e93f22..fee019b 100755 +--- a/extra/resources/ping ++++ b/extra/resources/ping +@@ -24,7 +24,7 @@ + : ${OCF_RESKEY_dampen:="5s"} + : ${OCF_RESKEY_attempts:="3"} + : ${OCF_RESKEY_multiplier:="1"} +-: ${OCF_RESKEY_debug:="false"} ++: ${OCF_RESKEY_debug:="0"} + : ${OCF_RESKEY_failure_score:="0"} + : ${OCF_RESKEY_use_fping:="1"} + : ${OCF_RESKEY_host_list:=""} +@@ -152,7 +152,7 @@ END + + ping_conditional_log() { + level="$1"; shift +- if [ "${OCF_RESKEY_debug}" = "true" ]; then ++ if [ $OCF_RESKEY_debug -gt 0 ]; then + ocf_log "$level" "$*" + fi + } +@@ -388,8 +388,8 @@ fi + + # Check the debug option + case "${OCF_RESKEY_debug}" in +- true|True|TRUE|1) OCF_RESKEY_debug=true;; +- false|False|FALSE|0) OCF_RESKEY_debug=false;; ++ true|True|TRUE|1) OCF_RESKEY_debug=0;; ++ false|False|FALSE|0) OCF_RESKEY_debug=1;; + *) + ocf_log warn "Value for 'debug' is incorrect. 
Please specify 'true' or 'false' not: ${OCF_RESKEY_debug}" + OCF_RESKEY_debug=false +-- +1.8.3.1 + + +From a886a31056b6aca764c6911f5432af2c5ebf51df Mon Sep 17 00:00:00 2001 +From: Grace Chin +Date: Tue, 11 May 2021 11:04:50 -0400 +Subject: [PATCH 3/4] Add verbose debug mode which logs ping and fping output + when set + +--- + extra/resources/ping | 19 ++++++++++++++----- + 1 file changed, 14 insertions(+), 5 deletions(-) + +diff --git a/extra/resources/ping b/extra/resources/ping +index fee019b..cc796af 100755 +--- a/extra/resources/ping ++++ b/extra/resources/ping +@@ -249,10 +249,13 @@ fping_check() { + + case $rc in + 0) ++ if [ $OCF_RESKEY_debug -gt 1 ]; then ++ ping_conditional_log info "$output" ++ fi + ;; + 1) + for h in $(echo "$output" | grep "is unreachable" | awk '{print $1}'); do +- ping_conditional_log warn "$h is inactive" ++ ping_conditional_log warn "$h is inactive: $output" + done + ;; + *) +@@ -282,7 +285,12 @@ ping_check() { + p_out=$($p_exe $p_args $OCF_RESKEY_options $host 2>&1); rc=$? + + case $rc in +- 0) active=$(expr $active + 1);; ++ 0) ++ active=$(expr $active + 1) ++ if [ $OCF_RESKEY_debug -gt 1 ]; then ++ ping_conditional_log info "$p_out" ++ fi ++ ;; + 1) ping_conditional_log warn "$host is inactive: $p_out";; + *) ocf_log err "Unexpected result for '$p_exe $p_args $OCF_RESKEY_options $host' $rc: $p_out";; + esac +@@ -388,10 +396,11 @@ fi + + # Check the debug option + case "${OCF_RESKEY_debug}" in +- true|True|TRUE|1) OCF_RESKEY_debug=0;; +- false|False|FALSE|0) OCF_RESKEY_debug=1;; ++ true|True|TRUE|1) OCF_RESKEY_debug=1;; ++ false|False|FALSE|0) OCF_RESKEY_debug=0;; ++ verbose|Verbose|VERBOSE|2) OCF_RESKEY_debug=2;; + *) +- ocf_log warn "Value for 'debug' is incorrect. Please specify 'true' or 'false' not: ${OCF_RESKEY_debug}" ++ ocf_log warn "Value for 'debug' is incorrect. Please specify 'true', 'false', or 'verbose', not: ${OCF_RESKEY_debug}" + OCF_RESKEY_debug=false + ;; + esac +-- +1.8.3.1 + + +From 460043f133ced80e923b1290af70502a72deb7f8 Mon Sep 17 00:00:00 2001 +From: Grace Chin +Date: Tue, 11 May 2021 11:07:05 -0400 +Subject: [PATCH 4/4] Improve variable names + +--- + extra/resources/ping | 20 ++++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/extra/resources/ping b/extra/resources/ping +index cc796af..9763b60 100755 +--- a/extra/resources/ping ++++ b/extra/resources/ping +@@ -244,22 +244,22 @@ fping_check() { + timeout=$(expr $OCF_RESKEY_timeout \* 1000 / $OCF_RESKEY_attempts) + + cmd="$p_exe -r $OCF_RESKEY_attempts -t $timeout -B 1.0 $OCF_RESKEY_options $OCF_RESKEY_host_list" +- output=$($cmd 2>&1); rc=$? +- active=$(echo "$output" | grep "is alive" | wc -l) ++ fping_output=$($cmd 2>&1); rc=$? ++ active=$(echo "$fping_output" | grep "is alive" | wc -l) + + case $rc in + 0) + if [ $OCF_RESKEY_debug -gt 1 ]; then +- ping_conditional_log info "$output" ++ ping_conditional_log info "$fping_output" + fi + ;; + 1) +- for h in $(echo "$output" | grep "is unreachable" | awk '{print $1}'); do +- ping_conditional_log warn "$h is inactive: $output" ++ for h in $(echo "$fping_output" | grep "is unreachable" | awk '{print $1}'); do ++ ping_conditional_log warn "$h is inactive: $fping_output" + done + ;; + *) +- ocf_log err "Unexpected result for '$cmd' $rc: $(echo "$output" | tr '\n' ';')" ++ ocf_log err "Unexpected result for '$cmd' $rc: $(echo "$fping_output" | tr '\n' ';')" + ;; + esac + +@@ -282,17 +282,17 @@ ping_check() { + *:*) p_exe=ping6 + esac + +- p_out=$($p_exe $p_args $OCF_RESKEY_options $host 2>&1); rc=$? 
++ ping_output=$($p_exe $p_args $OCF_RESKEY_options $host 2>&1); rc=$? + + case $rc in + 0) + active=$(expr $active + 1) + if [ $OCF_RESKEY_debug -gt 1 ]; then +- ping_conditional_log info "$p_out" ++ ping_conditional_log info "$ping_output" + fi + ;; +- 1) ping_conditional_log warn "$host is inactive: $p_out";; +- *) ocf_log err "Unexpected result for '$p_exe $p_args $OCF_RESKEY_options $host' $rc: $p_out";; ++ 1) ping_conditional_log warn "$host is inactive: $ping_output";; ++ *) ocf_log err "Unexpected result for '$p_exe $p_args $OCF_RESKEY_options $host' $rc: $ping_output";; + esac + done + return $active +-- +1.8.3.1 + diff --git a/SOURCES/002-feature-set.patch b/SOURCES/002-feature-set.patch deleted file mode 100644 index 199470c..0000000 --- a/SOURCES/002-feature-set.patch +++ /dev/null @@ -1,165 +0,0 @@ -From 12e59f337f838d647deb8f84850324f785e58824 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 20 Oct 2020 10:53:26 -0400 -Subject: [PATCH] Feature: libcrmcommon: Add a spacer formatted output message. - ---- - include/crm/common/output_internal.h | 10 +++++++++- - lib/common/output_html.c | 6 ++++++ - lib/common/output_log.c | 6 ++++++ - lib/common/output_text.c | 6 ++++++ - lib/common/output_xml.c | 6 ++++++ - tools/crm_mon_curses.c | 6 ++++++ - 6 files changed, 39 insertions(+), 1 deletion(-) - -diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h -index 2874259..e1bd295 100644 ---- a/include/crm/common/output_internal.h -+++ b/include/crm/common/output_internal.h -@@ -460,6 +460,14 @@ struct pcmk__output_s { - * \return true if output should be supressed, false otherwise. - */ - bool (*is_quiet) (pcmk__output_t *out); -+ -+ /*! -+ * \internal -+ * \brief Output a spacer. Not all formatter will do this. -+ * -+ * \param[in] out The output functions structure. -+ */ -+ void (*spacer) (pcmk__output_t *out); - }; - - /*! -@@ -745,7 +753,7 @@ G_GNUC_NULL_TERMINATED; - - #define PCMK__OUTPUT_SPACER_IF(out_obj, cond) \ - if (cond) { \ -- out_obj->info(out_obj, "%s", ""); \ -+ out->spacer(out); \ - } - - #define PCMK__OUTPUT_LIST_HEADER(out_obj, cond, retcode, title...) 
\ -diff --git a/lib/common/output_html.c b/lib/common/output_html.c -index 156887d..e354b5d 100644 ---- a/lib/common/output_html.c -+++ b/lib/common/output_html.c -@@ -368,6 +368,11 @@ html_is_quiet(pcmk__output_t *out) { - return false; - } - -+static void -+html_spacer(pcmk__output_t *out) { -+ pcmk__output_create_xml_node(out, "br"); -+} -+ - pcmk__output_t * - pcmk__mk_html_output(char **argv) { - pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t)); -@@ -399,6 +404,7 @@ pcmk__mk_html_output(char **argv) { - retval->end_list = html_end_list; - - retval->is_quiet = html_is_quiet; -+ retval->spacer = html_spacer; - - return retval; - } -diff --git a/lib/common/output_log.c b/lib/common/output_log.c -index fd13c89..6336fa2 100644 ---- a/lib/common/output_log.c -+++ b/lib/common/output_log.c -@@ -226,6 +226,11 @@ log_is_quiet(pcmk__output_t *out) { - return false; - } - -+static void -+log_spacer(pcmk__output_t *out) { -+ /* This function intentionally left blank */ -+} -+ - pcmk__output_t * - pcmk__mk_log_output(char **argv) { - pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t)); -@@ -256,6 +261,7 @@ pcmk__mk_log_output(char **argv) { - retval->end_list = log_end_list; - - retval->is_quiet = log_is_quiet; -+ retval->spacer = log_spacer; - - return retval; - } -diff --git a/lib/common/output_text.c b/lib/common/output_text.c -index 9b3c09a..3432505 100644 ---- a/lib/common/output_text.c -+++ b/lib/common/output_text.c -@@ -244,6 +244,11 @@ text_is_quiet(pcmk__output_t *out) { - return out->quiet; - } - -+static void -+text_spacer(pcmk__output_t *out) { -+ fprintf(out->dest, "\n"); -+} -+ - pcmk__output_t * - pcmk__mk_text_output(char **argv) { - pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t)); -@@ -275,6 +280,7 @@ pcmk__mk_text_output(char **argv) { - retval->end_list = text_end_list; - - retval->is_quiet = text_is_quiet; -+ retval->spacer = text_spacer; - - return retval; - } -diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c -index 9a08d20..1710fac 100644 ---- a/lib/common/output_xml.c -+++ b/lib/common/output_xml.c -@@ -376,6 +376,11 @@ xml_is_quiet(pcmk__output_t *out) { - return false; - } - -+static void -+xml_spacer(pcmk__output_t *out) { -+ /* This function intentionally left blank */ -+} -+ - pcmk__output_t * - pcmk__mk_xml_output(char **argv) { - pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t)); -@@ -407,6 +412,7 @@ pcmk__mk_xml_output(char **argv) { - retval->end_list = xml_end_list; - - retval->is_quiet = xml_is_quiet; -+ retval->spacer = xml_spacer; - - return retval; - } -diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c -index 2c092df..8a08578 100644 ---- a/tools/crm_mon_curses.c -+++ b/tools/crm_mon_curses.c -@@ -247,6 +247,11 @@ curses_is_quiet(pcmk__output_t *out) { - return out->quiet; - } - -+static void -+curses_spacer(pcmk__output_t *out) { -+ addch('\n'); -+} -+ - pcmk__output_t * - crm_mon_mk_curses_output(char **argv) { - pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t)); -@@ -278,6 +283,7 @@ crm_mon_mk_curses_output(char **argv) { - retval->end_list = curses_end_list; - - retval->is_quiet = curses_is_quiet; -+ retval->spacer = curses_spacer; - - return retval; - } --- -1.8.3.1 - diff --git a/SOURCES/002-pacemakerd-options.patch b/SOURCES/002-pacemakerd-options.patch new file mode 100644 index 0000000..56941ec --- /dev/null +++ b/SOURCES/002-pacemakerd-options.patch @@ -0,0 +1,451 @@ +From 0d40ebf10b1794ece2c5c9768ea7222d3834d3b3 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 13 May 2021 
11:42:18 -0400 +Subject: [PATCH 1/4] Build: Use a different variable to find man page + includes. + +With other programs outside of the tools directory being converted to +use glib for command line handling, their includes are not going to be +in tools/. So we need to use a different autoconf variable to find +them. +--- + mk/common.mk | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/mk/common.mk b/mk/common.mk +index b247670..aa59feb 100644 +--- a/mk/common.mk ++++ b/mk/common.mk +@@ -1,5 +1,5 @@ + # +-# Copyright 2014-2020 the Pacemaker project contributors ++# Copyright 2014-2021 the Pacemaker project contributors + # + # The version control history for this file may have further details. + # +@@ -68,11 +68,11 @@ HELP2MAN_ARGS = -N --section 8 --name "Part of the Pacemaker cluster resource ma + # and all wrappers to C code. + %.8: % $(MAN8DEPS) + $(AM_V_at)chmod a+x $(abs_builddir)/$< +- $(AM_V_MAN)if [ -f $(top_srcdir)/tools/$@.inc ]; then \ ++ $(AM_V_MAN)if [ -f $(abs_srcdir)/$@.inc ]; then \ + PATH=$(abs_builddir):$$PATH $(HELP2MAN) $(HELP2MAN_ARGS) \ + -h --help-all \ + --no-discard-stderr \ +- -i $(top_srcdir)/tools/$@.inc $(abs_builddir)/$< \ ++ -i $(abs_srcdir)/$@.inc $(abs_builddir)/$< \ + | sed -f $(top_srcdir)/tools/fix-manpages > $@ ; \ + else \ + PATH=$(abs_builddir):$$PATH $(HELP2MAN) $(HELP2MAN_ARGS) \ +-- +1.8.3.1 + + +From c7ab1d901bcbbf0137277e783e072777ca2f82d9 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 13 May 2021 11:44:16 -0400 +Subject: [PATCH 2/4] Refactor: daemons: Remove the pid_file variable from + pacemakerd. + +It's never used anywhere. +--- + daemons/pacemakerd/pacemakerd.c | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c +index 8ec9708..03d688e 100644 +--- a/daemons/pacemakerd/pacemakerd.c ++++ b/daemons/pacemakerd/pacemakerd.c +@@ -27,8 +27,7 @@ + + static crm_trigger_t *shutdown_trigger = NULL; + static crm_trigger_t *startup_trigger = NULL; +-static const char *pid_file = PCMK_RUN_DIR "/pacemaker.pid"; + + /* state we report when asked via pacemakerd-api status-ping */ + static const char *pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_INIT; + static gboolean running_with_sbd = FALSE; /* local copy */ +@@ -224,7 +222,6 @@ main(int argc, char **argv) + /* Legacy */ + break; + case 'p': +- pid_file = optarg; + break; + case 's': + pcmk__set_env_option("node_start_state", "standby"); +-- +1.8.3.1 + + +From 98990eed9f6a5dbde7c8a5aa0783e93d5479295b Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 13 May 2021 13:14:38 -0400 +Subject: [PATCH 3/4] Refactor: daemons: Use glib for command line handling in + pacemakerd. 
+ +--- + daemons/pacemakerd/Makefile.am | 2 + + daemons/pacemakerd/pacemakerd.8.inc | 5 + + daemons/pacemakerd/pacemakerd.c | 195 ++++++++++++++++++------------------ + 3 files changed, 102 insertions(+), 100 deletions(-) + create mode 100644 daemons/pacemakerd/pacemakerd.8.inc + +diff --git a/daemons/pacemakerd/Makefile.am b/daemons/pacemakerd/Makefile.am +index cc657f5..84517a3 100644 +--- a/daemons/pacemakerd/Makefile.am ++++ b/daemons/pacemakerd/Makefile.am +@@ -15,6 +15,8 @@ if BUILD_SYSTEMD + systemdsystemunit_DATA = pacemaker.service + endif + ++EXTRA_DIST = pacemakerd.8.inc ++ + ## SOURCES + + noinst_HEADERS = pacemakerd.h +diff --git a/daemons/pacemakerd/pacemakerd.8.inc b/daemons/pacemakerd/pacemakerd.8.inc +new file mode 100644 +index 0000000..902af4e +--- /dev/null ++++ b/daemons/pacemakerd/pacemakerd.8.inc +@@ -0,0 +1,5 @@ ++[synopsis] ++pacemakerd [options] ++ ++/subsidiary Pacemaker daemons/ ++.SH OPTIONS +diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c +index 03d688e..ce194bf 100644 +--- a/daemons/pacemakerd/pacemakerd.c ++++ b/daemons/pacemakerd/pacemakerd.c +@@ -23,12 +23,54 @@ + #include + #include + #include ++#include + #include + #include + + #include + #include + ++#define SUMMARY "pacemakerd - primary Pacemaker daemon that launches and monitors all subsidiary Pacemaker daemons" ++ ++struct { ++ gboolean features; ++ gboolean foreground; ++ gboolean shutdown; ++ gboolean standby; ++} options; ++ ++static gboolean ++pid_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { ++ return TRUE; ++} ++ ++static gboolean ++standby_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { ++ options.standby = TRUE; ++ pcmk__set_env_option("node_start_state", "standby"); ++ return TRUE; ++} ++ ++static GOptionEntry entries[] = { ++ { "features", 'F', 0, G_OPTION_ARG_NONE, &options.features, ++ "Display full version and list of features Pacemaker was built with", ++ NULL }, ++ { "foreground", 'f', 0, G_OPTION_ARG_NONE, &options.foreground, ++ "(Ignored) Pacemaker always runs in the foreground", ++ NULL }, ++ { "pid-file", 'p', 0, G_OPTION_ARG_CALLBACK, pid_cb, ++ "(Ignored) Daemon pid file location", ++ "FILE" }, ++ { "shutdown", 'S', 0, G_OPTION_ARG_NONE, &options.shutdown, ++ "Instruct Pacemaker to shutdown on this machine", ++ NULL }, ++ { "standby", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, standby_cb, ++ "Start node in standby state", ++ NULL }, ++ ++ { NULL } ++}; ++ + static gboolean fatal_error = FALSE; + static GMainLoop *mainloop = NULL; + static bool global_keep_tracking = false; +@@ -642,49 +685,6 @@ pcmk_sigquit(int nsig) + .connection_destroyed = pcmk_ipc_destroy + }; + +-static pcmk__cli_option_t long_options[] = { +- // long option, argument type, storage, short option, description, flags +- { +- "help", no_argument, NULL, '?', +- "\tThis text", pcmk__option_default +- }, +- { +- "version", no_argument, NULL, '$', +- "\tVersion information", pcmk__option_default +- }, +- { +- "verbose", no_argument, NULL, 'V', +- "\tIncrease debug output", pcmk__option_default +- }, +- { +- "shutdown", no_argument, NULL, 'S', +- "\tInstruct Pacemaker to shutdown on this machine", pcmk__option_default +- }, +- { +- "features", no_argument, NULL, 'F', +- "\tDisplay full version and list of features Pacemaker was built with", +- pcmk__option_default +- }, +- { +- "-spacer-", no_argument, NULL, '-', +- "\nAdditional Options:", pcmk__option_default +- }, +- { +- "foreground", no_argument, NULL, 
'f', +- "\t(Ignored) Pacemaker always runs in the foreground", +- pcmk__option_default +- }, +- { +- "pid-file", required_argument, NULL, 'p', +- "\t(Ignored) Daemon pid file location", pcmk__option_default +- }, +- { +- "standby", no_argument, NULL, 's', +- "\tStart node in standby state", pcmk__option_default +- }, +- { 0, 0, 0, 0 } +-}; +- + static void + mcp_chown(const char *path, uid_t uid, gid_t gid) + { +@@ -1168,83 +1211,66 @@ request_shutdown(crm_ipc_t *ipc) + return status; + } + ++static GOptionContext * ++build_arg_context(pcmk__common_args_t *args) { ++ GOptionContext *context = NULL; ++ ++ context = pcmk__build_arg_context(args, NULL, NULL, NULL); ++ pcmk__add_main_args(context, entries); ++ return context; ++} ++ + int + main(int argc, char **argv) + { +- int flag; +- int argerr = 0; ++ crm_exit_t exit_code = CRM_EX_OK; ++ ++ GError *error = NULL; ++ ++ pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); ++ gchar **processed_args = pcmk__cmdline_preproc(argv, "p"); ++ GOptionContext *context = build_arg_context(args); + +- int option_index = 0; + bool old_instance_connected = false; +- gboolean shutdown = FALSE; + + crm_ipc_t *old_instance = NULL; + qb_ipcs_service_t *ipcs = NULL; + + crm_log_preinit(NULL, argc, argv); +- pcmk__set_cli_options(NULL, "[options]", long_options, +- "primary Pacemaker daemon that launches and " +- "monitors all subsidiary Pacemaker daemons"); + mainloop_add_signal(SIGHUP, pcmk_ignore); + mainloop_add_signal(SIGQUIT, pcmk_sigquit); + +- while (1) { +- flag = pcmk__next_cli_option(argc, argv, &option_index, NULL); +- if (flag == -1) +- break; +- +- switch (flag) { +- case 'V': +- crm_bump_log_level(argc, argv); +- break; +- case 'f': +- /* Legacy */ +- break; +- case 'p': +- break; +- case 's': +- pcmk__set_env_option("node_start_state", "standby"); +- break; +- case '$': +- case '?': +- pcmk__cli_help(flag, CRM_EX_OK); +- break; +- case 'S': +- shutdown = TRUE; +- break; +- case 'F': +- printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, +- CRM_FEATURE_SET, CRM_FEATURES); +- crm_exit(CRM_EX_OK); +- default: +- printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); +- ++argerr; +- break; +- } ++ if (!g_option_context_parse_strv(context, &processed_args, &error)) { ++ exit_code = CRM_EX_USAGE; ++ goto done; + } + +- if (optind < argc) { +- printf("non-option ARGV-elements: "); +- while (optind < argc) +- printf("%s ", argv[optind++]); +- printf("\n"); +- } +- if (argerr) { +- pcmk__cli_help('?', CRM_EX_USAGE); ++ if (options.features) { ++ printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, ++ CRM_FEATURE_SET, CRM_FEATURES); ++ exit_code = CRM_EX_OK; ++ goto done; + } + ++ if (args->version) { ++ g_strfreev(processed_args); ++ pcmk__free_arg_context(context); ++ /* FIXME: When pacemakerd is converted to use formatted output, this can go. 
*/ ++ pcmk__cli_help('v', CRM_EX_USAGE); ++ } + + setenv("LC_ALL", "C", 1); + + pcmk__set_env_option("mcp", "true"); + ++ pcmk__cli_init_logging("pacemakerd", args->verbosity); + crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); + + crm_debug("Checking for existing Pacemaker instance"); + old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0); + old_instance_connected = crm_ipc_connect(old_instance); + +- if (shutdown) { ++ if (options.shutdown) { + if (old_instance_connected) { + crm_exit(request_shutdown(old_instance)); + } else { +@@ -1253,22 +1279,25 @@ main(int argc, char **argv) + "Pacemaker instance: %s", strerror(errno)); + crm_ipc_close(old_instance); + crm_ipc_destroy(old_instance); +- crm_exit(CRM_EX_DISCONNECT); ++ exit_code = CRM_EX_DISCONNECT; ++ goto done; + } + + } else if (old_instance_connected) { + crm_ipc_close(old_instance); + crm_ipc_destroy(old_instance); + crm_err("Aborting start-up because active Pacemaker instance found"); +- crm_exit(CRM_EX_FATAL); ++ exit_code = CRM_EX_FATAL; ++ goto done; + } + + crm_ipc_close(old_instance); + crm_ipc_destroy(old_instance); + + #ifdef SUPPORT_COROSYNC + if (mcp_read_config() == FALSE) { +- crm_exit(CRM_EX_UNAVAILABLE); ++ exit_code = CRM_EX_UNAVAILABLE; ++ goto done; + } + #endif + +@@ -1292,7 +1321,8 @@ main(int argc, char **argv) + #ifdef SUPPORT_COROSYNC + /* Allows us to block shutdown */ + if (!cluster_connect_cfg()) { +- crm_exit(CRM_EX_PROTOCOL); ++ exit_code = CRM_EX_PROTOCOL; ++ goto done; + } + #endif + +@@ -1307,9 +1337,11 @@ main(int argc, char **argv) + case pcmk_rc_ok: + break; + case pcmk_rc_ipc_unauthorized: +- crm_exit(CRM_EX_CANTCREAT); ++ exit_code = CRM_EX_CANTCREAT; ++ goto done; + default: +- crm_exit(CRM_EX_FATAL); ++ exit_code = CRM_EX_FATAL; ++ goto done; + }; + + mainloop_add_signal(SIGTERM, pcmk_shutdown); +@@ -1342,5 +1374,11 @@ main(int argc, char **argv) + #ifdef SUPPORT_COROSYNC + cluster_disconnect_cfg(); + #endif +- crm_exit(CRM_EX_OK); ++ ++done: ++ g_strfreev(processed_args); ++ pcmk__free_arg_context(context); ++ ++ pcmk__output_and_clear_error(error, NULL); ++ crm_exit(exit_code); + } +-- +1.8.3.1 + + +From 8f7924fbb2a012bedcad59335b7bebc5020b26e3 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 13 May 2021 13:27:13 -0400 +Subject: [PATCH 4/4] Low: pacemaker.service: Don't start pacemakerd with -f. + +This option is completely ignored by pacemakerd. +--- + daemons/pacemakerd/pacemaker.service.in | 2 +- + doc/sphinx/Clusters_from_Scratch/verification.rst | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/daemons/pacemakerd/pacemaker.service.in b/daemons/pacemakerd/pacemaker.service.in +index b128ddc..0363a22 100644 +--- a/daemons/pacemakerd/pacemaker.service.in ++++ b/daemons/pacemakerd/pacemaker.service.in +@@ -44,7 +44,7 @@ EnvironmentFile=-@CONFIGDIR@/pacemaker + EnvironmentFile=-@CONFIGDIR@/sbd + SuccessExitStatus=100 + +-ExecStart=@sbindir@/pacemakerd -f ++ExecStart=@sbindir@/pacemakerd + + # Systemd v227 and above can limit the number of processes spawned by a + # service. That is a bad idea for an HA cluster resource manager, so disable it +diff --git a/doc/sphinx/Clusters_from_Scratch/verification.rst b/doc/sphinx/Clusters_from_Scratch/verification.rst +index 9d647f8..b7fa20e 100644 +--- a/doc/sphinx/Clusters_from_Scratch/verification.rst ++++ b/doc/sphinx/Clusters_from_Scratch/verification.rst +@@ -103,7 +103,7 @@ the necessary processes are running: + 2 ? S 0:00 [kthreadd] + ...lots of processes... + 17121 ? 
SLsl 0:01 /usr/sbin/corosync -f +- 17133 ? Ss 0:00 /usr/sbin/pacemakerd -f ++ 17133 ? Ss 0:00 /usr/sbin/pacemakerd + 17134 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-based + 17135 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-fenced + 17136 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-execd +-- +1.8.3.1 + diff --git a/SOURCES/003-feature-set.patch b/SOURCES/003-feature-set.patch deleted file mode 100644 index 94180fa..0000000 --- a/SOURCES/003-feature-set.patch +++ /dev/null @@ -1,208 +0,0 @@ -From 0deb5145c336bc4b32766c6f7af259d643af9143 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 28 Oct 2020 13:56:09 -0400 -Subject: [PATCH 1/2] Fix: scheduler, tools: Update typing on maint-mode args. - ---- - lib/pengine/pe_output.c | 2 +- - tools/crm_mon_curses.c | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index 186be33..d0f96f4 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -686,7 +686,7 @@ pe__cluster_dc_xml(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long") -+PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int") - int - pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args) { - unsigned long long flags = va_arg(args, unsigned long long); -diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c -index 8a08578..9cf28dc 100644 ---- a/tools/crm_mon_curses.c -+++ b/tools/crm_mon_curses.c -@@ -365,7 +365,7 @@ stonith_event_console(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long") -+PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int") - static int - cluster_maint_mode_console(pcmk__output_t *out, va_list args) { - unsigned long long flags = va_arg(args, unsigned long long); --- -1.8.3.1 - - -From 7a61ae2384b0a1653b4a06926b4ec23099ccf292 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 28 Oct 2020 13:57:51 -0400 -Subject: [PATCH 2/2] Fix: tools: Update typing on formatted output args in - crmadmin. - -A lot of these are actually taking const char * as an argument, not -regular char *. 
---- - tools/crmadmin.c | 62 ++++++++++++++++++++++++++++---------------------------- - 1 file changed, 31 insertions(+), 31 deletions(-) - -diff --git a/tools/crmadmin.c b/tools/crmadmin.c -index b80a31a..e61dbf4 100644 ---- a/tools/crmadmin.c -+++ b/tools/crmadmin.c -@@ -167,14 +167,14 @@ command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError - return TRUE; - } - --PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") - static int - health_text(pcmk__output_t *out, va_list args) - { -- char *sys_from = va_arg(args, char *); -- char *host_from = va_arg(args, char *); -- char *fsa_state = va_arg(args, char *); -- char *result = va_arg(args, char *); -+ const char *sys_from = va_arg(args, const char *); -+ const char *host_from = va_arg(args, const char *); -+ const char *fsa_state = va_arg(args, const char *); -+ const char *result = va_arg(args, const char *); - - if (!out->is_quiet(out)) { - out->info(out, "Status of %s@%s: %s (%s)", crm_str(sys_from), -@@ -186,14 +186,14 @@ health_text(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") - static int - health_xml(pcmk__output_t *out, va_list args) - { -- char *sys_from = va_arg(args, char *); -- char *host_from = va_arg(args, char *); -- char *fsa_state = va_arg(args, char *); -- char *result = va_arg(args, char *); -+ const char *sys_from = va_arg(args, const char *); -+ const char *host_from = va_arg(args, const char *); -+ const char *fsa_state = va_arg(args, const char *); -+ const char *result = va_arg(args, const char *); - - xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); - xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(host_from)); -@@ -203,13 +203,13 @@ health_xml(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *") - static int - pacemakerd_health_text(pcmk__output_t *out, va_list args) - { -- char *sys_from = va_arg(args, char *); -- char *state = va_arg(args, char *); -- char *last_updated = va_arg(args, char *); -+ const char *sys_from = va_arg(args, const char *); -+ const char *state = va_arg(args, const char *); -+ const char *last_updated = va_arg(args, const char *); - - if (!out->is_quiet(out)) { - out->info(out, "Status of %s: '%s' %s %s", crm_str(sys_from), -@@ -222,13 +222,13 @@ pacemakerd_health_text(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *") - static int - pacemakerd_health_xml(pcmk__output_t *out, va_list args) - { -- char *sys_from = va_arg(args, char *); -- char *state = va_arg(args, char *); -- char *last_updated = va_arg(args, char *); -+ const char *sys_from = va_arg(args, const char *); -+ const char *state = va_arg(args, const char *); -+ const char *last_updated = va_arg(args, const char *); - - - xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -@@ -238,11 +238,11 @@ pacemakerd_health_xml(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("dc", "char *") 
-+PCMK__OUTPUT_ARGS("dc", "const char *") - static int - dc_text(pcmk__output_t *out, va_list args) - { -- char *dc = va_arg(args, char *); -+ const char *dc = va_arg(args, const char *); - - if (!out->is_quiet(out)) { - out->info(out, "Designated Controller is: %s", crm_str(dc)); -@@ -253,11 +253,11 @@ dc_text(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("dc", "char *") -+PCMK__OUTPUT_ARGS("dc", "const char *") - static int - dc_xml(pcmk__output_t *out, va_list args) - { -- char *dc = va_arg(args, char *); -+ const char *dc = va_arg(args, const char *); - - xmlNodePtr node = pcmk__output_create_xml_node(out, "dc"); - xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(dc)); -@@ -266,7 +266,7 @@ dc_xml(pcmk__output_t *out, va_list args) - } - - --PCMK__OUTPUT_ARGS("crmadmin-node-list", "xmlNode *") -+PCMK__OUTPUT_ARGS("crmadmin-node-list", "struct xmlNode *") - static int - crmadmin_node_list(pcmk__output_t *out, va_list args) - { -@@ -298,13 +298,13 @@ crmadmin_node_list(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *") - static int - crmadmin_node_text(pcmk__output_t *out, va_list args) - { -- char *type = va_arg(args, char *); -- char *name = va_arg(args, char *); -- char *id = va_arg(args, char *); -+ const char *type = va_arg(args, const char *); -+ const char *name = va_arg(args, const char *); -+ const char *id = va_arg(args, const char *); - - if (BASH_EXPORT) { - out->info(out, "export %s=%s", crm_str(name), crm_str(id)); -@@ -316,13 +316,13 @@ crmadmin_node_text(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *") - static int - crmadmin_node_xml(pcmk__output_t *out, va_list args) - { -- char *type = va_arg(args, char *); -- char *name = va_arg(args, char *); -- char *id = va_arg(args, char *); -+ const char *type = va_arg(args, const char *); -+ const char *name = va_arg(args, const char *); -+ const char *id = va_arg(args, const char *); - - xmlNodePtr node = pcmk__output_create_xml_node(out, "node"); - xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) (type ? type : "member")); --- -1.8.3.1 - diff --git a/SOURCES/003-pacemakerd-output.patch b/SOURCES/003-pacemakerd-output.patch new file mode 100644 index 0000000..167e22b --- /dev/null +++ b/SOURCES/003-pacemakerd-output.patch @@ -0,0 +1,343 @@ +From 7c35387a9896cb968cf4087b5cbed94af44e1ea5 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 14 May 2021 12:03:46 -0400 +Subject: [PATCH 1/5] Feature: daemons: Convert pacemakerd to formatted output. + +The main purpose of this is to finish getting pacemakerd moved off the +existing command line handling code (pcmk__cli_help in particular) so +that code can eventually be deprecated or removed. pacemakerd itself +does fairly little printing. 
+--- + daemons/pacemakerd/pacemakerd.c | 58 ++++++++++++++++++++++++++++++----------- + 1 file changed, 43 insertions(+), 15 deletions(-) + +diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c +index ce194bf..bd59729 100644 +--- a/daemons/pacemakerd/pacemakerd.c ++++ b/daemons/pacemakerd/pacemakerd.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -37,6 +38,14 @@ struct { + gboolean standby; + } options; + ++static pcmk__output_t *out = NULL; ++ ++static pcmk__supported_format_t formats[] = { ++ PCMK__SUPPORTED_FORMAT_NONE, ++ PCMK__SUPPORTED_FORMAT_TEXT, ++ { NULL, NULL, NULL } ++}; ++ + static gboolean + pid_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { + return TRUE; +@@ -1167,10 +1176,10 @@ pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, + } + + static GOptionContext * +-build_arg_context(pcmk__common_args_t *args) { ++build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { + GOptionContext *context = NULL; + +- context = pcmk__build_arg_context(args, NULL, NULL, NULL); ++ context = pcmk__build_arg_context(args, "text", group, NULL); + pcmk__add_main_args(context, entries); + return context; + } +@@ -1182,9 +1191,11 @@ main(int argc, char **argv) + + GError *error = NULL; + ++ int rc = pcmk_rc_ok; ++ GOptionGroup *output_group = NULL; + pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); + gchar **processed_args = pcmk__cmdline_preproc(argv, "p"); +- GOptionContext *context = build_arg_context(args); ++ GOptionContext *context = build_arg_context(args, &output_group); + + bool old_instance_connected = false; + +@@ -1195,23 +1205,30 @@ main(int argc, char **argv) + mainloop_add_signal(SIGHUP, pcmk_ignore); + mainloop_add_signal(SIGQUIT, pcmk_sigquit); + ++ pcmk__register_formats(output_group, formats); + if (!g_option_context_parse_strv(context, &processed_args, &error)) { + exit_code = CRM_EX_USAGE; + goto done; + } + ++ rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); ++ if (rc != pcmk_rc_ok) { ++ exit_code = CRM_EX_ERROR; ++ g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s", ++ args->output_ty, pcmk_rc_str(rc)); ++ goto done; ++ } ++ + if (options.features) { +- printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, +- CRM_FEATURE_SET, CRM_FEATURES); ++ out->info(out, "Pacemaker %s (Build: %s)\n Supporting v%s: %s", PACEMAKER_VERSION, ++ BUILD_VERSION, CRM_FEATURE_SET, CRM_FEATURES); + exit_code = CRM_EX_OK; + goto done; + } + + if (args->version) { +- g_strfreev(processed_args); +- pcmk__free_arg_context(context); +- /* FIXME: When pacemakerd is converted to use formatted output, this can go. */ +- pcmk__cli_help('v', CRM_EX_USAGE); ++ out->version(out, false); ++ goto done; + } + + setenv("LC_ALL", "C", 1); +@@ -1248,6 +1265,13 @@ main(int argc, char **argv) + crm_ipc_close(old_instance); + crm_ipc_destroy(old_instance); + ++ /* Don't allow any accidental output after this point. 
*/ ++ if (out != NULL) { ++ out->finish(out, exit_code, true, NULL); ++ pcmk__output_free(out); ++ out = NULL; ++ } ++ + #ifdef SUPPORT_COROSYNC + if (mcp_read_config() == FALSE) { + exit_code = CRM_EX_UNAVAILABLE; +@@ -1333,6 +1357,11 @@ done: + g_strfreev(processed_args); + pcmk__free_arg_context(context); + +- pcmk__output_and_clear_error(error, NULL); ++ pcmk__output_and_clear_error(error, out); ++ ++ if (out != NULL) { ++ out->finish(out, exit_code, true, NULL); ++ pcmk__output_free(out); ++ } + crm_exit(exit_code); + } +-- +1.8.3.1 + + +From 35e6da64381fcb092d81ce16835cc28670b077cb Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 17 May 2021 10:04:04 -0400 +Subject: [PATCH 2/5] Features: daemons: Output the pacemakerd feature list in + XML. + +--- + daemons/pacemakerd/pacemakerd.c | 45 ++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 42 insertions(+), 3 deletions(-) + +diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c +index bd59729..93cf743 100644 +--- a/daemons/pacemakerd/pacemakerd.c ++++ b/daemons/pacemakerd/pacemakerd.c +@@ -43,6 +43,42 @@ static pcmk__output_t *out = NULL; + static pcmk__supported_format_t formats[] = { + PCMK__SUPPORTED_FORMAT_NONE, + PCMK__SUPPORTED_FORMAT_TEXT, ++ PCMK__SUPPORTED_FORMAT_XML, ++ { NULL, NULL, NULL } ++}; ++ ++static int ++pacemakerd_features(pcmk__output_t *out, va_list args) { ++ out->info(out, "Pacemaker %s (Build: %s)\n Supporting v%s: %s", PACEMAKER_VERSION, ++ BUILD_VERSION, CRM_FEATURE_SET, CRM_FEATURES); ++ return pcmk_rc_ok; ++} ++ ++static int ++pacemakerd_features_xml(pcmk__output_t *out, va_list args) { ++ gchar **feature_list = g_strsplit(CRM_FEATURES, " ", 0); ++ ++ pcmk__output_xml_create_parent(out, "pacemakerd", ++ "version", PACEMAKER_VERSION, ++ "build", BUILD_VERSION, ++ "feature_set", CRM_FEATURE_SET, ++ NULL); ++ out->begin_list(out, NULL, NULL, "features"); ++ ++ for (char **s = feature_list; *s != NULL; s++) { ++ pcmk__output_create_xml_text_node(out, "feature", *s); ++ } ++ ++ out->end_list(out); ++ ++ g_strfreev(feature_list); ++ return pcmk_rc_ok; ++} ++ ++static pcmk__message_entry_t fmt_functions[] = { ++ { "features", "default", pacemakerd_features }, ++ { "features", "xml", pacemakerd_features_xml }, ++ + { NULL, NULL, NULL } + }; + +@@ -200,7 +236,7 @@ static GOptionContext * + build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { + GOptionContext *context = NULL; + +- context = pcmk__build_arg_context(args, "text", group, NULL); ++ context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); + pcmk__add_main_args(context, entries); + return context; + } +@@ -241,9 +277,12 @@ main(int argc, char **argv) + goto done; + } + ++ pcmk__force_args(context, &error, "%s --xml-simple-list", g_get_prgname()); ++ ++ pcmk__register_messages(out, fmt_functions); ++ + if (options.features) { +- out->info(out, "Pacemaker %s (Build: %s)\n Supporting v%s: %s", PACEMAKER_VERSION, +- BUILD_VERSION, CRM_FEATURE_SET, CRM_FEATURES); ++ out->message(out, "features"); + exit_code = CRM_EX_OK; + goto done; + } +-- +1.8.3.1 + + +From 5b7f5eb35b025b59805cf3c7c3dcb6a3cf4b71b3 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 17 May 2021 11:09:53 -0400 +Subject: [PATCH 3/5] Low: daemons: Conditionally enable logging in pacemakerd. + +If we're doing an interactive command-line call, use +pcmk__cli_init_logging. 
At the moment, all command line calls except +for --shutdown do their work before logging would even come up, so we +really only need to do this for --shutdown. + +If we're doing a daemon call, use crm_log_init. +--- + daemons/pacemakerd/pacemakerd.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c +index 93cf743..c20bde7 100644 +--- a/daemons/pacemakerd/pacemakerd.c ++++ b/daemons/pacemakerd/pacemakerd.c +@@ -296,8 +296,11 @@ main(int argc, char **argv) + + pcmk__set_env_option("mcp", "true"); + +- pcmk__cli_init_logging("pacemakerd", args->verbosity); +- crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); ++ if (options.shutdown) { ++ pcmk__cli_init_logging("pacemakerd", args->verbosity); ++ } else { ++ crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); ++ } + + crm_debug("Checking for existing Pacemaker instance"); + old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0); +-- +1.8.3.1 + + +From 2393362bb7489e86d937ed46a1c5cfb93d9bf3ab Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 17 May 2021 11:58:06 -0400 +Subject: [PATCH 4/5] Fix: include: Bump CRM_FEATURE_SET for new pacemakerd + args. + +--- + include/crm/crm.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/crm/crm.h b/include/crm/crm.h +index fdfc825..92a98fa 100644 +--- a/include/crm/crm.h ++++ b/include/crm/crm.h +@@ -66,7 +66,7 @@ extern "C" { + * >=3.0.13: Fail counts include operation name and interval + * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED + */ +-# define CRM_FEATURE_SET "3.10.0" ++# define CRM_FEATURE_SET "3.10.1" + + /* Pacemaker's CPG protocols use fixed-width binary fields for the sender and + * recipient of a CPG message. This imposes an arbitrary limit on cluster node +-- +1.8.3.1 + + +From 3ad8edbd91631b87ef5f53fa2d68f0c8bbb9ee2b Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 17 May 2021 11:57:09 -0400 +Subject: [PATCH 5/5] Feature: xml: Add schema for pacemakerd. + +--- + xml/Makefile.am | 1 + + xml/api/pacemakerd-2.10.rng | 28 ++++++++++++++++++++++++++++ + 2 files changed, 29 insertions(+) + create mode 100644 xml/api/pacemakerd-2.10.rng + +diff --git a/xml/Makefile.am b/xml/Makefile.am +index 12a51c5..b9448d4 100644 +--- a/xml/Makefile.am ++++ b/xml/Makefile.am +@@ -56,6 +56,7 @@ API_request_base = command-output \ + crm_simulate \ + crmadmin \ + digests \ ++ pacemakerd \ + stonith_admin \ + version + +diff --git a/xml/api/pacemakerd-2.10.rng b/xml/api/pacemakerd-2.10.rng +new file mode 100644 +index 0000000..41a11e7 +--- /dev/null ++++ b/xml/api/pacemakerd-2.10.rng +@@ -0,0 +1,28 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + diff --git a/SOURCES/004-check-level.patch b/SOURCES/004-check-level.patch new file mode 100644 index 0000000..f2abb5f --- /dev/null +++ b/SOURCES/004-check-level.patch @@ -0,0 +1,199 @@ +From 3905e7eac11298fc20efd567a773666f948edf61 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 3 May 2021 11:19:04 -0400 +Subject: [PATCH 1/2] Feature: tools: Add OCF_CHECK_LEVEL to crm_resource + environment. + +If --validate= or --force-check= are given with a level, pass that along +as OCF_CHECK_LEVEL. This argument is optional, and if no value is given +then the environment variable will not be set and whatever's the default +on the resource agent will be used. + +See: rhbz#1955792. 
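An illustrative aside, not part of the patch series: this change lands in two files, so here is a condensed sketch that puts the optional-LEVEL parsing and the environment export side by side. The parse_check_level()/export_check_level() names and the file-scope variable are invented for the sketch; pcmk__scan_min_int(), crm_strdup_printf(), g_set_error() and setenv() are used exactly as in the hunks below, and the single crm_internal.h include is an assumption that only holds inside the Pacemaker tree.

    #include <stdlib.h>         /* setenv(), free() */
    #include <glib.h>
    #include <crm_internal.h>   /* pcmk__scan_min_int(), crm_strdup_printf(), ... */

    static int check_level = -1;    /* -1 means "no LEVEL given on the command line" */

    /* The level-parsing half of validate_or_force_cb(): accept an optional
     * numeric argument, reject anything pcmk__scan_min_int() cannot parse. */
    static gboolean
    parse_check_level(const gchar *optarg, GError **error)
    {
        if ((optarg != NULL)
            && (pcmk__scan_min_int(optarg, &check_level, 0) != pcmk_rc_ok)) {
            g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
                        "Invalid check level setting: %s", optarg);
            return FALSE;
        }
        return TRUE;
    }

    /* The crm_resource_runtime.c half: touch the environment only when a level
     * was requested, so the agent's own default applies otherwise. */
    static void
    export_check_level(void)
    {
        if (check_level >= 0) {
            char *level = crm_strdup_printf("%d", check_level);

            setenv("OCF_CHECK_LEVEL", level, 1);
            free(level);
        }
    }
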
+--- + tools/crm_resource.c | 29 +++++++++++++++++++++-------- + tools/crm_resource.h | 4 ++-- + tools/crm_resource_runtime.c | 13 ++++++++++--- + 3 files changed, 33 insertions(+), 13 deletions(-) + +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index 45db2b2..6ca96f8 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -100,6 +100,7 @@ struct { + int timeout_ms; // Parsed from --timeout value + char *agent_spec; // Standard and/or provider and/or agent + gchar *xml_file; // Value of (deprecated) --xml-file ++ int check_level; // Optional value of --validate or --force-check + + // Resource configuration specified via command-line arguments + gboolean cmdline_config; // Resource configuration was via arguments +@@ -113,6 +114,7 @@ struct { + GHashTable *override_params; // Resource parameter values that override config + } options = { + .attr_set_type = XML_TAG_ATTR_SETS, ++ .check_level = -1, + .cib_options = cib_sync_call, + .require_cib = TRUE, + .require_dataset = TRUE, +@@ -402,14 +404,15 @@ static GOptionEntry query_entries[] = { + }; + + static GOptionEntry command_entries[] = { +- { "validate", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, ++ { "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, + validate_or_force_cb, + "Validate resource configuration by calling agent's validate-all\n" + INDENT "action. The configuration may be specified either by giving an\n" + INDENT "existing resource name with -r, or by specifying --class,\n" + INDENT "--agent, and --provider arguments, along with any number of\n" +- INDENT "--option arguments.", +- NULL }, ++ INDENT "--option arguments. An optional LEVEL argument can be given\n" ++ INDENT "to control the level of checking performed.", ++ "LEVEL" }, + { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb, + "If resource has any past failures, clear its history and fail\n" + INDENT "count. Optionally filtered by --resource, --node, --operation\n" +@@ -546,11 +549,12 @@ static GOptionEntry advanced_entries[] = { + INDENT "the cluster believes the resource is a clone instance already\n" + INDENT "running on the local node.", + NULL }, +- { "force-check", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, ++ { "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, + validate_or_force_cb, + "(Advanced) Bypass the cluster and check the state of a resource on\n" +- INDENT "the local node", +- NULL }, ++ INDENT "the local node. 
An optional LEVEL argument can be given\n" ++ INDENT "to control the level of checking performed.", ++ "LEVEL" }, + + { NULL } + }; +@@ -910,6 +914,15 @@ validate_or_force_cb(const gchar *option_name, const gchar *optarg, + if (options.override_params == NULL) { + options.override_params = pcmk__strkey_table(free, free); + } ++ ++ if (optarg != NULL) { ++ if (pcmk__scan_min_int(optarg, &options.check_level, 0) != pcmk_rc_ok) { ++ g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, ++ "Invalid check level setting: %s", optarg); ++ return FALSE; ++ } ++ } ++ + return TRUE; + } + +@@ -1826,12 +1839,12 @@ main(int argc, char **argv) + options.v_class, options.v_provider, options.v_agent, + "validate-all", options.cmdline_params, + options.override_params, options.timeout_ms, +- args->verbosity, options.force); ++ args->verbosity, options.force, options.check_level); + } else { + exit_code = cli_resource_execute(rsc, options.rsc_id, + options.operation, options.override_params, + options.timeout_ms, cib_conn, data_set, +- args->verbosity, options.force); ++ args->verbosity, options.force, options.check_level); + } + goto done; + +diff --git a/tools/crm_resource.h b/tools/crm_resource.h +index 3560377..5ab10d6 100644 +--- a/tools/crm_resource.h ++++ b/tools/crm_resource.h +@@ -88,11 +88,11 @@ crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc + const char *rsc_type, const char *rsc_action, + GHashTable *params, GHashTable *override_hash, + int timeout_ms, int resource_verbose, +- gboolean force); ++ gboolean force, int check_level); + crm_exit_t cli_resource_execute(pe_resource_t *rsc, const char *requested_name, + const char *rsc_action, GHashTable *override_hash, + int timeout_ms, cib_t *cib, pe_working_set_t *data_set, +- int resource_verbose, gboolean force); ++ int resource_verbose, gboolean force, int check_level); + + int cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, + const char *attr_set, const char *attr_set_type, +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index fe0ec98..bde83b6 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -1679,7 +1679,8 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + const char *rsc_class, const char *rsc_prov, + const char *rsc_type, const char *action, + GHashTable *params, GHashTable *override_hash, +- int timeout_ms, int resource_verbose, gboolean force) ++ int timeout_ms, int resource_verbose, gboolean force, ++ int check_level) + { + GHashTable *params_copy = NULL; + crm_exit_t exit_code = CRM_EX_OK; +@@ -1703,6 +1704,12 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + /* add crm_feature_set env needed by some resource agents */ + g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); + ++ if (check_level >= 0) { ++ char *level = crm_strdup_printf("%d", check_level); ++ setenv("OCF_CHECK_LEVEL", level, 1); ++ free(level); ++ } ++ + /* resources_action_create frees the params hash table it's passed, but we + * may need to reuse it in a second call to resources_action_create. 
Thus + * we'll make a copy here so that gets freed and the original remains for +@@ -1790,7 +1797,7 @@ crm_exit_t + cli_resource_execute(pe_resource_t *rsc, const char *requested_name, + const char *rsc_action, GHashTable *override_hash, + int timeout_ms, cib_t * cib, pe_working_set_t *data_set, +- int resource_verbose, gboolean force) ++ int resource_verbose, gboolean force, int check_level) + { + pcmk__output_t *out = data_set->priv; + crm_exit_t exit_code = CRM_EX_OK; +@@ -1856,7 +1863,7 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, + + exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, action, + params, override_hash, timeout_ms, +- resource_verbose, force); ++ resource_verbose, force, check_level); + return exit_code; + } + +-- +1.8.3.1 + + +From d13ba4bd6defe0dd81fdf8ab39ae5b889513c0c0 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 20 May 2021 10:59:23 -0400 +Subject: [PATCH 2/2] Fix: include: Bump feature set to 3.10.2. + +This is for the OCF_CHECK_LEVEL environment variable. + +See: rhbz#1955792. +--- + include/crm/crm.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/crm/crm.h b/include/crm/crm.h +index 92a98fa..ee52c36 100644 +--- a/include/crm/crm.h ++++ b/include/crm/crm.h +@@ -66,7 +66,7 @@ extern "C" { + * >=3.0.13: Fail counts include operation name and interval + * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED + */ +-# define CRM_FEATURE_SET "3.10.1" ++# define CRM_FEATURE_SET "3.10.2" + + /* Pacemaker's CPG protocols use fixed-width binary fields for the sender and + * recipient of a CPG message. This imposes an arbitrary limit on cluster node +-- +1.8.3.1 + diff --git a/SOURCES/004-feature-set.patch b/SOURCES/004-feature-set.patch deleted file mode 100644 index 22d309d..0000000 --- a/SOURCES/004-feature-set.patch +++ /dev/null @@ -1,771 +0,0 @@ -From 9faa62f0701801f1d420462025e863d8ca3d6a06 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 21 Sep 2020 14:19:19 -0400 -Subject: [PATCH 1/6] Fix: libcrmcommon: Automatically lower case XML list - names. - ---- - lib/common/output_xml.c | 10 ++++------ - 1 file changed, 4 insertions(+), 6 deletions(-) - -diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c -index 1710fac..6a6ed6e 100644 ---- a/lib/common/output_xml.c -+++ b/lib/common/output_xml.c -@@ -50,7 +50,6 @@ typedef struct subst_s { - } subst_t; - - static subst_t substitutions[] = { -- { "Attributes", "attributes" }, - { "Active Resources", "resources" }, - { "Full List of Resources", "resources" }, - { "Inactive Resources", "resources" }, -@@ -61,8 +60,6 @@ static subst_t substitutions[] = { - { "Operations", "node_history" }, - { "Negative Location Constraints", "bans" }, - { "Node Attributes", "node_attributes" }, -- { "Resources", "resources" }, -- { "Tickets", "tickets" }, - - { NULL, NULL } - }; -@@ -288,7 +285,7 @@ static void - xml_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plural_noun, - const char *format, ...) 
{ - va_list ap; -- const char *name = NULL; -+ char *name = NULL; - char *buf = NULL; - int len; - -@@ -300,14 +297,14 @@ xml_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plura - if (substitute) { - for (subst_t *s = substitutions; s->from != NULL; s++) { - if (!strcmp(s->from, buf)) { -- name = s->to; -+ name = g_strdup(s->to); - break; - } - } - } - - if (name == NULL) { -- name = buf; -+ name = g_ascii_strdown(buf, -1); - } - - if (legacy_xml || simple_list) { -@@ -319,6 +316,7 @@ xml_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plura - xmlSetProp(list_node, (pcmkXmlStr) "name", (pcmkXmlStr) name); - } - -+ g_free(name); - free(buf); - } - --- -1.8.3.1 - - -From 7a77441ae8d3ab943dfafebfc06b63402be323e1 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 27 Oct 2020 12:49:45 -0400 -Subject: [PATCH 2/6] Feature: xml: Move resource-related XML schema into its - own file. - -This allows it to be shared between the crm_mon and crm_resource -schemas. Also, this adds support for resource XML to crm_mon given that -this is now technically possible as part of the library output. ---- - xml/Makefile.am | 2 +- - xml/api/crm_mon-2.4.rng | 311 ++++++++++++++++++++++++++++++++++++++++++++++ - xml/api/resources-2.4.rng | 109 ++++++++++++++++ - 3 files changed, 421 insertions(+), 1 deletion(-) - create mode 100644 xml/api/crm_mon-2.4.rng - create mode 100644 xml/api/resources-2.4.rng - -diff --git a/xml/Makefile.am b/xml/Makefile.am -index 892c811..79ce900 100644 ---- a/xml/Makefile.am -+++ b/xml/Makefile.am -@@ -51,7 +51,7 @@ API_request_base = command-output crm_mon crmadmin stonith_admin version - CIB_cfg_base = options nodes resources constraints fencing acls tags alerts - - # Names of all schemas (including top level and those included by others) --API_base = $(API_request_base) fence-event item status -+API_base = $(API_request_base) fence-event item resources status - CIB_base = cib $(CIB_cfg_base) status score rule nvset - - # Static schema files and transforms (only CIB has transforms) -diff --git a/xml/api/crm_mon-2.4.rng b/xml/api/crm_mon-2.4.rng -new file mode 100644 -index 0000000..88973a4 ---- /dev/null -+++ b/xml/api/crm_mon-2.4.rng -@@ -0,0 +1,311 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ unknown -+ member -+ remote -+ ping -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ granted -+ revoked -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/xml/api/resources-2.4.rng b/xml/api/resources-2.4.rng -new file mode 100644 -index 0000000..e279583 ---- /dev/null -+++ b/xml/api/resources-2.4.rng -@@ -0,0 +1,109 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ docker -+ rkt -+ podman -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - - -From 814eb921cd429692220f33722c9bc061266bd838 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 29 Oct 2020 14:45:02 -0400 -Subject: [PATCH 3/6] Feature: xml: Add a generic list XML schema file. - -Formatted output supports a generic list structure that isn't used in -too many places at the moment, but it could be getting used more in the -future. It's also really easy to have a schema for this piece of XML. ---- - xml/Makefile.am | 7 ++++++- - xml/api/generic-list-2.4.rng | 21 +++++++++++++++++++++ - 2 files changed, 27 insertions(+), 1 deletion(-) - create mode 100644 xml/api/generic-list-2.4.rng - -diff --git a/xml/Makefile.am b/xml/Makefile.am -index 79ce900..2f99f1c 100644 ---- a/xml/Makefile.am -+++ b/xml/Makefile.am -@@ -44,6 +44,11 @@ version_pairs_last = $(wordlist \ - ),$(1),$(2) \ - ) - -+# NOTE: All files in API_request_base, CIB_cfg_base, API_base, and CIB_base -+# need to start with a unique prefix. These variables all get iterated over -+# and globbed, and two files starting with the same prefix will cause -+# problems. -+ - # Names of API schemas that form the choices for pacemaker-result content - API_request_base = command-output crm_mon crmadmin stonith_admin version - -@@ -51,7 +56,7 @@ API_request_base = command-output crm_mon crmadmin stonith_admin version - CIB_cfg_base = options nodes resources constraints fencing acls tags alerts - - # Names of all schemas (including top level and those included by others) --API_base = $(API_request_base) fence-event item resources status -+API_base = $(API_request_base) fence-event generic-list item resources status - CIB_base = cib $(CIB_cfg_base) status score rule nvset - - # Static schema files and transforms (only CIB has transforms) -diff --git a/xml/api/generic-list-2.4.rng b/xml/api/generic-list-2.4.rng -new file mode 100644 -index 0000000..fee93a9 ---- /dev/null -+++ b/xml/api/generic-list-2.4.rng -@@ -0,0 +1,21 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - - -From 316b6f66ca5425093503c51c2f8738922287ebca Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 16 Sep 2020 15:59:34 -0400 -Subject: [PATCH 4/6] Fix: tools: Save the optarg parameter for - --list-ocf-alternatives. - -We need this so it can be added to the XML output of crm_resource. ---- - tools/crm_resource.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 6573ad5..acaddc0 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -720,9 +720,7 @@ list_alternatives_cb(const gchar *option_name, const gchar *optarg, - gpointer data, GError **error) - { - SET_COMMAND(cmd_list_alternatives); -- options.require_cib = FALSE; -- options.require_dataset = FALSE; -- options.require_resource = FALSE; -+ get_agent_spec(optarg); - return TRUE; - } - --- -1.8.3.1 - - -From c063ce9b193f2022611e651c13afcb3ceb5969e3 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 2 Sep 2020 16:20:10 -0400 -Subject: [PATCH 5/6] Fix: scheduler: Use the default format handler in a few - more places. 
- ---- - lib/pengine/pe_output.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index d0f96f4..9d43e5f 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -1847,9 +1847,7 @@ static pcmk__message_entry_t fmt_functions[] = { - { "cluster-times", "log", pe__cluster_times_text }, - { "cluster-times", "text", pe__cluster_times_text }, - { "cluster-times", "xml", pe__cluster_times_xml }, -- { "failed-action", "html", pe__failed_action_text }, -- { "failed-action", "log", pe__failed_action_text }, -- { "failed-action", "text", pe__failed_action_text }, -+ { "failed-action", "default", pe__failed_action_text }, - { "failed-action", "xml", pe__failed_action_xml }, - { "group", "xml", pe__group_xml }, - { "group", "html", pe__group_html }, -@@ -1868,9 +1866,7 @@ static pcmk__message_entry_t fmt_functions[] = { - { "node-attribute", "log", pe__node_attribute_text }, - { "node-attribute", "text", pe__node_attribute_text }, - { "node-attribute", "xml", pe__node_attribute_xml }, -- { "op-history", "html", pe__op_history_text }, -- { "op-history", "log", pe__op_history_text }, -- { "op-history", "text", pe__op_history_text }, -+ { "op-history", "default", pe__op_history_text }, - { "op-history", "xml", pe__op_history_xml }, - { "primitive", "xml", pe__resource_xml }, - { "primitive", "html", pe__resource_html }, --- -1.8.3.1 - - -From a32b99f5fd09ec15dbba6785c5d8dc2e220417a3 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 3 Sep 2020 10:23:16 -0400 -Subject: [PATCH 6/6] Refactor: scheduler: Expose native_output_string as - pcmk__native_output_string. - -The plan is that this function can be used to help build the string that -is output for each line of "crm_resource -o" output. It appears that -output only happens for primitive resources. However, I've added a -check at the beginning just in case it's called for some other type of -resource. ---- - include/crm/pengine/internal.h | 3 +++ - lib/pengine/native.c | 24 ++++++++++++++---------- - 2 files changed, 17 insertions(+), 10 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index abe7a76..d658e86 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -243,6 +243,9 @@ void clone_print(pe_resource_t * rsc, const char *pre_text, long options, void * - void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options, - void *print_data); - -+gchar * pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, -+ long options, const char *target_role, bool show_nodes); -+ - int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name - , size_t pairs_count, ...); - char *pe__node_display_name(pe_node_t *node, bool print_detail); -diff --git a/lib/pengine/native.c b/lib/pengine/native.c -index 8f0c5c9..bf1f5c0 100644 ---- a/lib/pengine/native.c -+++ b/lib/pengine/native.c -@@ -570,17 +570,21 @@ add_output_node(GString *s, const char *node, bool have_nodes) - * \return Newly allocated string description of resource - * \note Caller must free the result with g_free(). 
- */ --static gchar * --native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, -- long options, const char *target_role, bool show_nodes) -+gchar * -+pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, -+ long options, const char *target_role, bool show_nodes) - { - const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); - const char *provider = NULL; - const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); -- char *retval = NULL; -+ gchar *retval = NULL; - GString *outstr = NULL; - bool have_flags = false; - -+ if (rsc->variant != pe_native) { -+ return NULL; -+ } -+ - CRM_CHECK(name != NULL, name = "unknown"); - CRM_CHECK(kind != NULL, kind = "unknown"); - CRM_CHECK(class != NULL, class = "unknown"); -@@ -758,8 +762,8 @@ pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, - } - - { -- gchar *s = native_output_string(rsc, name, node, options, target_role, -- true); -+ gchar *s = pcmk__native_output_string(rsc, name, node, options, -+ target_role, true); - - list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL); - pcmk_create_html_node(list_node, "span", NULL, cl, s); -@@ -826,8 +830,8 @@ pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, - } - - { -- gchar *s = native_output_string(rsc, name, node, options, target_role, -- true); -+ gchar *s = pcmk__native_output_string(rsc, name, node, options, -+ target_role, true); - - out->list_item(out, NULL, "%s", s); - g_free(s); -@@ -923,8 +927,8 @@ common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_nod - } - - { -- gchar *resource_s = native_output_string(rsc, name, node, options, -- target_role, false); -+ gchar *resource_s = pcmk__native_output_string(rsc, name, node, options, -+ target_role, false); - status_print("%s%s", (pre_text? pre_text : ""), resource_s); - g_free(resource_s); - } --- -1.8.3.1 - diff --git a/SOURCES/005-crm_resource.patch b/SOURCES/005-crm_resource.patch new file mode 100644 index 0000000..1683026 --- /dev/null +++ b/SOURCES/005-crm_resource.patch @@ -0,0 +1,866 @@ +From a5a507d4e1abf242903472719a19977811e6f164 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 20 May 2021 11:59:36 -0400 +Subject: [PATCH 01/10] Feature: libcrmcommon: Add OCF_OUTPUT_FORMAT to + crm_resource environment. + +See: rhbz#1644628 +--- + lib/common/output.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/common/output.c b/lib/common/output.c +index 6cb49b5..58872e0 100644 +--- a/lib/common/output.c ++++ b/lib/common/output.c +@@ -71,6 +71,8 @@ pcmk__output_new(pcmk__output_t **out, const char *fmt_name, const char *filenam + return ENOMEM; + } + ++ setenv("OCF_OUTPUT_FORMAT", (*out)->fmt_name, 1); ++ + return pcmk_rc_ok; + } + +-- +1.8.3.1 + + +From acc6ecdbfb797d69794e68f75a734d6252434e01 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 21 May 2021 14:20:30 -0400 +Subject: [PATCH 02/10] Feature: schemas: Copy crm_resource schema in + preparation for changes. 
+ +See: rhbz#1644628 +--- + xml/api/crm_resource-2.11.rng | 238 ++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 238 insertions(+) + create mode 100644 xml/api/crm_resource-2.11.rng + +diff --git a/xml/api/crm_resource-2.11.rng b/xml/api/crm_resource-2.11.rng +new file mode 100644 +index 0000000..8e386db +--- /dev/null ++++ b/xml/api/crm_resource-2.11.rng +@@ -0,0 +1,238 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ promoted ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ocf ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ true ++ false ++ ++ ++ ++ true ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ Stopped ++ Started ++ Master ++ Slave ++ ++ ++ +-- +1.8.3.1 + + +From 1bbdf2149a111e9e19c388834f82001e0d31c427 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 24 May 2021 12:23:55 -0400 +Subject: [PATCH 03/10] Feature: xml: Update the crm_resource schema for XML + output. + +See: rhbz#1644628 +--- + xml/api/crm_resource-2.11.rng | 50 +++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 50 insertions(+) + +diff --git a/xml/api/crm_resource-2.11.rng b/xml/api/crm_resource-2.11.rng +index 8e386db..aaa54d6 100644 +--- a/xml/api/crm_resource-2.11.rng ++++ b/xml/api/crm_resource-2.11.rng +@@ -20,6 +20,7 @@ + + + ++ + + + +@@ -227,6 +228,55 @@ + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + Stopped +-- +1.8.3.1 + + +From d89f5bc7fec856fdcd32fa14edbd0019507d5d15 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 1 Jun 2021 15:26:58 -0400 +Subject: [PATCH 04/10] Low: libcrmcommon: Increase PCMK__API_VERSION for new + crm_resource output. + +See: rhbz#1644628 +--- + include/crm/common/output_internal.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h +index 10b315b..0436cde 100644 +--- a/include/crm/common/output_internal.h ++++ b/include/crm/common/output_internal.h +@@ -27,7 +27,7 @@ extern "C" { + # include + # include + +-# define PCMK__API_VERSION "2.9" ++# define PCMK__API_VERSION "2.11" + + #if defined(PCMK__WITH_ATTRIBUTE_OUTPUT_ARGS) + # define PCMK__OUTPUT_ARGS(ARGS...) __attribute__((output_args(ARGS))) +-- +1.8.3.1 + + +From 30bd2ddf43ee2a911681e51f40ed9ba20ec250b0 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 27 May 2021 13:57:12 -0400 +Subject: [PATCH 05/10] Low: tools: Pass NULL to + cli_resource_execute_from_params... + +if no resource name is given. This happens if we are validating based +on the --class/--agent/--provider command line options instead. 
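An illustrative aside, not part of the patch series: the convention this patch establishes is that a NULL rsc_name means "the configuration came from --class/--agent/--provider", so the output messages can omit the name while the executor still receives a placeholder key. A minimal sketch of that substitution point follows (create_action() is an invented wrapper; resources_action_create() and its arguments are taken from the hunk below, and crm/services.h is assumed to declare it):

    #include <glib.h>
    #include <crm/services.h>   /* svc_action_t, resources_action_create() */

    static svc_action_t *
    create_action(const char *rsc_name, const char *rsc_class, const char *rsc_prov,
                  const char *rsc_type, const char *action, int timeout_ms,
                  GHashTable *params)
    {
        /* Only here does the placeholder name appear; the code that formats
         * output for the user keeps the NULL and drops the name instead. */
        return resources_action_create((rsc_name != NULL)? rsc_name : "test",
                                       rsc_class, rsc_prov, rsc_type, action,
                                       0 /* interval */, timeout_ms, params,
                                       0 /* flags */);
    }
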
+--- + tools/crm_resource.c | 2 +- + tools/crm_resource_runtime.c | 8 ++++---- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index 24f1121..37a0bb0 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -1840,7 +1840,7 @@ main(int argc, char **argv) + + case cmd_execute_agent: + if (options.cmdline_config) { +- exit_code = cli_resource_execute_from_params(out, "test", ++ exit_code = cli_resource_execute_from_params(out, NULL, + options.v_class, options.v_provider, options.v_agent, + "validate-all", options.cmdline_params, + options.override_params, options.timeout_ms, +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index 48a4b40..ebf48bb 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -1717,14 +1717,14 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + */ + params_copy = pcmk__str_table_dup(params); + +- op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0, +- timeout_ms, params_copy, 0); ++ op = resources_action_create(rsc_name ? rsc_name : "test", rsc_class, rsc_prov, ++ rsc_type, action, 0, timeout_ms, params_copy, 0); + if (op == NULL) { + /* Re-run with stderr enabled so we can display a sane error message */ + crm_enable_stderr(TRUE); + params_copy = pcmk__str_table_dup(params); +- op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0, +- timeout_ms, params_copy, 0); ++ op = resources_action_create(rsc_name ? rsc_name : "test", rsc_class, rsc_prov, ++ rsc_type, action, 0, timeout_ms, params_copy, 0); + + /* Callers of cli_resource_execute expect that the params hash table will + * be freed. That function uses this one, so for that reason and for +-- +1.8.3.1 + + +From ee56efd53d14cfc4f902769540b72b3bb6096a73 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 24 May 2021 12:08:52 -0400 +Subject: [PATCH 06/10] Feature: tools: Add an agent-status message for + crm_resource. + +This moves what was previously only done in an out->info call to its own +output message, which means it will appear in XML output as well. Also, +note that if --class/--agent/--provider are given, the resource name +will be set to "test". In that case, do not display the resource name +in the output. + +This message will be used for --validate and the --force-* command line +options to crm_resource. + +See: rhbz#1644628 +--- + tools/crm_resource_print.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 53 insertions(+) + +diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c +index 9d82cf8..88d5878 100644 +--- a/tools/crm_resource_print.c ++++ b/tools/crm_resource_print.c +@@ -152,6 +152,57 @@ attribute_list_default(pcmk__output_t *out, va_list args) { + return pcmk_rc_ok; + } + ++PCMK__OUTPUT_ARGS("agent-status", "int", "const char *", "const char *", "const char *", ++ "const char *", "const char *", "int") ++static int ++agent_status_default(pcmk__output_t *out, va_list args) { ++ int status = va_arg(args, int); ++ const char *action = va_arg(args, const char *); ++ const char *name = va_arg(args, const char *); ++ const char *class = va_arg(args, const char *); ++ const char *provider = va_arg(args, const char *); ++ const char *type = va_arg(args, const char *); ++ int rc = va_arg(args, int); ++ ++ if (status == PCMK_LRM_OP_DONE) { ++ out->info(out, "Operation %s%s%s (%s%s%s:%s) returned: '%s' (%d)", ++ action, name ? " for " : "", name ? 
name : "", ++ class, provider ? ":" : "", provider ? provider : "", type, ++ services_ocf_exitcode_str(rc), rc); ++ } else { ++ out->err(out, "Operation %s%s%s (%s%s%s:%s) failed: '%s' (%d)", ++ action, name ? " for " : "", name ? name : "", ++ class, provider ? ":" : "", provider ? provider : "", type, ++ services_lrm_status_str(status), status); ++ } ++ ++ return pcmk_rc_ok; ++} ++ ++PCMK__OUTPUT_ARGS("agent-status", "int", "const char *", "const char *", "const char *", ++ "const char *", "const char *", "int") ++static int ++agent_status_xml(pcmk__output_t *out, va_list args) { ++ int status G_GNUC_UNUSED = va_arg(args, int); ++ const char *action G_GNUC_UNUSED = va_arg(args, const char *); ++ const char *name G_GNUC_UNUSED = va_arg(args, const char *); ++ const char *class G_GNUC_UNUSED = va_arg(args, const char *); ++ const char *provider G_GNUC_UNUSED = va_arg(args, const char *); ++ const char *type G_GNUC_UNUSED = va_arg(args, const char *); ++ int rc = va_arg(args, int); ++ ++ char *status_str = pcmk__itoa(rc); ++ ++ pcmk__output_create_xml_node(out, "agent-status", ++ "code", status_str, ++ "message", services_ocf_exitcode_str(rc), ++ NULL); ++ ++ free(status_str); ++ ++ return pcmk_rc_ok; ++} ++ + PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "char *", "GHashTable *") + static int + attribute_list_text(pcmk__output_t *out, va_list args) { +@@ -562,6 +613,8 @@ resource_names(pcmk__output_t *out, va_list args) { + } + + static pcmk__message_entry_t fmt_functions[] = { ++ { "agent-status", "default", agent_status_default }, ++ { "agent-status", "xml", agent_status_xml }, + { "attribute-list", "default", attribute_list_default }, + { "attribute-list", "text", attribute_list_text }, + { "property-list", "default", property_list_default }, +-- +1.8.3.1 + + +From 85cb6b6bff96b18c5174d11e4de4d49cbfb20bb7 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 1 Jun 2021 14:47:30 -0400 +Subject: [PATCH 07/10] Feature: tools: Add an overridden params output + message. + +This also replaces what was previously being done in an out->info call +with an output message. This means it shows up in XML output as well. +Also, note that if --class/--agent/--provider are given, the resource +name will be set to "test". In that case, do not display the resource +name in the output. 
+ +See: rhbz#1644628 +--- + tools/crm_resource_print.c | 39 +++++++++++++++++++++++++++++++++++++++ + 1 file changed, 39 insertions(+) + +diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c +index 88d5878..119d83f 100644 +--- a/tools/crm_resource_print.c ++++ b/tools/crm_resource_print.c +@@ -224,6 +224,43 @@ attribute_list_text(pcmk__output_t *out, va_list args) { + return pcmk_rc_ok; + } + ++PCMK__OUTPUT_ARGS("override", "const char *", "const char *", "const char *") ++static int ++override_default(pcmk__output_t *out, va_list args) { ++ const char *rsc_name = va_arg(args, const char *); ++ const char *name = va_arg(args, const char *); ++ const char *value = va_arg(args, const char *); ++ ++ if (rsc_name == NULL) { ++ out->list_item(out, NULL, "Overriding the cluster configuration with '%s' = '%s'", ++ name, value); ++ } else { ++ out->list_item(out, NULL, "Overriding the cluster configuration for '%s' with '%s' = '%s'", ++ rsc_name, name, value); ++ } ++ ++ return pcmk_rc_ok; ++} ++ ++PCMK__OUTPUT_ARGS("override", "const char *", "const char *", "const char *") ++static int ++override_xml(pcmk__output_t *out, va_list args) { ++ const char *rsc_name = va_arg(args, const char *); ++ const char *name = va_arg(args, const char *); ++ const char *value = va_arg(args, const char *); ++ ++ xmlNodePtr node = pcmk__output_create_xml_node(out, "override", ++ "name", name, ++ "value", value, ++ NULL); ++ ++ if (rsc_name != NULL) { ++ crm_xml_add(node, "rsc", rsc_name); ++ } ++ ++ return pcmk_rc_ok; ++} ++ + PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "char *") + static int + property_list_default(pcmk__output_t *out, va_list args) { +@@ -617,6 +654,8 @@ static pcmk__message_entry_t fmt_functions[] = { + { "agent-status", "xml", agent_status_xml }, + { "attribute-list", "default", attribute_list_default }, + { "attribute-list", "text", attribute_list_text }, ++ { "override", "default", override_default }, ++ { "override", "xml", override_xml }, + { "property-list", "default", property_list_default }, + { "property-list", "text", property_list_text }, + { "resource-check-list", "default", resource_check_list_default }, +-- +1.8.3.1 + + +From e5e24592c7c3231c619fb5253e7925ffbc634a99 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 4 Jun 2021 10:24:51 -0400 +Subject: [PATCH 08/10] Low: tools: Use simple XML lists for resource actions + as well. + +See: rhbz#1644628 +--- + tools/crm_resource.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index 37a0bb0..e957011 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -1643,6 +1643,7 @@ main(int argc, char **argv) + * saves from having to write custom messages to build the lists around all these things + */ + switch (options.rsc_cmd) { ++ case cmd_execute_agent: + case cmd_list_resources: + case cmd_query_xml: + case cmd_query_raw_xml: +-- +1.8.3.1 + + +From 3e75174d0bc31b261adb1994214a5878b79da85b Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 4 Jun 2021 10:30:10 -0400 +Subject: [PATCH 09/10] Feature: tools: Add an output message for resource + actions. + +This wraps up the override and agent-status messages into a single +message, along with any stdout/stderr from the resource action. This +message should be called after taking the action. + +This also implements handling XML output from resource actions. Check +to see if the validate-all action returns XML. If so, output it as a +CDATA block under a "command" element. 
If not, treat it as plain text +and output it as stdout/stderr from a command. + +See: rhbz#1644628 +--- + tools/crm_resource_print.c | 122 +++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 122 insertions(+) + +diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c +index 119d83f..19a366d 100644 +--- a/tools/crm_resource_print.c ++++ b/tools/crm_resource_print.c +@@ -293,6 +293,126 @@ property_list_text(pcmk__output_t *out, va_list args) { + return pcmk_rc_ok; + } + ++PCMK__OUTPUT_ARGS("resource-agent-action", "int", "const char *", "const char *", ++ "const char *", "const char *", "const char *", "GHashTable *", ++ "int", "int", "char *", "char *") ++static int ++resource_agent_action_default(pcmk__output_t *out, va_list args) { ++ int verbose = va_arg(args, int); ++ ++ const char *class = va_arg(args, const char *); ++ const char *provider = va_arg(args, const char *); ++ const char *type = va_arg(args, const char *); ++ const char *rsc_name = va_arg(args, const char *); ++ const char *action = va_arg(args, const char *); ++ GHashTable *overrides = va_arg(args, GHashTable *); ++ int rc = va_arg(args, int); ++ int status = va_arg(args, int); ++ char *stdout_data = va_arg(args, char *); ++ char *stderr_data = va_arg(args, char *); ++ ++ if (overrides) { ++ GHashTableIter iter; ++ char *name = NULL; ++ char *value = NULL; ++ ++ out->begin_list(out, NULL, NULL, "overrides"); ++ ++ g_hash_table_iter_init(&iter, overrides); ++ while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) { ++ out->message(out, "override", rsc_name, name, value); ++ } ++ ++ out->end_list(out); ++ } ++ ++ out->message(out, "agent-status", status, action, rsc_name, class, provider, ++ type, rc); ++ ++ /* hide output for validate-all if not in verbose */ ++ if (verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei)) { ++ return pcmk_rc_ok; ++ } ++ ++ if (stdout_data || stderr_data) { ++ xmlNodePtr doc = string2xml(stdout_data); ++ ++ if (doc != NULL) { ++ out->output_xml(out, "command", stdout_data); ++ xmlFreeNode(doc); ++ } else { ++ out->subprocess_output(out, rc, stdout_data, stderr_data); ++ } ++ } ++ ++ return pcmk_rc_ok; ++} ++ ++PCMK__OUTPUT_ARGS("resource-agent-action", "int", "const char *", "const char *", ++ "const char *", "const char *", "const char *", "GHashTable *", ++ "int", "int", "char *", "char *") ++static int ++resource_agent_action_xml(pcmk__output_t *out, va_list args) { ++ int verbose G_GNUC_UNUSED = va_arg(args, int); ++ ++ const char *class = va_arg(args, const char *); ++ const char *provider = va_arg(args, const char *); ++ const char *type = va_arg(args, const char *); ++ const char *rsc_name = va_arg(args, const char *); ++ const char *action = va_arg(args, const char *); ++ GHashTable *overrides = va_arg(args, GHashTable *); ++ int rc = va_arg(args, int); ++ int status = va_arg(args, int); ++ char *stdout_data = va_arg(args, char *); ++ char *stderr_data = va_arg(args, char *); ++ ++ xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource-agent-action", ++ "action", action, ++ "class", class, ++ "type", type, ++ NULL); ++ ++ if (rsc_name) { ++ crm_xml_add(node, "rsc", rsc_name); ++ } ++ ++ if (provider) { ++ crm_xml_add(node, "provider", provider); ++ } ++ ++ if (overrides) { ++ GHashTableIter iter; ++ char *name = NULL; ++ char *value = NULL; ++ ++ out->begin_list(out, NULL, NULL, "overrides"); ++ ++ g_hash_table_iter_init(&iter, overrides); ++ while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) 
&value)) { ++ out->message(out, "override", rsc_name, name, value); ++ } ++ ++ out->end_list(out); ++ } ++ ++ out->message(out, "agent-status", status, action, rsc_name, class, provider, ++ type, rc); ++ ++ if (stdout_data || stderr_data) { ++ xmlNodePtr doc = string2xml(stdout_data); ++ ++ if (doc != NULL) { ++ out->output_xml(out, "command", stdout_data); ++ xmlFreeNode(doc); ++ } else { ++ out->subprocess_output(out, rc, stdout_data, stderr_data); ++ } ++ } ++ ++ pcmk__output_xml_pop_parent(out); ++ return pcmk_rc_ok; ++} ++ + PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *") + static int + resource_check_list_default(pcmk__output_t *out, va_list args) { +@@ -658,6 +778,8 @@ static pcmk__message_entry_t fmt_functions[] = { + { "override", "xml", override_xml }, + { "property-list", "default", property_list_default }, + { "property-list", "text", property_list_text }, ++ { "resource-agent-action", "default", resource_agent_action_default }, ++ { "resource-agent-action", "xml", resource_agent_action_xml }, + { "resource-check-list", "default", resource_check_list_default }, + { "resource-check-list", "xml", resource_check_list_xml }, + { "resource-search-list", "default", resource_search_list_default }, +-- +1.8.3.1 + + +From b50b2418e1e997b42f5370b4672a3f105d74634f Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 4 Jun 2021 10:40:16 -0400 +Subject: [PATCH 10/10] Feature: tools: Use the new resource-agent-action + message. + +See: rhbz#1644628 +--- + tools/crm_resource_runtime.c | 21 +++------------------ + 1 file changed, 3 insertions(+), 18 deletions(-) + +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index ebf48bb..755be9f 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -1765,28 +1765,13 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + if (services_action_sync(op)) { + exit_code = op->rc; + +- if (op->status == PCMK_LRM_OP_DONE) { +- out->info(out, "Operation %s for %s (%s:%s:%s) returned: '%s' (%d)", +- action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, +- services_ocf_exitcode_str(op->rc), op->rc); +- } else { +- out->err(out, "Operation %s for %s (%s:%s:%s) failed: '%s' (%d)", +- action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, +- services_lrm_status_str(op->status), op->status); +- } +- +- /* hide output for validate-all if not in verbose */ +- if (resource_verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei)) +- goto done; +- +- if (op->stdout_data || op->stderr_data) { +- out->subprocess_output(out, op->rc, op->stdout_data, op->stderr_data); +- } ++ out->message(out, "resource-agent-action", resource_verbose, rsc_class, ++ rsc_prov, rsc_type, rsc_name, action, override_hash, op->rc, ++ op->status, op->stdout_data, op->stderr_data); + } else { + exit_code = op->rc == 0 ? CRM_EX_ERROR : op->rc; + } + +-done: + services_action_free(op); + /* See comment above about why we free params here. */ + g_hash_table_destroy(params); +-- +1.8.3.1 + diff --git a/SOURCES/005-feature-set.patch b/SOURCES/005-feature-set.patch deleted file mode 100644 index 346f13e..0000000 --- a/SOURCES/005-feature-set.patch +++ /dev/null @@ -1,210 +0,0 @@ -From 477b7b679d58455dc38c2594b29a1ecfbe88e80c Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 2 Nov 2020 14:55:27 -0500 -Subject: [PATCH 1/2] Fix: libcrmcommon: Prevent a segfault in - pcmk__cmdline_preproc. - -The list of special single-character args is optional. 
The function -currently handles it being an empty string, but it should handle a NULL -as well. ---- - lib/common/cmdline.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/common/cmdline.c b/lib/common/cmdline.c -index d66ccc7..66f1976 100644 ---- a/lib/common/cmdline.c -+++ b/lib/common/cmdline.c -@@ -203,7 +203,7 @@ pcmk__cmdline_preproc(char **argv, const char *special) { - * glib does not. Grab both the argument and its value and - * separate them into a new argument. - */ -- if (strchr(special, *ch) != NULL) { -+ if (special != NULL && strchr(special, *ch) != NULL) { - /* The argument does not occur at the end of this string of - * arguments. Take everything through the end as its value. - */ --- -1.8.3.1 - - -From d1f4a975fa783045254521f415f1899b34ee96e3 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 2 Nov 2020 16:06:29 -0500 -Subject: [PATCH 2/2] Test: libcrmcommon: Add unit tests for - pcmk__cmdline_preproc. - ---- - configure.ac | 1 + - lib/common/tests/Makefile.am | 2 +- - lib/common/tests/cmdline/Makefile.am | 29 ++++++ - .../tests/cmdline/pcmk__cmdline_preproc_test.c | 102 +++++++++++++++++++++ - 4 files changed, 133 insertions(+), 1 deletion(-) - create mode 100644 lib/common/tests/cmdline/Makefile.am - create mode 100644 lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c - -diff --git a/configure.ac b/configure.ac -index 7ed4a30..36e85a9 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -2006,6 +2006,7 @@ AC_CONFIG_FILES(Makefile \ - lib/pacemaker-cluster.pc \ - lib/common/Makefile \ - lib/common/tests/Makefile \ -+ lib/common/tests/cmdline/Makefile \ - lib/common/tests/flags/Makefile \ - lib/common/tests/operations/Makefile \ - lib/common/tests/strings/Makefile \ -diff --git a/lib/common/tests/Makefile.am b/lib/common/tests/Makefile.am -index 33c45cb..f3eaeec 100644 ---- a/lib/common/tests/Makefile.am -+++ b/lib/common/tests/Makefile.am -@@ -1 +1 @@ --SUBDIRS = flags operations strings utils -+SUBDIRS = cmdline flags operations strings utils -diff --git a/lib/common/tests/cmdline/Makefile.am b/lib/common/tests/cmdline/Makefile.am -new file mode 100644 -index 0000000..e69ef21 ---- /dev/null -+++ b/lib/common/tests/cmdline/Makefile.am -@@ -0,0 +1,29 @@ -+# -+# Copyright 2020 the Pacemaker project contributors -+# -+# The version control history for this file may have further details. -+# -+# This source code is licensed under the GNU General Public License version 2 -+# or later (GPLv2+) WITHOUT ANY WARRANTY. -+# -+AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include -+LDADD = $(top_builddir)/lib/common/libcrmcommon.la -+ -+include $(top_srcdir)/mk/glib-tap.mk -+ -+# Add each test program here. Each test should be written as a little standalone -+# program using the glib unit testing functions. See the documentation for more -+# information. -+# -+# https://developer.gnome.org/glib/unstable/glib-Testing.html -+# -+# Add "_test" to the end of all test program names to simplify .gitignore. -+test_programs = pcmk__cmdline_preproc_test -+ -+# If any extra data needs to be added to the source distribution, add it to the -+# following list. -+dist_test_data = -+ -+# If any extra data needs to be used by tests but should not be added to the -+# source distribution, add it to the following list. 
-+test_data = -diff --git a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c -new file mode 100644 -index 0000000..e13c983 ---- /dev/null -+++ b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c -@@ -0,0 +1,102 @@ -+/* -+ * Copyright 2020 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. -+ * -+ * This source code is licensed under the GNU Lesser General Public License -+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. -+ */ -+ -+#include -+#include -+ -+#define LISTS_EQ(a, b) { \ -+ g_assert_cmpint(g_strv_length((gchar **) (a)), ==, g_strv_length((gchar **) (b))); \ -+ for (int i = 0; i < g_strv_length((a)); i++) { \ -+ g_assert_cmpstr((a)[i], ==, (b)[i]); \ -+ } \ -+} -+ -+static void -+empty_input(void) { -+ g_assert_cmpint(pcmk__cmdline_preproc(NULL, "") == NULL, ==, TRUE); -+} -+ -+static void -+no_specials(void) { -+ const char *argv[] = { "-a", "-b", "-c", "-d", NULL }; -+ const gchar *expected[] = { "-a", "-b", "-c", "-d", NULL }; -+ -+ gchar **processed = pcmk__cmdline_preproc((char **) argv, NULL); -+ LISTS_EQ(processed, expected); -+ g_strfreev(processed); -+ -+ processed = pcmk__cmdline_preproc((char **) argv, ""); -+ LISTS_EQ(processed, expected); -+ g_strfreev(processed); -+} -+ -+static void -+single_dash(void) { -+ const char *argv[] = { "-", NULL }; -+ const gchar *expected[] = { "-", NULL }; -+ -+ gchar **processed = pcmk__cmdline_preproc((char **) argv, NULL); -+ LISTS_EQ(processed, expected); -+ g_strfreev(processed); -+} -+ -+static void -+double_dash(void) { -+ const char *argv[] = { "-a", "--", "-bc", NULL }; -+ const gchar *expected[] = { "-a", "--", "-bc", NULL }; -+ -+ gchar **processed = pcmk__cmdline_preproc((char **) argv, NULL); -+ LISTS_EQ(processed, expected); -+ g_strfreev(processed); -+} -+ -+static void -+special_args(void) { -+ const char *argv[] = { "-aX", "-Fval", NULL }; -+ const gchar *expected[] = { "-a", "X", "-F", "val", NULL }; -+ -+ gchar **processed = pcmk__cmdline_preproc((char **) argv, "aF"); -+ LISTS_EQ(processed, expected); -+ g_strfreev(processed); -+} -+ -+static void -+special_arg_at_end(void) { -+ const char *argv[] = { "-a", NULL }; -+ const gchar *expected[] = { "-a", NULL }; -+ -+ gchar **processed = pcmk__cmdline_preproc((char **) argv, "a"); -+ LISTS_EQ(processed, expected); -+ g_strfreev(processed); -+} -+ -+static void -+long_arg(void) { -+ const char *argv[] = { "--blah=foo", NULL }; -+ const gchar *expected[] = { "--blah=foo", NULL }; -+ -+ gchar **processed = pcmk__cmdline_preproc((char **) argv, NULL); -+ LISTS_EQ(processed, expected); -+ g_strfreev(processed); -+} -+ -+int -+main(int argc, char **argv) -+{ -+ g_test_init(&argc, &argv, NULL); -+ -+ g_test_add_func("/common/cmdline/preproc/empty_input", empty_input); -+ g_test_add_func("/common/cmdline/preproc/no_specials", no_specials); -+ g_test_add_func("/common/cmdline/preproc/single_dash", single_dash); -+ g_test_add_func("/common/cmdline/preproc/double_dash", double_dash); -+ g_test_add_func("/common/cmdline/preproc/special_args", special_args); -+ g_test_add_func("/common/cmdline/preproc/special_arg_at_end", special_arg_at_end); -+ g_test_add_func("/common/cmdline/preproc/long_arg", long_arg); -+ return g_test_run(); -+} --- -1.8.3.1 - diff --git a/SOURCES/006-crm_simulate.patch b/SOURCES/006-crm_simulate.patch new file mode 100644 index 0000000..c8d4e3f --- /dev/null +++ b/SOURCES/006-crm_simulate.patch @@ -0,0 +1,896 @@ +From 
97571e6ccc9b7fa339a7e27d9b0b9ab782ff3003 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 16 Jun 2021 13:54:10 -0400 +Subject: [PATCH 1/5] Low: schemas: Copy crm_mon.rng in preparation for + changes. + +--- + xml/api/crm_mon-2.12.rng | 243 +++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 243 insertions(+) + create mode 100644 xml/api/crm_mon-2.12.rng + +diff --git a/xml/api/crm_mon-2.12.rng b/xml/api/crm_mon-2.12.rng +new file mode 100644 +index 0000000..ffec923 +--- /dev/null ++++ b/xml/api/crm_mon-2.12.rng +@@ -0,0 +1,243 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ granted ++ revoked ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From da394983f106f974274ddd94675a04c85086010e Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 18 Jun 2021 15:06:34 -0400 +Subject: [PATCH 2/5] Refactor: Split node history out into its own XML schema. + +This allows for sharing it between crm_mon and crm_simulate. +--- + xml/Makefile.am | 2 +- + xml/api/crm_mon-2.12.rng | 64 +-------------------------------------- + xml/api/node-history-2.12.rng | 70 +++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 72 insertions(+), 64 deletions(-) + create mode 100644 xml/api/node-history-2.12.rng + +diff --git a/xml/Makefile.am b/xml/Makefile.am +index b9448d4..8e7b6d3 100644 +--- a/xml/Makefile.am ++++ b/xml/Makefile.am +@@ -64,7 +64,7 @@ API_request_base = command-output \ + CIB_cfg_base = options nodes resources constraints fencing acls tags alerts + + # Names of all schemas (including top level and those included by others) +-API_base = $(API_request_base) fence-event failure generic-list item node-attrs nodes resources status ++API_base = $(API_request_base) fence-event failure generic-list item node-attrs node-history nodes resources status + CIB_base = cib $(CIB_cfg_base) status score rule nvset + + # Static schema files and transforms (only CIB has transforms) +diff --git a/xml/api/crm_mon-2.12.rng b/xml/api/crm_mon-2.12.rng +index ffec923..be14412 100644 +--- a/xml/api/crm_mon-2.12.rng ++++ b/xml/api/crm_mon-2.12.rng +@@ -20,7 +20,7 @@ + + + +- ++ + + + +@@ -113,14 +113,6 @@ + + + +- +- +- +- +- +- +- +- + + + +@@ -156,60 +148,6 @@ + + + +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- + + + +diff --git a/xml/api/node-history-2.12.rng b/xml/api/node-history-2.12.rng +new file mode 100644 +index 0000000..9628000 +--- /dev/null ++++ b/xml/api/node-history-2.12.rng +@@ -0,0 +1,70 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From bf72b2615630eef7876e443d60b34d5a316de847 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Wed, 16 Jun 2021 14:09:31 -0400 
+Subject: [PATCH 3/5] Low: schemas: Copy crm_simulate.rng in preparation for + changes. + +--- + xml/api/crm_simulate-2.12.rng | 335 ++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 335 insertions(+) + create mode 100644 xml/api/crm_simulate-2.12.rng + +diff --git a/xml/api/crm_simulate-2.12.rng b/xml/api/crm_simulate-2.12.rng +new file mode 100644 +index 0000000..9a7612d +--- /dev/null ++++ b/xml/api/crm_simulate-2.12.rng +@@ -0,0 +1,335 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From c46e07788788acf5669e3f89b9344190a91c7331 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 18 Jun 2021 15:10:19 -0400 +Subject: [PATCH 4/5] Feature: tools: Add the node-summary to crm_simulate + output. + +If --show-failcounts is given to crm_simulate, it should also display +the node-summary message. + +See: rhbz#1686426 +--- + tools/crm_simulate.c | 7 +++++-- + xml/api/crm_simulate-2.12.rng | 3 +++ + 2 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c +index b4aa9d1..2ea292c 100644 +--- a/tools/crm_simulate.c ++++ b/tools/crm_simulate.c +@@ -409,11 +409,14 @@ print_cluster_status(pe_working_set_t * data_set, unsigned int print_opts) + FALSE, FALSE, all, all, FALSE); + + if (options.show_attrs) { +- out->message(out, "node-attribute-list", data_set, +- 0, rc == pcmk_rc_ok, FALSE, FALSE, FALSE, all, all); ++ rc = out->message(out, "node-attribute-list", data_set, ++ 0, rc == pcmk_rc_ok, FALSE, FALSE, FALSE, all, all); + } + + if (options.show_failcounts) { ++ rc = out->message(out, "node-summary", data_set, all, all, ++ 0, print_opts, FALSE, FALSE, FALSE, FALSE, rc == pcmk_rc_ok); ++ + out->message(out, "failed-action-list", data_set, all, all, + rc == pcmk_rc_ok); + } +diff --git a/xml/api/crm_simulate-2.12.rng b/xml/api/crm_simulate-2.12.rng +index 9a7612d..f90bd36 100644 +--- a/xml/api/crm_simulate-2.12.rng ++++ b/xml/api/crm_simulate-2.12.rng +@@ -67,6 +67,9 @@ + + + ++ ++ ++ + + + +-- +1.8.3.1 + + +From bac50336e0264604716e5997b87ee7e65311b982 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Fri, 18 Jun 2021 15:21:52 -0400 +Subject: [PATCH 5/5] Low: libcrmcommon: Increase PCMK__API_VERSION for new + crm_resource output. 
+ +See: rhbz#1686426 +--- + include/crm/common/output_internal.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h +index 0436cde..ba9c423 100644 +--- a/include/crm/common/output_internal.h ++++ b/include/crm/common/output_internal.h +@@ -27,7 +27,7 @@ extern "C" { + # include + # include + +-# define PCMK__API_VERSION "2.11" ++# define PCMK__API_VERSION "2.12" + + #if defined(PCMK__WITH_ATTRIBUTE_OUTPUT_ARGS) + # define PCMK__OUTPUT_ARGS(ARGS...) __attribute__((output_args(ARGS))) +-- +1.8.3.1 + diff --git a/SOURCES/006-digests.patch b/SOURCES/006-digests.patch deleted file mode 100644 index e93b7e1..0000000 --- a/SOURCES/006-digests.patch +++ /dev/null @@ -1,7903 +0,0 @@ -From 6d0b9b102383ada3fb8a8d50ae0dae9dca6cde9f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 15 Sep 2020 17:18:07 -0500 -Subject: [PATCH 01/11] Refactor: controller: simplify default handling for - private agent parameters - -This is an efficiency gain since the setting of default private parameters only -has to be done once when meta-data is read rather than every time an action -result is recorded, but mainly this is to make the code simpler and easier to -follow. ---- - daemons/controld/controld_execd.c | 31 +------------------------------ - daemons/controld/controld_metadata.c | 19 ++++++++++++++++++- - daemons/controld/controld_metadata.h | 1 - - 3 files changed, 19 insertions(+), 32 deletions(-) - -diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c -index f4dc414..0122e2b 100644 ---- a/daemons/controld/controld_execd.c -+++ b/daemons/controld/controld_execd.c -@@ -498,39 +498,10 @@ build_parameter_list(const lrmd_event_data_t *op, - { - char *list = NULL; - size_t len = 0; -- size_t max = 0; -- -- /* Newer resource agents support the "private" parameter attribute to -- * indicate sensitive parameters. For backward compatibility with older -- * agents, this list is used if the agent doesn't specify any as "private". 
-- */ -- const char *secure_terms[] = { -- "password", -- "passwd", -- "user", -- }; -- -- if (!pcmk_is_set(metadata->ra_flags, ra_uses_private) -- && (param_type == ra_param_private)) { -- -- max = DIMOF(secure_terms); -- } - - for (GList *iter = metadata->ra_params; iter != NULL; iter = iter->next) { - struct ra_param_s *param = (struct ra_param_s *) iter->data; -- bool accept = FALSE; -- -- if (pcmk_is_set(param->rap_flags, param_type)) { -- accept = TRUE; -- -- } else if (max) { -- for (int lpc = 0; lpc < max; lpc++) { -- if (pcmk__str_eq(secure_terms[lpc], param->rap_name, pcmk__str_casei)) { -- accept = TRUE; -- break; -- } -- } -- } -+ bool accept = pcmk_is_set(param->rap_flags, param_type); - - if (accept) { - crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type)); -diff --git a/daemons/controld/controld_metadata.c b/daemons/controld/controld_metadata.c -index da9da60..ef6281e 100644 ---- a/daemons/controld/controld_metadata.c -+++ b/daemons/controld/controld_metadata.c -@@ -182,6 +182,7 @@ metadata_cache_update(GHashTable *mdc, lrmd_rsc_info_t *rsc, - xmlNode *metadata = NULL; - xmlNode *match = NULL; - struct ra_metadata_s *md = NULL; -+ bool any_private_params = false; - - CRM_CHECK(mdc && rsc && metadata_str, return NULL); - -@@ -238,12 +239,28 @@ metadata_cache_update(GHashTable *mdc, lrmd_rsc_info_t *rsc, - goto err; - } - if (pcmk_is_set(p->rap_flags, ra_param_private)) { -- controld_set_ra_flags(md, key, ra_uses_private); -+ any_private_params = true; - } - md->ra_params = g_list_prepend(md->ra_params, p); - } - } - -+ /* Newer resource agents support the "private" parameter attribute to -+ * indicate sensitive parameters. For backward compatibility with older -+ * agents, implicitly treat a few common names as private when the agent -+ * doesn't specify any explicitly. -+ */ -+ if (!any_private_params) { -+ for (GList *iter = md->ra_params; iter != NULL; iter = iter->next) { -+ struct ra_param_s *p = iter->data; -+ -+ if (pcmk__str_any_of(p->rap_name, "password", "passwd", "user", -+ NULL)) { -+ controld_set_ra_param_flags(p, ra_param_private); -+ } -+ } -+ } -+ - g_hash_table_replace(mdc, key, md); - free_xml(metadata); - return md; -diff --git a/daemons/controld/controld_metadata.h b/daemons/controld/controld_metadata.h -index 010092f..398d12a 100644 ---- a/daemons/controld/controld_metadata.h -+++ b/daemons/controld/controld_metadata.h -@@ -12,7 +12,6 @@ - - enum ra_flags_e { - ra_supports_reload = 0x01, -- ra_uses_private = 0x02, - }; - - enum ra_param_flags_e { --- -1.8.3.1 - - -From 683062145fcfe2afa6aab100d7a8b4b1add6cea9 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 16 Sep 2020 18:41:40 -0500 -Subject: [PATCH 02/11] Fix: scheduler: properly compute digest of - non-sensitive resource parameters - -The controller records op-secure-digest as a hash of all resource parameters -(specifically, those listed in the resource agent's meta-data) except those -marked as private. - -Previously, the scheduler compared that against a digest of *all* parameters -(including meta-attributes, etc.) after filtering private parameters and -running pcmk__filter_op_for_digest(). The latter usually made the hash -identical to the controller's, but not always. Now, it only digests resource -instance attributes. 
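For illustration, a minimal sketch of the digesting idea, assuming a plain GHashTable of instance attributes and an ad-hoc SHA-256 checksum; the real code canonicalizes rsc->parameters as XML and hashes it with calculate_operation_digest(), so everything else here (function name, checksum choice) is hypothetical:

#include <glib.h>
#include <stdbool.h>
#include <string.h>

/* Sketch only: digest the agent's instance attributes and nothing else
 * (no meta-attributes), skipping any name in the sensitive list. */
static char *
sketch_secure_digest(GHashTable *instance_attrs,
                     const char *const *secure_names, size_t n_secure)
{
    char *digest = NULL;
    GString *buf = g_string_new(NULL);
    GList *names = g_list_sort(g_hash_table_get_keys(instance_attrs),
                               (GCompareFunc) strcmp);

    for (GList *iter = names; iter != NULL; iter = iter->next) {
        const char *name = iter->data;
        bool sensitive = false;

        for (size_t i = 0; i < n_secure; i++) {
            if (strcmp(name, secure_names[i]) == 0) {
                sensitive = true;   // e.g. "password" never enters the hash
                break;
            }
        }
        if (!sensitive) {
            g_string_append_printf(buf, "%s=%s;", name,
                                   (const char *) g_hash_table_lookup(instance_attrs,
                                                                      name));
        }
    }
    digest = g_compute_checksum_for_string(G_CHECKSUM_SHA256, buf->str, -1);
    g_list_free(names);
    g_string_free(buf, TRUE);
    return digest;
}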
---- - lib/pengine/utils.c | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) - -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index bfb67da..2ad8780 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -2123,7 +2123,12 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, - data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); - - if (calc_secure) { -- data->params_secure = copy_xml(data->params_all); -+ /* The controller doesn't create a digest of *all* non-sensitive -+ * parameters, only those listed in resource agent meta-data. The -+ * equivalent here is rsc->parameters. -+ */ -+ data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS); -+ g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); - if(secure_list) { - filter_parameters(data->params_secure, secure_list, FALSE); - } --- -1.8.3.1 - - -From 7c654b1e34ce79c96c80f2919d7204fa3f1b2669 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 17 Sep 2020 09:29:49 -0500 -Subject: [PATCH 03/11] Refactor: libcrmcommon: separate agent-related API into - own header - ---- - include/crm/common/Makefile.am | 2 +- - include/crm/common/agents.h | 58 ++++++++++++++++++++++++++++++++++++++++++ - include/crm/common/util.h | 25 +----------------- - 3 files changed, 60 insertions(+), 25 deletions(-) - create mode 100644 include/crm/common/agents.h - -diff --git a/include/crm/common/Makefile.am b/include/crm/common/Makefile.am -index f30e75f..2686dc0 100644 ---- a/include/crm/common/Makefile.am -+++ b/include/crm/common/Makefile.am -@@ -12,7 +12,7 @@ MAINTAINERCLEANFILES = Makefile.in - headerdir=$(pkgincludedir)/crm/common - - header_HEADERS = xml.h ipc.h util.h iso8601.h mainloop.h logging.h results.h \ -- nvpair.h acl.h ipc_controld.h ipc_pacemakerd.h -+ nvpair.h acl.h agents.h ipc_controld.h ipc_pacemakerd.h - noinst_HEADERS = internal.h alerts_internal.h \ - iso8601_internal.h remote_internal.h xml_internal.h \ - ipc_internal.h output_internal.h cmdline_internal.h \ -diff --git a/include/crm/common/agents.h b/include/crm/common/agents.h -new file mode 100644 -index 0000000..b585ada ---- /dev/null -+++ b/include/crm/common/agents.h -@@ -0,0 +1,58 @@ -+/* -+ * Copyright 2017-2020 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. -+ * -+ * This source code is licensed under the GNU Lesser General Public License -+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. -+ */ -+ -+#ifndef PCMK__AGENTS__H -+# define PCMK__AGENTS__H -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/** -+ * \file -+ * \brief API related to resource agents -+ * \ingroup core -+ */ -+ -+#include // uint32_t -+#include -+ -+// Capabilities supported by a resource agent standard -+enum pcmk_ra_caps { -+ pcmk_ra_cap_none = 0, -+ pcmk_ra_cap_provider = (1 << 0), // Requires provider -+ pcmk_ra_cap_status = (1 << 1), // Supports status instead of monitor -+ pcmk_ra_cap_params = (1 << 2), // Supports parameters -+ pcmk_ra_cap_unique = (1 << 3), // Supports unique clones -+ pcmk_ra_cap_promotable = (1 << 4), // Supports promotable clones -+ pcmk_ra_cap_stdin = (1 << 5), // Reads from standard input -+ pcmk_ra_cap_fence_params = (1 << 6), // Supports pcmk_monitor_timeout, etc. 
-+}; -+ -+uint32_t pcmk_get_ra_caps(const char *standard); -+char *crm_generate_ra_key(const char *standard, const char *provider, -+ const char *type); -+int crm_parse_agent_spec(const char *spec, char **standard, char **provider, -+ char **type); -+ -+#ifndef PCMK__NO_COMPAT -+/* Everything here is deprecated and kept only for public API backward -+ * compatibility. It will be moved to compatibility.h in a future release. -+ */ -+ -+//! \deprecated Use pcmk_get_ra_caps() instead -+bool crm_provider_required(const char *standard); -+ -+#endif // PCMK__NO_COMPAT -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif // PCMK__AGENTS__H -diff --git a/include/crm/common/util.h b/include/crm/common/util.h -index 74fd6bd..0a2ee6c 100644 ---- a/include/crm/common/util.h -+++ b/include/crm/common/util.h -@@ -32,6 +32,7 @@ extern "C" { - - # include - # include -+# include - # include - - # define ONLINESTATUS "online" // Status of an online client -@@ -133,27 +134,6 @@ xmlNode *crm_create_op_xml(xmlNode *parent, const char *prefix, - const char *timeout); - #define CRM_DEFAULT_OP_TIMEOUT_S "20s" - --// Public resource agent functions (from agents.c) -- --// Capabilities supported by a resource agent standard --enum pcmk_ra_caps { -- pcmk_ra_cap_none = 0, -- pcmk_ra_cap_provider = (1 << 0), // Requires provider -- pcmk_ra_cap_status = (1 << 1), // Supports status instead of monitor -- pcmk_ra_cap_params = (1 << 2), // Supports parameters -- pcmk_ra_cap_unique = (1 << 3), // Supports unique clones -- pcmk_ra_cap_promotable = (1 << 4), // Supports promotable clones -- pcmk_ra_cap_stdin = (1 << 5), // Reads from standard input -- pcmk_ra_cap_fence_params = (1 << 6), // Supports pcmk_monitor_timeout, etc. --}; -- --uint32_t pcmk_get_ra_caps(const char *standard); --char *crm_generate_ra_key(const char *standard, const char *provider, -- const char *type); --int crm_parse_agent_spec(const char *spec, char **standard, char **provider, -- char **type); -- -- - int compare_version(const char *version1, const char *version2); - - /* coverity[+kill] */ -@@ -255,9 +235,6 @@ is_set_any(long long word, long long bit) - return ((word & bit) != 0); - } - --//! \deprecated Use pcmk_get_ra_caps() instead --bool crm_provider_required(const char *standard); -- - //! \deprecated Use strcmp or strcasecmp instead - gboolean crm_str_eq(const char *a, const char *b, gboolean use_case); - --- -1.8.3.1 - - -From 6fe49fa36be9fd67f071b126561ee6d57d0cb491 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 17 Sep 2020 09:41:57 -0500 -Subject: [PATCH 04/11] Refactor: libcrmcommon,libstonithd: expose special - attribute constants - -These will be needed in libcrmcommon, so move them from libstonithd. -Also make them public API, since they're Pacemaker-specific strings -that external users might find useful. 
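A minimal usage sketch of the now-public names, following the stonith_key_value_add() pattern used elsewhere in this patch (header paths assumed); the device values below are made up for illustration:

#include <crm/common/agents.h>
#include <crm/stonith-ng.h>

/* Build fencing device parameters with the public PCMK_STONITH_* constants
 * rather than the old internal STONITH_ATTR_* macros. */
static stonith_key_value_t *
example_fence_device_params(void)
{
    stonith_key_value_t *params = NULL;

    params = stonith_key_value_add(params, PCMK_STONITH_HOST_LIST,
                                   "node1 node2");
    params = stonith_key_value_add(params, PCMK_STONITH_HOST_CHECK,
                                   "static-list");
    params = stonith_key_value_add(params, PCMK_STONITH_DELAY_MAX, "30s");
    return params;
}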
---- - daemons/fenced/cts-fence-helper.c | 25 +++++++++++++++++-------- - daemons/fenced/fenced_commands.c | 25 +++++++++++++------------ - daemons/fenced/pacemaker-fenced.c | 34 +++++++++++++++++++++------------- - include/crm/common/agents.h | 13 +++++++++++++ - include/crm/fencing/internal.h | 8 -------- - include/crm/msg_xml.h | 12 +++++++++++- - lib/fencing/st_client.c | 10 ++++++---- - lib/pengine/unpack.c | 2 +- - lib/pengine/utils.c | 3 ++- - 9 files changed, 84 insertions(+), 48 deletions(-) - -diff --git a/daemons/fenced/cts-fence-helper.c b/daemons/fenced/cts-fence-helper.c -index a248829..af006b5 100644 ---- a/daemons/fenced/cts-fence-helper.c -+++ b/daemons/fenced/cts-fence-helper.c -@@ -26,6 +26,7 @@ - - #include - #include -+#include - #include - - #include -@@ -184,7 +185,8 @@ run_fence_failure_test(void) - { - stonith_key_value_t *params = NULL; - -- params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4"); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_MAP, -+ "false_1_node1=1,2 false_1_node2=3,4"); - params = stonith_key_value_add(params, "mode", "fail"); - - single_test(st-> -@@ -208,7 +210,8 @@ run_fence_failure_rollover_test(void) - { - stonith_key_value_t *params = NULL; - -- params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4"); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_MAP, -+ "false_1_node1=1,2 false_1_node2=3,4"); - params = stonith_key_value_add(params, "mode", "fail"); - - single_test(st-> -@@ -216,7 +219,8 @@ run_fence_failure_rollover_test(void) - "Register device1 for rollover test", 1, 0); - stonith_key_value_freeall(params, 1, 1); - params = NULL; -- params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4"); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_MAP, -+ "false_1_node1=1,2 false_1_node2=3,4"); - params = stonith_key_value_add(params, "mode", "pass"); - - single_test(st-> -@@ -244,7 +248,8 @@ run_standard_test(void) - { - stonith_key_value_t *params = NULL; - -- params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4"); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_MAP, -+ "false_1_node1=1,2 false_1_node2=3,4"); - params = stonith_key_value_add(params, "mode", "pass"); - params = stonith_key_value_add(params, "mock_dynamic_hosts", "false_1_node1 false_1_node2"); - -@@ -320,7 +325,8 @@ standard_dev_test(void) - crm_exit(CRM_EX_DISCONNECT); - } - -- params = stonith_key_value_add(params, "pcmk_host_map", "some-host=pcmk-7 true_1_node1=3,4"); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_MAP, -+ "some-host=pcmk-7 true_1_node1=3,4"); - - rc = st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_xvm", params); - crm_debug("Register: %d", rc); -@@ -498,19 +504,22 @@ test_register_async_devices(int check_event) - char buf[16] = { 0, }; - stonith_key_value_t *params = NULL; - -- params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2"); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_MAP, -+ "false_1_node1=1,2"); - params = stonith_key_value_add(params, "mode", "fail"); - st->cmds->register_device(st, st_opts, "false_1", "stonith-ng", "fence_dummy", params); - stonith_key_value_freeall(params, 1, 1); - - params = NULL; -- params = stonith_key_value_add(params, "pcmk_host_map", "true_1_node1=1,2"); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_MAP, -+ "true_1_node1=1,2"); - 
params = stonith_key_value_add(params, "mode", "pass"); - st->cmds->register_device(st, st_opts, "true_1", "stonith-ng", "fence_dummy", params); - stonith_key_value_freeall(params, 1, 1); - - params = NULL; -- params = stonith_key_value_add(params, "pcmk_host_map", "custom_timeout_node1=1,2"); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_MAP, -+ "custom_timeout_node1=1,2"); - params = stonith_key_value_add(params, "mode", "fail"); - params = stonith_key_value_add(params, "delay", "1000"); - snprintf(buf, sizeof(buf) - 1, "%d", MAINLOOP_DEFAULT_TIMEOUT + CUSTOM_TIMEOUT_ADDITION); -diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c -index d9db985..45fb469 100644 ---- a/daemons/fenced/fenced_commands.c -+++ b/daemons/fenced/fenced_commands.c -@@ -128,7 +128,7 @@ get_action_delay_max(stonith_device_t * device, const char * action) - return 0; - } - -- value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_MAX); -+ value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_MAX); - if (value) { - delay_max = crm_parse_interval_spec(value) / 1000; - } -@@ -146,7 +146,7 @@ get_action_delay_base(stonith_device_t * device, const char * action) - return 0; - } - -- value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_BASE); -+ value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_BASE); - if (value) { - delay_base = crm_parse_interval_spec(value) / 1000; - } -@@ -269,7 +269,7 @@ get_action_limit(stonith_device_t * device) - const char *value = NULL; - int action_limit = 1; - -- value = g_hash_table_lookup(device->params, STONITH_ATTR_ACTION_LIMIT); -+ value = g_hash_table_lookup(device->params, PCMK_STONITH_ACTION_LIMIT); - if (value) { - action_limit = crm_parse_int(value, "1"); - if (action_limit == 0) { -@@ -897,12 +897,12 @@ build_device_from_xml(xmlNode * msg) - device->namespace = crm_element_value_copy(dev, "namespace"); - device->params = xml2device_params(device->id, dev); - -- value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTLIST); -+ value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_LIST); - if (value) { - device->targets = stonith__parse_targets(value); - } - -- value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTMAP); -+ value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_MAP); - device->aliases = build_port_aliases(value, &(device->targets)); - - device->agent_metadata = get_agent_metadata(device->agent); -@@ -942,13 +942,13 @@ target_list_type(stonith_device_t * dev) - { - const char *check_type = NULL; - -- check_type = g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTCHECK); -+ check_type = g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK); - - if (check_type == NULL) { - -- if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTLIST)) { -+ if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) { - check_type = "static-list"; -- } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)) { -+ } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) { - check_type = "static-list"; - } else if (pcmk_is_set(dev->flags, st_device_supports_list)) { - check_type = "dynamic-list"; -@@ -1067,7 +1067,8 @@ dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data) - if (rc != 0 && !dev->targets) { - crm_notice("Disabling port list queries for %s (%d): %s", dev->id, rc, output); - /* Fall back to status */ -- g_hash_table_replace(dev->params, strdup(STONITH_ATTR_HOSTCHECK), strdup("status")); -+ 
g_hash_table_replace(dev->params, -+ strdup(PCMK_STONITH_HOST_CHECK), strdup("status")); - - g_list_free_full(dev->targets, free); - dev->targets = NULL; -@@ -1658,7 +1659,7 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc - - if (string_in_list(dev->targets, host)) { - can = TRUE; -- } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP) -+ } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP) - && g_hash_table_lookup(dev->aliases, host)) { - can = TRUE; - } -@@ -1689,8 +1690,8 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc - /* we'll respond to this search request async in the cb */ - return; - } else { -- crm_err("Invalid value for " STONITH_ATTR_HOSTCHECK ": %s", check_type); -- check_type = "Invalid " STONITH_ATTR_HOSTCHECK; -+ crm_err("Invalid value for " PCMK_STONITH_HOST_CHECK ": %s", check_type); -+ check_type = "Invalid " PCMK_STONITH_HOST_CHECK; - } - - if (pcmk__str_eq(host, alias, pcmk__str_casei)) { -diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c -index 092f604..5c2cc3a 100644 ---- a/daemons/fenced/pacemaker-fenced.c -+++ b/daemons/fenced/pacemaker-fenced.c -@@ -653,7 +653,7 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - get_rsc_attributes(rsc->parameters, rsc, node, data_set); - get_meta_attributes(rsc->meta, rsc, node, data_set); - -- rsc_provides = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROVIDES); -+ rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES); - - g_hash_table_iter_init(&gIter, rsc->parameters); - while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) { -@@ -1331,7 +1331,8 @@ main(int argc, char **argv) - printf(" \n"); - #endif - -- printf(" \n", STONITH_ATTR_HOSTARG); -+ printf(" \n", -+ PCMK_STONITH_HOST_ARGUMENT); - printf - (" Advanced use only: An alternate parameter to supply instead of 'port'\n"); - printf -@@ -1342,7 +1343,8 @@ main(int argc, char **argv) - printf(" \n"); - printf(" \n"); - -- printf(" \n", STONITH_ATTR_HOSTMAP); -+ printf(" \n", -+ PCMK_STONITH_HOST_MAP); - printf - (" A mapping of host names to ports numbers for devices that do not support host names.\n"); - printf -@@ -1350,25 +1352,28 @@ main(int argc, char **argv) - printf(" \n"); - printf(" \n"); - -- printf(" \n", STONITH_ATTR_HOSTLIST); -- printf -- (" A list of machines controlled by this device (Optional unless %s=static-list).\n", -- STONITH_ATTR_HOSTCHECK); -+ printf(" \n", -+ PCMK_STONITH_HOST_LIST); -+ printf(" A list of machines controlled by " -+ "this device (Optional unless %s=static-list).\n", -+ PCMK_STONITH_HOST_CHECK); - printf(" \n"); - printf(" \n"); - -- printf(" \n", STONITH_ATTR_HOSTCHECK); -+ printf(" \n", -+ PCMK_STONITH_HOST_CHECK); - printf - (" How to determine which machines are controlled by the device.\n"); - printf(" Allowed values: dynamic-list " - "(query the device via the 'list' command), static-list " -- "(check the " STONITH_ATTR_HOSTLIST " attribute), status " -+ "(check the " PCMK_STONITH_HOST_LIST " attribute), status " - "(query the device via the 'status' command), none (assume " - "every device can fence every machine)\n"); - printf(" \n"); - printf(" \n"); - -- printf(" \n", STONITH_ATTR_DELAY_MAX); -+ printf(" \n", -+ PCMK_STONITH_DELAY_MAX); - printf - (" Enable a random delay for stonith actions and specify the maximum of random delay.\n"); - printf -@@ -1378,7 +1383,8 @@ main(int argc, char **argv) - printf(" \n"); - 
printf(" \n"); - -- printf(" \n", STONITH_ATTR_DELAY_BASE); -+ printf(" \n", -+ PCMK_STONITH_DELAY_BASE); - printf - (" Enable a base delay for stonith actions and specify base delay value.\n"); - printf -@@ -1388,7 +1394,8 @@ main(int argc, char **argv) - printf(" \n"); - printf(" \n"); - -- printf(" \n", STONITH_ATTR_ACTION_LIMIT); -+ printf(" \n", -+ PCMK_STONITH_ACTION_LIMIT); - printf - (" The maximum number of actions can be performed in parallel on this device\n"); - printf -@@ -1507,7 +1514,8 @@ main(int argc, char **argv) - xmlNode *xml; - stonith_key_value_t *params = NULL; - -- params = stonith_key_value_add(params, STONITH_ATTR_HOSTLIST, stonith_our_uname); -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_LIST, -+ stonith_our_uname); - - xml = create_device_registration_xml("watchdog", st_namespace_internal, - STONITH_WATCHDOG_AGENT, params, -diff --git a/include/crm/common/agents.h b/include/crm/common/agents.h -index b585ada..e9089ae 100644 ---- a/include/crm/common/agents.h -+++ b/include/crm/common/agents.h -@@ -23,6 +23,19 @@ extern "C" { - #include // uint32_t - #include - -+/* Special stonith-class agent parameters interpreted directly by Pacemaker -+ * (not including the pcmk_ACTION_{action,retries,timeout} parameters) -+ */ -+#define PCMK_STONITH_ACTION_LIMIT "pcmk_action_limit" -+#define PCMK_STONITH_DELAY_BASE "pcmk_delay_base" -+#define PCMK_STONITH_DELAY_MAX "pcmk_delay_max" -+#define PCMK_STONITH_HOST_ARGUMENT "pcmk_host_argument" -+#define PCMK_STONITH_HOST_CHECK "pcmk_host_check" -+#define PCMK_STONITH_HOST_LIST "pcmk_host_list" -+#define PCMK_STONITH_HOST_MAP "pcmk_host_map" -+#define PCMK_STONITH_PROVIDES "provides" -+#define PCMK_STONITH_STONITH_TIMEOUT "stonith-timeout" -+ - // Capabilities supported by a resource agent standard - enum pcmk_ra_caps { - pcmk_ra_cap_none = 0, -diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h -index f33957f..391ab72 100644 ---- a/include/crm/fencing/internal.h -+++ b/include/crm/fencing/internal.h -@@ -152,14 +152,6 @@ void stonith__device_parameter_flags(uint32_t *device_flags, - # define T_STONITH_TIMEOUT_VALUE "st-async-timeout-value" - # define T_STONITH_NOTIFY "st_notify" - --# define STONITH_ATTR_HOSTARG "pcmk_host_argument" --# define STONITH_ATTR_HOSTMAP "pcmk_host_map" --# define STONITH_ATTR_HOSTLIST "pcmk_host_list" --# define STONITH_ATTR_HOSTCHECK "pcmk_host_check" --# define STONITH_ATTR_DELAY_MAX "pcmk_delay_max" --# define STONITH_ATTR_DELAY_BASE "pcmk_delay_base" --# define STONITH_ATTR_ACTION_LIMIT "pcmk_action_limit" -- - # define STONITH_ATTR_ACTION_OP "action" - - # define STONITH_OP_EXEC "st_execute" -diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h -index 1fcb72d..c8b528b 100644 ---- a/include/crm/msg_xml.h -+++ b/include/crm/msg_xml.h -@@ -208,7 +208,6 @@ extern "C" { - # define XML_RSC_ATTR_FAIL_TIMEOUT "failure-timeout" - # define XML_RSC_ATTR_MULTIPLE "multiple-active" - # define XML_RSC_ATTR_REQUIRES "requires" --# define XML_RSC_ATTR_PROVIDES "provides" - # define XML_RSC_ATTR_CONTAINER "container" - # define XML_RSC_ATTR_INTERNAL_RSC "internal_rsc" - # define XML_RSC_ATTR_MAINTENANCE "maintenance" -@@ -425,6 +424,17 @@ extern "C" { - # define ID(x) crm_element_value(x, XML_ATTR_ID) - # define TYPE(x) crm_element_name(x) - -+ -+#ifndef PCMK__NO_COMPAT -+/* Everything here is deprecated and kept only for public API backward -+ * compatibility. It will be moved to compatibility.h in a future release. -+ */ -+ -+//! 
\deprecated Use PCMK_STONITH_PROVIDES instead -+# define XML_RSC_ATTR_PROVIDES "provides" -+ -+#endif -+ - #ifdef __cplusplus - } - #endif -diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c -index 9d7d030..d5784dc 100644 ---- a/lib/fencing/st_client.c -+++ b/lib/fencing/st_client.c -@@ -516,7 +516,8 @@ make_args(const char *agent, const char *action, const char *victim, - append_arg(STONITH_ATTR_ACTION_OP, action, &arg_list); - if (victim && device_args) { - const char *alias = victim; -- const char *param = g_hash_table_lookup(device_args, STONITH_ATTR_HOSTARG); -+ const char *param = g_hash_table_lookup(device_args, -+ PCMK_STONITH_HOST_ARGUMENT); - - if (port_map && g_hash_table_lookup(port_map, victim)) { - alias = g_hash_table_lookup(port_map, victim); -@@ -2046,14 +2047,15 @@ stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id, - - // Convert parameter list to a hash table - for (; params; params = params->next) { -- if (pcmk__str_eq(params->key, STONITH_ATTR_HOSTARG, pcmk__str_casei)) { -+ if (pcmk__str_eq(params->key, PCMK_STONITH_HOST_ARGUMENT, -+ pcmk__str_casei)) { - host_arg = params->value; - } - - // Strip out Pacemaker-implemented parameters - if (!pcmk__starts_with(params->key, "pcmk_") -- && strcmp(params->key, "provides") -- && strcmp(params->key, "stonith-timeout")) { -+ && strcmp(params->key, PCMK_STONITH_PROVIDES) -+ && strcmp(params->key, PCMK_STONITH_STONITH_TIMEOUT)) { - g_hash_table_insert(params_table, strdup(params->key), - strdup(params->value)); - } -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index a9bbf4b..44dba47 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -169,7 +169,7 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, - - // nvpair with provides or requires set to unfencing - #define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR \ -- "[(@" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_PROVIDES "'" \ -+ "[(@" XML_NVPAIR_ATTR_NAME "='" PCMK_STONITH_PROVIDES "'" \ - "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \ - "and @" XML_NVPAIR_ATTR_VALUE "='unfencing']" - -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index 2ad8780..fd238df 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -2362,7 +2362,8 @@ find_unfencing_devices(GListPtr candidates, GListPtr matches) - { - for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) { - pe_resource_t *candidate = gIter->data; -- const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES); -+ const char *provides = g_hash_table_lookup(candidate->meta, -+ PCMK_STONITH_PROVIDES); - const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES); - - if(candidate->children) { --- -1.8.3.1 - - -From 6d492f2dff0931ff2d59686e2680c5616f8da580 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 5 Oct 2020 12:48:04 -0500 -Subject: [PATCH 05/11] Refactor: libcrmcommon,libstonithd: add API for - detecting special stonith params - -This includes a slight behavioral change. Previously, the stonith API validate -method would strip out all parameters starting with "pcmk_" (which could be an -issue if a fence agent named one of its own parameters like that); now, it only -strips the specific parameters that Pacemaker handles directly. 
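A short sketch of the intended use, assuming a caller holds the device's parameters in a GHashTable (the table and function name here are hypothetical; pcmk_stonith_param() is the new call):

#include <glib.h>
#include <crm/common/agents.h>

/* Drop the parameters Pacemaker interprets itself (pcmk_host_map,
 * pcmk_delay_max, provides, ...) so only agent-owned parameters remain. */
static void
strip_pacemaker_handled_params(GHashTable *device_params)
{
    GHashTableIter iter;
    gpointer key = NULL;

    g_hash_table_iter_init(&iter, device_params);
    while (g_hash_table_iter_next(&iter, &key, NULL)) {
        if (pcmk_stonith_param((const char *) key)) {
            g_hash_table_iter_remove(&iter);
        }
    }
}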
---- - include/crm/common/agents.h | 1 + - lib/common/agents.c | 39 +++++++++++++++++++++++++++++++++++++++ - lib/fencing/st_client.c | 6 +----- - 3 files changed, 41 insertions(+), 5 deletions(-) - -diff --git a/include/crm/common/agents.h b/include/crm/common/agents.h -index e9089ae..b185977 100644 ---- a/include/crm/common/agents.h -+++ b/include/crm/common/agents.h -@@ -53,6 +53,7 @@ char *crm_generate_ra_key(const char *standard, const char *provider, - const char *type); - int crm_parse_agent_spec(const char *spec, char **standard, char **provider, - char **type); -+bool pcmk_stonith_param(const char *param); - - #ifndef PCMK__NO_COMPAT - /* Everything here is deprecated and kept only for public API backward -diff --git a/lib/common/agents.c b/lib/common/agents.c -index 1ee55ac..0291b0b 100644 ---- a/lib/common/agents.c -+++ b/lib/common/agents.c -@@ -171,3 +171,42 @@ crm_provider_required(const char *standard) - { - return pcmk_is_set(pcmk_get_ra_caps(standard), pcmk_ra_cap_provider); - } -+ -+/*! -+ * \brief Check whether a given stonith parameter is handled by Pacemaker -+ * -+ * Return true if a given string is the name of one of the special resource -+ * instance attributes interpreted directly by Pacemaker for stonith-class -+ * resources. -+ * -+ * \param[in] param Parameter name to check -+ * -+ * \return true if \p param is a special fencing parameter -+ */ -+bool -+pcmk_stonith_param(const char *param) -+{ -+ if (param == NULL) { -+ return false; -+ } -+ if (pcmk__str_any_of(param, PCMK_STONITH_PROVIDES, -+ PCMK_STONITH_STONITH_TIMEOUT, NULL)) { -+ return true; -+ } -+ if (!pcmk__starts_with(param, "pcmk_")) { // Short-circuit common case -+ return false; -+ } -+ if (pcmk__str_any_of(param, -+ PCMK_STONITH_ACTION_LIMIT, -+ PCMK_STONITH_DELAY_BASE, -+ PCMK_STONITH_DELAY_MAX, -+ PCMK_STONITH_HOST_ARGUMENT, -+ PCMK_STONITH_HOST_CHECK, -+ PCMK_STONITH_HOST_LIST, -+ PCMK_STONITH_HOST_MAP, -+ NULL)) { -+ return true; -+ } -+ param = strchr(param + 5, '_'); // Skip past "pcmk_ACTION" -+ return pcmk__str_any_of(param, "_action", "_timeout", "_retries", NULL); -+} -diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c -index d5784dc..e5adbf6 100644 ---- a/lib/fencing/st_client.c -+++ b/lib/fencing/st_client.c -@@ -2051,11 +2051,7 @@ stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id, - pcmk__str_casei)) { - host_arg = params->value; - } -- -- // Strip out Pacemaker-implemented parameters -- if (!pcmk__starts_with(params->key, "pcmk_") -- && strcmp(params->key, PCMK_STONITH_PROVIDES) -- && strcmp(params->key, PCMK_STONITH_STONITH_TIMEOUT)) { -+ if (!pcmk_stonith_param(params->key)) { - g_hash_table_insert(params_table, strdup(params->key), - strdup(params->value)); - } --- -1.8.3.1 - - -From d77b525b00c0ab74beb66fe3ef240a67a2c87cd5 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 5 Oct 2020 12:48:57 -0500 -Subject: [PATCH 06/11] Test: libcrmcommon: add unit tests for stonith - parameter name checker - ---- - configure.ac | 1 + - lib/common/tests/Makefile.am | 2 +- - lib/common/tests/agents/Makefile.am | 29 ++++++++++++ - lib/common/tests/agents/pcmk_stonith_param_test.c | 58 +++++++++++++++++++++++ - 4 files changed, 89 insertions(+), 1 deletion(-) - create mode 100644 lib/common/tests/agents/Makefile.am - create mode 100644 lib/common/tests/agents/pcmk_stonith_param_test.c - -diff --git a/configure.ac b/configure.ac -index 36e85a9..7d11d1e 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -2006,6 +2006,7 @@ AC_CONFIG_FILES(Makefile \ - 
lib/pacemaker-cluster.pc \ - lib/common/Makefile \ - lib/common/tests/Makefile \ -+ lib/common/tests/agents/Makefile \ - lib/common/tests/cmdline/Makefile \ - lib/common/tests/flags/Makefile \ - lib/common/tests/operations/Makefile \ -diff --git a/lib/common/tests/Makefile.am b/lib/common/tests/Makefile.am -index f3eaeec..2c33cc5 100644 ---- a/lib/common/tests/Makefile.am -+++ b/lib/common/tests/Makefile.am -@@ -1 +1 @@ --SUBDIRS = cmdline flags operations strings utils -+SUBDIRS = agents cmdline flags operations strings utils -diff --git a/lib/common/tests/agents/Makefile.am b/lib/common/tests/agents/Makefile.am -new file mode 100644 -index 0000000..40cb5f7 ---- /dev/null -+++ b/lib/common/tests/agents/Makefile.am -@@ -0,0 +1,29 @@ -+# -+# Copyright 2020 the Pacemaker project contributors -+# -+# The version control history for this file may have further details. -+# -+# This source code is licensed under the GNU General Public License version 2 -+# or later (GPLv2+) WITHOUT ANY WARRANTY. -+# -+AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include -+LDADD = $(top_builddir)/lib/common/libcrmcommon.la -+ -+include $(top_srcdir)/mk/glib-tap.mk -+ -+# Add each test program here. Each test should be written as a little standalone -+# program using the glib unit testing functions. See the documentation for more -+# information. -+# -+# https://developer.gnome.org/glib/unstable/glib-Testing.html -+# -+# Add "_test" to the end of all test program names to simplify .gitignore. -+test_programs = pcmk_stonith_param_test -+ -+# If any extra data needs to be added to the source distribution, add it to the -+# following list. -+dist_test_data = -+ -+# If any extra data needs to be used by tests but should not be added to the -+# source distribution, add it to the following list. -+test_data = -diff --git a/lib/common/tests/agents/pcmk_stonith_param_test.c b/lib/common/tests/agents/pcmk_stonith_param_test.c -new file mode 100644 -index 0000000..bf509e9 ---- /dev/null -+++ b/lib/common/tests/agents/pcmk_stonith_param_test.c -@@ -0,0 +1,58 @@ -+/* -+ * Copyright 2020 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. -+ * -+ * This source code is licensed under the GNU Lesser General Public License -+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
-+ */ -+ -+#include -+ -+#include -+#include -+ -+static void -+is_stonith_param(void) -+{ -+ g_assert_cmpint(pcmk_stonith_param(NULL), ==, false); -+ g_assert_cmpint(pcmk_stonith_param(""), ==, false); -+ g_assert_cmpint(pcmk_stonith_param("unrecognized"), ==, false); -+ g_assert_cmpint(pcmk_stonith_param("pcmk_unrecognized"), ==, false); -+ g_assert_cmpint(pcmk_stonith_param("x" PCMK_STONITH_ACTION_LIMIT), ==, false); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_ACTION_LIMIT "x"), ==, false); -+ -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_ACTION_LIMIT), ==, true); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_DELAY_BASE), ==, true); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_DELAY_MAX), ==, true); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_HOST_ARGUMENT), ==, true); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_HOST_CHECK), ==, true); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_HOST_LIST), ==, true); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_HOST_MAP), ==, true); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_PROVIDES), ==, true); -+ g_assert_cmpint(pcmk_stonith_param(PCMK_STONITH_STONITH_TIMEOUT), ==, true); -+} -+ -+static void -+is_stonith_action_param(void) -+{ -+ /* Currently, the function accepts any string not containing underbars as -+ * the action name, so we do not need to verify particular action names. -+ */ -+ g_assert_cmpint(pcmk_stonith_param("pcmk_on_unrecognized"), ==, false); -+ g_assert_cmpint(pcmk_stonith_param("pcmk_on_action"), ==, true); -+ g_assert_cmpint(pcmk_stonith_param("pcmk_on_timeout"), ==, true); -+ g_assert_cmpint(pcmk_stonith_param("pcmk_on_retries"), ==, true); -+} -+ -+int -+main(int argc, char **argv) -+{ -+ g_test_init(&argc, &argv, NULL); -+ -+ g_test_add_func("/common/utils/parse_op_key/is_stonith_param", -+ is_stonith_param); -+ g_test_add_func("/common/utils/parse_op_key/is_stonith_action_param", -+ is_stonith_action_param); -+ return g_test_run(); -+} --- -1.8.3.1 - - -From 775851a6dd765eb1a31f167533018031f4d5e1a1 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 17 Sep 2020 12:35:54 -0500 -Subject: [PATCH 07/11] Refactor: libstonithd: simplify creation of fencing - arguments - -It was unnecessarily complicated. 
---- - cts/cts-fencing.in | 2 +- - lib/fencing/st_client.c | 140 ++++++++++++++++++++++-------------------------- - 2 files changed, 66 insertions(+), 76 deletions(-) - -diff --git a/cts/cts-fencing.in b/cts/cts-fencing.in -index 4444024..feeedbb 100644 ---- a/cts/cts-fencing.in -+++ b/cts/cts-fencing.in -@@ -1168,7 +1168,7 @@ class Tests(object): - test.add_cmd("stonith_admin", - "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) - test.add_cmd("stonith_admin", "--output-as=xml -F %s -t 3" % (our_uname)) -- test.add_stonith_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) -+ test.add_stonith_log_pattern("as nodeid with fence action 'off' targeting %s" % (our_uname)) - - ### verify nodeid is _NOT_ supplied when nodeid is not in the metadata parameters - test = self.new_test("cpg_do_not_supply_nodeid", -diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c -index e5adbf6..ce2459b 100644 ---- a/lib/fencing/st_client.c -+++ b/lib/fencing/st_client.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -457,39 +458,20 @@ stonith_api_register_level(stonith_t * st, int options, const char *node, int le - } - - static void --append_arg(const char *key, const char *value, GHashTable **args) --{ -- CRM_CHECK(key != NULL, return); -- CRM_CHECK(value != NULL, return); -- CRM_CHECK(args != NULL, return); -- -- if (strstr(key, "pcmk_")) { -- return; -- } else if (strstr(key, CRM_META)) { -- return; -- } else if (pcmk__str_eq(key, "crm_feature_set", pcmk__str_casei)) { -- return; -- } -- -- if (!*args) { -- *args = crm_str_table_new(); -- } -- -- CRM_CHECK(*args != NULL, return); -- crm_trace("Appending: %s=%s", key, value); -- g_hash_table_replace(*args, strdup(key), strdup(value)); --} -- --static void - append_config_arg(gpointer key, gpointer value, gpointer user_data) - { -- /* The fencer will filter action out when it registers the device, -- * but ignore it here just in case any other library callers -- * fail to do so. -+ /* The fencer will filter "action" out when it registers the device, -+ * but ignore it here in case any external API users don't. - */ -- if (!pcmk__str_eq(key, STONITH_ATTR_ACTION_OP, pcmk__str_casei)) { -- append_arg(key, value, user_data); -- return; -+ if (!pcmk__str_eq(key, STONITH_ATTR_ACTION_OP, pcmk__str_casei) -+ && (strstr(key, "pcmk_") == NULL) -+ && (strstr(key, CRM_META) == NULL) -+ && !pcmk__str_eq(key, "crm_feature_set", pcmk__str_casei)) { -+ -+ crm_trace("Passing %s=%s with fence action", -+ (const char *) key, (const char *) (value? value : "")); -+ g_hash_table_insert((GHashTable *) user_data, -+ strdup(key), strdup(value? 
value : "")); - } - } - -@@ -498,76 +480,84 @@ make_args(const char *agent, const char *action, const char *victim, - uint32_t victim_nodeid, GHashTable * device_args, - GHashTable * port_map, const char *host_arg) - { -- char buffer[512]; - GHashTable *arg_list = NULL; - const char *value = NULL; - - CRM_CHECK(action != NULL, return NULL); - -- snprintf(buffer, sizeof(buffer), "pcmk_%s_action", action); -+ arg_list = crm_str_table_new(); -+ -+ // Add action to arguments (using an alias if requested) - if (device_args) { -+ char buffer[512]; -+ -+ snprintf(buffer, sizeof(buffer), "pcmk_%s_action", action); - value = g_hash_table_lookup(device_args, buffer); -+ if (value) { -+ crm_debug("Substituting '%s' for fence action %s targeting %s", -+ value, action, victim); -+ action = value; -+ } - } -- if (value) { -- crm_debug("Substituting action '%s' for requested operation '%s'", value, action); -- action = value; -- } -+ g_hash_table_insert(arg_list, strdup(STONITH_ATTR_ACTION_OP), -+ strdup(action)); - -- append_arg(STONITH_ATTR_ACTION_OP, action, &arg_list); -+ /* If this is a fencing operation against another node, add more standard -+ * arguments. -+ */ - if (victim && device_args) { -- const char *alias = victim; -- const char *param = g_hash_table_lookup(device_args, -- PCMK_STONITH_HOST_ARGUMENT); -+ const char *param = NULL; - -- if (port_map && g_hash_table_lookup(port_map, victim)) { -- alias = g_hash_table_lookup(port_map, victim); -- } -- -- /* Always supply the node's name, too: -+ /* Always pass the target's name, per - * https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md - */ -- append_arg("nodename", victim, &arg_list); -+ g_hash_table_insert(arg_list, strdup("nodename"), strdup(victim)); -+ -+ // If the target's node ID was specified, pass it, too - if (victim_nodeid) { -- char nodeid_str[33] = { 0, }; -- if (snprintf(nodeid_str, 33, "%u", (unsigned int)victim_nodeid)) { -- crm_info("For stonith action (%s) for victim %s, adding nodeid (%s) to parameters", -- action, victim, nodeid_str); -- append_arg("nodeid", nodeid_str, &arg_list); -- } -- } -+ char *nodeid = crm_strdup_printf("%" PRIu32, victim_nodeid); - -- /* Check if we need to supply the victim in any other form */ -- if(pcmk__str_eq(agent, "fence_legacy", pcmk__str_casei)) { -- value = agent; -+ // cts-fencing looks for this log message -+ crm_info("Passing '%s' as nodeid with fence action '%s' targeting %s", -+ nodeid, action, victim); -+ g_hash_table_insert(arg_list, strdup("nodeid"), nodeid); -+ } - -- } else if (param == NULL) { -- // By default, `port` is added -- if (host_arg == NULL) { -- param = "port"; -+ // Check whether target must be specified in some other way -+ param = g_hash_table_lookup(device_args, PCMK_STONITH_HOST_ARGUMENT); -+ if (!pcmk__str_eq(agent, "fence_legacy", pcmk__str_none) -+ && !pcmk__str_eq(param, "none", pcmk__str_casei)) { - -- } else { -- param = host_arg; -+ if (param == NULL) { -+ /* Use the caller's default for pcmk_host_argument, or "port" if -+ * none was given -+ */ -+ param = (host_arg == NULL)? 
"port" : host_arg; - } -- - value = g_hash_table_lookup(device_args, param); - -- } else if (pcmk__str_eq(param, "none", pcmk__str_casei)) { -- value = param; /* Nothing more to do */ -- -- } else { -- value = g_hash_table_lookup(device_args, param); -- } -+ if (pcmk__str_eq(value, "dynamic", -+ pcmk__str_casei|pcmk__str_null_matches)) { -+ /* If the host argument was "dynamic" or not explicitly specified, -+ * add it with the target -+ */ -+ const char *alias = NULL; - -- /* Don't overwrite explictly set values for $param */ -- if (pcmk__str_eq(value, "dynamic", pcmk__str_null_matches | pcmk__str_casei)) { -- crm_debug("Performing '%s' action targeting '%s' as '%s=%s'", action, victim, param, -- alias); -- append_arg(param, alias, &arg_list); -+ if (port_map) { -+ alias = g_hash_table_lookup(port_map, victim); -+ } -+ if (alias == NULL) { -+ alias = victim; -+ } -+ crm_debug("Passing %s='%s' with fence action %s targeting %s", -+ param, alias, action, victim); -+ g_hash_table_insert(arg_list, strdup(param), strdup(alias)); -+ } - } - } - - if (device_args) { -- g_hash_table_foreach(device_args, append_config_arg, &arg_list); -+ g_hash_table_foreach(device_args, append_config_arg, arg_list); - } - - return arg_list; --- -1.8.3.1 - - -From 717f2decd99466555287b41331dfc5d693043d56 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 17 Sep 2020 12:39:48 -0500 -Subject: [PATCH 08/11] Low: libstonithd: improve filtering of - Pacemaker-handled parameters - -Previously, when creating arguments to pass to fence agents, we would filter -out parameters whose name contained the substring "pcmk_" anywhere. -Now, use the new API call to filter out only the parameters interpreted -directly by Pacemaker. ---- - lib/fencing/st_client.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c -index ce2459b..b8348fe 100644 ---- a/lib/fencing/st_client.c -+++ b/lib/fencing/st_client.c -@@ -462,9 +462,11 @@ append_config_arg(gpointer key, gpointer value, gpointer user_data) - { - /* The fencer will filter "action" out when it registers the device, - * but ignore it here in case any external API users don't. -+ * -+ * Also filter out parameters handled directly by Pacemaker. - */ - if (!pcmk__str_eq(key, STONITH_ATTR_ACTION_OP, pcmk__str_casei) -- && (strstr(key, "pcmk_") == NULL) -+ && !pcmk_stonith_param(key) - && (strstr(key, CRM_META) == NULL) - && !pcmk__str_eq(key, "crm_feature_set", pcmk__str_casei)) { - --- -1.8.3.1 - - -From 3e960c22a1cd686efd18754f6535167fc34a223c Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 17 Sep 2020 16:26:23 -0500 -Subject: [PATCH 09/11] Low: scheduler: use same default private parameter list - as controller - -If the controller doesn't find any parameters marked private in resource agent -meta-data, it uses "password", "passwd", and "user" as the default list -(in metadata_cache_update()). - -If the scheduler came across a resource operation history entry with no -op-secure-params list, it previously used " passwd password " as the default. - -Even though these are two different situations, and the scheduler should only -find that situation in old saved CIBs, use the same default list for -consistency. 
---- - lib/pengine/utils.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index fd238df..a80dab3 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -2066,9 +2066,9 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, - const char *ra_version = NULL; - #endif - -- const char *op_version; -+ const char *op_version = NULL; - const char *restart_list = NULL; -- const char *secure_list = " passwd password "; -+ const char *secure_list = NULL; - - data = calloc(1, sizeof(op_digest_cache_t)); - CRM_ASSERT(data != NULL); -@@ -2102,6 +2102,7 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, - #endif - - } else { -+ secure_list = " passwd password user "; - op_version = CRM_FEATURE_SET; - } - --- -1.8.3.1 - - -From c396a66016cfbb2ace3e166d674a90b4204fff6e Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 16 Sep 2020 19:06:56 -0500 -Subject: [PATCH 10/11] Fix: scheduler: filter Pacemaker-supplied stonith - parameters from secure hash - -Pacemaker calculates "secure digests" of resource operations as hashes of -non-sensitive resource parameters, for use when running crm_simulate on -sanitized data sets. - -Previously, the controller and scheduler could calculate different secure -digests for the same resource history entry for stonith resources. The -controller created its hash based on all resource parameters listed in the -agent meta-data. The scheduler created its hash based on all configured -resource parameters, which could include the special parameters (such as -"provides") that are interpreted directly by Pacemaker and not passed to the -agent. - -Now, the scheduler excludes the special parameters before hashing. This avoids -the annoying situation where running crm_simulate on a sanitized data set shows -unnecessary stonith resource restarts. ---- - lib/pengine/utils.c | 20 ++++++++++++++++++++ - 1 file changed, 20 insertions(+) - -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index a80dab3..a78bd24 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -2124,6 +2124,9 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, - data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); - - if (calc_secure) { -+ const char *class = crm_element_value(rsc->xml, -+ XML_AGENT_ATTR_CLASS); -+ - /* The controller doesn't create a digest of *all* non-sensitive - * parameters, only those listed in resource agent meta-data. The - * equivalent here is rsc->parameters. -@@ -2133,6 +2136,23 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, - if(secure_list) { - filter_parameters(data->params_secure, secure_list, FALSE); - } -+ if (pcmk_is_set(pcmk_get_ra_caps(class), -+ pcmk_ra_cap_fence_params)) { -+ /* For stonith resources, Pacemaker adds special parameters, -+ * but these are not listed in fence agent meta-data, so the -+ * controller will not hash them. That means we have to filter -+ * them out before calculating our hash for comparison. 
-+ */ -+ for (xmlAttrPtr iter = data->params_secure->properties; -+ iter != NULL; ) { -+ const char *prop_name = (const char *) iter->name; -+ -+ iter = iter->next; // Grab next now in case we remove current -+ if (pcmk_stonith_param(prop_name)) { -+ xml_remove_prop(data->params_secure, prop_name); -+ } -+ } -+ } - data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version); - } - --- -1.8.3.1 - - -From 644b7ee1979d4da88497a67bb7f96ebfb91cf1d5 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 17 Sep 2020 17:20:50 -0500 -Subject: [PATCH 11/11] Test: scheduler: update tests for digest fix - -This has been bugging me for years! - -There are still a few regression tests where fencing devices are sanitized -(i.e. have a parameter value of "****") and yet are still restarting. These -are all correct, because either some value not marked as private (e.g. "user") -was sanitized, or they pre-date op-secure-params and op-secure-digest. ---- - cts/scheduler/asymmetrical-order-restart.dot | 5 - - cts/scheduler/asymmetrical-order-restart.exp | 35 -- - cts/scheduler/asymmetrical-order-restart.scores | 2 + - cts/scheduler/asymmetrical-order-restart.summary | 6 +- - cts/scheduler/bug-cl-5247.dot | 34 -- - cts/scheduler/bug-cl-5247.exp | 280 ++---------- - cts/scheduler/bug-cl-5247.scores | 4 + - cts/scheduler/bug-cl-5247.summary | 20 +- - cts/scheduler/nested-remote-recovery.dot | 45 -- - cts/scheduler/nested-remote-recovery.exp | 483 ++++----------------- - cts/scheduler/nested-remote-recovery.scores | 18 + - cts/scheduler/nested-remote-recovery.summary | 54 +-- - cts/scheduler/no-promote-on-unrunnable-guest.dot | 15 - - cts/scheduler/no-promote-on-unrunnable-guest.exp | 273 ++++-------- - .../no-promote-on-unrunnable-guest.scores | 6 + - .../no-promote-on-unrunnable-guest.summary | 18 +- - cts/scheduler/notifs-for-unrunnable.dot | 15 - - cts/scheduler/notifs-for-unrunnable.exp | 195 ++------- - cts/scheduler/notifs-for-unrunnable.scores | 6 + - cts/scheduler/notifs-for-unrunnable.summary | 18 +- - cts/scheduler/on-fail-ignore.dot | 10 - - cts/scheduler/on-fail-ignore.exp | 73 +--- - cts/scheduler/on-fail-ignore.scores | 4 + - cts/scheduler/on-fail-ignore.summary | 12 +- - cts/scheduler/remote-recover-all.dot | 14 - - cts/scheduler/remote-recover-all.exp | 234 ++++------ - cts/scheduler/remote-recover-all.scores | 4 + - cts/scheduler/remote-recover-all.summary | 12 +- - cts/scheduler/remote-recover-connection.dot | 10 - - cts/scheduler/remote-recover-connection.exp | 198 +++------ - cts/scheduler/remote-recover-connection.scores | 4 + - cts/scheduler/remote-recover-connection.summary | 12 +- - cts/scheduler/remote-recover-no-resources.dot | 12 - - cts/scheduler/remote-recover-no-resources.exp | 198 +++------ - cts/scheduler/remote-recover-no-resources.scores | 4 + - cts/scheduler/remote-recover-no-resources.summary | 12 +- - cts/scheduler/remote-recover-unknown.dot | 14 - - cts/scheduler/remote-recover-unknown.exp | 206 +++------ - cts/scheduler/remote-recover-unknown.scores | 4 + - cts/scheduler/remote-recover-unknown.summary | 12 +- - cts/scheduler/remote-recovery.dot | 10 - - cts/scheduler/remote-recovery.exp | 198 +++------ - cts/scheduler/remote-recovery.scores | 4 + - cts/scheduler/remote-recovery.summary | 12 +- - 44 files changed, 706 insertions(+), 2099 deletions(-) - -diff --git a/cts/scheduler/asymmetrical-order-restart.dot b/cts/scheduler/asymmetrical-order-restart.dot -index d12a4a5..8bb3a93 100644 ---- a/cts/scheduler/asymmetrical-order-restart.dot -+++ 
b/cts/scheduler/asymmetrical-order-restart.dot -@@ -1,9 +1,4 @@ - digraph "g" { --"cesr104ipmi_monitor_60000 cesr105-p16" [ style=bold color="green" fontcolor="black"] --"cesr104ipmi_start_0 cesr105-p16" -> "cesr104ipmi_monitor_60000 cesr105-p16" [ style = bold] --"cesr104ipmi_start_0 cesr105-p16" [ style=bold color="green" fontcolor="black"] --"cesr104ipmi_stop_0 cesr105-p16" -> "cesr104ipmi_start_0 cesr105-p16" [ style = bold] --"cesr104ipmi_stop_0 cesr105-p16" [ style=bold color="green" fontcolor="black"] - "sleep_b_monitor_10000 cesr109-p16" [ style=dashed color="red" fontcolor="black"] - "sleep_b_start_0 cesr109-p16" -> "sleep_b_monitor_10000 cesr109-p16" [ style = dashed] - "sleep_b_start_0 cesr109-p16" [ style=dashed color="red" fontcolor="black"] -diff --git a/cts/scheduler/asymmetrical-order-restart.exp b/cts/scheduler/asymmetrical-order-restart.exp -index bb4a2c5..c0b627a 100644 ---- a/cts/scheduler/asymmetrical-order-restart.exp -+++ b/cts/scheduler/asymmetrical-order-restart.exp -@@ -1,41 +1,6 @@ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - -diff --git a/cts/scheduler/asymmetrical-order-restart.scores b/cts/scheduler/asymmetrical-order-restart.scores -index 35fd8fe..49bb33d 100644 ---- a/cts/scheduler/asymmetrical-order-restart.scores -+++ b/cts/scheduler/asymmetrical-order-restart.scores -@@ -1,4 +1,6 @@ - Allocation scores: -+Only 'private' parameters to cesr104ipmi_monitor_60000 on cesr105-p16 changed: 0:0;1167:0:0:540ff5bf-81ee-4648-97cb-e922b82b370c -+Only 'private' parameters to cesr104ipmi_start_0 on cesr105-p16 changed: 0:0;1166:0:0:540ff5bf-81ee-4648-97cb-e922b82b370c - Using the original execution date of: 2018-08-09 18:55:41Z - pcmk__native_allocate: cesr104ipmi allocation score on cesr105-p16: 0 - pcmk__native_allocate: cesr104ipmi allocation score on cesr109-p16: 0 -diff --git a/cts/scheduler/asymmetrical-order-restart.summary b/cts/scheduler/asymmetrical-order-restart.summary -index 9e2e9fd..7811801 100644 ---- a/cts/scheduler/asymmetrical-order-restart.summary -+++ b/cts/scheduler/asymmetrical-order-restart.summary -@@ -8,14 +8,12 @@ Online: [ cesr105-p16 cesr109-p16 ] - sleep_a (ocf::classe:anything): Stopped (disabled) - sleep_b (ocf::classe:anything): FAILED cesr109-p16 - -+Only 'private' parameters to cesr104ipmi_start_0 on cesr105-p16 changed: 0:0;1166:0:0:540ff5bf-81ee-4648-97cb-e922b82b370c -+Only 'private' parameters to cesr104ipmi_monitor_60000 on cesr105-p16 changed: 0:0;1167:0:0:540ff5bf-81ee-4648-97cb-e922b82b370c - Transition Summary: -- * Restart cesr104ipmi ( cesr105-p16 ) due to resource definition change - * Stop sleep_b ( cesr109-p16 ) due to unrunnable sleep_a start - - Executing cluster transition: -- * Resource action: cesr104ipmi stop on cesr105-p16 -- * Resource action: cesr104ipmi start on cesr105-p16 -- * Resource action: cesr104ipmi monitor=60000 on cesr105-p16 - * Resource action: sleep_b stop on cesr109-p16 - Using the original execution date of: 2018-08-09 18:55:41Z - -diff --git a/cts/scheduler/bug-cl-5247.dot b/cts/scheduler/bug-cl-5247.dot -index 71e816f..f5d6fa3 100644 ---- a/cts/scheduler/bug-cl-5247.dot -+++ b/cts/scheduler/bug-cl-5247.dot -@@ -1,22 +1,4 @@ - digraph "g" { --"grpStonith1_running_0" [ style=bold color="green" fontcolor="orange"] --"grpStonith1_start_0" -> "grpStonith1_running_0" [ style = bold] --"grpStonith1_start_0" -> "prmStonith1-2_start_0 bl460g8n4" [ style = bold] --"grpStonith1_start_0" [ style=bold color="green" fontcolor="orange"] 
--"grpStonith1_stop_0" -> "grpStonith1_stopped_0" [ style = bold] --"grpStonith1_stop_0" -> "prmStonith1-2_stop_0 bl460g8n4" [ style = bold] --"grpStonith1_stop_0" [ style=bold color="green" fontcolor="orange"] --"grpStonith1_stopped_0" -> "grpStonith1_start_0" [ style = bold] --"grpStonith1_stopped_0" [ style=bold color="green" fontcolor="orange"] --"grpStonith2_running_0" [ style=bold color="green" fontcolor="orange"] --"grpStonith2_start_0" -> "grpStonith2_running_0" [ style = bold] --"grpStonith2_start_0" -> "prmStonith2-2_start_0 bl460g8n3" [ style = bold] --"grpStonith2_start_0" [ style=bold color="green" fontcolor="orange"] --"grpStonith2_stop_0" -> "grpStonith2_stopped_0" [ style = bold] --"grpStonith2_stop_0" -> "prmStonith2-2_stop_0 bl460g8n3" [ style = bold] --"grpStonith2_stop_0" [ style=bold color="green" fontcolor="orange"] --"grpStonith2_stopped_0" -> "grpStonith2_start_0" [ style = bold] --"grpStonith2_stopped_0" [ style=bold color="green" fontcolor="orange"] - "master-group_running_0" [ style=bold color="green" fontcolor="orange"] - "master-group_start_0" -> "master-group_running_0" [ style = bold] - "master-group_start_0" -> "vip-master_start_0 pgsr01" [ style = bold] -@@ -89,27 +71,11 @@ - "pgsr02_stop_0 bl460g8n4" [ style=bold color="green" fontcolor="black"] - "prmDB2_stop_0 bl460g8n4" -> "stonith 'off' pgsr02" [ style = bold] - "prmDB2_stop_0 bl460g8n4" [ style=bold color="green" fontcolor="black"] --"prmStonith1-2_monitor_3600000 bl460g8n4" [ style=bold color="green" fontcolor="black"] --"prmStonith1-2_start_0 bl460g8n4" -> "grpStonith1_running_0" [ style = bold] --"prmStonith1-2_start_0 bl460g8n4" -> "prmStonith1-2_monitor_3600000 bl460g8n4" [ style = bold] --"prmStonith1-2_start_0 bl460g8n4" [ style=bold color="green" fontcolor="black"] --"prmStonith1-2_stop_0 bl460g8n4" -> "grpStonith1_stopped_0" [ style = bold] --"prmStonith1-2_stop_0 bl460g8n4" -> "prmStonith1-2_start_0 bl460g8n4" [ style = bold] --"prmStonith1-2_stop_0 bl460g8n4" [ style=bold color="green" fontcolor="black"] --"prmStonith2-2_monitor_3600000 bl460g8n3" [ style=bold color="green" fontcolor="black"] --"prmStonith2-2_start_0 bl460g8n3" -> "grpStonith2_running_0" [ style = bold] --"prmStonith2-2_start_0 bl460g8n3" -> "prmStonith2-2_monitor_3600000 bl460g8n3" [ style = bold] --"prmStonith2-2_start_0 bl460g8n3" [ style=bold color="green" fontcolor="black"] --"prmStonith2-2_stop_0 bl460g8n3" -> "grpStonith2_stopped_0" [ style = bold] --"prmStonith2-2_stop_0 bl460g8n3" -> "prmStonith2-2_start_0 bl460g8n3" [ style = bold] --"prmStonith2-2_stop_0 bl460g8n3" [ style=bold color="green" fontcolor="black"] - "stonith 'off' pgsr02" -> "master-group_stop_0" [ style = bold] - "stonith 'off' pgsr02" -> "msPostgresql_stop_0" [ style = bold] - "stonith 'off' pgsr02" -> "pgsql_demote_0 pgsr02" [ style = bold] - "stonith 'off' pgsr02" -> "pgsql_post_notify_stonith_0" [ style = bold] - "stonith 'off' pgsr02" -> "pgsql_stop_0 pgsr02" [ style = bold] --"stonith 'off' pgsr02" -> "prmStonith1-2_start_0 bl460g8n4" [ style = bold] --"stonith 'off' pgsr02" -> "prmStonith2-2_start_0 bl460g8n3" [ style = bold] - "stonith 'off' pgsr02" -> "vip-master_start_0 pgsr01" [ style = bold] - "stonith 'off' pgsr02" -> "vip-master_stop_0 pgsr02" [ style = bold] - "stonith 'off' pgsr02" -> "vip-rep_start_0 pgsr01" [ style = bold] -diff --git a/cts/scheduler/bug-cl-5247.exp b/cts/scheduler/bug-cl-5247.exp -index 446df3b..252e65a 100644 ---- a/cts/scheduler/bug-cl-5247.exp -+++ b/cts/scheduler/bug-cl-5247.exp -@@ -14,206 +14,16 @@ - - - -- 
-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - - - - -- -+ - - -- -+ - - - -@@ -226,7 +36,7 @@ - - - -- -+ - - - -@@ -241,7 +51,7 @@ - - - -- -+ - - - -@@ -259,7 +69,7 @@ - - - -- -+ - - - -@@ -271,7 +81,7 @@ - - - -- -+ - - - -@@ -284,7 +94,7 @@ - - - -- -+ - - - -@@ -293,7 +103,7 @@ - - - -- -+ - - - -@@ -306,7 +116,7 @@ - - - -- -+ - - - -@@ -314,7 +124,7 @@ - - - -- -+ - - - -@@ -327,16 +137,16 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -@@ -349,7 +159,7 @@ - - - -- -+ - - - -@@ -358,7 +168,7 @@ - - - -- -+ - - - -@@ -374,7 +184,7 @@ - - - -- -+ - - - -@@ -389,16 +199,16 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -@@ -413,7 +223,7 @@ - - - -- -+ - - - -@@ -425,7 +235,7 @@ - - - -- -+ - - - -@@ -443,7 +253,7 @@ - - - -- -+ - - - -@@ -458,7 +268,7 @@ - - - -- -+ - - - -@@ -471,7 +281,7 @@ - - - -- -+ - - - -@@ -484,7 +294,7 @@ - - - -- -+ - - - -@@ -497,7 +307,7 @@ - - - -- -+ - - - -@@ -513,7 +323,7 @@ - - - -- -+ - - - -@@ -532,7 +342,7 @@ - - - -- -+ - - - -@@ -547,7 +357,7 @@ - - - -- -+ - - - -@@ -562,7 +372,7 @@ - - - -- -+ - - - -@@ -577,7 +387,7 @@ - - - -- -+ - - - -@@ -585,7 +395,7 @@ - - - -- -+ - - - -@@ -600,7 +410,7 @@ - - - -- -+ - - - -@@ -612,7 +422,7 @@ - - - -- -+ - - - -@@ -627,7 +437,7 @@ - - - -- -+ - - - -@@ -642,7 +452,7 @@ - - - -- -+ - - - -@@ -657,7 +467,7 @@ - - - -- -+ - - - -@@ -669,7 +479,7 @@ - - - -- -+ - - - -@@ -684,7 +494,7 @@ - - - -- -+ - - - -@@ -702,16 +512,16 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -@@ -723,16 +533,16 @@ - - - -- -+ - -- -+ - - - - - - -- -+ - - - -diff --git a/cts/scheduler/bug-cl-5247.scores b/cts/scheduler/bug-cl-5247.scores -index 11a0152..90c7ca6 100644 ---- a/cts/scheduler/bug-cl-5247.scores -+++ b/cts/scheduler/bug-cl-5247.scores -@@ -1,4 +1,8 @@ - Allocation scores: -+Only 'private' parameters to prmStonith1-2_monitor_3600000 on bl460g8n4 changed: 0:0;12:4:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 -+Only 'private' parameters to prmStonith1-2_start_0 on bl460g8n4 changed: 0:0;24:3:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 -+Only 'private' parameters to prmStonith2-2_monitor_3600000 on bl460g8n3 changed: 0:0;19:4:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 -+Only 'private' parameters to prmStonith2-2_start_0 on bl460g8n3 changed: 0:0;30:3:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 - Using the original execution date of: 2015-08-12 02:53:40Z - pcmk__clone_allocate: msPostgresql allocation score on bl460g8n3: -INFINITY - pcmk__clone_allocate: msPostgresql allocation score on bl460g8n4: -INFINITY -diff --git a/cts/scheduler/bug-cl-5247.summary b/cts/scheduler/bug-cl-5247.summary -index 52664e6..9e8959f 100644 ---- a/cts/scheduler/bug-cl-5247.summary -+++ b/cts/scheduler/bug-cl-5247.summary -@@ -17,21 +17,19 @@ GuestOnline: [ pgsr01:prmDB1 ] - Masters: [ pgsr01 ] - Stopped: [ bl460g8n3 bl460g8n4 ] - -+Only 'private' parameters to prmStonith1-2_start_0 on bl460g8n4 changed: 0:0;24:3:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 -+Only 'private' parameters to prmStonith1-2_monitor_3600000 on bl460g8n4 changed: 
0:0;12:4:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 -+Only 'private' parameters to prmStonith2-2_start_0 on bl460g8n3 changed: 0:0;30:3:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 -+Only 'private' parameters to prmStonith2-2_monitor_3600000 on bl460g8n3 changed: 0:0;19:4:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 - Transition Summary: - * Fence (off) pgsr02 (resource: prmDB2) 'guest is unclean' - * Stop prmDB2 ( bl460g8n4 ) due to node availability -- * Restart prmStonith1-2 ( bl460g8n4 ) due to resource definition change -- * Restart prmStonith2-2 ( bl460g8n3 ) due to resource definition change - * Recover vip-master ( pgsr02 -> pgsr01 ) - * Recover vip-rep ( pgsr02 -> pgsr01 ) - * Stop pgsql:0 ( Master pgsr02 ) due to node availability - * Stop pgsr02 ( bl460g8n4 ) due to node availability - - Executing cluster transition: -- * Pseudo action: grpStonith1_stop_0 -- * Resource action: prmStonith1-2 stop on bl460g8n4 -- * Pseudo action: grpStonith2_stop_0 -- * Resource action: prmStonith2-2 stop on bl460g8n3 - * Resource action: vip-master monitor on pgsr01 - * Resource action: vip-rep monitor on pgsr01 - * Pseudo action: msPostgresql_pre_notify_demote_0 -@@ -39,23 +37,13 @@ Executing cluster transition: - * Resource action: pgsr02 stop on bl460g8n4 - * Resource action: pgsr02 monitor on bl460g8n3 - * Resource action: prmDB2 stop on bl460g8n4 -- * Pseudo action: grpStonith1_stopped_0 -- * Pseudo action: grpStonith1_start_0 -- * Pseudo action: grpStonith2_stopped_0 -- * Pseudo action: grpStonith2_start_0 - * Resource action: pgsql notify on pgsr01 - * Pseudo action: msPostgresql_confirmed-pre_notify_demote_0 - * Pseudo action: msPostgresql_demote_0 - * Pseudo action: stonith-pgsr02-off on pgsr02 -- * Resource action: prmStonith1-2 start on bl460g8n4 -- * Resource action: prmStonith1-2 monitor=3600000 on bl460g8n4 -- * Resource action: prmStonith2-2 start on bl460g8n3 -- * Resource action: prmStonith2-2 monitor=3600000 on bl460g8n3 - * Pseudo action: pgsql_post_notify_stop_0 - * Pseudo action: pgsql_demote_0 - * Pseudo action: msPostgresql_demoted_0 -- * Pseudo action: grpStonith1_running_0 -- * Pseudo action: grpStonith2_running_0 - * Pseudo action: msPostgresql_post_notify_demoted_0 - * Resource action: pgsql notify on pgsr01 - * Pseudo action: msPostgresql_confirmed-post_notify_demoted_0 -diff --git a/cts/scheduler/nested-remote-recovery.dot b/cts/scheduler/nested-remote-recovery.dot -index 02a267f..2afc6e9 100644 ---- a/cts/scheduler/nested-remote-recovery.dot -+++ b/cts/scheduler/nested-remote-recovery.dot -@@ -86,49 +86,4 @@ - "stonith 'reboot' galera-bundle-0" -> "galera_promote_0 galera-bundle-0" [ style = bold] - "stonith 'reboot' galera-bundle-0" -> "galera_start_0 galera-bundle-0" [ style = bold] - "stonith 'reboot' galera-bundle-0" [ style=bold color="green" fontcolor="orange"] --"stonith-fence_ipmilan-5254000203a2_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254000203a2_start_0 controller-2" -> "stonith-fence_ipmilan-5254000203a2_monitor_60000 controller-2" [ style = bold] --"stonith-fence_ipmilan-5254000203a2_start_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254000203a2_stop_0 controller-2" -> "stonith-fence_ipmilan-5254000203a2_start_0 controller-2" [ style = bold] --"stonith-fence_ipmilan-5254000203a2_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254002f6d57_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] 
--"stonith-fence_ipmilan-5254002f6d57_start_0 controller-1" -> "stonith-fence_ipmilan-5254002f6d57_monitor_60000 controller-1" [ style = bold] --"stonith-fence_ipmilan-5254002f6d57_start_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254002f6d57_stop_0 controller-1" -> "stonith-fence_ipmilan-5254002f6d57_start_0 controller-1" [ style = bold] --"stonith-fence_ipmilan-5254002f6d57_stop_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254003296a5_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254003296a5_start_0 controller-1" -> "stonith-fence_ipmilan-5254003296a5_monitor_60000 controller-1" [ style = bold] --"stonith-fence_ipmilan-5254003296a5_start_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254003296a5_stop_0 controller-1" -> "stonith-fence_ipmilan-5254003296a5_start_0 controller-1" [ style = bold] --"stonith-fence_ipmilan-5254003296a5_stop_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254005f9a33_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254005f9a33_start_0 controller-2" -> "stonith-fence_ipmilan-5254005f9a33_monitor_60000 controller-2" [ style = bold] --"stonith-fence_ipmilan-5254005f9a33_start_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254005f9a33_stop_0 controller-2" -> "stonith-fence_ipmilan-5254005f9a33_start_0 controller-2" [ style = bold] --"stonith-fence_ipmilan-5254005f9a33_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540065418e_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540065418e_start_0 controller-2" -> "stonith-fence_ipmilan-52540065418e_monitor_60000 controller-2" [ style = bold] --"stonith-fence_ipmilan-52540065418e_start_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540065418e_stop_0 controller-2" -> "stonith-fence_ipmilan-52540065418e_start_0 controller-2" [ style = bold] --"stonith-fence_ipmilan-52540065418e_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540066e27e_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540066e27e_start_0 controller-1" -> "stonith-fence_ipmilan-52540066e27e_monitor_60000 controller-1" [ style = bold] --"stonith-fence_ipmilan-52540066e27e_start_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540066e27e_stop_0 controller-1" -> "stonith-fence_ipmilan-52540066e27e_start_0 controller-1" [ style = bold] --"stonith-fence_ipmilan-52540066e27e_stop_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540098c9ff_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540098c9ff_start_0 controller-1" -> "stonith-fence_ipmilan-52540098c9ff_monitor_60000 controller-1" [ style = bold] --"stonith-fence_ipmilan-52540098c9ff_start_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-52540098c9ff_stop_0 controller-1" -> "stonith-fence_ipmilan-52540098c9ff_start_0 controller-1" [ style = bold] --"stonith-fence_ipmilan-52540098c9ff_stop_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400a16c0d_monitor_60000 
controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400a16c0d_start_0 controller-1" -> "stonith-fence_ipmilan-525400a16c0d_monitor_60000 controller-1" [ style = bold] --"stonith-fence_ipmilan-525400a16c0d_start_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400a16c0d_stop_0 controller-1" -> "stonith-fence_ipmilan-525400a16c0d_start_0 controller-1" [ style = bold] --"stonith-fence_ipmilan-525400a16c0d_stop_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400aab9d9_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400aab9d9_start_0 controller-2" -> "stonith-fence_ipmilan-525400aab9d9_monitor_60000 controller-2" [ style = bold] --"stonith-fence_ipmilan-525400aab9d9_start_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400aab9d9_stop_0 controller-2" -> "stonith-fence_ipmilan-525400aab9d9_start_0 controller-2" [ style = bold] --"stonith-fence_ipmilan-525400aab9d9_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/nested-remote-recovery.exp b/cts/scheduler/nested-remote-recovery.exp -index 37c51c1..795a6ea 100644 ---- a/cts/scheduler/nested-remote-recovery.exp -+++ b/cts/scheduler/nested-remote-recovery.exp -@@ -1,45 +1,45 @@ - - - -- -+ - - - - - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - -@@ -48,23 +48,23 @@ - - - -- -+ - - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - -@@ -73,121 +73,121 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - -@@ -196,40 +196,40 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -239,7 +239,7 @@ - - - -- -+ - - - -@@ -255,10 +255,10 @@ - - - -- -+ - - -- -+ - - - -@@ -271,29 +271,29 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -312,327 +312,12 @@ - - - -- -+ - - - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - -@@ -646,62 +331,62 @@ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - -- -+ - - - - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - -@@ -710,49 +395,49 @@ - - - -- -+ - - - 
-- -+ - -- -+ - - - - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - - -diff --git a/cts/scheduler/nested-remote-recovery.scores b/cts/scheduler/nested-remote-recovery.scores -index e3b75dd..83ae5aa 100644 ---- a/cts/scheduler/nested-remote-recovery.scores -+++ b/cts/scheduler/nested-remote-recovery.scores -@@ -1,4 +1,22 @@ - Allocation scores: -+Only 'private' parameters to stonith-fence_ipmilan-5254000203a2_monitor_60000 on controller-2 changed: 0:0;216:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254000203a2_start_0 on controller-2 changed: 0:0;222:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254002f6d57_monitor_60000 on controller-1 changed: 0:0;229:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-5254002f6d57_start_0 on controller-1 changed: 0:0;237:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-5254003296a5_monitor_60000 on controller-1 changed: 0:0;216:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-5254003296a5_start_0 on controller-1 changed: 0:0;224:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-5254005f9a33_monitor_60000 on controller-2 changed: 0:0;211:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254005f9a33_start_0 on controller-2 changed: 0:0;217:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-52540065418e_monitor_60000 on controller-2 changed: 0:0;223:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-52540065418e_start_0 on controller-2 changed: 0:0;229:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-52540066e27e_monitor_60000 on controller-1 changed: 0:0;219:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-52540066e27e_start_0 on controller-1 changed: 0:0;227:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-52540098c9ff_monitor_60000 on controller-1 changed: 0:0;211:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-52540098c9ff_start_0 on controller-1 changed: 0:0;219:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-525400a16c0d_monitor_60000 on controller-1 changed: 0:0;229:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-525400a16c0d_start_0 on controller-1 changed: 0:0;235:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-525400aab9d9_monitor_60000 on controller-2 changed: 0:0;226:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-525400aab9d9_start_0 on controller-2 changed: 0:0;232:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 - Using the original execution date of: 2018-09-11 21:23:25Z - galera:0 promotion score on galera-bundle-0: 100 - galera:1 promotion score on galera-bundle-1: 100 -diff --git a/cts/scheduler/nested-remote-recovery.summary b/cts/scheduler/nested-remote-recovery.summary -index a8552b7..fef6ba3 100644 ---- a/cts/scheduler/nested-remote-recovery.summary -+++ b/cts/scheduler/nested-remote-recovery.summary -@@ -45,50 +45,32 @@ 
GuestOnline: [ galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bun - stonith-fence_ipmilan-525400a16c0d (stonith:fence_ipmilan): Started controller-1 - stonith-fence_ipmilan-5254002f6d57 (stonith:fence_ipmilan): Started controller-1 - -+Only 'private' parameters to stonith-fence_ipmilan-525400aab9d9_start_0 on controller-2 changed: 0:0;232:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-525400aab9d9_monitor_60000 on controller-2 changed: 0:0;226:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254000203a2_start_0 on controller-2 changed: 0:0;222:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254000203a2_monitor_60000 on controller-2 changed: 0:0;216:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-52540065418e_start_0 on controller-2 changed: 0:0;229:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-52540065418e_monitor_60000 on controller-2 changed: 0:0;223:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254005f9a33_start_0 on controller-2 changed: 0:0;217:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254005f9a33_monitor_60000 on controller-2 changed: 0:0;211:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254002f6d57_start_0 on controller-1 changed: 0:0;237:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-5254002f6d57_monitor_60000 on controller-1 changed: 0:0;229:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-52540066e27e_start_0 on controller-1 changed: 0:0;227:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-52540066e27e_monitor_60000 on controller-1 changed: 0:0;219:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-525400a16c0d_start_0 on controller-1 changed: 0:0;235:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-525400a16c0d_monitor_60000 on controller-1 changed: 0:0;229:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 -+Only 'private' parameters to stonith-fence_ipmilan-5254003296a5_start_0 on controller-1 changed: 0:0;224:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-5254003296a5_monitor_60000 on controller-1 changed: 0:0;216:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-52540098c9ff_start_0 on controller-1 changed: 0:0;219:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 -+Only 'private' parameters to stonith-fence_ipmilan-52540098c9ff_monitor_60000 on controller-1 changed: 0:0;211:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 - Transition Summary: - * Fence (reboot) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean' - * Recover galera-bundle-docker-0 ( database-0 ) - * Recover galera-bundle-0 ( controller-0 ) - * Recover galera:0 ( Master galera-bundle-0 ) -- * Restart stonith-fence_ipmilan-5254005f9a33 ( controller-2 ) due to resource definition change -- * Restart stonith-fence_ipmilan-52540098c9ff ( controller-1 ) due to resource definition change -- * Restart stonith-fence_ipmilan-5254000203a2 ( controller-2 ) due to resource definition change -- * Restart 
stonith-fence_ipmilan-5254003296a5 ( controller-1 ) due to resource definition change -- * Restart stonith-fence_ipmilan-52540066e27e ( controller-1 ) due to resource definition change -- * Restart stonith-fence_ipmilan-52540065418e ( controller-2 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400aab9d9 ( controller-2 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400a16c0d ( controller-1 ) due to resource definition change -- * Restart stonith-fence_ipmilan-5254002f6d57 ( controller-1 ) due to resource definition change - - Executing cluster transition: - * Resource action: galera-bundle-0 stop on controller-0 -- * Resource action: stonith-fence_ipmilan-5254005f9a33 stop on controller-2 -- * Resource action: stonith-fence_ipmilan-5254005f9a33 start on controller-2 -- * Resource action: stonith-fence_ipmilan-5254005f9a33 monitor=60000 on controller-2 -- * Resource action: stonith-fence_ipmilan-52540098c9ff stop on controller-1 -- * Resource action: stonith-fence_ipmilan-52540098c9ff start on controller-1 -- * Resource action: stonith-fence_ipmilan-52540098c9ff monitor=60000 on controller-1 -- * Resource action: stonith-fence_ipmilan-5254000203a2 stop on controller-2 -- * Resource action: stonith-fence_ipmilan-5254000203a2 start on controller-2 -- * Resource action: stonith-fence_ipmilan-5254000203a2 monitor=60000 on controller-2 -- * Resource action: stonith-fence_ipmilan-5254003296a5 stop on controller-1 -- * Resource action: stonith-fence_ipmilan-5254003296a5 start on controller-1 -- * Resource action: stonith-fence_ipmilan-5254003296a5 monitor=60000 on controller-1 -- * Resource action: stonith-fence_ipmilan-52540066e27e stop on controller-1 -- * Resource action: stonith-fence_ipmilan-52540066e27e start on controller-1 -- * Resource action: stonith-fence_ipmilan-52540066e27e monitor=60000 on controller-1 -- * Resource action: stonith-fence_ipmilan-52540065418e stop on controller-2 -- * Resource action: stonith-fence_ipmilan-52540065418e start on controller-2 -- * Resource action: stonith-fence_ipmilan-52540065418e monitor=60000 on controller-2 -- * Resource action: stonith-fence_ipmilan-525400aab9d9 stop on controller-2 -- * Resource action: stonith-fence_ipmilan-525400aab9d9 start on controller-2 -- * Resource action: stonith-fence_ipmilan-525400aab9d9 monitor=60000 on controller-2 -- * Resource action: stonith-fence_ipmilan-525400a16c0d stop on controller-1 -- * Resource action: stonith-fence_ipmilan-525400a16c0d start on controller-1 -- * Resource action: stonith-fence_ipmilan-525400a16c0d monitor=60000 on controller-1 -- * Resource action: stonith-fence_ipmilan-5254002f6d57 stop on controller-1 -- * Resource action: stonith-fence_ipmilan-5254002f6d57 start on controller-1 -- * Resource action: stonith-fence_ipmilan-5254002f6d57 monitor=60000 on controller-1 - * Pseudo action: galera-bundle_demote_0 - * Pseudo action: galera-bundle-master_demote_0 - * Pseudo action: galera_demote_0 -diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.dot b/cts/scheduler/no-promote-on-unrunnable-guest.dot -index 6063640..b9696fc 100644 ---- a/cts/scheduler/no-promote-on-unrunnable-guest.dot -+++ b/cts/scheduler/no-promote-on-unrunnable-guest.dot -@@ -110,19 +110,4 @@ - "ovndb_servers_stop_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] - "ovndb_servers_stop_0 ovn-dbs-bundle-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-0" [ style = dashed] - "ovndb_servers_stop_0 ovn-dbs-bundle-0" [ style=bold color="green" 
fontcolor="black"] --"stonith-fence_ipmilan-5254005e097a_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254005e097a_start_0 controller-0" -> "stonith-fence_ipmilan-5254005e097a_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-5254005e097a_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254005e097a_stop_0 controller-0" -> "stonith-fence_ipmilan-5254005e097a_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-5254005e097a_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400985679_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400985679_start_0 controller-1" -> "stonith-fence_ipmilan-525400985679_monitor_60000 controller-1" [ style = bold] --"stonith-fence_ipmilan-525400985679_start_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400985679_stop_0 controller-1" -> "stonith-fence_ipmilan-525400985679_start_0 controller-1" [ style = bold] --"stonith-fence_ipmilan-525400985679_stop_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400afe30e_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400afe30e_start_0 controller-2" -> "stonith-fence_ipmilan-525400afe30e_monitor_60000 controller-2" [ style = bold] --"stonith-fence_ipmilan-525400afe30e_start_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400afe30e_stop_0 controller-2" -> "stonith-fence_ipmilan-525400afe30e_start_0 controller-2" [ style = bold] --"stonith-fence_ipmilan-525400afe30e_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.exp b/cts/scheduler/no-promote-on-unrunnable-guest.exp -index 4417f6e..b675aed 100644 ---- a/cts/scheduler/no-promote-on-unrunnable-guest.exp -+++ b/cts/scheduler/no-promote-on-unrunnable-guest.exp -@@ -8,23 +8,23 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - -- -+ - - - -@@ -37,7 +37,7 @@ - - - -- -+ - - - -@@ -50,7 +50,7 @@ - - - -- -+ - - - -@@ -63,7 +63,7 @@ - - - -- -+ - - - -@@ -76,32 +76,32 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -111,7 +111,7 @@ - - - -- -+ - - - -@@ -133,7 +133,7 @@ - - - -- -+ - - - -@@ -146,7 +146,7 @@ - - - -- -+ - - - -@@ -159,7 +159,7 @@ - - - -- -+ - - - -@@ -172,19 +172,19 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -196,28 +196,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -229,61 +229,61 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -295,28 +295,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -331,7 +331,7 @@ - - - -- -+ - - - -@@ -339,131 +339,131 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -473,174 +473,69 @@ - - - -- -+ - - - -- -+ - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 
-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ - - - - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - -- -+ - - - - - -- -+ - - - -diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.scores b/cts/scheduler/no-promote-on-unrunnable-guest.scores -index f368ad4..3e6776f 100644 ---- a/cts/scheduler/no-promote-on-unrunnable-guest.scores -+++ b/cts/scheduler/no-promote-on-unrunnable-guest.scores -@@ -1,4 +1,10 @@ - Allocation scores: -+Only 'private' parameters to stonith-fence_ipmilan-5254005e097a_monitor_60000 on controller-0 changed: 0:0;218:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-5254005e097a_start_0 on controller-0 changed: 0:0;217:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-525400985679_monitor_60000 on controller-1 changed: 0:0;224:64:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-525400985679_start_0 on controller-1 changed: 0:0;223:64:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-525400afe30e_monitor_60000 on controller-2 changed: 0:0;220:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-525400afe30e_start_0 on controller-2 changed: 0:0;219:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 - Using the original execution date of: 2020-05-14 10:49:31Z - galera:0 promotion score on galera-bundle-0: 100 - galera:1 promotion score on galera-bundle-1: 100 -diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.summary b/cts/scheduler/no-promote-on-unrunnable-guest.summary -index fd6b926..fdc2668 100644 ---- a/cts/scheduler/no-promote-on-unrunnable-guest.summary -+++ b/cts/scheduler/no-promote-on-unrunnable-guest.summary -@@ -26,27 +26,21 @@ GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 galera-bundle-1:galera-bun - Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest] - openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-0 - -+Only 'private' parameters to stonith-fence_ipmilan-5254005e097a_start_0 on controller-0 changed: 0:0;217:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-5254005e097a_monitor_60000 on controller-0 changed: 0:0;218:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-525400985679_start_0 on controller-1 changed: 0:0;223:64:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-525400985679_monitor_60000 on controller-1 changed: 0:0;224:64:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-525400afe30e_start_0 on controller-2 changed: 0:0;219:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 -+Only 'private' parameters to stonith-fence_ipmilan-525400afe30e_monitor_60000 on controller-2 changed: 0:0;220:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 - Transition Summary: - * Stop ovn-dbs-bundle-podman-0 ( controller-0 ) due to node availability - * Stop ovn-dbs-bundle-0 ( controller-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start - * Stop ovndb_servers:0 ( Slave ovn-dbs-bundle-0 ) due to unrunnable 
ovn-dbs-bundle-podman-0 start - * Promote ovndb_servers:1 ( Slave -> Master ovn-dbs-bundle-1 ) -- * Restart stonith-fence_ipmilan-5254005e097a ( controller-0 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400afe30e ( controller-2 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400985679 ( controller-1 ) due to resource definition change - - Executing cluster transition: - * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 -- * Resource action: stonith-fence_ipmilan-5254005e097a stop on controller-0 -- * Resource action: stonith-fence_ipmilan-5254005e097a start on controller-0 -- * Resource action: stonith-fence_ipmilan-5254005e097a monitor=60000 on controller-0 -- * Resource action: stonith-fence_ipmilan-525400afe30e stop on controller-2 -- * Resource action: stonith-fence_ipmilan-525400afe30e start on controller-2 -- * Resource action: stonith-fence_ipmilan-525400afe30e monitor=60000 on controller-2 -- * Resource action: stonith-fence_ipmilan-525400985679 stop on controller-1 -- * Resource action: stonith-fence_ipmilan-525400985679 start on controller-1 -- * Resource action: stonith-fence_ipmilan-525400985679 monitor=60000 on controller-1 - * Pseudo action: ovn-dbs-bundle_stop_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 -diff --git a/cts/scheduler/notifs-for-unrunnable.dot b/cts/scheduler/notifs-for-unrunnable.dot -index f17e9b4..aa4039e 100644 ---- a/cts/scheduler/notifs-for-unrunnable.dot -+++ b/cts/scheduler/notifs-for-unrunnable.dot -@@ -74,19 +74,4 @@ - "redis:0_start_0 redis-bundle-0" -> "redis:0_monitor_45000 redis-bundle-0" [ style = dashed] - "redis:0_start_0 redis-bundle-0" -> "redis:0_monitor_60000 redis-bundle-0" [ style = dashed] - "redis:0_start_0 redis-bundle-0" [ style=dashed color="red" fontcolor="black"] --"stonith-fence_ipmilan-5254002ff217_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254002ff217_start_0 controller-2" -> "stonith-fence_ipmilan-5254002ff217_monitor_60000 controller-2" [ style = bold] --"stonith-fence_ipmilan-5254002ff217_start_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254002ff217_stop_0 controller-2" -> "stonith-fence_ipmilan-5254002ff217_start_0 controller-2" [ style = bold] --"stonith-fence_ipmilan-5254002ff217_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254008f971a_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254008f971a_start_0 controller-1" -> "stonith-fence_ipmilan-5254008f971a_monitor_60000 controller-1" [ style = bold] --"stonith-fence_ipmilan-5254008f971a_start_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-5254008f971a_stop_0 controller-1" -> "stonith-fence_ipmilan-5254008f971a_start_0 controller-1" [ style = bold] --"stonith-fence_ipmilan-5254008f971a_stop_0 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400fec0c8_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400fec0c8_start_0 controller-1" -> "stonith-fence_ipmilan-525400fec0c8_monitor_60000 controller-1" [ style = bold] --"stonith-fence_ipmilan-525400fec0c8_start_0 controller-1" [ style=bold color="green" fontcolor="black"] 
--"stonith-fence_ipmilan-525400fec0c8_stop_0 controller-1" -> "stonith-fence_ipmilan-525400fec0c8_start_0 controller-1" [ style = bold] --"stonith-fence_ipmilan-525400fec0c8_stop_0 controller-1" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/notifs-for-unrunnable.exp b/cts/scheduler/notifs-for-unrunnable.exp -index 82067ae..44bb4c3 100644 ---- a/cts/scheduler/notifs-for-unrunnable.exp -+++ b/cts/scheduler/notifs-for-unrunnable.exp -@@ -1,46 +1,46 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -48,97 +48,97 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -146,191 +146,86 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - -- -+ - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ - - - - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - -- -+ - - - - - -- -+ - - - -- -+ - -- -+ - - - - - -- -+ - -- -+ - - - - - -- -+ - - - -- -+ - -- -+ - - - -diff --git a/cts/scheduler/notifs-for-unrunnable.scores b/cts/scheduler/notifs-for-unrunnable.scores -index cd9df1c..8434da8 100644 ---- a/cts/scheduler/notifs-for-unrunnable.scores -+++ b/cts/scheduler/notifs-for-unrunnable.scores -@@ -1,4 +1,10 @@ - Allocation scores: -+Only 'private' parameters to stonith-fence_ipmilan-5254002ff217_monitor_60000 on controller-2 changed: 0:0;181:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-5254002ff217_start_0 on controller-2 changed: 0:0;180:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-5254008f971a_monitor_60000 on controller-1 changed: 0:0;183:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-5254008f971a_start_0 on controller-1 changed: 0:0;182:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-525400fec0c8_monitor_60000 on controller-1 changed: 0:0;179:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-525400fec0c8_start_0 on controller-1 changed: 0:0;178:2:0:8b283351-71e8-4848-b470-8664f73af1e9 - Using the original execution date of: 2018-02-13 23:40:47Z - galera:0 promotion score on galera-bundle-0: -1 - galera:1 promotion score on galera-bundle-1: 100 -diff --git a/cts/scheduler/notifs-for-unrunnable.summary b/cts/scheduler/notifs-for-unrunnable.summary -index d8d00fd..3f1680c 100644 ---- a/cts/scheduler/notifs-for-unrunnable.summary -+++ b/cts/scheduler/notifs-for-unrunnable.summary -@@ -32,6 +32,12 @@ GuestOnline: [ galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bun - stonith-fence_ipmilan-5254002ff217 (stonith:fence_ipmilan): Started controller-2 - stonith-fence_ipmilan-5254008f971a (stonith:fence_ipmilan): Started controller-1 - -+Only 'private' parameters to stonith-fence_ipmilan-525400fec0c8_start_0 on controller-1 changed: 0:0;178:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to 
stonith-fence_ipmilan-525400fec0c8_monitor_60000 on controller-1 changed: 0:0;179:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-5254008f971a_start_0 on controller-1 changed: 0:0;182:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-5254008f971a_monitor_60000 on controller-1 changed: 0:0;183:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-5254002ff217_start_0 on controller-2 changed: 0:0;180:2:0:8b283351-71e8-4848-b470-8664f73af1e9 -+Only 'private' parameters to stonith-fence_ipmilan-5254002ff217_monitor_60000 on controller-2 changed: 0:0;181:2:0:8b283351-71e8-4848-b470-8664f73af1e9 - Transition Summary: - * Start rabbitmq-bundle-0 ( controller-1 ) due to unrunnable rabbitmq-bundle-docker-0 start (blocked) - * Start rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-docker-0 start (blocked) -@@ -39,22 +45,10 @@ Transition Summary: - * Start galera:0 ( galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start (blocked) - * Start redis-bundle-0 ( controller-1 ) due to unrunnable redis-bundle-docker-0 start (blocked) - * Start redis:0 ( redis-bundle-0 ) due to unrunnable redis-bundle-docker-0 start (blocked) -- * Restart stonith-fence_ipmilan-525400fec0c8 ( controller-1 ) due to resource definition change -- * Restart stonith-fence_ipmilan-5254002ff217 ( controller-2 ) due to resource definition change -- * Restart stonith-fence_ipmilan-5254008f971a ( controller-1 ) due to resource definition change - - Executing cluster transition: - * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 - * Pseudo action: redis-bundle-master_pre_notify_start_0 -- * Resource action: stonith-fence_ipmilan-525400fec0c8 stop on controller-1 -- * Resource action: stonith-fence_ipmilan-525400fec0c8 start on controller-1 -- * Resource action: stonith-fence_ipmilan-525400fec0c8 monitor=60000 on controller-1 -- * Resource action: stonith-fence_ipmilan-5254002ff217 stop on controller-2 -- * Resource action: stonith-fence_ipmilan-5254002ff217 start on controller-2 -- * Resource action: stonith-fence_ipmilan-5254002ff217 monitor=60000 on controller-2 -- * Resource action: stonith-fence_ipmilan-5254008f971a stop on controller-1 -- * Resource action: stonith-fence_ipmilan-5254008f971a start on controller-1 -- * Resource action: stonith-fence_ipmilan-5254008f971a monitor=60000 on controller-1 - * Pseudo action: redis-bundle_start_0 - * Pseudo action: galera-bundle_start_0 - * Pseudo action: rabbitmq-bundle_start_0 -diff --git a/cts/scheduler/on-fail-ignore.dot b/cts/scheduler/on-fail-ignore.dot -index 66d22cc..d8f1c9f 100644 ---- a/cts/scheduler/on-fail-ignore.dot -+++ b/cts/scheduler/on-fail-ignore.dot -@@ -1,12 +1,2 @@ - digraph "g" { --"fence_db1_monitor_60000 407892-db2" [ style=bold color="green" fontcolor="black"] --"fence_db1_start_0 407892-db2" -> "fence_db1_monitor_60000 407892-db2" [ style = bold] --"fence_db1_start_0 407892-db2" [ style=bold color="green" fontcolor="black"] --"fence_db1_stop_0 407892-db2" -> "fence_db1_start_0 407892-db2" [ style = bold] --"fence_db1_stop_0 407892-db2" [ style=bold color="green" fontcolor="black"] --"fence_db2_monitor_60000 407888-db1" [ style=bold color="green" fontcolor="black"] --"fence_db2_start_0 407888-db1" -> "fence_db2_monitor_60000 407888-db1" [ style = bold] --"fence_db2_start_0 407888-db1" [ style=bold color="green" fontcolor="black"] --"fence_db2_stop_0 407888-db1" -> "fence_db2_start_0 407888-db1" [ style 
= bold] --"fence_db2_stop_0 407888-db1" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/on-fail-ignore.exp b/cts/scheduler/on-fail-ignore.exp -index fc3fb5b..56e315f 100644 ---- a/cts/scheduler/on-fail-ignore.exp -+++ b/cts/scheduler/on-fail-ignore.exp -@@ -1,72 +1 @@ -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -+ -diff --git a/cts/scheduler/on-fail-ignore.scores b/cts/scheduler/on-fail-ignore.scores -index 85040ba..64c9896 100644 ---- a/cts/scheduler/on-fail-ignore.scores -+++ b/cts/scheduler/on-fail-ignore.scores -@@ -1,4 +1,8 @@ - Allocation scores: -+Only 'private' parameters to fence_db1_monitor_60000 on 407892-db2 changed: 0:0;5:3:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d -+Only 'private' parameters to fence_db1_start_0 on 407892-db2 changed: 0:0;4:3:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d -+Only 'private' parameters to fence_db2_monitor_60000 on 407888-db1 changed: 0:0;8:4:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d -+Only 'private' parameters to fence_db2_start_0 on 407888-db1 changed: 0:0;7:4:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d - Using the original execution date of: 2017-10-26 14:23:50Z - pcmk__native_allocate: fence_db1 allocation score on 407888-db1: 0 - pcmk__native_allocate: fence_db1 allocation score on 407892-db2: 100 -diff --git a/cts/scheduler/on-fail-ignore.summary b/cts/scheduler/on-fail-ignore.summary -index fb4b0f3..7605f37 100644 ---- a/cts/scheduler/on-fail-ignore.summary -+++ b/cts/scheduler/on-fail-ignore.summary -@@ -7,17 +7,13 @@ Online: [ 407888-db1 407892-db2 ] - fence_db2 (stonith:fence_ipmilan): Started 407888-db1 - nfs_snet_ip (ocf::heartbeat:IPaddr2): Started 407888-db1 (failure ignored) - -+Only 'private' parameters to fence_db2_start_0 on 407888-db1 changed: 0:0;7:4:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d -+Only 'private' parameters to fence_db2_monitor_60000 on 407888-db1 changed: 0:0;8:4:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d -+Only 'private' parameters to fence_db1_start_0 on 407892-db2 changed: 0:0;4:3:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d -+Only 'private' parameters to fence_db1_monitor_60000 on 407892-db2 changed: 0:0;5:3:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d - Transition Summary: -- * Restart fence_db1 ( 407892-db2 ) due to resource definition change -- * Restart fence_db2 ( 407888-db1 ) due to resource definition change - - Executing cluster transition: -- * Resource action: fence_db1 stop on 407892-db2 -- * Resource action: fence_db1 start on 407892-db2 -- * Resource action: fence_db1 monitor=60000 on 407892-db2 -- * Resource action: fence_db2 stop on 407888-db1 -- * Resource action: fence_db2 start on 407888-db1 -- * Resource action: fence_db2 monitor=60000 on 407888-db1 - Using the original execution date of: 2017-10-26 14:23:50Z - - Revised cluster status: -diff --git a/cts/scheduler/remote-recover-all.dot b/cts/scheduler/remote-recover-all.dot -index 819e5eb..d513ce4 100644 ---- a/cts/scheduler/remote-recover-all.dot -+++ b/cts/scheduler/remote-recover-all.dot -@@ -117,8 +117,6 @@ - "stonith 'reboot' galera-2" -> "ip-172.17.4.11_start_0 controller-2" [ style = bold] - "stonith 'reboot' galera-2" -> "stonith 'reboot' messaging-1" [ style = bold] - "stonith 'reboot' galera-2" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] --"stonith 'reboot' galera-2" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = 
bold] --"stonith 'reboot' galera-2" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] - "stonith 'reboot' galera-2" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' messaging-1" -> "galera-0_start_0 controller-2" [ style = bold] - "stonith 'reboot' messaging-1" -> "ip-172.17.1.14_start_0 controller-2" [ style = bold] -@@ -128,22 +126,10 @@ - "stonith 'reboot' messaging-1" -> "rabbitmq_post_notify_stonith_0" [ style = bold] - "stonith 'reboot' messaging-1" -> "rabbitmq_stop_0 messaging-1" [ style = bold] - "stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] --"stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] - "stonith 'reboot' messaging-1" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" -> "stonith-fence_ipmilan-5254005bdbb5_monitor_60000 controller-2" [ style = bold] - "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] --"stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/remote-recover-all.exp b/cts/scheduler/remote-recover-all.exp -index 0cb51f6..7edc970 100644 ---- a/cts/scheduler/remote-recover-all.exp -+++ b/cts/scheduler/remote-recover-all.exp -@@ -1,7 +1,7 @@ - - - -- -+ - - - -@@ -9,27 +9,27 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - -@@ -41,7 +41,7 @@ - - - -- -+ - - - -@@ -49,7 +49,7 @@ - - - -- -+ - - - -@@ -113,13 +113,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -128,22 +128,22 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -155,16 +155,16 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - -@@ -173,13 +173,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -195,28 +195,28 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -224,28 
+224,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -284,7 +284,7 @@ - - - -- -+ - - - -@@ -293,7 +293,7 @@ - - - -- -+ - - - -@@ -306,7 +306,7 @@ - - - -- -+ - - - -@@ -319,7 +319,7 @@ - - - -- -+ - - - -@@ -335,7 +335,7 @@ - - - -- -+ - - - -@@ -348,7 +348,7 @@ - - - -- -+ - - - -@@ -357,13 +357,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -375,28 +375,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -408,7 +408,7 @@ - - - -- -+ - - - -@@ -416,22 +416,22 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -440,26 +440,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -469,7 +469,7 @@ - - - -- -+ - - - -@@ -481,7 +481,7 @@ - - - -- -+ - - - -@@ -490,26 +490,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -519,7 +519,7 @@ - - - -- -+ - - - -@@ -531,7 +531,7 @@ - - - -- -+ - - - -@@ -540,26 +540,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -569,7 +569,7 @@ - - - -- -+ - - - -@@ -581,7 +581,7 @@ - - - -- -+ - - - -@@ -590,13 +590,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -605,28 +605,28 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -638,88 +638,6 @@ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - -@@ -731,7 +649,7 @@ - - - -- -+ - - - -@@ -750,7 +668,7 @@ - - - -- -+ - - - -@@ -758,7 +676,7 @@ - - - -- -+ - - - -@@ -773,7 +691,7 @@ - - - -- -+ - - - -@@ -788,7 +706,7 @@ - - - -- -+ - - - -diff --git a/cts/scheduler/remote-recover-all.scores b/cts/scheduler/remote-recover-all.scores -index e411dfd..82f53ed 100644 ---- a/cts/scheduler/remote-recover-all.scores -+++ b/cts/scheduler/remote-recover-all.scores -@@ -1,4 +1,8 @@ - Allocation scores: -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Using the original execution date of: 2017-05-03 13:33:24Z - galera:0 promotion score on galera-1: 100 - galera:1 promotion score on none: 0 -diff --git a/cts/scheduler/remote-recover-all.summary b/cts/scheduler/remote-recover-all.summary -index 6b64e42..988114f 100644 ---- a/cts/scheduler/remote-recover-all.summary -+++ b/cts/scheduler/remote-recover-all.summary -@@ -37,6 +37,10 @@ RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] - stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 - stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) - -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea 
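The informational lines added to the scores and summary files in these tests all follow the shape "Only 'private' parameters to <operation> on <node> changed: <status>:<rc>;<action>:<transition>:<target-rc>:<uuid>". That field breakdown is an assumption inferred from the visible pattern, not a documented format; with that caveat, a small sketch that splits one of these keys apart could look like this:

/* Hedged sketch: split a "status:rc;action:transition:target-rc:uuid"
 * string like the ones in the test summaries. Field labels are an
 * assumption, not a documented Pacemaker format.
 * Build: gcc parse_key.c
 */
#include <stdio.h>

int main(void)
{
    const char *key = "0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea";
    int status = 0, rc = 0, action = 0, transition = 0, target_rc = 0;
    char uuid[37] = "";

    if (sscanf(key, "%d:%d;%d:%d:%d:%36s",
               &status, &rc, &action, &transition, &target_rc, uuid) == 6) {
        printf("status=%d rc=%d action=%d transition=%d target-rc=%d uuid=%s\n",
               status, rc, action, transition, target_rc, uuid);
    } else {
        fprintf(stderr, "unexpected key format: %s\n", key);
    }
    return 0;
}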
-+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Transition Summary: - * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' - * Fence (reboot) galera-2 'resources are active and the connection is unrecoverable' -@@ -51,8 +55,6 @@ Transition Summary: - * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) - * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) - * Stop haproxy:0 ( controller-1 ) due to node availability -- * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) due to resource definition change - * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) - - Executing cluster transition: -@@ -61,8 +63,6 @@ Executing cluster transition: - * Pseudo action: galera-2_stop_0 - * Pseudo action: galera-master_demote_0 - * Pseudo action: redis-master_pre_notify_stop_0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 - * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 - * Fencing controller-1 (reboot) - * Pseudo action: redis_post_notify_stop_0 -@@ -90,10 +90,6 @@ Executing cluster transition: - * Pseudo action: ip-172.17.1.14_stop_0 - * Pseudo action: ip-172.17.1.17_stop_0 - * Pseudo action: ip-172.17.4.11_stop_0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 - * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 - * Resource action: galera-0 monitor=20000 on controller-2 - * Resource action: rabbitmq notify on messaging-2 -diff --git a/cts/scheduler/remote-recover-connection.dot b/cts/scheduler/remote-recover-connection.dot -index 33c4fd0..86192f3 100644 ---- a/cts/scheduler/remote-recover-connection.dot -+++ b/cts/scheduler/remote-recover-connection.dot -@@ -95,14 +95,4 @@ - "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] --"stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] 
--"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/remote-recover-connection.exp b/cts/scheduler/remote-recover-connection.exp -index d1d33c8..cfcba98 100644 ---- a/cts/scheduler/remote-recover-connection.exp -+++ b/cts/scheduler/remote-recover-connection.exp -@@ -1,33 +1,33 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -35,33 +35,33 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -69,33 +69,33 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -110,7 +110,7 @@ - - - -- -+ - - - -@@ -123,7 +123,7 @@ - - - -- -+ - - - -@@ -136,7 +136,7 @@ - - - -- -+ - - - -@@ -172,7 +172,7 @@ - - - -- -+ - - - -@@ -181,7 +181,7 @@ - - - -- -+ - - - -@@ -194,7 +194,7 @@ - - - -- -+ - - - -@@ -207,7 +207,7 @@ - - - -- -+ - - - -@@ -223,7 +223,7 @@ - - - -- -+ - - - -@@ -236,7 +236,7 @@ - - - -- -+ - - - -@@ -245,13 +245,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -263,28 +263,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -296,7 +296,7 @@ - - - -- -+ - - - -@@ -304,22 +304,22 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -328,26 +328,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -357,13 +357,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -372,26 +372,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -401,13 +401,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -416,26 +416,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -445,13 +445,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -460,13 +460,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -475,28 +475,28 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -508,76 +508,6 @@ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - -@@ -589,7 +519,7 @@ - - - -- -+ - - - -@@ -602,7 +532,7 @@ - - - -- -+ - - - -@@ -610,7 +540,7 @@ - - - -- -+ - - - -diff --git a/cts/scheduler/remote-recover-connection.scores b/cts/scheduler/remote-recover-connection.scores -index 328f1d3..391d7c8 100644 ---- a/cts/scheduler/remote-recover-connection.scores -+++ b/cts/scheduler/remote-recover-connection.scores -@@ -1,4 +1,8 @@ - Allocation scores: -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Using the 
original execution date of: 2017-05-03 13:33:24Z - galera:0 promotion score on galera-1: 100 - galera:1 promotion score on galera-2: 100 -diff --git a/cts/scheduler/remote-recover-connection.summary b/cts/scheduler/remote-recover-connection.summary -index bacd5a9..f87c150 100644 ---- a/cts/scheduler/remote-recover-connection.summary -+++ b/cts/scheduler/remote-recover-connection.summary -@@ -37,6 +37,10 @@ RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] - stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 - stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) - -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Transition Summary: - * Fence (reboot) controller-1 'peer is no longer part of the cluster' - * Move messaging-1 ( controller-1 -> controller-2 ) -@@ -47,8 +51,6 @@ Transition Summary: - * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) - * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) - * Stop haproxy:0 ( controller-1 ) due to node availability -- * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) due to resource definition change - * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) - - Executing cluster transition: -@@ -56,12 +58,6 @@ Executing cluster transition: - * Pseudo action: galera-0_stop_0 - * Pseudo action: galera-2_stop_0 - * Pseudo action: redis-master_pre_notify_stop_0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 - * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 - * Fencing controller-1 (reboot) - * Resource action: messaging-1 start on controller-2 -diff --git a/cts/scheduler/remote-recover-no-resources.dot b/cts/scheduler/remote-recover-no-resources.dot -index 8db3fb6..bae902f 100644 ---- a/cts/scheduler/remote-recover-no-resources.dot -+++ b/cts/scheduler/remote-recover-no-resources.dot -@@ -102,22 +102,10 @@ - "stonith 'reboot' messaging-1" -> "rabbitmq_post_notify_stonith_0" [ style = bold] - "stonith 'reboot' messaging-1" -> "rabbitmq_stop_0 messaging-1" [ style = bold] - "stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] --"stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] - "stonith 'reboot' messaging-1" 
[ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" -> "stonith-fence_ipmilan-5254005bdbb5_monitor_60000 controller-2" [ style = bold] - "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] --"stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/remote-recover-no-resources.exp b/cts/scheduler/remote-recover-no-resources.exp -index 90470fb..394933f 100644 ---- a/cts/scheduler/remote-recover-no-resources.exp -+++ b/cts/scheduler/remote-recover-no-resources.exp -@@ -1,7 +1,7 @@ - - - -- -+ - - - -@@ -9,27 +9,27 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - -@@ -38,7 +38,7 @@ - - - -- -+ - - - -@@ -46,7 +46,7 @@ - - - -- -+ - - - -@@ -110,13 +110,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -125,22 +125,22 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -159,7 +159,7 @@ - - - -- -+ - - - -@@ -195,7 +195,7 @@ - - - -- -+ - - - -@@ -204,7 +204,7 @@ - - - -- -+ - - - -@@ -217,7 +217,7 @@ - - - -- -+ - - - -@@ -230,7 +230,7 @@ - - - -- -+ - - - -@@ -246,7 +246,7 @@ - - - -- -+ - - - -@@ -259,7 +259,7 @@ - - - -- -+ - - - -@@ -268,13 +268,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -286,28 +286,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -319,7 +319,7 @@ - - - -- -+ - - - -@@ -327,22 +327,22 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -351,26 +351,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -380,7 +380,7 @@ - - - -- -+ - - - -@@ -389,7 +389,7 @@ - - - -- -+ - - - -@@ -398,26 +398,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -427,7 +427,7 @@ - - - -- -+ - - - -@@ -436,7 +436,7 @@ - - - -- -+ - - - -@@ -445,26 +445,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -474,7 +474,7 @@ - - - -- -+ - - - -@@ -483,7 +483,7 @@ - - - -- -+ - - - -@@ -492,13 +492,13 @@ - - - -- -+ - - - - - -- -+ - - - 
-@@ -507,28 +507,28 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -540,82 +540,6 @@ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - -@@ -627,7 +551,7 @@ - - - -- -+ - - - -@@ -643,7 +567,7 @@ - - - -- -+ - - - -@@ -651,7 +575,7 @@ - - - -- -+ - - - -@@ -666,7 +590,7 @@ - - - -- -+ - - - -diff --git a/cts/scheduler/remote-recover-no-resources.scores b/cts/scheduler/remote-recover-no-resources.scores -index 378974e..cd4444c 100644 ---- a/cts/scheduler/remote-recover-no-resources.scores -+++ b/cts/scheduler/remote-recover-no-resources.scores -@@ -1,4 +1,8 @@ - Allocation scores: -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Using the original execution date of: 2017-05-03 13:33:24Z - galera:0 promotion score on galera-1: 100 - galera:1 promotion score on galera-0: 100 -diff --git a/cts/scheduler/remote-recover-no-resources.summary b/cts/scheduler/remote-recover-no-resources.summary -index 7f2a90c..16ac8ac 100644 ---- a/cts/scheduler/remote-recover-no-resources.summary -+++ b/cts/scheduler/remote-recover-no-resources.summary -@@ -37,6 +37,10 @@ RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] - stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 - stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) - -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Transition Summary: - * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' - * Fence (reboot) controller-1 'peer is no longer part of the cluster' -@@ -49,8 +53,6 @@ Transition Summary: - * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) - * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) - * Stop haproxy:0 ( controller-1 ) due to node availability -- * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) due to resource definition change - * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) - - Executing cluster transition: -@@ -58,8 +60,6 @@ Executing cluster transition: - * Pseudo action: galera-0_stop_0 - * Pseudo action: galera-2_stop_0 - * Pseudo 
action: redis-master_pre_notify_stop_0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 - * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 - * Fencing controller-1 (reboot) - * Pseudo action: redis_post_notify_stop_0 -@@ -77,10 +77,6 @@ Executing cluster transition: - * Pseudo action: redis-master_stopped_0 - * Pseudo action: haproxy_stop_0 - * Pseudo action: haproxy-clone_stopped_0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 - * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 - * Resource action: galera-0 monitor=20000 on controller-2 - * Resource action: rabbitmq notify on messaging-2 -diff --git a/cts/scheduler/remote-recover-unknown.dot b/cts/scheduler/remote-recover-unknown.dot -index 902e0b5..eea6eea 100644 ---- a/cts/scheduler/remote-recover-unknown.dot -+++ b/cts/scheduler/remote-recover-unknown.dot -@@ -101,8 +101,6 @@ - "stonith 'reboot' galera-2" -> "ip-172.17.4.11_start_0 controller-2" [ style = bold] - "stonith 'reboot' galera-2" -> "stonith 'reboot' messaging-1" [ style = bold] - "stonith 'reboot' galera-2" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] --"stonith 'reboot' galera-2" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith 'reboot' galera-2" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] - "stonith 'reboot' galera-2" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' messaging-1" -> "galera-0_start_0 controller-2" [ style = bold] - "stonith 'reboot' messaging-1" -> "ip-172.17.1.14_start_0 controller-2" [ style = bold] -@@ -112,22 +110,10 @@ - "stonith 'reboot' messaging-1" -> "rabbitmq_post_notify_stonith_0" [ style = bold] - "stonith 'reboot' messaging-1" -> "rabbitmq_stop_0 messaging-1" [ style = bold] - "stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] --"stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith 'reboot' messaging-1" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] - "stonith 'reboot' messaging-1" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" -> "stonith-fence_ipmilan-5254005bdbb5_monitor_60000 controller-2" [ style = bold] - "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] --"stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style=bold color="green" 
fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/remote-recover-unknown.exp b/cts/scheduler/remote-recover-unknown.exp -index 82cb65f7..1d14684 100644 ---- a/cts/scheduler/remote-recover-unknown.exp -+++ b/cts/scheduler/remote-recover-unknown.exp -@@ -1,7 +1,7 @@ - - - -- -+ - - - -@@ -9,27 +9,27 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - -@@ -41,7 +41,7 @@ - - - -- -+ - - - -@@ -49,7 +49,7 @@ - - - -- -+ - - - -@@ -113,13 +113,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -128,22 +128,22 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -162,7 +162,7 @@ - - - -- -+ - - - -@@ -198,7 +198,7 @@ - - - -- -+ - - - -@@ -207,7 +207,7 @@ - - - -- -+ - - - -@@ -220,7 +220,7 @@ - - - -- -+ - - - -@@ -233,7 +233,7 @@ - - - -- -+ - - - -@@ -249,7 +249,7 @@ - - - -- -+ - - - -@@ -262,7 +262,7 @@ - - - -- -+ - - - -@@ -271,13 +271,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -289,28 +289,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -322,7 +322,7 @@ - - - -- -+ - - - -@@ -330,22 +330,22 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -354,26 +354,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -383,7 +383,7 @@ - - - -- -+ - - - -@@ -395,7 +395,7 @@ - - - -- -+ - - - -@@ -404,26 +404,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -433,7 +433,7 @@ - - - -- -+ - - - -@@ -445,7 +445,7 @@ - - - -- -+ - - - -@@ -454,26 +454,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -483,7 +483,7 @@ - - - -- -+ - - - -@@ -495,7 +495,7 @@ - - - -- -+ - - - -@@ -504,13 +504,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -519,28 +519,28 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -552,88 +552,6 @@ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - -@@ -645,7 +563,7 @@ - - - -- -+ - - - -@@ -664,7 +582,7 @@ - - - -- -+ - - - -@@ -672,7 +590,7 @@ - - - -- -+ - - - -@@ -687,7 +605,7 @@ - - - -- -+ - - - -@@ -702,7 +620,7 @@ - - - -- -+ - - - -diff --git a/cts/scheduler/remote-recover-unknown.scores b/cts/scheduler/remote-recover-unknown.scores -index 378974e..cd4444c 100644 ---- a/cts/scheduler/remote-recover-unknown.scores -+++ b/cts/scheduler/remote-recover-unknown.scores -@@ -1,4 +1,8 @@ - Allocation scores: -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea 
-+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Using the original execution date of: 2017-05-03 13:33:24Z - galera:0 promotion score on galera-1: 100 - galera:1 promotion score on galera-0: 100 -diff --git a/cts/scheduler/remote-recover-unknown.summary b/cts/scheduler/remote-recover-unknown.summary -index 330c4cb..af5f724 100644 ---- a/cts/scheduler/remote-recover-unknown.summary -+++ b/cts/scheduler/remote-recover-unknown.summary -@@ -37,6 +37,10 @@ RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] - stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 - stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) - -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Transition Summary: - * Fence (reboot) galera-2 'resources are in an unknown state and the connection is unrecoverable' - * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' -@@ -50,8 +54,6 @@ Transition Summary: - * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) - * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) - * Stop haproxy:0 ( controller-1 ) due to node availability -- * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) due to resource definition change - * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) - - Executing cluster transition: -@@ -59,8 +61,6 @@ Executing cluster transition: - * Pseudo action: galera-0_stop_0 - * Pseudo action: galera-2_stop_0 - * Pseudo action: redis-master_pre_notify_stop_0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 - * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 - * Fencing controller-1 (reboot) - * Pseudo action: redis_post_notify_stop_0 -@@ -79,10 +79,6 @@ Executing cluster transition: - * Pseudo action: redis-master_stopped_0 - * Pseudo action: haproxy_stop_0 - * Pseudo action: haproxy-clone_stopped_0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 - * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 - * Resource action: galera-0 monitor=20000 on 
controller-2 - * Resource action: rabbitmq notify on messaging-2 -diff --git a/cts/scheduler/remote-recovery.dot b/cts/scheduler/remote-recovery.dot -index 33c4fd0..86192f3 100644 ---- a/cts/scheduler/remote-recovery.dot -+++ b/cts/scheduler/remote-recovery.dot -@@ -95,14 +95,4 @@ - "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style=bold color="green" fontcolor="black"] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] - "stonith-fence_ipmilan-5254005bdbb5_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] --"stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400b4f6bd_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_monitor_60000 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style=bold color="green" fontcolor="black"] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] --"stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] - } -diff --git a/cts/scheduler/remote-recovery.exp b/cts/scheduler/remote-recovery.exp -index d1d33c8..cfcba98 100644 ---- a/cts/scheduler/remote-recovery.exp -+++ b/cts/scheduler/remote-recovery.exp -@@ -1,33 +1,33 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -35,33 +35,33 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -69,33 +69,33 @@ - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -110,7 +110,7 @@ - - - -- -+ - - - -@@ -123,7 +123,7 @@ - - - -- -+ - - - -@@ -136,7 +136,7 @@ - - - -- -+ - - - -@@ -172,7 +172,7 @@ - - - -- -+ - - - -@@ -181,7 +181,7 @@ - - - -- -+ - - - -@@ -194,7 +194,7 @@ - - - -- -+ - - - -@@ -207,7 +207,7 @@ - - - -- -+ - - - -@@ -223,7 +223,7 @@ - - - -- -+ - - - -@@ -236,7 +236,7 @@ - - - -- -+ - - - -@@ -245,13 +245,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -263,28 +263,28 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - - - -- -+ - - - -@@ -296,7 +296,7 @@ - - - -- -+ - - - -@@ -304,22 +304,22 @@ - - - -- -+ - - - - - -- -+ - - -- -+ - - - - - -- -+ - - - -@@ -328,26 +328,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -357,13 +357,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -372,26 +372,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -401,13 +401,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -416,26 +416,26 @@ - - - -- -+ - - - - - -- -+ - - - - - - -- -+ - - - - - -- -+ - - - -@@ -445,13 +445,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -460,13 +460,13 @@ - - - -- -+ - - - - - -- -+ - - - -@@ -475,28 +475,28 @@ - - - -- -+ - - - - - -- -+ - - - - - -- -+ - - 
-- -+ - - - - - -- -+ - - - -@@ -508,76 +508,6 @@ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - -@@ -589,7 +519,7 @@ - - - -- -+ - - - -@@ -602,7 +532,7 @@ - - - -- -+ - - - -@@ -610,7 +540,7 @@ - - - -- -+ - - - -diff --git a/cts/scheduler/remote-recovery.scores b/cts/scheduler/remote-recovery.scores -index 328f1d3..391d7c8 100644 ---- a/cts/scheduler/remote-recovery.scores -+++ b/cts/scheduler/remote-recovery.scores -@@ -1,4 +1,8 @@ - Allocation scores: -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Using the original execution date of: 2017-05-03 13:33:24Z - galera:0 promotion score on galera-1: 100 - galera:1 promotion score on galera-2: 100 -diff --git a/cts/scheduler/remote-recovery.summary b/cts/scheduler/remote-recovery.summary -index bacd5a9..f87c150 100644 ---- a/cts/scheduler/remote-recovery.summary -+++ b/cts/scheduler/remote-recovery.summary -@@ -37,6 +37,10 @@ RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] - stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 - stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) - -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_start_0 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400bbf613_monitor_60000 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_start_0 on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea -+Only 'private' parameters to stonith-fence_ipmilan-525400b4f6bd_monitor_60000 on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea - Transition Summary: - * Fence (reboot) controller-1 'peer is no longer part of the cluster' - * Move messaging-1 ( controller-1 -> controller-2 ) -@@ -47,8 +51,6 @@ Transition Summary: - * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) - * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) - * Stop haproxy:0 ( controller-1 ) due to node availability -- * Restart stonith-fence_ipmilan-525400bbf613 ( controller-0 ) due to resource definition change -- * Restart stonith-fence_ipmilan-525400b4f6bd ( controller-0 ) due to resource definition change - * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) - - Executing cluster transition: -@@ -56,12 +58,6 @@ Executing cluster transition: - * Pseudo action: galera-0_stop_0 - * Pseudo action: galera-2_stop_0 - * Pseudo action: redis-master_pre_notify_stop_0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400bbf613 
monitor=60000 on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 -- * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 - * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 - * Fencing controller-1 (reboot) - * Resource action: messaging-1 start on controller-2 --- -1.8.3.1 - diff --git a/SOURCES/007-feature-set.patch b/SOURCES/007-feature-set.patch deleted file mode 100644 index c7b67e9..0000000 --- a/SOURCES/007-feature-set.patch +++ /dev/null @@ -1,5949 +0,0 @@ -From 08d3e77237eb3b9f4600b4a8a4c5153e928ca3ce Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 7 Aug 2020 13:07:50 -0400 -Subject: [PATCH 01/19] Feature: tools: Add the beginnings of formatted output - to crm_resource. - -This just adds the command line options, the output object, hooks it -up for use if --version is given, and uses it for printing error -messages. ---- - tools/crm_resource.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++------ - 1 file changed, 47 insertions(+), 6 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index acaddc0..9700618 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -154,6 +154,7 @@ gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GE - - bool BE_QUIET = FALSE; - static crm_exit_t exit_code = CRM_EX_OK; -+static pcmk__output_t *out = NULL; - - // Things that should be cleaned up on exit - static GError *error = NULL; -@@ -166,15 +167,32 @@ static pe_working_set_t *data_set = NULL; - - #define INDENT " " - -+static pcmk__supported_format_t formats[] = { -+ PCMK__SUPPORTED_FORMAT_NONE, -+ PCMK__SUPPORTED_FORMAT_TEXT, -+ PCMK__SUPPORTED_FORMAT_XML, -+ { NULL, NULL, NULL } -+}; -+ - // Clean up and exit - static crm_exit_t - bye(crm_exit_t ec) - { - if (error != NULL) { -- fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message); -+ if (out != NULL) { -+ out->err(out, "%s: %s", g_get_prgname(), error->message); -+ } else { -+ fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message); -+ } -+ - g_clear_error(&error); - } - -+ if (out != NULL) { -+ out->finish(out, ec, true, NULL); -+ pcmk__output_free(out); -+ } -+ - if (cib_conn != NULL) { - cib_t *save_cib_conn = cib_conn; - -@@ -1428,7 +1446,7 @@ validate_cmdline(crm_exit_t *exit_code) - } - - static GOptionContext * --build_arg_context(pcmk__common_args_t *args) { -+build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { - GOptionContext *context = NULL; - - GOptionEntry extra_prog_entries[] = { -@@ -1471,7 +1489,7 @@ build_arg_context(pcmk__common_args_t *args) { - "had failed permanently and has been repaired by an administrator):\n\n" - "\t# crm_resource --resource myResource --cleanup --node aNode\n\n"; - -- context = pcmk__build_arg_context(args, NULL, NULL, NULL); -+ context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); - g_option_context_set_description(context, description); - - /* Add the -Q option, which cannot be part of the globally supported options -@@ -1504,9 +1522,11 @@ main(int argc, char **argv) - - pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); - GOptionContext *context = NULL; -+ GOptionGroup *output_group = NULL; - gchar **processed_args = NULL; - -- context = build_arg_context(args); -+ context = build_arg_context(args, &output_group); -+ pcmk__register_formats(output_group, formats); - crm_log_cli_init("crm_resource"); - - 
processed_args = pcmk__cmdline_preproc(argv, "GINSTdginpstuv"); -@@ -1520,6 +1540,14 @@ main(int argc, char **argv) - crm_bump_log_level(argc, argv); - } - -+ rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); -+ if (rc != pcmk_rc_ok) { -+ fprintf(stderr, "Error creating output format %s: %s\n", -+ args->output_ty, pcmk_rc_str(rc)); -+ exit_code = CRM_EX_ERROR; -+ goto done; -+ } -+ - options.resource_verbose = args->verbosity; - BE_QUIET = args->quiet; - -@@ -1593,9 +1621,22 @@ main(int argc, char **argv) - goto done; - } - -+ if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) { -+ /* Kind of a hack to display XML lists using a real tag instead of . This just -+ * saves from having to write custom messages to build the lists around all these things -+ */ -+ if (options.rsc_cmd == cmd_list_resources || options.rsc_cmd == cmd_query_xml || -+ options.rsc_cmd == cmd_query_raw_xml || options.rsc_cmd == cmd_list_active_ops || -+ options.rsc_cmd == cmd_list_all_ops) { -+ pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname()); -+ } else { -+ pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname()); -+ } -+ } -+ - if (args->version) { -- /* FIXME: When crm_resource is converted to use formatted output, this can go. */ -- pcmk__cli_help('v', CRM_EX_USAGE); -+ out->version(out, false); -+ goto done; - } - - if (optind > argc) { --- -1.8.3.1 - - -From 38f09e048e662c0e1814cbde6ecee633cc9560d5 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 26 Aug 2020 16:32:41 -0400 -Subject: [PATCH 02/19] Refactor: tools: Pass a pcmk__output_t object around - crm_resource. - -Basically, any function that does any printing is eventually going to -need this object. Adding it all at once here should make for more easy -to understand patches later. 
---- - tools/crm_resource.c | 109 +++++++++--------- - tools/crm_resource.h | 110 ++++++++++-------- - tools/crm_resource_ban.c | 19 ++-- - tools/crm_resource_print.c | 38 ++++--- - tools/crm_resource_runtime.c | 266 +++++++++++++++++++++++-------------------- - 5 files changed, 288 insertions(+), 254 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 9700618..7a661a4 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -918,7 +918,8 @@ why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **er - } - - static int --ban_or_move(pe_resource_t *rsc, const char *move_lifetime, crm_exit_t *exit_code) -+ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime, -+ crm_exit_t *exit_code) - { - int rc = pcmk_rc_ok; - pe_node_t *current = NULL; -@@ -929,7 +930,7 @@ ban_or_move(pe_resource_t *rsc, const char *move_lifetime, crm_exit_t *exit_code - current = pe__find_active_requires(rsc, &nactive); - - if (nactive == 1) { -- rc = cli_resource_ban(options.rsc_id, current->details->uname, move_lifetime, NULL, -+ rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, - cib_conn, options.cib_options, options.promoted_role_only); - - } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { -@@ -948,7 +949,7 @@ ban_or_move(pe_resource_t *rsc, const char *move_lifetime, crm_exit_t *exit_code - } - - if(count == 1 && current) { -- rc = cli_resource_ban(options.rsc_id, current->details->uname, move_lifetime, NULL, -+ rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, - cib_conn, options.cib_options, options.promoted_role_only); - - } else { -@@ -977,7 +978,7 @@ ban_or_move(pe_resource_t *rsc, const char *move_lifetime, crm_exit_t *exit_code - } - - static void --cleanup(pe_resource_t *rsc) -+cleanup(pcmk__output_t *out, pe_resource_t *rsc) - { - int rc = pcmk_rc_ok; - -@@ -987,12 +988,12 @@ cleanup(pe_resource_t *rsc) - - crm_debug("Erasing failures of %s (%s requested) on %s", - rsc->id, options.rsc_id, (options.host_uname? 
options.host_uname: "all nodes")); -- rc = cli_resource_delete(controld_api, options.host_uname, rsc, options.operation, -+ rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, options.operation, - options.interval_spec, TRUE, data_set, options.force); - - if ((rc == pcmk_rc_ok) && !BE_QUIET) { - // Show any reasons why resource might stay stopped -- cli_resource_check(cib_conn, rsc); -+ cli_resource_check(out, cib_conn, rsc); - } - - if (rc == pcmk_rc_ok) { -@@ -1001,7 +1002,7 @@ cleanup(pe_resource_t *rsc) - } - - static int --clear_constraints(xmlNodePtr *cib_xml_copy) -+clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) - { - GListPtr before = NULL; - GListPtr after = NULL; -@@ -1089,7 +1090,7 @@ delete() - } - - static int --list_agents(const char *agent_spec, crm_exit_t *exit_code) -+list_agents(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) - { - int rc = pcmk_rc_ok; - lrmd_list_t *list = NULL; -@@ -1126,7 +1127,7 @@ list_agents(const char *agent_spec, crm_exit_t *exit_code) - } - - static int --list_providers(const char *agent_spec, crm_exit_t *exit_code) -+list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) - { - int rc; - const char *text = NULL; -@@ -1177,7 +1178,7 @@ list_providers(const char *agent_spec, crm_exit_t *exit_code) - } - - static int --list_raw() -+list_raw(pcmk__output_t *out) - { - int rc = pcmk_rc_ok; - int found = 0; -@@ -1187,7 +1188,7 @@ list_raw() - pe_resource_t *rsc = (pe_resource_t *) lpc->data; - - found++; -- cli_resource_print_raw(rsc); -+ cli_resource_print_raw(out, rsc); - } - - if (found == 0) { -@@ -1199,7 +1200,7 @@ list_raw() - } - - static void --list_stacks_and_constraints(pe_resource_t *rsc, bool recursive) -+list_stacks_and_constraints(pcmk__output_t *out, pe_resource_t *rsc, bool recursive) - { - GListPtr lpc = NULL; - xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, -@@ -1216,10 +1217,10 @@ list_stacks_and_constraints(pe_resource_t *rsc, bool recursive) - pe__clear_resource_flags(r, pe_rsc_allocating); - } - -- cli_resource_print_colocation(rsc, TRUE, recursive, 1); -+ cli_resource_print_colocation(out, rsc, TRUE, recursive, 1); - - fprintf(stdout, "* %s\n", rsc->id); -- cli_resource_print_location(rsc, NULL); -+ cli_resource_print_location(out, rsc, NULL); - - for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *r = (pe_resource_t *) lpc->data; -@@ -1227,7 +1228,7 @@ list_stacks_and_constraints(pe_resource_t *rsc, bool recursive) - pe__clear_resource_flags(r, pe_rsc_allocating); - } - -- cli_resource_print_colocation(rsc, FALSE, recursive, 1); -+ cli_resource_print_colocation(out, rsc, FALSE, recursive, 1); - } - - static int -@@ -1262,7 +1263,7 @@ populate_working_set(xmlNodePtr *cib_xml_copy) - } - - static int --refresh() -+refresh(pcmk__output_t *out) - { - int rc = pcmk_rc_ok; - const char *router_node = options.host_uname; -@@ -1307,7 +1308,7 @@ refresh() - } - - static void --refresh_resource(pe_resource_t *rsc) -+refresh_resource(pcmk__output_t *out, pe_resource_t *rsc) - { - int rc = pcmk_rc_ok; - -@@ -1317,12 +1318,12 @@ refresh_resource(pe_resource_t *rsc) - - crm_debug("Re-checking the state of %s (%s requested) on %s", - rsc->id, options.rsc_id, (options.host_uname? 
options.host_uname: "all nodes")); -- rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0, FALSE, -- data_set, options.force); -+ rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, NULL, -+ 0, FALSE, data_set, options.force); - - if ((rc == pcmk_rc_ok) && !BE_QUIET) { - // Show any reasons why resource might stay stopped -- cli_resource_check(cib_conn, rsc); -+ cli_resource_check(out, cib_conn, rsc); - } - - if (rc == pcmk_rc_ok) { -@@ -1364,7 +1365,7 @@ set_property() - } - - static int --show_metadata(const char *agent_spec, crm_exit_t *exit_code) -+show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) - { - int rc = pcmk_rc_ok; - char *standard = NULL; -@@ -1438,7 +1439,7 @@ validate_cmdline(crm_exit_t *exit_code) - if (options.validate_options == NULL) { - options.validate_options = crm_str_table_new(); - } -- *exit_code = cli_resource_execute_from_params("test", options.v_class, options.v_provider, options.v_agent, -+ *exit_code = cli_resource_execute_from_params(out, "test", options.v_class, options.v_provider, options.v_agent, - "validate-all", options.validate_options, - options.override_params, options.timeout_ms, - options.resource_verbose, options.force); -@@ -1737,25 +1738,25 @@ main(int argc, char **argv) - switch (options.rsc_cmd) { - case cmd_list_resources: - rc = pcmk_rc_ok; -- cli_resource_print_list(data_set, FALSE); -+ cli_resource_print_list(out, data_set, FALSE); - break; - - case cmd_list_instances: -- rc = list_raw(); -+ rc = list_raw(out); - break; - - case cmd_list_standards: - case cmd_list_providers: - case cmd_list_alternatives: -- rc = list_providers(options.agent_spec, &exit_code); -+ rc = list_providers(out, options.agent_spec, &exit_code); - break; - - case cmd_list_agents: -- rc = list_agents(options.agent_spec, &exit_code); -+ rc = list_agents(out, options.agent_spec, &exit_code); - break; - - case cmd_metadata: -- rc = show_metadata(options.agent_spec, &exit_code); -+ rc = show_metadata(out, options.agent_spec, &exit_code); - break; - - case cmd_restart: -@@ -1764,7 +1765,7 @@ main(int argc, char **argv) - * update the working set multiple times, so it needs to use its own - * copy. 
- */ -- rc = cli_resource_restart(rsc, options.host_uname, -+ rc = cli_resource_restart(out, rsc, options.host_uname, - options.move_lifetime, options.timeout_ms, - cib_conn, options.cib_options, - options.promoted_role_only, -@@ -1772,11 +1773,11 @@ main(int argc, char **argv) - break; - - case cmd_wait: -- rc = wait_till_stable(options.timeout_ms, cib_conn); -+ rc = wait_till_stable(out, options.timeout_ms, cib_conn); - break; - - case cmd_execute_agent: -- exit_code = cli_resource_execute(rsc, options.rsc_id, -+ exit_code = cli_resource_execute(out, rsc, options.rsc_id, - options.operation, - options.override_params, - options.timeout_ms, cib_conn, -@@ -1785,11 +1786,11 @@ main(int argc, char **argv) - break; - - case cmd_colocations: -- list_stacks_and_constraints(rsc, false); -+ list_stacks_and_constraints(out, rsc, false); - break; - - case cmd_colocations_deep: -- list_stacks_and_constraints(rsc, true); -+ list_stacks_and_constraints(out, rsc, true); - break; - - case cmd_cts: -@@ -1798,13 +1799,13 @@ main(int argc, char **argv) - lpc = lpc->next) { - - rsc = (pe_resource_t *) lpc->data; -- cli_resource_print_cts(rsc); -+ cli_resource_print_cts(out, rsc); - } -- cli_resource_print_cts_constraints(data_set); -+ cli_resource_print_cts_constraints(out, data_set); - break; - - case cmd_fail: -- rc = cli_resource_fail(controld_api, options.host_uname, -+ rc = cli_resource_fail(out, controld_api, options.host_uname, - options.rsc_id, data_set); - if (rc == pcmk_rc_ok) { - start_mainloop(controld_api); -@@ -1812,28 +1813,28 @@ main(int argc, char **argv) - break; - - case cmd_list_active_ops: -- rc = cli_resource_print_operations(options.rsc_id, -+ rc = cli_resource_print_operations(out, options.rsc_id, - options.host_uname, TRUE, - data_set); - break; - - case cmd_list_all_ops: -- rc = cli_resource_print_operations(options.rsc_id, -+ rc = cli_resource_print_operations(out, options.rsc_id, - options.host_uname, FALSE, - data_set); - break; - - case cmd_locate: -- cli_resource_search(rsc, options.rsc_id, data_set); -+ cli_resource_search(out, rsc, options.rsc_id, data_set); - rc = pcmk_rc_ok; - break; - - case cmd_query_xml: -- rc = cli_resource_print(rsc, data_set, TRUE); -+ rc = cli_resource_print(out, rsc, data_set, TRUE); - break; - - case cmd_query_raw_xml: -- rc = cli_resource_print(rsc, data_set, FALSE); -+ rc = cli_resource_print(out, rsc, data_set, FALSE); - break; - - case cmd_why: -@@ -1847,20 +1848,20 @@ main(int argc, char **argv) - goto done; - } - } -- cli_resource_why(cib_conn, data_set->resources, rsc, dest); -+ cli_resource_why(out, cib_conn, data_set->resources, rsc, dest); - rc = pcmk_rc_ok; - } - break; - - case cmd_clear: -- rc = clear_constraints(&cib_xml_copy); -+ rc = clear_constraints(out, &cib_xml_copy); - break; - - case cmd_move: - if (options.host_uname == NULL) { -- rc = ban_or_move(rsc, options.move_lifetime, &exit_code); -+ rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code); - } else { -- rc = cli_resource_move(rsc, options.rsc_id, options.host_uname, -+ rc = cli_resource_move(out, rsc, options.rsc_id, options.host_uname, - options.move_lifetime, cib_conn, - options.cib_options, data_set, - options.promoted_role_only, -@@ -1870,7 +1871,7 @@ main(int argc, char **argv) - - case cmd_ban: - if (options.host_uname == NULL) { -- rc = ban_or_move(rsc, options.move_lifetime, &exit_code); -+ rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code); - } else { - pe_node_t *dest = pe_find_node(data_set->nodes, - options.host_uname); -@@ -1879,7 +1880,7 
@@ main(int argc, char **argv) - rc = pcmk_rc_node_unknown; - goto done; - } -- rc = cli_resource_ban(options.rsc_id, dest->details->uname, -+ rc = cli_resource_ban(out, options.rsc_id, dest->details->uname, - options.move_lifetime, NULL, cib_conn, - options.cib_options, - options.promoted_role_only); -@@ -1887,7 +1888,7 @@ main(int argc, char **argv) - break; - - case cmd_get_property: -- rc = cli_resource_print_property(rsc, options.prop_name, data_set); -+ rc = cli_resource_print_property(out, rsc, options.prop_name, data_set); - break; - - case cmd_set_property: -@@ -1895,7 +1896,7 @@ main(int argc, char **argv) - break; - - case cmd_get_param: -- rc = cli_resource_print_attribute(rsc, options.prop_name, -+ rc = cli_resource_print_attribute(out, rsc, options.prop_name, - options.attr_set_type, data_set); - break; - -@@ -1908,7 +1909,7 @@ main(int argc, char **argv) - } - - /* coverity[var_deref_model] False positive */ -- rc = cli_resource_update_attribute(rsc, options.rsc_id, -+ rc = cli_resource_update_attribute(out, rsc, options.rsc_id, - options.prop_set, - options.attr_set_type, - options.prop_id, -@@ -1921,7 +1922,7 @@ main(int argc, char **argv) - - case cmd_delete_param: - /* coverity[var_deref_model] False positive */ -- rc = cli_resource_delete_attribute(rsc, options.rsc_id, -+ rc = cli_resource_delete_attribute(out, rsc, options.rsc_id, - options.prop_set, - options.attr_set_type, - options.prop_id, -@@ -1932,22 +1933,22 @@ main(int argc, char **argv) - - case cmd_cleanup: - if (rsc == NULL) { -- rc = cli_cleanup_all(controld_api, options.host_uname, -+ rc = cli_cleanup_all(out, controld_api, options.host_uname, - options.operation, options.interval_spec, - data_set); - if (rc == pcmk_rc_ok) { - start_mainloop(controld_api); - } - } else { -- cleanup(rsc); -+ cleanup(out, rsc); - } - break; - - case cmd_refresh: - if (rsc == NULL) { -- rc = refresh(); -+ rc = refresh(out); - } else { -- refresh_resource(rsc); -+ refresh_resource(out, rsc); - } - break; - -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index e979ef8..bf99f24 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -25,75 +25,85 @@ - extern bool BE_QUIET; - - /* ban */ --int cli_resource_prefer(const char *rsc_id, const char *host, const char *move_lifetime, -- cib_t * cib_conn, int cib_options, gboolean promoted_role_only); --int cli_resource_ban(const char *rsc_id, const char *host, const char *move_lifetime, -- GListPtr allnodes, cib_t * cib_conn, int cib_options, -- gboolean promoted_role_only); -+int cli_resource_prefer(pcmk__output_t *out, const char *rsc_id, const char *host, -+ const char *move_lifetime, cib_t * cib_conn, int cib_options, -+ gboolean promoted_role_only); -+int cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, -+ const char *move_lifetime, GListPtr allnodes, cib_t * cib_conn, -+ int cib_options, gboolean promoted_role_only); - int cli_resource_clear(const char *rsc_id, const char *host, GListPtr allnodes, - cib_t * cib_conn, int cib_options, bool clear_ban_constraints, gboolean force); - int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, int cib_options, - const char *rsc, const char *node, gboolean promoted_role_only); - - /* print */ --void cli_resource_print_cts(pe_resource_t * rsc); --void cli_resource_print_raw(pe_resource_t * rsc); --void cli_resource_print_cts_constraints(pe_working_set_t * data_set); --void cli_resource_print_location(pe_resource_t * rsc, const char *prefix); --void 
cli_resource_print_colocation(pe_resource_t * rsc, bool dependents, bool recursive, int offset); -+void cli_resource_print_cts(pcmk__output_t *out, pe_resource_t * rsc); -+void cli_resource_print_raw(pcmk__output_t *out, pe_resource_t * rsc); -+void cli_resource_print_cts_constraints(pcmk__output_t *out, pe_working_set_t * data_set); -+void cli_resource_print_location(pcmk__output_t *out, pe_resource_t * rsc, -+ const char *prefix); -+void cli_resource_print_colocation(pcmk__output_t *out, pe_resource_t * rsc, -+ bool dependents, bool recursive, int offset); - --int cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, -+int cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, pe_working_set_t *data_set, - bool expanded); --int cli_resource_print_list(pe_working_set_t * data_set, bool raw); --int cli_resource_print_attribute(pe_resource_t *rsc, const char *attr, const char *attr_set_type, -+int cli_resource_print_list(pcmk__output_t *out, pe_working_set_t * data_set, bool raw); -+int cli_resource_print_attribute(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *attr, const char *attr_set_type, - pe_working_set_t *data_set); --int cli_resource_print_property(pe_resource_t *rsc, const char *attr, -+int cli_resource_print_property(pcmk__output_t *out, pe_resource_t *rsc, const char *attr, - pe_working_set_t *data_set); --int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pe_working_set_t * data_set); -+int cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id, -+ const char *host_uname, bool active, -+ pe_working_set_t * data_set); - - /* runtime */ --void cli_resource_check(cib_t * cib, pe_resource_t *rsc); --int cli_resource_fail(pcmk_ipc_api_t *controld_api, -+void cli_resource_check(pcmk__output_t *out, cib_t * cib, pe_resource_t *rsc); -+int cli_resource_fail(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - const char *host_uname, const char *rsc_id, - pe_working_set_t *data_set); --int cli_resource_search(pe_resource_t *rsc, const char *requested_name, -- pe_working_set_t *data_set); --int cli_resource_delete(pcmk_ipc_api_t *controld_api, -+int cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *requested_name, pe_working_set_t *data_set); -+int cli_resource_delete(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - const char *host_uname, pe_resource_t *rsc, - const char *operation, const char *interval_spec, - bool just_failures, pe_working_set_t *data_set, - gboolean force); --int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, -- const char *operation, const char *interval_spec, -- pe_working_set_t *data_set); --int cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_lifetime, -- int timeout_ms, cib_t *cib, int cib_options, -- gboolean promoted_role_only, gboolean force); --int cli_resource_move(pe_resource_t *rsc, const char *rsc_id, -- const char *host_name, const char *move_lifetime, -- cib_t *cib, int cib_options, pe_working_set_t *data_set, -+int cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, -+ const char *node_name, const char *operation, -+ const char *interval_spec, pe_working_set_t *data_set); -+int cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, -+ const char *move_lifetime, int timeout_ms, cib_t *cib, -+ int cib_options, gboolean promoted_role_only, gboolean force); -+int cli_resource_move(pcmk__output_t *out, pe_resource_t *rsc, const char *rsc_id, -+ const char 
*host_name, const char *move_lifetime, cib_t *cib, -+ int cib_options, pe_working_set_t *data_set, - gboolean promoted_role_only, gboolean force); --crm_exit_t cli_resource_execute_from_params(const char *rsc_name, const char *rsc_class, -- const char *rsc_prov, const char *rsc_type, -- const char *rsc_action, GHashTable *params, -- GHashTable *override_hash, int timeout_ms, -- int resource_verbose, gboolean force); --crm_exit_t cli_resource_execute(pe_resource_t *rsc, const char *requested_name, -- const char *rsc_action, GHashTable *override_hash, -- int timeout_ms, cib_t *cib, pe_working_set_t *data_set, -- int resource_verbose, gboolean force); -+crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, -+ const char *rsc_class, const char *rsc_prov, -+ const char *rsc_type, const char *rsc_action, -+ GHashTable *params, GHashTable *override_hash, -+ int timeout_ms, int resource_verbose, -+ gboolean force); -+crm_exit_t cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *requested_name, const char *rsc_action, -+ GHashTable *override_hash, int timeout_ms, cib_t *cib, -+ pe_working_set_t *data_set, int resource_verbose, -+ gboolean force); - --int cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, -- const char *attr_set, const char *attr_set_type, -- const char *attr_id, const char *attr_name, -- const char *attr_value, gboolean recursive, cib_t *cib, -- int cib_options, pe_working_set_t *data_set, gboolean force); --int cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, -- const char *attr_set, const char *attr_set_type, -- const char *attr_id, const char *attr_name, cib_t *cib, -- int cib_options, pe_working_set_t *data_set, gboolean force); -+int cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *requested_name, const char *attr_set, -+ const char *attr_set_type, const char *attr_id, -+ const char *attr_name, const char *attr_value, -+ gboolean recursive, cib_t *cib, int cib_options, -+ pe_working_set_t *data_set, gboolean force); -+int cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *requested_name, const char *attr_set, -+ const char *attr_set_type, const char *attr_id, -+ const char *attr_name, cib_t *cib, int cib_options, -+ pe_working_set_t *data_set, gboolean force); - - int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml); --int wait_till_stable(int timeout_ms, cib_t * cib); --void cli_resource_why(cib_t *cib_conn, GListPtr resources, pe_resource_t *rsc, -- pe_node_t *node); -+int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib); -+void cli_resource_why(pcmk__output_t *out, cib_t *cib_conn, GListPtr resources, -+ pe_resource_t *rsc, pe_node_t *node); -diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c -index e055797..95e5a17 100644 ---- a/tools/crm_resource_ban.c -+++ b/tools/crm_resource_ban.c -@@ -12,7 +12,7 @@ - #define XPATH_MAX 1024 - - static char * --parse_cli_lifetime(const char *move_lifetime) -+parse_cli_lifetime(pcmk__output_t *out, const char *move_lifetime) - { - char *later_s = NULL; - crm_time_t *now = NULL; -@@ -58,9 +58,9 @@ parse_cli_lifetime(const char *move_lifetime) - - // \return Standard Pacemaker return code - int --cli_resource_ban(const char *rsc_id, const char *host, const char *move_lifetime, -- GListPtr allnodes, cib_t * cib_conn, int cib_options, -- gboolean promoted_role_only) -+cli_resource_ban(pcmk__output_t *out, 
const char *rsc_id, const char *host, -+ const char *move_lifetime, GListPtr allnodes, cib_t * cib_conn, -+ int cib_options, gboolean promoted_role_only) - { - char *later_s = NULL; - int rc = pcmk_rc_ok; -@@ -72,13 +72,13 @@ cli_resource_ban(const char *rsc_id, const char *host, const char *move_lifetime - for(; n && rc == pcmk_rc_ok; n = n->next) { - pe_node_t *target = n->data; - -- rc = cli_resource_ban(rsc_id, target->details->uname, move_lifetime, -+ rc = cli_resource_ban(out, rsc_id, target->details->uname, move_lifetime, - NULL, cib_conn, cib_options, promoted_role_only); - } - return rc; - } - -- later_s = parse_cli_lifetime(move_lifetime); -+ later_s = parse_cli_lifetime(out, move_lifetime); - if(move_lifetime && later_s == NULL) { - return EINVAL; - } -@@ -143,10 +143,11 @@ cli_resource_ban(const char *rsc_id, const char *host, const char *move_lifetime - - // \return Standard Pacemaker return code - int --cli_resource_prefer(const char *rsc_id, const char *host, const char *move_lifetime, -- cib_t * cib_conn, int cib_options, gboolean promoted_role_only) -+cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host, -+ const char *move_lifetime, cib_t * cib_conn, int cib_options, -+ gboolean promoted_role_only) - { -- char *later_s = parse_cli_lifetime(move_lifetime); -+ char *later_s = parse_cli_lifetime(out, move_lifetime); - int rc = pcmk_rc_ok; - xmlNode *location = NULL; - xmlNode *fragment = NULL; -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 1dbc2e2..de1c608 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -12,7 +12,7 @@ - - #define cons_string(x) x?x:"NA" - void --cli_resource_print_cts_constraints(pe_working_set_t * data_set) -+cli_resource_print_cts_constraints(pcmk__output_t *out, pe_working_set_t * data_set) - { - xmlNode *xml_obj = NULL; - xmlNode *lifetime = NULL; -@@ -49,7 +49,7 @@ cli_resource_print_cts_constraints(pe_working_set_t * data_set) - } - - void --cli_resource_print_cts(pe_resource_t * rsc) -+cli_resource_print_cts(pcmk__output_t *out, pe_resource_t * rsc) - { - GListPtr lpc = NULL; - const char *host = NULL; -@@ -78,13 +78,13 @@ cli_resource_print_cts(pe_resource_t * rsc) - for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { - pe_resource_t *child = (pe_resource_t *) lpc->data; - -- cli_resource_print_cts(child); -+ cli_resource_print_cts(out, child); - } - } - - - void --cli_resource_print_raw(pe_resource_t * rsc) -+cli_resource_print_raw(pcmk__output_t *out, pe_resource_t * rsc) - { - GListPtr lpc = NULL; - GListPtr children = rsc->children; -@@ -96,13 +96,13 @@ cli_resource_print_raw(pe_resource_t * rsc) - for (lpc = children; lpc != NULL; lpc = lpc->next) { - pe_resource_t *child = (pe_resource_t *) lpc->data; - -- cli_resource_print_raw(child); -+ cli_resource_print_raw(out, child); - } - } - - // \return Standard Pacemaker return code - int --cli_resource_print_list(pe_working_set_t * data_set, bool raw) -+cli_resource_print_list(pcmk__output_t *out, pe_working_set_t * data_set, bool raw) - { - int found = 0; - -@@ -130,8 +130,9 @@ cli_resource_print_list(pe_working_set_t * data_set, bool raw) - - // \return Standard Pacemaker return code - int --cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, -- pe_working_set_t * data_set) -+cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id, -+ const char *host_uname, bool active, -+ pe_working_set_t * data_set) - { - pe_resource_t *rsc = NULL; - int opts = 
pe_print_printf | pe_print_rsconly | pe_print_suppres_nl | pe_print_pending; -@@ -172,7 +173,7 @@ cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool a - } - - void --cli_resource_print_location(pe_resource_t * rsc, const char *prefix) -+cli_resource_print_location(pcmk__output_t *out, pe_resource_t * rsc, const char *prefix) - { - GListPtr lpc = NULL; - GListPtr list = rsc->rsc_location; -@@ -199,7 +200,8 @@ cli_resource_print_location(pe_resource_t * rsc, const char *prefix) - } - - void --cli_resource_print_colocation(pe_resource_t * rsc, bool dependents, bool recursive, int offset) -+cli_resource_print_colocation(pcmk__output_t *out, pe_resource_t * rsc, -+ bool dependents, bool recursive, int offset) - { - char *prefix = NULL; - GListPtr lpc = NULL; -@@ -239,7 +241,7 @@ cli_resource_print_colocation(pe_resource_t * rsc, bool dependents, bool recursi - } - - if (dependents && recursive) { -- cli_resource_print_colocation(peer, dependents, recursive, offset + 1); -+ cli_resource_print_colocation(out, peer, dependents, recursive, offset + 1); - } - - score = score2char(cons->score); -@@ -251,11 +253,11 @@ cli_resource_print_colocation(pe_resource_t * rsc, bool dependents, bool recursi - fprintf(stdout, "%s%-*s (score=%s, id=%s)\n", prefix, 80 - (4 * offset), - peer->id, score, cons->id); - } -- cli_resource_print_location(peer, prefix); -+ cli_resource_print_location(out, peer, prefix); - free(score); - - if (!dependents && recursive) { -- cli_resource_print_colocation(peer, dependents, recursive, offset + 1); -+ cli_resource_print_colocation(out, peer, dependents, recursive, offset + 1); - } - } - free(prefix); -@@ -263,7 +265,8 @@ cli_resource_print_colocation(pe_resource_t * rsc, bool dependents, bool recursi - - // \return Standard Pacemaker return code - int --cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, bool expanded) -+cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, -+ pe_working_set_t *data_set, bool expanded) - { - char *rsc_xml = NULL; - int opts = pe_print_printf | pe_print_pending; -@@ -279,8 +282,8 @@ cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, bool expanded - - // \return Standard Pacemaker return code - int --cli_resource_print_attribute(pe_resource_t *rsc, const char *attr, const char *attr_set_type, -- pe_working_set_t * data_set) -+cli_resource_print_attribute(pcmk__output_t *out, pe_resource_t *rsc, const char *attr, -+ const char *attr_set_type, pe_working_set_t * data_set) - { - int rc = ENXIO; - unsigned int count = 0; -@@ -324,7 +327,8 @@ cli_resource_print_attribute(pe_resource_t *rsc, const char *attr, const char *a - - // \return Standard Pacemaker return code - int --cli_resource_print_property(pe_resource_t *rsc, const char *attr, pe_working_set_t * data_set) -+cli_resource_print_property(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *attr, pe_working_set_t * data_set) - { - const char *value = crm_element_value(rsc->xml, attr); - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index d133219..42d33bd 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -13,7 +13,8 @@ - #include - - static int --do_find_resource(const char *rsc, pe_resource_t * the_rsc, pe_working_set_t * data_set) -+do_find_resource(pcmk__output_t *out, const char *rsc, pe_resource_t * the_rsc, -+ pe_working_set_t * data_set) - { - int found = 0; - GListPtr lpc = NULL; -@@ -43,7 +44,7 @@ do_find_resource(const char *rsc, pe_resource_t * the_rsc, 
pe_working_set_t * da - } - - int --cli_resource_search(pe_resource_t *rsc, const char *requested_name, -+cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, - pe_working_set_t *data_set) - { - int found = 0; -@@ -51,7 +52,7 @@ cli_resource_search(pe_resource_t *rsc, const char *requested_name, - - if (pe_rsc_is_clone(rsc)) { - for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { -- found += do_find_resource(requested_name, iter->data, data_set); -+ found += do_find_resource(out, requested_name, iter->data, data_set); - } - - /* The anonymous clone children's common ID is supplied */ -@@ -62,11 +63,11 @@ cli_resource_search(pe_resource_t *rsc, const char *requested_name, - && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) { - - for (GListPtr iter = parent->children; iter; iter = iter->next) { -- found += do_find_resource(requested_name, iter->data, data_set); -+ found += do_find_resource(out, requested_name, iter->data, data_set); - } - - } else { -- found += do_find_resource(requested_name, rsc, data_set); -+ found += do_find_resource(out, requested_name, rsc, data_set); - } - - return found; -@@ -76,8 +77,9 @@ cli_resource_search(pe_resource_t *rsc, const char *requested_name, - - // \return Standard Pacemaker return code - static int --find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const char *attr_set_type, -- const char *set_name, const char *attr_id, const char *attr_name, char **value) -+find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, -+ const char *rsc, const char *attr_set_type, const char *set_name, -+ const char *attr_id, const char *attr_name, char **value) - { - int offset = 0; - int rc = pcmk_rc_ok; -@@ -156,9 +158,11 @@ find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const cha - - /* PRIVATE. Use the find_matching_attr_resources instead. */ - static void --find_matching_attr_resources_recursive(GList/* */ ** result, pe_resource_t * rsc, const char * rsc_id, -- const char * attr_set, const char * attr_set_type, const char * attr_id, -- const char * attr_name, cib_t * cib, const char * cmd, int depth) -+find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* */ ** result, -+ pe_resource_t * rsc, const char * rsc_id, -+ const char * attr_set, const char * attr_set_type, -+ const char * attr_id, const char * attr_name, -+ cib_t * cib, const char * cmd, int depth) - { - int rc = pcmk_rc_ok; - char *lookup_id = clone_strip(rsc->id); -@@ -166,7 +170,7 @@ find_matching_attr_resources_recursive(GList/* */ ** result, pe - - /* visit the children */ - for(GList *gIter = rsc->children; gIter; gIter = gIter->next) { -- find_matching_attr_resources_recursive(result, (pe_resource_t*)gIter->data, -+ find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data, - rsc_id, attr_set, attr_set_type, - attr_id, attr_name, cib, cmd, depth+1); - /* do it only once for clones */ -@@ -175,7 +179,8 @@ find_matching_attr_resources_recursive(GList/* */ ** result, pe - } - } - -- rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); -+ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, -+ attr_set, attr_id, attr_name, &local_attr_id); - /* Post-order traversal. - * The root is always on the list and it is the last item. 
*/ - if((0 == depth) || (pcmk_rc_ok == rc)) { -@@ -190,9 +195,11 @@ find_matching_attr_resources_recursive(GList/* */ ** result, pe - - /* The result is a linearized pre-ordered tree of resources. */ - static GList/**/ * --find_matching_attr_resources(pe_resource_t * rsc, const char * rsc_id, const char * attr_set, -+find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, -+ const char * rsc_id, const char * attr_set, - const char * attr_set_type, const char * attr_id, -- const char * attr_name, cib_t * cib, const char * cmd, gboolean force) -+ const char * attr_name, cib_t * cib, const char * cmd, -+ gboolean force) - { - int rc = pcmk_rc_ok; - char *lookup_id = NULL; -@@ -207,7 +214,8 @@ find_matching_attr_resources(pe_resource_t * rsc, const char * rsc_id, const cha - if(rsc->parent && pe_clone == rsc->parent->variant) { - int rc = pcmk_rc_ok; - char *local_attr_id = NULL; -- rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); -+ rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type, -+ attr_set, attr_id, attr_name, &local_attr_id); - free(local_attr_id); - - if(rc != pcmk_rc_ok) { -@@ -222,7 +230,8 @@ find_matching_attr_resources(pe_resource_t * rsc, const char * rsc_id, const cha - - if(child->variant == pe_native) { - lookup_id = clone_strip(child->id); /* Could be a cloned group! */ -- rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); -+ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, -+ attr_set, attr_id, attr_name, &local_attr_id); - - if(rc == pcmk_rc_ok) { - rsc = child; -@@ -237,7 +246,7 @@ find_matching_attr_resources(pe_resource_t * rsc, const char * rsc_id, const cha - return g_list_append(result, rsc); - } - /* If the resource is a group ==> children inherit the attribute if defined. 
*/ -- find_matching_attr_resources_recursive(&result, rsc, rsc_id, attr_set, -+ find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set, - attr_set_type, attr_id, attr_name, - cib, cmd, 0); - return result; -@@ -245,11 +254,12 @@ find_matching_attr_resources(pe_resource_t * rsc, const char * rsc_id, const cha - - // \return Standard Pacemaker return code - int --cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, -- const char *attr_set, const char *attr_set_type, -- const char *attr_id, const char *attr_name, -- const char *attr_value, gboolean recursive, cib_t *cib, -- int cib_options, pe_working_set_t *data_set, gboolean force) -+cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *requested_name, const char *attr_set, -+ const char *attr_set_type, const char *attr_id, -+ const char *attr_name, const char *attr_value, -+ gboolean recursive, cib_t *cib, int cib_options, -+ pe_working_set_t *data_set, gboolean force) - { - int rc = pcmk_rc_ok; - static bool need_init = TRUE; -@@ -263,13 +273,13 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, - if(attr_id == NULL - && force == FALSE - && find_resource_attr( -- cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == EINVAL) { -+ out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == EINVAL) { - printf("\n"); - } - - if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { - if (force == FALSE) { -- rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, -+ rc = find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id, - XML_TAG_META_SETS, attr_set, attr_id, - attr_name, &local_attr_id); - if (rc == pcmk_rc_ok && BE_QUIET == FALSE) { -@@ -286,7 +296,7 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, - resources = g_list_append(resources, rsc); - - } else { -- resources = find_matching_attr_resources(rsc, requested_name, attr_set, attr_set_type, -+ resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, - attr_id, attr_name, cib, "update", force); - } - -@@ -306,8 +316,8 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, - attr_id = common_attr_id; - - lookup_id = clone_strip(rsc->id); /* Could be a cloned group! 
*/ -- rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, -- &local_attr_id); -+ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, -+ attr_set, attr_id, attr_name, &local_attr_id); - - if (rc == pcmk_rc_ok) { - crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id); -@@ -387,7 +397,7 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, - if (cons->score > 0 && !pcmk_is_set(peer->flags, pe_rsc_allocating)) { - /* Don't get into colocation loops */ - crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id); -- cli_resource_update_attribute(peer, peer->id, NULL, attr_set_type, -+ cli_resource_update_attribute(out, peer, peer->id, NULL, attr_set_type, - NULL, attr_name, attr_value, recursive, - cib, cib_options, data_set, force); - } -@@ -400,10 +410,11 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, - - // \return Standard Pacemaker return code - int --cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, -- const char *attr_set, const char *attr_set_type, -- const char *attr_id, const char *attr_name, cib_t *cib, -- int cib_options, pe_working_set_t *data_set, gboolean force) -+cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *requested_name, const char *attr_set, -+ const char *attr_set_type, const char *attr_id, -+ const char *attr_name, cib_t *cib, int cib_options, -+ pe_working_set_t *data_set, gboolean force) - { - int rc = pcmk_rc_ok; - GList/**/ *resources = NULL; -@@ -411,12 +422,12 @@ cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, - if(attr_id == NULL - && force == FALSE - && find_resource_attr( -- cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == EINVAL) { -+ out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == EINVAL) { - printf("\n"); - } - - if(pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { -- resources = find_matching_attr_resources(rsc, requested_name, attr_set, attr_set_type, -+ resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, - attr_id, attr_name, cib, "delete", force); - } else { - resources = g_list_append(resources, rsc); -@@ -430,8 +441,8 @@ cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, - rsc = (pe_resource_t*)gIter->data; - - lookup_id = clone_strip(rsc->id); -- rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, -- &local_attr_id); -+ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, -+ attr_set, attr_id, attr_name, &local_attr_id); - - if (rc == ENXIO) { - free(lookup_id); -@@ -471,9 +482,8 @@ cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, - - // \return Standard Pacemaker return code - static int --send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, -- const char *host_uname, const char *rsc_id, -- pe_working_set_t *data_set) -+send_lrm_rsc_op(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, bool do_fail_resource, -+ const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) - { - const char *router_node = host_uname; - const char *rsc_api_id = NULL; -@@ -568,8 +578,9 @@ rsc_fail_name(pe_resource_t *rsc) - - // \return Standard Pacemaker return code - static int --clear_rsc_history(pcmk_ipc_api_t *controld_api, const char 
*host_uname, -- const char *rsc_id, pe_working_set_t *data_set) -+clear_rsc_history(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, -+ const char *host_uname, const char *rsc_id, -+ pe_working_set_t *data_set) - { - int rc = pcmk_rc_ok; - -@@ -578,7 +589,7 @@ clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, - * single operation, we might wind up with a wrong idea of the current - * resource state, and we might not re-probe the resource. - */ -- rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, data_set); -+ rc = send_lrm_rsc_op(out, controld_api, false, host_uname, rsc_id, data_set); - if (rc != pcmk_rc_ok) { - return rc; - } -@@ -594,8 +605,8 @@ clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, - - // \return Standard Pacemaker return code - static int --clear_rsc_failures(pcmk_ipc_api_t *controld_api, const char *node_name, -- const char *rsc_id, const char *operation, -+clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, -+ const char *node_name, const char *rsc_id, const char *operation, - const char *interval_spec, pe_working_set_t *data_set) - { - int rc = pcmk_rc_ok; -@@ -667,7 +678,7 @@ clear_rsc_failures(pcmk_ipc_api_t *controld_api, const char *node_name, - g_hash_table_iter_init(&iter, rscs); - while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { - crm_debug("Erasing failures of %s on %s", failed_id, node_name); -- rc = clear_rsc_history(controld_api, node_name, failed_id, data_set); -+ rc = clear_rsc_history(out, controld_api, node_name, failed_id, data_set); - if (rc != pcmk_rc_ok) { - return rc; - } -@@ -697,8 +708,8 @@ clear_rsc_fail_attrs(pe_resource_t *rsc, const char *operation, - - // \return Standard Pacemaker return code - int --cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, -- pe_resource_t *rsc, const char *operation, -+cli_resource_delete(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, -+ const char *host_uname, pe_resource_t *rsc, const char *operation, - const char *interval_spec, bool just_failures, - pe_working_set_t *data_set, gboolean force) - { -@@ -714,7 +725,7 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, - for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { - pe_resource_t *child = (pe_resource_t *) lpc->data; - -- rc = cli_resource_delete(controld_api, host_uname, child, operation, -+ rc = cli_resource_delete(out, controld_api, host_uname, child, operation, - interval_spec, just_failures, data_set, - force); - if (rc != pcmk_rc_ok) { -@@ -749,7 +760,7 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, - node = (pe_node_t *) lpc->data; - - if (node->details->online) { -- rc = cli_resource_delete(controld_api, node->details->uname, -+ rc = cli_resource_delete(out, controld_api, node->details->uname, - rsc, operation, interval_spec, - just_failures, data_set, force); - } -@@ -791,10 +802,10 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, - } - - if (just_failures) { -- rc = clear_rsc_failures(controld_api, host_uname, rsc->id, operation, -+ rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation, - interval_spec, data_set); - } else { -- rc = clear_rsc_history(controld_api, host_uname, rsc->id, data_set); -+ rc = clear_rsc_history(out, controld_api, host_uname, rsc->id, data_set); - } - if (rc != pcmk_rc_ok) { - printf("Cleaned %s failures on %s, but unable to clean history: %s\n", -@@ -807,9 +818,9 @@ 
cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, - - // \return Standard Pacemaker return code - int --cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, -- const char *operation, const char *interval_spec, -- pe_working_set_t *data_set) -+cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, -+ const char *node_name, const char *operation, -+ const char *interval_spec, pe_working_set_t *data_set) - { - int rc = pcmk_rc_ok; - int attr_options = pcmk__node_attr_none; -@@ -842,7 +853,7 @@ cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, - } - - if (node_name) { -- rc = clear_rsc_failures(controld_api, node_name, NULL, -+ rc = clear_rsc_failures(out, controld_api, node_name, NULL, - operation, interval_spec, data_set); - if (rc != pcmk_rc_ok) { - printf("Cleaned all resource failures on %s, but unable to clean history: %s\n", -@@ -853,7 +864,7 @@ cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, - for (GList *iter = data_set->nodes; iter; iter = iter->next) { - pe_node_t *node = (pe_node_t *) iter->data; - -- rc = clear_rsc_failures(controld_api, node->details->uname, NULL, -+ rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL, - operation, interval_spec, data_set); - if (rc != pcmk_rc_ok) { - printf("Cleaned all resource failures on all nodes, but unable to clean history: %s\n", -@@ -868,17 +879,17 @@ cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, - } - - void --cli_resource_check(cib_t * cib_conn, pe_resource_t *rsc) -+cli_resource_check(pcmk__output_t *out, cib_t * cib_conn, pe_resource_t *rsc) - { - bool printed = false; - char *role_s = NULL; - char *managed = NULL; - pe_resource_t *parent = uber_parent(rsc); - -- find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, -+ find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, - NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed); - -- find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, -+ find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, - NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s); - - if(role_s) { -@@ -920,11 +931,12 @@ cli_resource_check(cib_t * cib_conn, pe_resource_t *rsc) - - // \return Standard Pacemaker return code - int --cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname, -- const char *rsc_id, pe_working_set_t *data_set) -+cli_resource_fail(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, -+ const char *host_uname, const char *rsc_id, -+ pe_working_set_t *data_set) - { - crm_notice("Failing %s on %s", rsc_id, host_uname); -- return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, data_set); -+ return send_lrm_rsc_op(out, controld_api, true, host_uname, rsc_id, data_set); - } - - static GHashTable * -@@ -1055,7 +1067,7 @@ static void dump_list(GList *items, const char *tag) - } - } - --static void display_list(GList *items, const char *tag) -+static void display_list(pcmk__output_t *out, GList *items, const char *tag) - { - GList *item = NULL; - -@@ -1103,7 +1115,8 @@ update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml) - * data_set->input and data_set->now. 
- */ - static int --update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib) -+update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set, -+ cib_t *cib) - { - xmlNode *cib_xml_copy = NULL; - int rc = pcmk_rc_ok; -@@ -1127,7 +1140,8 @@ update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib) - - // \return Standard Pacemaker return code - static int --update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) -+update_dataset(pcmk__output_t *out, cib_t *cib, pe_working_set_t * data_set, -+ bool simulate) - { - char *pid = NULL; - char *shadow_file = NULL; -@@ -1135,7 +1149,7 @@ update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) - int rc = pcmk_rc_ok; - - pe_reset_working_set(data_set); -- rc = update_working_set_from_cib(data_set, cib); -+ rc = update_working_set_from_cib(out, data_set, cib); - if (rc != pcmk_rc_ok) { - return rc; - } -@@ -1168,7 +1182,7 @@ update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) - - pcmk__schedule_actions(data_set, data_set->input, NULL); - run_simulation(data_set, shadow_cib, NULL, TRUE); -- rc = update_dataset(shadow_cib, data_set, FALSE); -+ rc = update_dataset(out, shadow_cib, data_set, FALSE); - - } else { - cluster_status(data_set); -@@ -1260,9 +1274,9 @@ max_delay_in(pe_working_set_t * data_set, GList *resources) - * \return Standard Pacemaker return code (exits on certain failures) - */ - int --cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_lifetime, -- int timeout_ms, cib_t *cib, int cib_options, -- gboolean promoted_role_only, gboolean force) -+cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, -+ const char *move_lifetime, int timeout_ms, cib_t *cib, -+ int cib_options, gboolean promoted_role_only, gboolean force) - { - int rc = pcmk_rc_ok; - int lpc = 0; -@@ -1322,7 +1336,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - goto done; - } - pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); -- rc = update_dataset(cib, data_set, FALSE); -+ rc = update_dataset(out, cib, data_set, FALSE); - if(rc != pcmk_rc_ok) { - fprintf(stdout, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc); - goto done; -@@ -1336,7 +1350,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - if (stop_via_ban) { - /* Stop the clone or bundle instance by banning it from the host */ - BE_QUIET = TRUE; -- rc = cli_resource_ban(rsc_id, host, move_lifetime, NULL, cib, -+ rc = cli_resource_ban(out, rsc_id, host, move_lifetime, NULL, cib, - cib_options, promoted_role_only); - - } else { -@@ -1346,11 +1360,11 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - */ - char *lookup_id = clone_strip(rsc->id); - -- find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL, -+ find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL, - NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role); - free(lookup_id); -- rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, -- XML_RSC_ATTR_TARGET_ROLE, -+ rc = cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, -+ NULL, XML_RSC_ATTR_TARGET_ROLE, - RSC_STOPPED, FALSE, cib, cib_options, - data_set, force); - } -@@ -1365,7 +1379,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - goto done; - } - -- rc = update_dataset(cib, data_set, TRUE); -+ rc = 
update_dataset(out, cib, data_set, TRUE); - if(rc != pcmk_rc_ok) { - fprintf(stderr, "Could not determine which resources would be stopped\n"); - goto failure; -@@ -1376,7 +1390,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - - list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); - fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta)); -- display_list(list_delta, " * "); -+ display_list(out, list_delta, " * "); - - step_timeout_s = timeout / sleep_interval; - while (list_delta != NULL) { -@@ -1392,7 +1406,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - timeout -= sleep_interval; - crm_trace("%ds remaining", timeout); - } -- rc = update_dataset(cib, data_set, FALSE); -+ rc = update_dataset(out, cib, data_set, FALSE); - if(rc != pcmk_rc_ok) { - fprintf(stderr, "Could not determine which resources were stopped\n"); - goto failure; -@@ -1412,7 +1426,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - if(before == g_list_length(list_delta)) { - /* aborted during stop phase, print the contents of list_delta */ - fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta)); -- display_list(list_delta, " * "); -+ display_list(out, list_delta, " * "); - rc = ETIME; - goto failure; - } -@@ -1423,15 +1437,15 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - rc = cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force); - - } else if (orig_target_role) { -- rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, -+ rc = cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, - NULL, XML_RSC_ATTR_TARGET_ROLE, - orig_target_role, FALSE, cib, - cib_options, data_set, force); - free(orig_target_role); - orig_target_role = NULL; - } else { -- rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, -- XML_RSC_ATTR_TARGET_ROLE, cib, -+ rc = cli_resource_delete_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, -+ NULL, XML_RSC_ATTR_TARGET_ROLE, cib, - cib_options, data_set, force); - } - -@@ -1446,7 +1460,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - target_active = restart_target_active; - list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); - fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta)); -- display_list(list_delta, " * "); -+ display_list(out, list_delta, " * "); - - step_timeout_s = timeout / sleep_interval; - while (waiting_for_starts(list_delta, rsc, host)) { -@@ -1464,7 +1478,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - crm_trace("%ds remaining", timeout); - } - -- rc = update_dataset(cib, data_set, FALSE); -+ rc = update_dataset(out, cib, data_set, FALSE); - if(rc != pcmk_rc_ok) { - fprintf(stderr, "Could not determine which resources were started\n"); - goto failure; -@@ -1487,7 +1501,7 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - if(before == g_list_length(list_delta)) { - /* aborted during start phase, print the contents of list_delta */ - fprintf(stdout, "Could not complete restart of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta)); -- display_list(list_delta, " * "); -+ display_list(out, list_delta, " * "); - rc = ETIME; - goto failure; - } 
-@@ -1501,12 +1515,12 @@ cli_resource_restart(pe_resource_t *rsc, const char *host, const char *move_life - if (stop_via_ban) { - cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force); - } else if (orig_target_role) { -- cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, -+ cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, - XML_RSC_ATTR_TARGET_ROLE, orig_target_role, - FALSE, cib, cib_options, data_set, force); - free(orig_target_role); - } else { -- cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, -+ cli_resource_delete_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, - XML_RSC_ATTR_TARGET_ROLE, cib, cib_options, - data_set, force); - } -@@ -1571,7 +1585,7 @@ actions_are_pending(GListPtr actions) - * \return void - */ - static void --print_pending_actions(GListPtr actions) -+print_pending_actions(pcmk__output_t *out, GListPtr actions) - { - GListPtr action; - -@@ -1610,7 +1624,7 @@ print_pending_actions(GListPtr actions) - * \return Standard Pacemaker return code - */ - int --wait_till_stable(int timeout_ms, cib_t * cib) -+wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) - { - pe_working_set_t *data_set = NULL; - int rc = pcmk_rc_ok; -@@ -1632,7 +1646,7 @@ wait_till_stable(int timeout_ms, cib_t * cib) - if (time_diff > 0) { - crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff); - } else { -- print_pending_actions(data_set->actions); -+ print_pending_actions(out, data_set->actions); - pe_free_working_set(data_set); - return ETIME; - } -@@ -1642,7 +1656,7 @@ wait_till_stable(int timeout_ms, cib_t * cib) - - /* Get latest transition graph */ - pe_reset_working_set(data_set); -- rc = update_working_set_from_cib(data_set, cib); -+ rc = update_working_set_from_cib(out, data_set, cib); - if (rc != pcmk_rc_ok) { - pe_free_working_set(data_set); - return rc; -@@ -1675,11 +1689,11 @@ wait_till_stable(int timeout_ms, cib_t * cib) - } - - crm_exit_t --cli_resource_execute_from_params(const char *rsc_name, const char *rsc_class, -- const char *rsc_prov, const char *rsc_type, -- const char *action, GHashTable *params, -- GHashTable *override_hash, int timeout_ms, -- int resource_verbose, gboolean force) -+cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, -+ const char *rsc_class, const char *rsc_prov, -+ const char *rsc_type, const char *action, -+ GHashTable *params, GHashTable *override_hash, -+ int timeout_ms, int resource_verbose, gboolean force) - { - GHashTable *params_copy = NULL; - crm_exit_t exit_code = CRM_EX_OK; -@@ -1815,9 +1829,10 @@ done: - } - - crm_exit_t --cli_resource_execute(pe_resource_t *rsc, const char *requested_name, -- const char *rsc_action, GHashTable *override_hash, -- int timeout_ms, cib_t * cib, pe_working_set_t *data_set, -+cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *requested_name, const char *rsc_action, -+ GHashTable *override_hash, int timeout_ms, -+ cib_t * cib, pe_working_set_t *data_set, - int resource_verbose, gboolean force) - { - crm_exit_t exit_code = CRM_EX_OK; -@@ -1842,7 +1857,7 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, - action = rsc_action+6; - - if(pe_rsc_is_clone(rsc)) { -- int rc = cli_resource_search(rsc, requested_name, data_set); -+ int rc = cli_resource_search(out, rsc, requested_name, data_set); - if(rc > 0 && force == FALSE) { - CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active", - 
action, rsc->id); -@@ -1879,7 +1894,7 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, - - rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id; - -- exit_code = cli_resource_execute_from_params(rid, rclass, rprov, rtype, action, -+ exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, action, - params, override_hash, timeout_ms, - resource_verbose, force); - return exit_code; -@@ -1887,10 +1902,10 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, - - // \return Standard Pacemaker return code - int --cli_resource_move(pe_resource_t *rsc, const char *rsc_id, const char *host_name, -- const char *move_lifetime, cib_t *cib, int cib_options, -- pe_working_set_t *data_set, gboolean promoted_role_only, -- gboolean force) -+cli_resource_move(pcmk__output_t *out, pe_resource_t *rsc, const char *rsc_id, -+ const char *host_name, const char *move_lifetime, cib_t *cib, -+ int cib_options, pe_working_set_t *data_set, -+ gboolean promoted_role_only, gboolean force) - { - int rc = pcmk_rc_ok; - unsigned int count = 0; -@@ -1966,7 +1981,7 @@ cli_resource_move(pe_resource_t *rsc, const char *rsc_id, const char *host_name, - cib_options, TRUE, force); - - /* Record an explicit preference for 'dest' */ -- rc = cli_resource_prefer(rsc_id, dest->details->uname, move_lifetime, -+ rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime, - cib, cib_options, promoted_role_only); - - crm_trace("%s%s now prefers node %s%s", -@@ -1978,7 +1993,7 @@ cli_resource_move(pe_resource_t *rsc, const char *rsc_id, const char *host_name, - if(force && (cur_is_dest == FALSE)) { - /* Ban the original location if possible */ - if(current) { -- (void)cli_resource_ban(rsc_id, current->details->uname, move_lifetime, -+ (void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime, - NULL, cib, cib_options, promoted_role_only); - - } else if(count > 1) { -@@ -1999,7 +2014,8 @@ cli_resource_move(pe_resource_t *rsc, const char *rsc_id, const char *host_name, - } - - static void --cli_resource_why_without_rsc_and_host(cib_t *cib_conn,GListPtr resources) -+cli_resource_why_without_rsc_and_host(pcmk__output_t *out, cib_t *cib_conn, -+ GListPtr resources) - { - GListPtr lpc = NULL; - GListPtr hosts = NULL; -@@ -2014,7 +2030,7 @@ cli_resource_why_without_rsc_and_host(cib_t *cib_conn,GListPtr resources) - printf("Resource %s is running\n", rsc->id); - } - -- cli_resource_check(cib_conn, rsc); -+ cli_resource_check(out, cib_conn, rsc); - g_list_free(hosts); - hosts = NULL; - } -@@ -2022,19 +2038,21 @@ cli_resource_why_without_rsc_and_host(cib_t *cib_conn,GListPtr resources) - } - - static void --cli_resource_why_with_rsc_and_host(cib_t *cib_conn, GListPtr resources, -- pe_resource_t *rsc, const char *host_uname) -+cli_resource_why_with_rsc_and_host(pcmk__output_t *out, cib_t *cib_conn, -+ GListPtr resources, pe_resource_t *rsc, -+ const char *host_uname) - { - if (resource_is_running_on(rsc, host_uname)) { - printf("Resource %s is running on host %s\n",rsc->id,host_uname); - } else { - printf("Resource %s is not running on host %s\n", rsc->id, host_uname); - } -- cli_resource_check(cib_conn, rsc); -+ cli_resource_check(out, cib_conn, rsc); - } - - static void --cli_resource_why_without_rsc_with_host(cib_t *cib_conn,GListPtr resources,pe_node_t *node) -+cli_resource_why_without_rsc_with_host(pcmk__output_t *out, cib_t *cib_conn, -+ GListPtr resources, pe_node_t *node) - { - const char* host_uname = node->details->uname; - GListPtr 
allResources = node->details->allocated_rsc; -@@ -2045,14 +2063,14 @@ cli_resource_why_without_rsc_with_host(cib_t *cib_conn,GListPtr resources,pe_nod - for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; - printf("Resource %s is running on host %s\n",rsc->id,host_uname); -- cli_resource_check(cib_conn,rsc); -+ cli_resource_check(out, cib_conn, rsc); - } - - for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; - printf("Resource %s is assigned to host %s but not running\n", - rsc->id, host_uname); -- cli_resource_check(cib_conn,rsc); -+ cli_resource_check(out, cib_conn, rsc); - } - - g_list_free(allResources); -@@ -2061,33 +2079,33 @@ cli_resource_why_without_rsc_with_host(cib_t *cib_conn,GListPtr resources,pe_nod - } - - static void --cli_resource_why_with_rsc_without_host(cib_t *cib_conn, GListPtr resources, -- pe_resource_t *rsc) -+cli_resource_why_with_rsc_without_host(pcmk__output_t *out, cib_t *cib_conn, -+ GListPtr resources, pe_resource_t *rsc) - { - GListPtr hosts = NULL; - - rsc->fns->location(rsc, &hosts, TRUE); - printf("Resource %s is %srunning\n", rsc->id, (hosts? "" : "not ")); -- cli_resource_check(cib_conn, rsc); -+ cli_resource_check(out, cib_conn, rsc); - g_list_free(hosts); - } - --void cli_resource_why(cib_t *cib_conn, GListPtr resources, pe_resource_t *rsc, -- pe_node_t *node) -+void cli_resource_why(pcmk__output_t *out, cib_t *cib_conn, GListPtr resources, -+ pe_resource_t *rsc, pe_node_t *node) - { - const char *host_uname = (node == NULL)? NULL : node->details->uname; - - if ((rsc == NULL) && (host_uname == NULL)) { -- cli_resource_why_without_rsc_and_host(cib_conn, resources); -+ cli_resource_why_without_rsc_and_host(out, cib_conn, resources); - - } else if ((rsc != NULL) && (host_uname != NULL)) { -- cli_resource_why_with_rsc_and_host(cib_conn, resources, rsc, -+ cli_resource_why_with_rsc_and_host(out, cib_conn, resources, rsc, - host_uname); - - } else if ((rsc == NULL) && (host_uname != NULL)) { -- cli_resource_why_without_rsc_with_host(cib_conn, resources, node); -+ cli_resource_why_without_rsc_with_host(out, cib_conn, resources, node); - - } else if ((rsc != NULL) && (host_uname == NULL)) { -- cli_resource_why_with_rsc_without_host(cib_conn, resources, rsc); -+ cli_resource_why_with_rsc_without_host(out, cib_conn, resources, rsc); - } - } --- -1.8.3.1 - - -From 1194f91a9c21877a0aac8d7fe579307a1b024971 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 26 Aug 2020 16:37:34 -0400 -Subject: [PATCH 03/19] Refactor: tools: Use is_quiet in crm_resource. - -This gets rid of the BE_QUIET global. 
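The commit message above describes replacing the BE_QUIET global with a per-output-object query. A minimal standalone sketch of that pattern follows; it is not part of the patch itself, and it uses hypothetical demo_* names as a stand-in for the real pcmk__output_t API so it compiles without Pacemaker headers:

/*
 * Illustrative sketch only -- NOT part of the patch above.
 * It mimics the pcmk__output_t "is_quiet" pattern with a
 * hypothetical stand-in struct so it builds on its own.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct demo_output_s {
    bool quiet;                                   /* set once from CLI args */
    bool (*is_quiet)(struct demo_output_s *out);  /* queried instead of a global */
} demo_output_t;

static bool
demo_is_quiet(demo_output_t *out)
{
    return out->quiet;
}

/* Callers ask the output object, rather than testing a BE_QUIET global. */
static void
report_node(demo_output_t *out, const char *uname)
{
    if (!out->is_quiet(out)) {
        printf("resource is running on: %s\n", uname);
    } else {
        printf("%s\n", uname);
    }
}

int
main(void)
{
    demo_output_t out = { .quiet = false, .is_quiet = demo_is_quiet };

    report_node(&out, "node1");   /* verbose form */
    out.quiet = true;
    report_node(&out, "node1");   /* quiet form, as with --quiet */
    return 0;
}

Keeping the flag on the output object lets each tool (or each output target) carry its own quiet state, which is why the patch threads pcmk__output_t *out through the call chains instead of consulting a global.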
---- - tools/crm_resource.c | 15 +++++++-------- - tools/crm_resource.h | 2 -- - tools/crm_resource_ban.c | 2 +- - tools/crm_resource_runtime.c | 18 +++++++++--------- - 4 files changed, 17 insertions(+), 20 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 7a661a4..e663f55 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -152,7 +152,6 @@ gboolean restart_cb(const gchar *option_name, const gchar *optarg, - gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); - gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); - --bool BE_QUIET = FALSE; - static crm_exit_t exit_code = CRM_EX_OK; - static pcmk__output_t *out = NULL; - -@@ -639,7 +638,7 @@ attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, G - gboolean - class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { - if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) { -- if (BE_QUIET == FALSE) { -+ if (!out->is_quiet(out)) { - g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, - "Standard %s does not support parameters\n", optarg); - } -@@ -991,7 +990,7 @@ cleanup(pcmk__output_t *out, pe_resource_t *rsc) - rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, options.operation, - options.interval_spec, TRUE, data_set, options.force); - -- if ((rc == pcmk_rc_ok) && !BE_QUIET) { -+ if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { - // Show any reasons why resource might stay stopped - cli_resource_check(out, cib_conn, rsc); - } -@@ -1011,7 +1010,7 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) - pe_node_t *dest = NULL; - int rc = pcmk_rc_ok; - -- if (BE_QUIET == FALSE) { -+ if (!out->is_quiet(out)) { - before = build_constraint_list(data_set->input); - } - -@@ -1024,7 +1023,7 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) - dest = pe_find_node(data_set->nodes, options.host_uname); - if (dest == NULL) { - rc = pcmk_rc_node_unknown; -- if (BE_QUIET == FALSE) { -+ if (!out->is_quiet(out)) { - g_list_free(before); - } - return rc; -@@ -1037,7 +1036,7 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) - cib_conn, options.cib_options, TRUE, options.force); - } - -- if (BE_QUIET == FALSE) { -+ if (!out->is_quiet(out)) { - rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call); - rc = pcmk_legacy2rc(rc); - -@@ -1321,7 +1320,7 @@ refresh_resource(pcmk__output_t *out, pe_resource_t *rsc) - rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, NULL, - 0, FALSE, data_set, options.force); - -- if ((rc == pcmk_rc_ok) && !BE_QUIET) { -+ if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { - // Show any reasons why resource might stay stopped - cli_resource_check(out, cib_conn, rsc); - } -@@ -1550,7 +1549,7 @@ main(int argc, char **argv) - } - - options.resource_verbose = args->verbosity; -- BE_QUIET = args->quiet; -+ out->quiet = args->quiet; - - crm_log_args(argc, argv); - -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index bf99f24..0100488 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -22,8 +22,6 @@ - #include - #include - --extern bool BE_QUIET; -- - /* ban */ - int cli_resource_prefer(pcmk__output_t *out, const char *rsc_id, const char *host, - const char *move_lifetime, cib_t * cib_conn, int cib_options, -diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c -index 95e5a17..abed209 100644 ---- 
a/tools/crm_resource_ban.c -+++ b/tools/crm_resource_ban.c -@@ -88,7 +88,7 @@ cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, - location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); - crm_xml_set_id(location, "cli-ban-%s-on-%s", rsc_id, host); - -- if (BE_QUIET == FALSE) { -+ if (!out->is_quiet(out)) { - CMD_ERR("WARNING: Creating rsc_location constraint '%s'" - " with a score of -INFINITY for resource %s" - " on %s.", ID(location), rsc_id, host); -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 42d33bd..3e1f985 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -22,7 +22,7 @@ do_find_resource(pcmk__output_t *out, const char *rsc, pe_resource_t * the_rsc, - for (lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) { - pe_node_t *node = (pe_node_t *) lpc->data; - -- if (BE_QUIET) { -+ if (out->is_quiet(out)) { - fprintf(stdout, "%s\n", node->details->uname); - } else { - const char *state = ""; -@@ -36,7 +36,7 @@ do_find_resource(pcmk__output_t *out, const char *rsc, pe_resource_t * the_rsc, - found++; - } - -- if (BE_QUIET == FALSE && found == 0) { -+ if (!out->is_quiet(out) && found == 0) { - fprintf(stderr, "resource %s is NOT running\n", rsc); - } - -@@ -220,7 +220,7 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, - - if(rc != pcmk_rc_ok) { - rsc = rsc->parent; -- if (BE_QUIET == FALSE) { -+ if (!out->is_quiet(out)) { - printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id); - } - } -@@ -235,7 +235,7 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, - - if(rc == pcmk_rc_ok) { - rsc = child; -- if (BE_QUIET == FALSE) { -+ if (!out->is_quiet(out)) { - printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id); - } - } -@@ -282,7 +282,7 @@ cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, - rc = find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id, - XML_TAG_META_SETS, attr_set, attr_id, - attr_name, &local_attr_id); -- if (rc == pcmk_rc_ok && BE_QUIET == FALSE) { -+ if (rc == pcmk_rc_ok && !out->is_quiet(out)) { - printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", - uber_parent(rsc)->id, attr_name, local_attr_id); - printf(" Delete '%s' first or use the force option to override\n", -@@ -359,7 +359,7 @@ cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, - rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); - rc = pcmk_legacy2rc(rc); - -- if (rc == pcmk_rc_ok && BE_QUIET == FALSE) { -+ if (rc == pcmk_rc_ok && !out->is_quiet(out)) { - printf("Set '%s' option: id=%s%s%s%s%s value=%s\n", lookup_id, local_attr_id, - attr_set ? " set=" : "", attr_set ? attr_set : "", - attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value); -@@ -466,7 +466,7 @@ cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc, - rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); - rc = pcmk_legacy2rc(rc); - -- if (rc == pcmk_rc_ok && BE_QUIET == FALSE) { -+ if (rc == pcmk_rc_ok && !out->is_quiet(out)) { - printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id, - attr_set ? " set=" : "", attr_set ? attr_set : "", - attr_name ? " name=" : "", attr_name ? 
attr_name : ""); -@@ -1349,7 +1349,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - - if (stop_via_ban) { - /* Stop the clone or bundle instance by banning it from the host */ -- BE_QUIET = TRUE; -+ out->quiet = true; - rc = cli_resource_ban(out, rsc_id, host, move_lifetime, NULL, cib, - cib_options, promoted_role_only); - -@@ -1631,7 +1631,7 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) - int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S; - time_t expire_time = time(NULL) + timeout_s; - time_t time_diff; -- bool printed_version_warning = BE_QUIET; // i.e. don't print if quiet -+ bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet - - data_set = pe_new_working_set(); - if (data_set == NULL) { --- -1.8.3.1 - - -From 155a0a8e078061cebbe3354404b58882196b0350 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 21 Sep 2020 15:59:40 -0400 -Subject: [PATCH 04/19] Feature: tools: Add an output message for a list of - resource names. - -This is kind of an unusual output message in that it just dumps a list -of resource names with no other information. It appears to only list -primitive resources, too. This doesn't seem especially useful anywhere -else, so I am just adding it to crm_resource. - -Note that this is one of the basic XML lists, wrapped with and - tags. There's no additional useful information for the items of -this list. ---- - tools/crm_resource.c | 31 ++++++----------------- - tools/crm_resource.h | 3 +++ - tools/crm_resource_print.c | 63 +++++++++++++++++++++++++++++++++------------- - 3 files changed, 56 insertions(+), 41 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index e663f55..d9e8e70 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1176,28 +1176,6 @@ list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_cod - return rc; - } - --static int --list_raw(pcmk__output_t *out) --{ -- int rc = pcmk_rc_ok; -- int found = 0; -- GListPtr lpc = NULL; -- -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *rsc = (pe_resource_t *) lpc->data; -- -- found++; -- cli_resource_print_raw(out, rsc); -- } -- -- if (found == 0) { -- printf("NO resources configured\n"); -- rc = ENXIO; -- } -- -- return rc; --} -- - static void - list_stacks_and_constraints(pcmk__output_t *out, pe_resource_t *rsc, bool recursive) - { -@@ -1634,6 +1612,8 @@ main(int argc, char **argv) - } - } - -+ crm_resource_register_messages(out); -+ - if (args->version) { - out->version(out, false); - goto done; -@@ -1741,7 +1721,12 @@ main(int argc, char **argv) - break; - - case cmd_list_instances: -- rc = list_raw(out); -+ rc = out->message(out, "resource-names-list", data_set->resources); -+ -+ if (rc != pcmk_rc_ok) { -+ rc = ENXIO; -+ } -+ - break; - - case cmd_list_standards: -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 0100488..28a3760 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -105,3 +106,5 @@ int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml); - int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib); - void cli_resource_why(pcmk__output_t *out, cib_t *cib_conn, GListPtr resources, - pe_resource_t *rsc, pe_node_t *node); -+ -+void crm_resource_register_messages(pcmk__output_t *out); -diff --git a/tools/crm_resource_print.c 
b/tools/crm_resource_print.c -index de1c608..e62122f 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -9,6 +9,7 @@ - - #include - #include -+#include - - #define cons_string(x) x?x:"NA" - void -@@ -82,24 +83,6 @@ cli_resource_print_cts(pcmk__output_t *out, pe_resource_t * rsc) - } - } - -- --void --cli_resource_print_raw(pcmk__output_t *out, pe_resource_t * rsc) --{ -- GListPtr lpc = NULL; -- GListPtr children = rsc->children; -- -- if (children == NULL) { -- printf("%s\n", rsc->id); -- } -- -- for (lpc = children; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *child = (pe_resource_t *) lpc->data; -- -- cli_resource_print_raw(out, child); -- } --} -- - // \return Standard Pacemaker return code - int - cli_resource_print_list(pcmk__output_t *out, pe_working_set_t * data_set, bool raw) -@@ -338,3 +321,47 @@ cli_resource_print_property(pcmk__output_t *out, pe_resource_t *rsc, - } - return ENXIO; - } -+ -+static void -+add_resource_name(pcmk__output_t *out, pe_resource_t *rsc) { -+ if (rsc->children == NULL) { -+ out->list_item(out, "resource", "%s", rsc->id); -+ } else { -+ for (GListPtr lpc = rsc->children; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *child = (pe_resource_t *) lpc->data; -+ add_resource_name(out, child); -+ } -+ } -+} -+ -+PCMK__OUTPUT_ARGS("resource-names-list", "GListPtr") -+static int -+resource_names(pcmk__output_t *out, va_list args) { -+ GListPtr resources = va_arg(args, GListPtr); -+ -+ if (resources == NULL) { -+ out->err(out, "NO resources configured\n"); -+ return pcmk_rc_no_output; -+ } -+ -+ out->begin_list(out, NULL, NULL, "Resource Names"); -+ -+ for (GListPtr lpc = resources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *rsc = (pe_resource_t *) lpc->data; -+ add_resource_name(out, rsc); -+ } -+ -+ out->end_list(out); -+ return pcmk_rc_ok; -+} -+ -+static pcmk__message_entry_t fmt_functions[] = { -+ { "resource-names-list", "default", resource_names }, -+ -+ { NULL, NULL, NULL } -+}; -+ -+void -+crm_resource_register_messages(pcmk__output_t *out) { -+ pcmk__register_messages(out, fmt_functions); -+} --- -1.8.3.1 - - -From 1d706932132e1b77a991c7bd5b869c593ef355b7 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 21 Sep 2020 16:08:51 -0400 -Subject: [PATCH 05/19] Feature: tools: Use the existing resource-list message - in crm_resource. - -This replaces cli_resource_print_list with existing formatted output -code, getting rid of one more place where the older style output -functions are still being used. - -Note that this does change the format for text output. The older output -used indentation for displaying the members of a clone and probably -other things. There's not really a good way to do this with the -existing text output code, short of adding more command line options for -controlling what parts of the list formatting are used. - -That seems like a bit much for this one use, so instead just enable the -fancier list formatting if we are listing resources. 
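The commit message above moves resource listing onto a named formatted-output message. A rough standalone sketch of the register-then-dispatch idea behind pcmk__register_messages() and out->message() follows; it is not taken from the patch, and the types and helper names are hypothetical stand-ins so the example builds without Pacemaker headers:

/*
 * Illustrative sketch only -- not part of the patch. It mimics the
 * message-registry dispatch used by pcmk__output_t with simplified,
 * hypothetical stand-in types.
 */
#include <stdio.h>
#include <string.h>

typedef int (*msg_fn_t)(const char *arg);

typedef struct {
    const char *name;   /* message name, e.g. "resource-names-list" */
    msg_fn_t fn;        /* formatter for the current output target */
} msg_entry_t;

static msg_entry_t registry[8];
static size_t n_registered;

static int
resource_names_text(const char *rsc_id)
{
    /* text formatter: one list item per primitive resource */
    printf("  * %s\n", rsc_id);
    return 0;
}

static void
register_message(const char *name, msg_fn_t fn)
{
    registry[n_registered++] = (msg_entry_t){ name, fn };
}

/* Dispatch by name, as out->message(out, "resource-names-list", ...) does. */
static int
message(const char *name, const char *arg)
{
    for (size_t i = 0; i < n_registered; i++) {
        if (strcmp(registry[i].name, name) == 0) {
            return registry[i].fn(arg);
        }
    }
    return -1;  /* no formatter registered for this message */
}

int
main(void)
{
    register_message("resource-names-list", resource_names_text);
    puts("Resource Names:");
    message("resource-names-list", "dummy");
    return 0;
}

In the real code, registering a different formatter table per output type (text, XML) is what lets the same out->message() call produce either the indented text list or the XML list the commit messages describe.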
---- - cts/cli/regression.tools.exp | 3 ++- - tools/crm_resource.c | 20 +++++++++++++++++--- - tools/crm_resource.h | 1 - - tools/crm_resource_print.c | 28 ---------------------------- - 4 files changed, 19 insertions(+), 33 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 35e7a8c..10ec53b 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -926,7 +926,8 @@ Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attrib - =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#= - * Passed: crm_resource - Create a resource attribute - =#=#=#= Begin test: List the configured resources =#=#=#= -- dummy (ocf::pacemaker:Dummy): Stopped -+Full List of Resources: -+ * dummy (ocf::pacemaker:Dummy): Stopped - =#=#=#= Current cib after: List the configured resources =#=#=#= - - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index d9e8e70..2d71e7a 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1610,8 +1610,13 @@ main(int argc, char **argv) - } else { - pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname()); - } -+ } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) { -+ if (options.rsc_cmd == cmd_list_resources) { -+ pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname()); -+ } - } - -+ pe__register_messages(out); - crm_resource_register_messages(out); - - if (args->version) { -@@ -1715,10 +1720,19 @@ main(int argc, char **argv) - } - - switch (options.rsc_cmd) { -- case cmd_list_resources: -- rc = pcmk_rc_ok; -- cli_resource_print_list(out, data_set, FALSE); -+ case cmd_list_resources: { -+ GListPtr all = NULL; -+ all = g_list_prepend(all, strdup("*")); -+ rc = out->message(out, "resource-list", data_set, -+ pe_print_rsconly | pe_print_pending, -+ FALSE, TRUE, FALSE, TRUE, all, all, FALSE); -+ g_list_free_full(all, free); -+ -+ if (rc == pcmk_rc_no_output) { -+ rc = ENXIO; -+ } - break; -+ } - - case cmd_list_instances: - rc = out->message(out, "resource-names-list", data_set->resources); -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 28a3760..6b6dab2 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -46,7 +46,6 @@ void cli_resource_print_colocation(pcmk__output_t *out, pe_resource_t * rsc, - - int cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, pe_working_set_t *data_set, - bool expanded); --int cli_resource_print_list(pcmk__output_t *out, pe_working_set_t * data_set, bool raw); - int cli_resource_print_attribute(pcmk__output_t *out, pe_resource_t *rsc, - const char *attr, const char *attr_set_type, - pe_working_set_t *data_set); -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index e62122f..f7356fb 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -85,34 +85,6 @@ cli_resource_print_cts(pcmk__output_t *out, pe_resource_t * rsc) - - // \return Standard Pacemaker return code - int --cli_resource_print_list(pcmk__output_t *out, pe_working_set_t * data_set, bool raw) --{ -- int found = 0; -- -- GListPtr lpc = NULL; -- int opts = pe_print_printf | pe_print_rsconly | pe_print_pending; -- -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *rsc = (pe_resource_t *) lpc->data; -- -- if (pcmk_is_set(rsc->flags, pe_rsc_orphan) -- && rsc->fns->active(rsc, TRUE) == FALSE) { -- continue; -- } -- rsc->fns->print(rsc, NULL, opts, stdout); -- found++; -- } -- -- if (found == 0) { -- printf("NO 
resources configured\n"); -- return ENXIO; -- } -- -- return pcmk_rc_ok; --} -- --// \return Standard Pacemaker return code --int - cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id, - const char *host_uname, bool active, - pe_working_set_t * data_set) --- -1.8.3.1 - - -From 86603abc8785e853b85251f431fe86ca28bb35df Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 21 Sep 2020 16:11:21 -0400 -Subject: [PATCH 06/19] Feature: liblrmd: Add output messages for agents, - providers, and standards. - -And use these messages in crm_resource. For XML output, standards use -the basic list formatting with and tags. All other -messages use their own custom lists, because those need to include -additional information. ---- - include/crm/lrmd_internal.h | 3 + - lib/lrmd/Makefile.am | 4 +- - lib/lrmd/lrmd_output.c | 145 ++++++++++++++++++++++++++++++++++++++++++++ - tools/crm_resource.c | 78 +++++++++++++++--------- - 4 files changed, 198 insertions(+), 32 deletions(-) - create mode 100644 lib/lrmd/lrmd_output.c - -diff --git a/include/crm/lrmd_internal.h b/include/crm/lrmd_internal.h -index 498a9ba..720e1a3 100644 ---- a/include/crm/lrmd_internal.h -+++ b/include/crm/lrmd_internal.h -@@ -15,6 +15,7 @@ - #include // xmlNode - #include // crm_ipc_t - #include // mainloop_io_t, ipc_client_callbacks -+#include // pcmk__output_t - #include // pcmk__remote_t - #include // lrmd_t, lrmd_event_data_t - -@@ -66,4 +67,6 @@ void remote_proxy_relay_event(remote_proxy_t *proxy, xmlNode *msg); - void remote_proxy_relay_response(remote_proxy_t *proxy, xmlNode *msg, - int msg_id); - -+void lrmd__register_messages(pcmk__output_t *out); -+ - #endif -diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am -index 41ba7fd..09e40b1 100644 ---- a/lib/lrmd/Makefile.am -+++ b/lib/lrmd/Makefile.am -@@ -1,5 +1,5 @@ - # --# Copyright 2012-2018 the Pacemaker project contributors -+# Copyright 2012-2020 the Pacemaker project contributors - # - # The version control history for this file may have further details. - # -@@ -18,4 +18,4 @@ liblrmd_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) - liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/services/libcrmservice.la \ - $(top_builddir)/lib/fencing/libstonithd.la --liblrmd_la_SOURCES = lrmd_client.c proxy_common.c lrmd_alerts.c -+liblrmd_la_SOURCES = lrmd_client.c proxy_common.c lrmd_alerts.c lrmd_output.c -diff --git a/lib/lrmd/lrmd_output.c b/lib/lrmd/lrmd_output.c -new file mode 100644 -index 0000000..7dc0709 ---- /dev/null -+++ b/lib/lrmd/lrmd_output.c -@@ -0,0 +1,145 @@ -+/* -+ * Copyright 2020 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. -+ * -+ * This source code is licensed under the GNU Lesser General Public License -+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
-+ */ -+ -+#include -+#include -+ -+#include -+#include -+ -+static int -+default_list(pcmk__output_t *out, lrmd_list_t *list, const char *title) { -+ lrmd_list_t *iter = NULL; -+ -+ out->begin_list(out, NULL, NULL, "%s", title); -+ -+ for (iter = list; iter != NULL; iter = iter->next) { -+ out->list_item(out, NULL, "%s", iter->val); -+ } -+ -+ out->end_list(out); -+ lrmd_list_freeall(list); -+ return pcmk_rc_ok; -+} -+ -+static int -+xml_list(pcmk__output_t *out, lrmd_list_t *list, const char *ele) { -+ lrmd_list_t *iter = NULL; -+ -+ for (iter = list; iter != NULL; iter = iter->next) { -+ pcmk__output_create_xml_text_node(out, ele, iter->val); -+ } -+ -+ lrmd_list_freeall(list); -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("alternatives-list", "lrmd_list_t *", "const char *") -+static int -+lrmd__alternatives_list_xml(pcmk__output_t *out, va_list args) { -+ lrmd_list_t *list = va_arg(args, lrmd_list_t *); -+ const char *agent_spec = va_arg(args, const char *); -+ -+ xmlNodePtr node = pcmk__output_xml_create_parent(out, "providers"); -+ -+ xmlSetProp(node, (pcmkXmlStr) "for", (pcmkXmlStr) agent_spec); -+ return xml_list(out, list, "provider"); -+} -+ -+PCMK__OUTPUT_ARGS("alternatives-list", "lrmd_list_t *", "const char *") -+static int -+lrmd__alternatives_list(pcmk__output_t *out, va_list args) { -+ lrmd_list_t *list = va_arg(args, lrmd_list_t *); -+ const char *agent_spec G_GNUC_UNUSED = va_arg(args, const char *); -+ -+ return default_list(out, list, "Providers"); -+} -+ -+PCMK__OUTPUT_ARGS("agents-list", "lrmd_list_t *", "const char *", "char *") -+static int -+lrmd__agents_list_xml(pcmk__output_t *out, va_list args) { -+ lrmd_list_t *list = va_arg(args, lrmd_list_t *); -+ const char *agent_spec = va_arg(args, const char *); -+ char *provider = va_arg(args, char *); -+ -+ xmlNodePtr node = pcmk__output_xml_create_parent(out, "agents"); -+ xmlSetProp(node, (pcmkXmlStr) "standard", (pcmkXmlStr) agent_spec); -+ -+ if (!pcmk__str_empty(provider)) { -+ xmlSetProp(node, (pcmkXmlStr) "provider", (pcmkXmlStr) provider); -+ } -+ -+ return xml_list(out, list, "agent"); -+} -+ -+PCMK__OUTPUT_ARGS("agents-list", "lrmd_list_t *", "const char *", "char *") -+static int -+lrmd__agents_list(pcmk__output_t *out, va_list args) { -+ lrmd_list_t *list = va_arg(args, lrmd_list_t *); -+ const char *agent_spec = va_arg(args, const char *); -+ char *provider = va_arg(args, char *); -+ -+ int rc; -+ char *title = crm_strdup_printf("%s agents", pcmk__str_empty(provider) ? 
agent_spec : provider); -+ -+ rc = default_list(out, list, title); -+ free(title); -+ return rc; -+} -+ -+PCMK__OUTPUT_ARGS("providers-list", "lrmd_list_t *", "const char *") -+static int -+lrmd__providers_list_xml(pcmk__output_t *out, va_list args) { -+ lrmd_list_t *list = va_arg(args, lrmd_list_t *); -+ const char *agent_spec = va_arg(args, const char *); -+ -+ xmlNodePtr node = pcmk__output_xml_create_parent(out, "providers"); -+ -+ xmlSetProp(node, (pcmkXmlStr) "standard", (pcmkXmlStr) "ocf"); -+ -+ if (agent_spec != NULL) { -+ xmlSetProp(node, (pcmkXmlStr) "agent", (pcmkXmlStr) agent_spec); -+ } -+ -+ return xml_list(out, list, "provider"); -+} -+ -+PCMK__OUTPUT_ARGS("providers-list", "lrmd_list_t *", "const char *") -+static int -+lrmd__providers_list(pcmk__output_t *out, va_list args) { -+ lrmd_list_t *list = va_arg(args, lrmd_list_t *); -+ const char *agent_spec G_GNUC_UNUSED = va_arg(args, const char *); -+ -+ return default_list(out, list, "Providers"); -+} -+ -+PCMK__OUTPUT_ARGS("standards-list", "lrmd_list_t *") -+static int -+lrmd__standards_list(pcmk__output_t *out, va_list args) { -+ lrmd_list_t *list = va_arg(args, lrmd_list_t *); -+ -+ return default_list(out, list, "Standards"); -+} -+ -+static pcmk__message_entry_t fmt_functions[] = { -+ { "alternatives-list", "default", lrmd__alternatives_list }, -+ { "alternatives-list", "xml", lrmd__alternatives_list_xml }, -+ { "agents-list", "default", lrmd__agents_list }, -+ { "agents-list", "xml", lrmd__agents_list_xml }, -+ { "providers-list", "default", lrmd__providers_list }, -+ { "providers-list", "xml", lrmd__providers_list_xml }, -+ { "standards-list", "default", lrmd__standards_list }, -+ -+ { NULL, NULL, NULL } -+}; -+ -+void -+lrmd__register_messages(pcmk__output_t *out) { -+ pcmk__register_messages(out, fmt_functions); -+} -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 2d71e7a..df9c623 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -8,6 +8,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -1092,25 +1093,24 @@ static int - list_agents(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) - { - int rc = pcmk_rc_ok; -- lrmd_list_t *list = NULL; -- lrmd_list_t *iter = NULL; - char *provider = strchr(agent_spec, ':'); - lrmd_t *lrmd_conn = lrmd_api_new(); -+ lrmd_list_t *list = NULL; - - if (provider) { - *provider++ = 0; - } -+ - rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider); - - if (rc > 0) { -- for (iter = list; iter != NULL; iter = iter->next) { -- printf("%s\n", iter->val); -- } -- lrmd_list_freeall(list); -- rc = pcmk_rc_ok; -+ rc = out->message(out, "agents-list", list, agent_spec, provider); - } else { -- *exit_code = CRM_EX_NOSUCH; - rc = pcmk_rc_error; -+ } -+ -+ if (rc != pcmk_rc_ok) { -+ *exit_code = CRM_EX_NOSUCH; - if (provider == NULL) { - g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, - "No agents found for standard '%s'", agent_spec); -@@ -1130,19 +1130,41 @@ list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_cod - { - int rc; - const char *text = NULL; -- lrmd_list_t *list = NULL; -- lrmd_list_t *iter = NULL; - lrmd_t *lrmd_conn = lrmd_api_new(); -+ lrmd_list_t *list = NULL; - - switch (options.rsc_cmd) { -+ case cmd_list_alternatives: -+ rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); -+ -+ if (rc > 0) { -+ rc = out->message(out, "alternatives-list", list, agent_spec); -+ } else { -+ rc = pcmk_rc_error; -+ } -+ -+ text = "OCF providers"; -+ break; - case 
cmd_list_standards: - rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); -+ -+ if (rc > 0) { -+ rc = out->message(out, "standards-list", list); -+ } else { -+ rc = pcmk_rc_error; -+ } -+ - text = "standards"; - break; - case cmd_list_providers: -- case cmd_list_alternatives: -- rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, -- &list); -+ rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); -+ -+ if (rc > 0) { -+ rc = out->message(out, "providers-list", list, agent_spec); -+ } else { -+ rc = pcmk_rc_error; -+ } -+ - text = "OCF providers"; - break; - default: -@@ -1152,24 +1174,19 @@ list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_cod - return pcmk_rc_error; - } - -- if (rc > 0) { -- for (iter = list; iter != NULL; iter = iter->next) { -- printf("%s\n", iter->val); -- } -- lrmd_list_freeall(list); -- rc = pcmk_rc_ok; -- -- } else if (agent_spec != NULL) { -- *exit_code = CRM_EX_NOSUCH; -- rc = pcmk_rc_error; -- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, -- "No %s found for %s", text, agent_spec); -+ if (rc != pcmk_rc_ok) { -+ if (agent_spec != NULL) { -+ *exit_code = CRM_EX_NOSUCH; -+ rc = pcmk_rc_error; -+ g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, -+ "No %s found for %s", text, agent_spec); - -- } else { -- *exit_code = CRM_EX_NOSUCH; -- rc = pcmk_rc_error; -- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, -- "No %s found", text); -+ } else { -+ *exit_code = CRM_EX_NOSUCH; -+ rc = pcmk_rc_error; -+ g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, -+ "No %s found", text); -+ } - } - - lrmd_api_delete(lrmd_conn); -@@ -1618,6 +1635,7 @@ main(int argc, char **argv) - - pe__register_messages(out); - crm_resource_register_messages(out); -+ lrmd__register_messages(out); - - if (args->version) { - out->version(out, false); --- -1.8.3.1 - - -From 64336b4ff485d6ddcadf6a5b6235084fd7d85101 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 21 Sep 2020 16:44:04 -0400 -Subject: [PATCH 07/19] Feature: tools: Use formatted output for props, attrs, - and metadata. - -For the most part, these can just be built out of the basic formatted -output tools. The text versions exist separately to preserve the old -output style of crm_resource. 
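
The pattern is the same one used throughout this series: one small handler per format, registered under a shared message name and invoked by name from crm_resource. A minimal sketch, mirroring the "property" handlers added below (the "default" entry registered alongside it covers the remaining formats):

    PCMK__OUTPUT_ARGS("property", "pe_resource_t *", "char *")
    static int
    property_text(pcmk__output_t *out, va_list args) {
        pe_resource_t *rsc = va_arg(args, pe_resource_t *);
        char *attr = va_arg(args, char *);
        const char *value = crm_element_value(rsc->xml, attr);

        if (value != NULL) {
            out->info(out, "%s", value);    /* preserves the old one-line output */
        }
        return pcmk_rc_ok;
    }

    static pcmk__message_entry_t fmt_functions[] = {
        { "property", "text", property_text },
        { NULL, NULL, NULL }
    };

    /* registered once after the pcmk__output_t is created ... */
    pcmk__register_messages(out, fmt_functions);

    /* ... and emitted by name where cli_resource_print_property() used to be called */
    rc = out->message(out, "property", rsc, options.prop_name);
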
---- - tools/crm_resource.c | 40 ++++++++++++++++--- - tools/crm_resource.h | 5 --- - tools/crm_resource_print.c | 95 +++++++++++++++++++++++++++------------------- - 3 files changed, 90 insertions(+), 50 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index df9c623..dcb769f 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1378,7 +1378,7 @@ show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code - rc = pcmk_legacy2rc(rc); - - if (metadata) { -- printf("%s\n", metadata); -+ out->output_xml(out, "metadata", metadata); - } else { - *exit_code = crm_errno2exit(rc); - g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, -@@ -1904,17 +1904,47 @@ main(int argc, char **argv) - break; - - case cmd_get_property: -- rc = cli_resource_print_property(out, rsc, options.prop_name, data_set); -+ rc = out->message(out, "property", rsc, options.prop_name); -+ if (rc == pcmk_rc_no_output) { -+ rc = ENXIO; -+ } -+ - break; - - case cmd_set_property: - rc = set_property(); - break; - -- case cmd_get_param: -- rc = cli_resource_print_attribute(out, rsc, options.prop_name, -- options.attr_set_type, data_set); -+ case cmd_get_param: { -+ unsigned int count = 0; -+ GHashTable *params = NULL; -+ pe_node_t *current = pe__find_active_on(rsc, &count, NULL); -+ -+ if (count > 1) { -+ out->err(out, "%s is active on more than one node," -+ " returning the default value for %s", rsc->id, crm_str(options.prop_name)); -+ current = NULL; -+ } -+ -+ params = crm_str_table_new(); -+ -+ if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { -+ get_rsc_attributes(params, rsc, current, data_set); -+ -+ } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { -+ /* No need to redirect to the parent */ -+ get_meta_attributes(params, rsc, current, data_set); -+ -+ } else { -+ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params, -+ NULL, FALSE, data_set); -+ } -+ -+ crm_debug("Looking up %s in %s", options.prop_name, rsc->id); -+ rc = out->message(out, "attribute", rsc, options.prop_name, params); -+ g_hash_table_destroy(params); - break; -+ } - - case cmd_set_param: - if (pcmk__str_empty(options.prop_value)) { -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 6b6dab2..4fc7c71 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -46,11 +46,6 @@ void cli_resource_print_colocation(pcmk__output_t *out, pe_resource_t * rsc, - - int cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, pe_working_set_t *data_set, - bool expanded); --int cli_resource_print_attribute(pcmk__output_t *out, pe_resource_t *rsc, -- const char *attr, const char *attr_set_type, -- pe_working_set_t *data_set); --int cli_resource_print_property(pcmk__output_t *out, pe_resource_t *rsc, const char *attr, -- pe_working_set_t *data_set); - int cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id, - const char *host_uname, bool active, - pe_working_set_t * data_set); -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index f7356fb..093eb75 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -235,63 +235,74 @@ cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, - return pcmk_rc_ok; - } - --// \return Standard Pacemaker return code --int --cli_resource_print_attribute(pcmk__output_t *out, pe_resource_t *rsc, const char *attr, -- const char *attr_set_type, pe_working_set_t * data_set) --{ -- int rc = ENXIO; -- unsigned int count = 
0; -- GHashTable *params = NULL; -- const char *value = NULL; -- pe_node_t *current = pe__find_active_on(rsc, &count, NULL); -- -- if (count > 1) { -- CMD_ERR("%s is active on more than one node," -- " returning the default value for %s", rsc->id, crm_str(attr)); -- current = NULL; -+PCMK__OUTPUT_ARGS("attribute", "pe_resource_t *", "char *", "GHashTable *") -+static int -+attribute_default(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ char *attr = va_arg(args, char *); -+ GHashTable *params = va_arg(args, GHashTable *); -+ -+ const char *value = g_hash_table_lookup(params, attr); -+ -+ if (value != NULL) { -+ out->begin_list(out, NULL, NULL, "Attributes"); -+ out->list_item(out, attr, "%s", value); -+ out->end_list(out); -+ } else { -+ out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id); - } - -- params = crm_str_table_new(); -+ return pcmk_rc_ok; -+} - -- if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { -- get_rsc_attributes(params, rsc, current, data_set); -+PCMK__OUTPUT_ARGS("attribute", "pe_resource_t *", "char *", "GHashTable *") -+static int -+attribute_text(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ char *attr = va_arg(args, char *); -+ GHashTable *params = va_arg(args, GHashTable *); - -- } else if (pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { -- /* No need to redirect to the parent */ -- get_meta_attributes(params, rsc, current, data_set); -+ const char *value = g_hash_table_lookup(params, attr); - -+ if (value != NULL) { -+ out->info(out, "%s", value); - } else { -- pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params, -- NULL, FALSE, data_set); -+ out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id); - } - -- crm_debug("Looking up %s in %s", attr, rsc->id); -- value = g_hash_table_lookup(params, attr); -- if (value != NULL) { -- fprintf(stdout, "%s\n", value); -- rc = pcmk_rc_ok; -+ return pcmk_rc_ok; -+} - -- } else { -- CMD_ERR("Attribute '%s' not found for '%s'", attr, rsc->id); -+PCMK__OUTPUT_ARGS("property", "pe_resource_t *", "char *") -+static int -+property_default(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ char *attr = va_arg(args, char *); -+ -+ const char *value = crm_element_value(rsc->xml, attr); -+ -+ if (value != NULL) { -+ out->begin_list(out, NULL, NULL, "Properties"); -+ out->list_item(out, attr, "%s", value); -+ out->end_list(out); - } - -- g_hash_table_destroy(params); -- return rc; -+ return pcmk_rc_ok; - } - --// \return Standard Pacemaker return code --int --cli_resource_print_property(pcmk__output_t *out, pe_resource_t *rsc, -- const char *attr, pe_working_set_t * data_set) --{ -+PCMK__OUTPUT_ARGS("property", "pe_resource_t *", "char *") -+static int -+property_text(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ char *attr = va_arg(args, char *); -+ - const char *value = crm_element_value(rsc->xml, attr); - - if (value != NULL) { -- fprintf(stdout, "%s\n", value); -- return pcmk_rc_ok; -+ out->info(out, "%s", value); - } -- return ENXIO; -+ -+ return pcmk_rc_ok; - } - - static void -@@ -328,6 +339,10 @@ resource_names(pcmk__output_t *out, va_list args) { - } - - static pcmk__message_entry_t fmt_functions[] = { -+ { "attribute", "default", attribute_default }, -+ { "attribute", "text", attribute_text }, -+ { "property", "default", property_default }, -+ { "property", 
"text", property_text }, - { "resource-names-list", "default", resource_names }, - - { NULL, NULL, NULL } --- -1.8.3.1 - - -From 575c2231baf11f24d953508000780a88ce7ad57c Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 26 Oct 2020 10:09:16 -0400 -Subject: [PATCH 08/19] Feature: scheduler: Add a message for resource config - printing. - -This prints out the XML or raw XML config for a given resource. ---- - cts/cli/regression.tools.exp | 2 +- - include/crm/pengine/internal.h | 1 + - lib/pengine/pe_output.c | 21 +++++++++++++++++++++ - tools/crm_resource_print.c | 7 ++----- - 4 files changed, 25 insertions(+), 6 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 10ec53b..738e800 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -972,7 +972,7 @@ dummy - * Passed: crm_resource - List IDs of instantiated resources - =#=#=#= Begin test: Show XML configuration of resource =#=#=#= - dummy (ocf::pacemaker:Dummy): Stopped --xml: -+Resource XML: - - - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index d658e86..00b6b4c 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -301,6 +301,7 @@ int pe__node_list_text(pcmk__output_t *out, va_list args); - int pe__node_list_xml(pcmk__output_t *out, va_list args); - int pe__op_history_text(pcmk__output_t *out, va_list args); - int pe__op_history_xml(pcmk__output_t *out, va_list args); -+int pe__resource_config(pcmk__output_t *out, va_list args); - int pe__resource_history_text(pcmk__output_t *out, va_list args); - int pe__resource_history_xml(pcmk__output_t *out, va_list args); - int pe__resource_xml(pcmk__output_t *out, va_list args); -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index 9d43e5f..dd3a880 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -1583,6 +1583,26 @@ pe__op_history_xml(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - -+PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "gboolean") -+int pe__resource_config(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ gboolean raw = va_arg(args, gboolean); -+ -+ char *rsc_xml = NULL; -+ -+ if (raw) { -+ rsc_xml = dump_xml_formatted(rsc->orig_xml ? 
rsc->orig_xml : rsc->xml); -+ } else { -+ rsc_xml = dump_xml_formatted(rsc->xml); -+ } -+ -+ out->info(out, "Resource XML:"); -+ out->output_xml(out, "xml", rsc_xml); -+ -+ free(rsc_xml); -+ return pcmk_rc_ok; -+} -+ - PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean") - int - pe__resource_history_text(pcmk__output_t *out, va_list args) { -@@ -1872,6 +1892,7 @@ static pcmk__message_entry_t fmt_functions[] = { - { "primitive", "html", pe__resource_html }, - { "primitive", "text", pe__resource_text }, - { "primitive", "log", pe__resource_text }, -+ { "resource-config", "default", pe__resource_config }, - { "resource-history", "default", pe__resource_history_text }, - { "resource-history", "xml", pe__resource_history_xml }, - { "resource-list", "default", pe__resource_list }, -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 093eb75..99217aa 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -223,15 +223,12 @@ int - cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, - pe_working_set_t *data_set, bool expanded) - { -- char *rsc_xml = NULL; - int opts = pe_print_printf | pe_print_pending; - - rsc->fns->print(rsc, NULL, opts, stdout); - -- rsc_xml = dump_xml_formatted((!expanded && rsc->orig_xml)? -- rsc->orig_xml : rsc->xml); -- fprintf(stdout, "%sxml:\n%s\n", expanded ? "" : "raw ", rsc_xml); -- free(rsc_xml); -+ out->message(out, "resource-config", rsc, !expanded); -+ - return pcmk_rc_ok; - } - --- -1.8.3.1 - - -From 711d9412de519ac897ebe51f79e6bf47275eb5ab Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 27 Aug 2020 10:11:51 -0400 -Subject: [PATCH 09/19] Feature: tools: Use formatted output for resource - output in crm_resource. - -This switches crm_resource away from using the old style resource -printing functions and onto using formatted output. 
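
Concretely, cli_resource_print() now drives two registered messages instead of calling rsc->fns->print() directly; roughly, condensed from the hunk below:

    unsigned int opts = pe_print_pending;
    GListPtr all = g_list_prepend(NULL, strdup("*"));    /* show all instances */

    out->begin_list(out, NULL, NULL, "Resource Config");
    /* status line: dispatches to the primitive/group/clone/bundle message */
    out->message(out, crm_map_element_name(rsc->xml), opts, rsc, all, all);
    /* followed by the resource's (raw or expanded) XML configuration */
    out->message(out, "resource-config", rsc, !expanded);
    out->end_list(out);

    g_list_free_full(all, free);
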
---- - cts/cli/regression.tools.exp | 3 +-- - lib/common/output_xml.c | 1 + - tools/crm_resource_print.c | 9 +++++++-- - tools/crm_resource_runtime.c | 2 ++ - 4 files changed, 11 insertions(+), 4 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 738e800..4abbb75 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -971,7 +971,7 @@ dummy - =#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#= - * Passed: crm_resource - List IDs of instantiated resources - =#=#=#= Begin test: Show XML configuration of resource =#=#=#= -- dummy (ocf::pacemaker:Dummy): Stopped -+dummy (ocf::pacemaker:Dummy): Stopped - Resource XML: - - -@@ -979,7 +979,6 @@ Resource XML: - - - -- - =#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#= - * Passed: crm_resource - Show XML configuration of resource - =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#= -diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c -index 6a6ed6e..bba21e7 100644 ---- a/lib/common/output_xml.c -+++ b/lib/common/output_xml.c -@@ -60,6 +60,7 @@ static subst_t substitutions[] = { - { "Operations", "node_history" }, - { "Negative Location Constraints", "bans" }, - { "Node Attributes", "node_attributes" }, -+ { "Resource Config", "resource_config" }, - - { NULL, NULL } - }; -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 99217aa..447c57d 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -223,12 +223,17 @@ int - cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, - pe_working_set_t *data_set, bool expanded) - { -- int opts = pe_print_printf | pe_print_pending; -+ unsigned int opts = pe_print_pending; -+ GListPtr all = NULL; - -- rsc->fns->print(rsc, NULL, opts, stdout); -+ all = g_list_prepend(all, strdup("*")); - -+ out->begin_list(out, NULL, NULL, "Resource Config"); -+ out->message(out, crm_map_element_name(rsc->xml), opts, rsc, all, all); - out->message(out, "resource-config", rsc, !expanded); -+ out->end_list(out); - -+ g_list_free_full(all, free); - return pcmk_rc_ok; - } - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 3e1f985..7aae8cc 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -142,6 +142,8 @@ find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, - crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); - } - -+ out->spacer(out); -+ - } else if(value) { - const char *tmp = crm_element_value(xml_search, attr); - --- -1.8.3.1 - - -From b89fa86c642d33ddfd9db6015bb3e74f6645623c Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 27 Aug 2020 10:29:00 -0400 -Subject: [PATCH 10/19] Feature: tools: Use formatted output for finding - resources. 
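
The conversion itself is mechanical; the fprintf() calls in do_find_resource() become calls on the output object, as in this condensed view of the hunk below:

    if (out->is_quiet(out)) {
        out->info(out, "%s", node->details->uname);
    } else {
        out->info(out, "resource %s is running on: %s %s",
                  rsc, node->details->uname, state);
    }

    /* ... */

    if (!out->is_quiet(out) && found == 0) {
        out->err(out, "resource %s is NOT running", rsc);
    }
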
- ---- - tools/crm_resource_runtime.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 7aae8cc..dca9553 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -23,21 +23,21 @@ do_find_resource(pcmk__output_t *out, const char *rsc, pe_resource_t * the_rsc, - pe_node_t *node = (pe_node_t *) lpc->data; - - if (out->is_quiet(out)) { -- fprintf(stdout, "%s\n", node->details->uname); -+ out->info(out, "%s", node->details->uname); - } else { - const char *state = ""; - - if (!pe_rsc_is_clone(the_rsc) && the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER) { - state = "Master"; - } -- fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state); -+ out->info(out, "resource %s is running on: %s %s", rsc, node->details->uname, state); - } - - found++; - } - - if (!out->is_quiet(out) && found == 0) { -- fprintf(stderr, "resource %s is NOT running\n", rsc); -+ out->err(out, "resource %s is NOT running", rsc); - } - - return found; --- -1.8.3.1 - - -From ed030eead6b704400fe6362c5b4a320798675995 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 20 Oct 2020 12:59:25 -0400 -Subject: [PATCH 11/19] Feature: tools: Use subprocess_output for crm_resource - execution results. - ---- - tools/crm_resource_runtime.c | 32 ++------------------------------ - 1 file changed, 2 insertions(+), 30 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index dca9553..22b469e 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1772,9 +1772,6 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, - } - - if (services_action_sync(op)) { -- int more, lpc, last; -- char *local_copy = NULL; -- - exit_code = op->rc; - - if (op->status == PCMK_LRM_OP_DONE) { -@@ -1791,33 +1788,8 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, - if (resource_verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei)) - goto done; - -- if (op->stdout_data) { -- local_copy = strdup(op->stdout_data); -- more = strlen(local_copy); -- last = 0; -- -- for (lpc = 0; lpc < more; lpc++) { -- if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) { -- local_copy[lpc] = 0; -- printf(" > stdout: %s\n", local_copy + last); -- last = lpc + 1; -- } -- } -- free(local_copy); -- } -- if (op->stderr_data) { -- local_copy = strdup(op->stderr_data); -- more = strlen(local_copy); -- last = 0; -- -- for (lpc = 0; lpc < more; lpc++) { -- if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) { -- local_copy[lpc] = 0; -- printf(" > stderr: %s\n", local_copy + last); -- last = lpc + 1; -- } -- } -- free(local_copy); -+ if (op->stdout_data || op->stderr_data) { -+ out->subprocess_output(out, op->rc, op->stdout_data, op->stderr_data); - } - } else { - exit_code = op->rc == 0 ? CRM_EX_ERROR : op->rc; --- -1.8.3.1 - - -From fb9792d7b99347b82b51a281ba10b0349ee45d5d Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 27 Aug 2020 11:29:11 -0400 -Subject: [PATCH 12/19] Feature: tools: Use formatted output for crm_resource - info messages. - -Basically, anything that's being printed out for informational purposes -should use out->info. This won't show up in the XML format, but that's -never existed for crm_resource in the first place. - -Also, errors (most things printed to stderr, uses of CMD_ERR, etc.) -should use out->err. 
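
In practice the convention looks like this (examples lifted from the hunks below; the exact messages vary per call site):

    /* informational output: visible in text/log formats, omitted from XML */
    out->info(out, "Waiting for %d %s from the controller",
              count, pcmk__plural_alt(count, "reply", "replies"));

    /* error output: replaces fprintf(stderr, ...) and CMD_ERR(...) */
    out->err(out, "Error: bad reply from controller: %s", crm_exit_str(status));

    /* fatal errors that set the exit code still go through g_set_error() */
    g_set_error(&error, PCMK__RC_ERROR, rc,
                "Error connecting to the controller: %s", pcmk_rc_str(rc));
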
---- - cts/cli/regression.tools.exp | 2 +- - tools/crm_resource.c | 24 ++--- - tools/crm_resource_ban.c | 32 +++---- - tools/crm_resource_runtime.c | 223 +++++++++++++++++++++---------------------- - 4 files changed, 137 insertions(+), 144 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 4abbb75..935dce8 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -3223,10 +3223,10 @@ Migration will take effect until: - =#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#= - * Passed: crm_resource - Try to move a resource previously moved with a lifetime - =#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#= -+Migration will take effect until: - WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. - This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool - This will be the case even if node1 is the last node in the cluster --Migration will take effect until: - =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= - - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index dcb769f..0532095 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -238,7 +238,7 @@ resource_ipc_timeout(gpointer data) - } - - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT, -- "\nAborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S); -+ "Aborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S); - - quit_main_loop(CRM_EX_TIMEOUT); - return FALSE; -@@ -258,18 +258,19 @@ controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, - - case pcmk_ipc_event_reply: - if (status != CRM_EX_OK) { -- fprintf(stderr, "\nError: bad reply from controller: %s\n", -- crm_exit_str(status)); -+ out->err(out, "Error: bad reply from controller: %s", -+ crm_exit_str(status)); - pcmk_disconnect_ipc(api); - quit_main_loop(status); - } else { -- fprintf(stderr, "."); - if ((pcmk_controld_api_replies_expected(api) == 0) - && mainloop && g_main_loop_is_running(mainloop)) { -- fprintf(stderr, " OK\n"); -+ out->info(out, "... got reply (done)"); - crm_debug("Got all the replies we expected"); - pcmk_disconnect_ipc(api); - quit_main_loop(CRM_EX_OK); -+ } else { -+ out->info(out, "... 
got reply"); - } - } - break; -@@ -285,8 +286,8 @@ start_mainloop(pcmk_ipc_api_t *capi) - unsigned int count = pcmk_controld_api_replies_expected(capi); - - if (count > 0) { -- fprintf(stderr, "Waiting for %d %s from the controller", -- count, pcmk__plural_alt(count, "reply", "replies")); -+ out->info(out, "Waiting for %d %s from the controller", -+ count, pcmk__plural_alt(count, "reply", "replies")); - exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects - mainloop = g_main_loop_new(NULL, FALSE); - g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); -@@ -1055,7 +1056,7 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) - remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp); - - for (ele = remaining; ele != NULL; ele = ele->next) { -- printf("Removing constraint: %s\n", (char *) ele->data); -+ out->info(out, "Removing constraint: %s", (char *) ele->data); - } - - g_list_free(before); -@@ -1281,8 +1282,8 @@ refresh(pcmk__output_t *out) - } - - if (controld_api == NULL) { -- printf("Dry run: skipping clean-up of %s due to CIB_file\n", -- options.host_uname? options.host_uname : "all nodes"); -+ out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", -+ options.host_uname? options.host_uname : "all nodes"); - rc = pcmk_rc_ok; - return rc; - } -@@ -1724,7 +1725,8 @@ main(int argc, char **argv) - if (options.require_crmd) { - rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); - if (rc != pcmk_rc_ok) { -- CMD_ERR("Error connecting to the controller: %s", pcmk_rc_str(rc)); -+ g_set_error(&error, PCMK__RC_ERROR, rc, -+ "Error connecting to the controller: %s", pcmk_rc_str(rc)); - goto done; - } - pcmk_register_ipc_callback(controld_api, controller_event_callback, -diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c -index abed209..4e3ab8b 100644 ---- a/tools/crm_resource_ban.c -+++ b/tools/crm_resource_ban.c -@@ -25,18 +25,18 @@ parse_cli_lifetime(pcmk__output_t *out, const char *move_lifetime) - - duration = crm_time_parse_duration(move_lifetime); - if (duration == NULL) { -- CMD_ERR("Invalid duration specified: %s", move_lifetime); -- CMD_ERR("Please refer to" -- " https://en.wikipedia.org/wiki/ISO_8601#Durations" -- " for examples of valid durations"); -+ out->err(out, "Invalid duration specified: %s\n" -+ "Please refer to https://en.wikipedia.org/wiki/ISO_8601#Durations " -+ "for examples of valid durations", move_lifetime); - return NULL; - } - - now = crm_time_new(NULL); - later = crm_time_add(now, duration); - if (later == NULL) { -- CMD_ERR("Unable to add %s to current time", move_lifetime); -- CMD_ERR("Please report to " PACKAGE_BUGREPORT " as possible bug"); -+ out->err(out, "Unable to add %s to current time\n" -+ "Please report to " PACKAGE_BUGREPORT " as possible bug", -+ move_lifetime); - crm_time_free(now); - crm_time_free(duration); - return NULL; -@@ -48,7 +48,7 @@ parse_cli_lifetime(pcmk__output_t *out, const char *move_lifetime) - crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); - crm_time_log(LOG_INFO, "duration", duration, crm_time_log_date | crm_time_log_timeofday); - later_s = crm_time_as_string(later, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); -- printf("Migration will take effect until: %s\n", later_s); -+ out->info(out, "Migration will take effect until: %s", later_s); - - crm_time_free(duration); - crm_time_free(later); -@@ -89,15 +89,15 @@ cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, - 
crm_xml_set_id(location, "cli-ban-%s-on-%s", rsc_id, host); - - if (!out->is_quiet(out)) { -- CMD_ERR("WARNING: Creating rsc_location constraint '%s'" -- " with a score of -INFINITY for resource %s" -- " on %s.", ID(location), rsc_id, host); -- CMD_ERR("\tThis will prevent %s from %s on %s until the constraint " -- "is removed using the clear option or by editing the CIB " -- "with an appropriate tool", -- rsc_id, (promoted_role_only? "being promoted" : "running"), host); -- CMD_ERR("\tThis will be the case even if %s is" -- " the last node in the cluster", host); -+ out->info(out, "WARNING: Creating rsc_location constraint '%s' with a " -+ "score of -INFINITY for resource %s on %s.\n\tThis will " -+ "prevent %s from %s on %s until the constraint is removed " -+ "using the clear option or by editing the CIB with an " -+ "appropriate tool\n\tThis will be the case even if %s " -+ "is the last node in the cluster", -+ ID(location), rsc_id, host, rsc_id, -+ (promoted_role_only? "being promoted" : "running"), -+ host, host); - } - - crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id); -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 22b469e..bd377a3 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -134,12 +134,12 @@ find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, - xmlNode *child = NULL; - - rc = EINVAL; -- printf("Multiple attributes match name=%s\n", attr_name); -+ out->info(out, "Multiple attributes match name=%s", attr_name); - - for (child = pcmk__xml_first_child(xml_search); child != NULL; - child = pcmk__xml_next(child)) { -- printf(" Value: %s \t(id=%s)\n", -- crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); -+ out->info(out, " Value: %s \t(id=%s)", -+ crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); - } - - out->spacer(out); -@@ -223,7 +223,8 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, - if(rc != pcmk_rc_ok) { - rsc = rsc->parent; - if (!out->is_quiet(out)) { -- printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id); -+ out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'", -+ cmd, attr_name, rsc->id, rsc_id); - } - } - return g_list_append(result, rsc); -@@ -238,7 +239,8 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, - if(rc == pcmk_rc_ok) { - rsc = child; - if (!out->is_quiet(out)) { -- printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id); -+ out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'", -+ attr_name, lookup_id, cmd, rsc_id); - } - } - -@@ -272,11 +274,9 @@ cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, - GList/**/ *resources = NULL; - const char *common_attr_id = attr_id; - -- if(attr_id == NULL -- && force == FALSE -- && find_resource_attr( -- out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == EINVAL) { -- printf("\n"); -+ if (attr_id == NULL && force == FALSE) { -+ find_resource_attr (out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, -+ NULL, NULL, attr_name, NULL); - } - - if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { -@@ -285,10 +285,10 @@ cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, - XML_TAG_META_SETS, attr_set, attr_id, - attr_name, &local_attr_id); - if (rc == pcmk_rc_ok && !out->is_quiet(out)) { -- 
printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", -- uber_parent(rsc)->id, attr_name, local_attr_id); -- printf(" Delete '%s' first or use the force option to override\n", -- local_attr_id); -+ out->err(out, "WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)", -+ uber_parent(rsc)->id, attr_name, local_attr_id); -+ out->err(out, " Delete '%s' first or use the force option to override", -+ local_attr_id); - } - free(local_attr_id); - if (rc == pcmk_rc_ok) { -@@ -362,9 +362,9 @@ cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, - rc = pcmk_legacy2rc(rc); - - if (rc == pcmk_rc_ok && !out->is_quiet(out)) { -- printf("Set '%s' option: id=%s%s%s%s%s value=%s\n", lookup_id, local_attr_id, -- attr_set ? " set=" : "", attr_set ? attr_set : "", -- attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value); -+ out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s", lookup_id, local_attr_id, -+ attr_set ? " set=" : "", attr_set ? attr_set : "", -+ attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value); - } - - free_xml(xml_top); -@@ -421,11 +421,9 @@ cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc, - int rc = pcmk_rc_ok; - GList/**/ *resources = NULL; - -- if(attr_id == NULL -- && force == FALSE -- && find_resource_attr( -- out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == EINVAL) { -- printf("\n"); -+ if (attr_id == NULL && force == FALSE) { -+ find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, -+ NULL, NULL, attr_name, NULL); - } - - if(pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { -@@ -469,9 +467,9 @@ cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc, - rc = pcmk_legacy2rc(rc); - - if (rc == pcmk_rc_ok && !out->is_quiet(out)) { -- printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id, -- attr_set ? " set=" : "", attr_set ? attr_set : "", -- attr_name ? " name=" : "", attr_name ? attr_name : ""); -+ out->info(out, "Deleted '%s' option: id=%s%s%s%s%s", lookup_id, local_attr_id, -+ attr_set ? " set=" : "", attr_set ? attr_set : "", -+ attr_name ? " name=" : "", attr_name ? 
attr_name : ""); - } - - free(lookup_id); -@@ -497,11 +495,11 @@ send_lrm_rsc_op(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, bool do_fail_ - pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); - - if (rsc == NULL) { -- CMD_ERR("Resource %s not found", rsc_id); -+ out->err(out, "Resource %s not found", rsc_id); - return ENXIO; - - } else if (rsc->variant != pe_native) { -- CMD_ERR("We can only process primitive resources, not %s", rsc_id); -+ out->err(out, "We can only process primitive resources, not %s", rsc_id); - return EINVAL; - } - -@@ -509,25 +507,25 @@ send_lrm_rsc_op(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, bool do_fail_ - rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER), - rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE); - if ((rsc_class == NULL) || (rsc_type == NULL)) { -- CMD_ERR("Resource %s does not have a class and type", rsc_id); -+ out->err(out, "Resource %s does not have a class and type", rsc_id); - return EINVAL; - } - - if (host_uname == NULL) { -- CMD_ERR("Please specify a node name"); -+ out->err(out, "Please specify a node name"); - return EINVAL; - - } else { - pe_node_t *node = pe_find_node(data_set->nodes, host_uname); - - if (node == NULL) { -- CMD_ERR("Node %s not found", host_uname); -+ out->err(out, "Node %s not found", host_uname); - return pcmk_rc_node_unknown; - } - - if (!(node->details->online)) { - if (do_fail_resource) { -- CMD_ERR("Node %s is not online", host_uname); -+ out->err(out, "Node %s is not online", host_uname); - return ENOTCONN; - } else { - cib_only = true; -@@ -536,8 +534,8 @@ send_lrm_rsc_op(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, bool do_fail_ - if (!cib_only && pe__is_guest_or_remote_node(node)) { - node = pe__current_node(node->details->remote_rsc); - if (node == NULL) { -- CMD_ERR("No cluster connection to Pacemaker Remote node %s detected", -- host_uname); -+ out->err(out, "No cluster connection to Pacemaker Remote node %s detected", -+ host_uname); - return ENOTCONN; - } - router_node = node->details->uname; -@@ -779,27 +777,27 @@ cli_resource_delete(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - node = pe_find_node(data_set->nodes, host_uname); - - if (node == NULL) { -- printf("Unable to clean up %s because node %s not found\n", -- rsc->id, host_uname); -+ out->err(out, "Unable to clean up %s because node %s not found", -+ rsc->id, host_uname); - return ENODEV; - } - - if (!node->details->rsc_discovery_enabled) { -- printf("Unable to clean up %s because resource discovery disabled on %s\n", -- rsc->id, host_uname); -+ out->err(out, "Unable to clean up %s because resource discovery disabled on %s", -+ rsc->id, host_uname); - return EOPNOTSUPP; - } - - if (controld_api == NULL) { -- printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n", -- rsc->id, host_uname); -+ out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file", -+ rsc->id, host_uname); - return pcmk_rc_ok; - } - - rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node); - if (rc != pcmk_rc_ok) { -- printf("Unable to clean up %s failures on %s: %s\n", -- rsc->id, host_uname, pcmk_rc_str(rc)); -+ out->err(out, "Unable to clean up %s failures on %s: %s", -+ rsc->id, host_uname, pcmk_rc_str(rc)); - return rc; - } - -@@ -810,10 +808,10 @@ cli_resource_delete(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - rc = clear_rsc_history(out, controld_api, host_uname, rsc->id, data_set); - } - if (rc != pcmk_rc_ok) { -- printf("Cleaned %s failures on %s, but unable to clean 
history: %s\n", -- rsc->id, host_uname, pcmk_strerror(rc)); -+ out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s", -+ rsc->id, host_uname, pcmk_strerror(rc)); - } else { -- printf("Cleaned up %s on %s\n", rsc->id, host_uname); -+ out->info(out, "Cleaned up %s on %s", rsc->id, host_uname); - } - return rc; - } -@@ -829,8 +827,8 @@ cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - const char *display_name = node_name? node_name : "all nodes"; - - if (controld_api == NULL) { -- printf("Dry run: skipping clean-up of %s due to CIB_file\n", -- display_name); -+ out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", -+ display_name); - return rc; - } - -@@ -838,7 +836,7 @@ cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - pe_node_t *node = pe_find_node(data_set->nodes, node_name); - - if (node == NULL) { -- CMD_ERR("Unknown node: %s", node_name); -+ out->err(out, "Unknown node: %s", node_name); - return ENXIO; - } - if (pe__is_guest_or_remote_node(node)) { -@@ -849,8 +847,8 @@ cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - rc = pcmk__node_attr_request_clear(NULL, node_name, NULL, operation, - interval_spec, NULL, attr_options); - if (rc != pcmk_rc_ok) { -- printf("Unable to clean up all failures on %s: %s\n", -- display_name, pcmk_rc_str(rc)); -+ out->err(out, "Unable to clean up all failures on %s: %s", -+ display_name, pcmk_rc_str(rc)); - return rc; - } - -@@ -858,8 +856,8 @@ cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - rc = clear_rsc_failures(out, controld_api, node_name, NULL, - operation, interval_spec, data_set); - if (rc != pcmk_rc_ok) { -- printf("Cleaned all resource failures on %s, but unable to clean history: %s\n", -- node_name, pcmk_strerror(rc)); -+ out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s", -+ node_name, pcmk_strerror(rc)); - return rc; - } - } else { -@@ -869,14 +867,14 @@ cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL, - operation, interval_spec, data_set); - if (rc != pcmk_rc_ok) { -- printf("Cleaned all resource failures on all nodes, but unable to clean history: %s\n", -- pcmk_strerror(rc)); -+ out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s", -+ pcmk_strerror(rc)); - return rc; - } - } - } - -- printf("Cleaned up all resources on %s\n", display_name); -+ out->info(out, "Cleaned up all resources on %s", display_name); - return rc; - } - -@@ -1074,7 +1072,7 @@ static void display_list(pcmk__output_t *out, GList *items, const char *tag) - GList *item = NULL; - - for (item = items; item != NULL; item = item->next) { -- fprintf(stdout, "%s%s\n", tag, (const char *)item->data); -+ out->info(out, "%s%s", tag, (const char *)item->data); - } - } - -@@ -1127,12 +1125,12 @@ update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set, - rc = pcmk_legacy2rc(rc); - - if (rc != pcmk_rc_ok) { -- fprintf(stderr, "Could not obtain the current CIB: %s (%d)\n", pcmk_strerror(rc), rc); -+ out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc); - return rc; - } - rc = update_working_set_xml(data_set, &cib_xml_copy); - if (rc != pcmk_rc_ok) { -- fprintf(stderr, "Could not upgrade the current CIB XML\n"); -+ out->err(out, "Could not upgrade the current CIB XML"); - free_xml(cib_xml_copy); - return rc; - } -@@ -1162,7 +1160,7 @@ update_dataset(pcmk__output_t *out, 
cib_t *cib, pe_working_set_t * data_set, - shadow_file = get_shadow_file(pid); - - if (shadow_cib == NULL) { -- fprintf(stderr, "Could not create shadow cib: '%s'\n", pid); -+ out->err(out, "Could not create shadow cib: '%s'", pid); - rc = ENXIO; - goto cleanup; - } -@@ -1170,7 +1168,7 @@ update_dataset(pcmk__output_t *out, cib_t *cib, pe_working_set_t * data_set, - rc = write_xml_file(data_set->input, shadow_file, FALSE); - - if (rc < 0) { -- fprintf(stderr, "Could not populate shadow cib: %s (%d)\n", pcmk_strerror(rc), rc); -+ out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc); - goto cleanup; - } - -@@ -1178,7 +1176,7 @@ update_dataset(pcmk__output_t *out, cib_t *cib, pe_working_set_t * data_set, - rc = pcmk_legacy2rc(rc); - - if (rc != pcmk_rc_ok) { -- fprintf(stderr, "Could not connect to shadow cib: %s (%d)\n", pcmk_strerror(rc), rc); -+ out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc); - goto cleanup; - } - -@@ -1301,9 +1299,9 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - if(resource_is_running_on(rsc, host) == FALSE) { - const char *id = rsc->clone_name?rsc->clone_name:rsc->id; - if(host) { -- printf("%s is not running on %s and so cannot be restarted\n", id, host); -+ out->err(out, "%s is not running on %s and so cannot be restarted", id, host); - } else { -- printf("%s is not running anywhere and so cannot be restarted\n", id); -+ out->err(out, "%s is not running anywhere and so cannot be restarted", id); - } - return ENXIO; - } -@@ -1340,7 +1338,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); - rc = update_dataset(out, cib, data_set, FALSE); - if(rc != pcmk_rc_ok) { -- fprintf(stdout, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc); -+ out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc); - goto done; - } - -@@ -1371,7 +1369,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - data_set, force); - } - if(rc != pcmk_rc_ok) { -- fprintf(stderr, "Could not set target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc); -+ out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc); - if (current_active) { - g_list_free_full(current_active, free); - } -@@ -1383,7 +1381,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - - rc = update_dataset(out, cib, data_set, TRUE); - if(rc != pcmk_rc_ok) { -- fprintf(stderr, "Could not determine which resources would be stopped\n"); -+ out->err(out, "Could not determine which resources would be stopped"); - goto failure; - } - -@@ -1391,7 +1389,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - dump_list(target_active, "Target"); - - list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); -- fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta)); -+ out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta)); - display_list(out, list_delta, " * "); - - step_timeout_s = timeout / sleep_interval; -@@ -1410,7 +1408,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - } - rc = update_dataset(out, cib, data_set, FALSE); - if(rc != pcmk_rc_ok) { -- fprintf(stderr, "Could not determine which resources were stopped\n"); -+ out->err(out, "Could not 
determine which resources were stopped"); - goto failure; - } - -@@ -1427,7 +1425,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before); - if(before == g_list_length(list_delta)) { - /* aborted during stop phase, print the contents of list_delta */ -- fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta)); -+ out->info(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); - display_list(out, list_delta, " * "); - rc = ETIME; - goto failure; -@@ -1452,7 +1450,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - } - - if(rc != pcmk_rc_ok) { -- fprintf(stderr, "Could not unset target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc); -+ out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc); - goto done; - } - -@@ -1461,7 +1459,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - } - target_active = restart_target_active; - list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); -- fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta)); -+ out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta)); - display_list(out, list_delta, " * "); - - step_timeout_s = timeout / sleep_interval; -@@ -1482,7 +1480,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - - rc = update_dataset(out, cib, data_set, FALSE); - if(rc != pcmk_rc_ok) { -- fprintf(stderr, "Could not determine which resources were started\n"); -+ out->err(out, "Could not determine which resources were started"); - goto failure; - } - -@@ -1502,7 +1500,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, - - if(before == g_list_length(list_delta)) { - /* aborted during start phase, print the contents of list_delta */ -- fprintf(stdout, "Could not complete restart of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta)); -+ out->info(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); - display_list(out, list_delta, " * "); - rc = ETIME; - goto failure; -@@ -1578,29 +1576,23 @@ actions_are_pending(GListPtr actions) - return FALSE; - } - --/*! 
-- * \internal -- * \brief Print pending actions to stderr -- * -- * \param[in] actions List of actions to check -- * -- * \return void -- */ - static void - print_pending_actions(pcmk__output_t *out, GListPtr actions) - { - GListPtr action; - -- fprintf(stderr, "Pending actions:\n"); -+ out->info(out, "Pending actions:"); - for (action = actions; action != NULL; action = action->next) { - pe_action_t *a = (pe_action_t *) action->data; - -- if (action_is_pending(a)) { -- fprintf(stderr, "\tAction %d: %s", a->id, a->uuid); -- if (a->node) { -- fprintf(stderr, "\ton %s", a->node->details->uname); -- } -- fprintf(stderr, "\n"); -+ if (!action_is_pending(a)) { -+ continue; -+ } -+ -+ if (a->node) { -+ out->info(out, "\tAction %d: %s\ton %s", a->id, a->uuid, a->node->details->uname); -+ } else { -+ out->info(out, "\tAction %d: %s", a->id, a->uuid); - } - } - } -@@ -1678,8 +1670,8 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) - "dc-version"); - - if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) { -- printf("warning: wait option may not work properly in " -- "mixed-version cluster\n"); -+ out->info(out, "warning: wait option may not work properly in " -+ "mixed-version cluster"); - printed_version_warning = TRUE; - } - } -@@ -1702,8 +1694,8 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, - svc_action_t *op = NULL; - - if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { -- CMD_ERR("Sorry, the %s option doesn't support %s resources yet", -- action, rsc_class); -+ out->err(out, "Sorry, the %s option doesn't support %s resources yet", -+ action, rsc_class); - crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); - } - -@@ -1765,8 +1757,8 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, - - g_hash_table_iter_init(&iter, override_hash); - while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) { -- printf("Overriding the cluster configuration for '%s' with '%s' = '%s'\n", -- rsc_name, name, value); -+ out->info(out, "Overriding the cluster configuration for '%s' with '%s' = '%s'", -+ rsc_name, name, value); - g_hash_table_replace(op->params, strdup(name), strdup(value)); - } - } -@@ -1775,13 +1767,13 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, - exit_code = op->rc; - - if (op->status == PCMK_LRM_OP_DONE) { -- printf("Operation %s for %s (%s:%s:%s) returned: '%s' (%d)\n", -- action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, -- services_ocf_exitcode_str(op->rc), op->rc); -+ out->info(out, "Operation %s for %s (%s:%s:%s) returned: '%s' (%d)", -+ action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, -+ services_ocf_exitcode_str(op->rc), op->rc); - } else { -- printf("Operation %s for %s (%s:%s:%s) failed: '%s' (%d)\n", -- action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, -- services_lrm_status_str(op->status), op->status); -+ out->info(out, "Operation %s for %s (%s:%s:%s) failed: '%s' (%d)", -+ action, rsc_name, rsc_class, rsc_prov ? 
rsc_prov : "", rsc_type, -+ services_lrm_status_str(op->status), op->status); - } - - /* hide output for validate-all if not in verbose */ -@@ -1833,11 +1825,11 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, - if(pe_rsc_is_clone(rsc)) { - int rc = cli_resource_search(out, rsc, requested_name, data_set); - if(rc > 0 && force == FALSE) { -- CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active", -- action, rsc->id); -- CMD_ERR("Try setting target-role=Stopped first or specifying " -- "the force option"); -- crm_exit(CRM_EX_UNSAFE); -+ out->err(out, "It is not safe to %s %s here: the cluster claims it is already active", -+ action, rsc->id); -+ out->err(out, "Try setting target-role=Stopped first or specifying " -+ "the force option"); -+ return CRM_EX_UNSAFE; - } - } - -@@ -1851,9 +1843,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, - } - - if(rsc->variant == pe_group) { -- CMD_ERR("Sorry, the %s option doesn't support group resources", -- rsc_action); -- crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); -+ out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action); -+ return CRM_EX_UNIMPLEMENT_FEATURE; - } - - rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -@@ -1895,12 +1886,12 @@ cli_resource_move(pcmk__output_t *out, pe_resource_t *rsc, const char *rsc_id, - pe_resource_t *p = uber_parent(rsc); - - if (pcmk_is_set(p->flags, pe_rsc_promotable)) { -- CMD_ERR("Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id); -+ out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id); - rsc_id = p->id; - rsc = p; - - } else { -- CMD_ERR("Ignoring master option: %s is not promotable", rsc_id); -+ out->info(out, "Ignoring master option: %s is not promotable", rsc_id); - promoted_role_only = FALSE; - } - } -@@ -1971,13 +1962,13 @@ cli_resource_move(pcmk__output_t *out, pe_resource_t *rsc, const char *rsc_id, - NULL, cib, cib_options, promoted_role_only); - - } else if(count > 1) { -- CMD_ERR("Resource '%s' is currently %s in %d locations. " -- "One may now move to %s", -- rsc_id, (promoted_role_only? "promoted" : "active"), -- count, dest->details->uname); -- CMD_ERR("To prevent '%s' from being %s at a specific location, " -- "specify a node.", -- rsc_id, (promoted_role_only? "promoted" : "active")); -+ out->info(out, "Resource '%s' is currently %s in %d locations. " -+ "One may now move to %s", -+ rsc_id, (promoted_role_only? "promoted" : "active"), -+ count, dest->details->uname); -+ out->info(out, "To prevent '%s' from being %s at a specific location, " -+ "specify a node.", -+ rsc_id, (promoted_role_only? "promoted" : "active")); - - } else { - crm_trace("Not banning %s from its current location: not active", rsc_id); --- -1.8.3.1 - - -From 8d94e547f6ae7a1bdeb6d28b4f9f9e102543e35f Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 3 Sep 2020 10:50:35 -0400 -Subject: [PATCH 13/19] Feature: scheduler, tools: Add a new node-and-op output - message. - -This is used for the output of "crm_resource -O" and "crm_resource -o". -Each operation gets printed on a single line with a brief summary of the -node it's running on preceeding it. - -There's a fair bit of overlap between this and the op-history message -but there's also a number of differences. This new message is for use -in crm_resource, and attempting to combine the two is going to change -that program's output, which may not be acceptable to some users. 
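
The caller's side is then just a loop over the recorded operations, emitting the new message once per entry; roughly, simplified from the cli_resource_print_operations() rewrite below:

    GListPtr ops = find_operations(rsc_id, host_uname, active, data_set);

    out->begin_list(out, NULL, NULL, "Resource Operations");
    for (GListPtr lpc = ops; lpc != NULL; lpc = lpc->next) {
        xmlNode *xml_op = (xmlNode *) lpc->data;

        /* text: one line per op; XML: one <operation> element per op */
        out->message(out, "node-and-op", data_set, xml_op);
    }
    out->end_list(out);
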
---- - include/crm/pengine/internal.h | 2 + - lib/common/output_xml.c | 1 + - lib/pengine/pe_output.c | 105 +++++++++++++++++++++++++++++++++++++++++ - tools/crm_resource_print.c | 43 +++++------------ - 4 files changed, 120 insertions(+), 31 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 00b6b4c..396d707 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -293,6 +293,8 @@ int pe__bundle_text(pcmk__output_t *out, va_list args); - int pe__node_html(pcmk__output_t *out, va_list args); - int pe__node_text(pcmk__output_t *out, va_list args); - int pe__node_xml(pcmk__output_t *out, va_list args); -+int pe__node_and_op(pcmk__output_t *out, va_list args); -+int pe__node_and_op_xml(pcmk__output_t *out, va_list args); - int pe__node_attribute_html(pcmk__output_t *out, va_list args); - int pe__node_attribute_text(pcmk__output_t *out, va_list args); - int pe__node_attribute_xml(pcmk__output_t *out, va_list args); -diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c -index bba21e7..716d10f 100644 ---- a/lib/common/output_xml.c -+++ b/lib/common/output_xml.c -@@ -61,6 +61,7 @@ static subst_t substitutions[] = { - { "Negative Location Constraints", "bans" }, - { "Node Attributes", "node_attributes" }, - { "Resource Config", "resource_config" }, -+ { "Resource Operations", "operations" }, - - { NULL, NULL } - }; -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index dd3a880..1a3f93d 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -1284,6 +1284,109 @@ pe__node_attribute_html(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - -+PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr") -+int -+pe__node_and_op(pcmk__output_t *out, va_list args) { -+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); -+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr); -+ -+ pe_resource_t *rsc = NULL; -+ gchar *node_str = NULL; -+ char *last_change_str = NULL; -+ -+ const char *op_rsc = crm_element_value(xml_op, "resource"); -+ const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS); -+ const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); -+ int status = crm_parse_int(status_s, "0"); -+ time_t last_change = 0; -+ -+ rsc = pe_find_resource(data_set->resources, op_rsc); -+ -+ if (rsc) { -+ pe_node_t *node = pe__current_node(rsc); -+ const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); -+ int opts = pe_print_rsconly | pe_print_pending; -+ -+ if (node == NULL) { -+ node = rsc->pending_node; -+ } -+ -+ node_str = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node, -+ opts, target_role, false); -+ } else { -+ node_str = crm_strdup_printf("Unknown resource %s", op_rsc); -+ } -+ -+ if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, -+ &last_change) == pcmk_ok) { -+ last_change_str = crm_strdup_printf(", %s=%s, exec=%sms", -+ XML_RSC_OP_LAST_CHANGE, -+ crm_strip_trailing_newline(ctime(&last_change)), -+ crm_element_value(xml_op, XML_RSC_OP_T_EXEC)); -+ } -+ -+ out->list_item(out, NULL, "%s: %s (node=%s, call=%s, rc=%s%s): %s", -+ node_str, op_key ? op_key : ID(xml_op), -+ crm_element_value(xml_op, XML_ATTR_UNAME), -+ crm_element_value(xml_op, XML_LRM_ATTR_CALLID), -+ crm_element_value(xml_op, XML_LRM_ATTR_RC), -+ last_change_str ? 
last_change_str : "", -+ services_lrm_status_str(status)); -+ -+ g_free(node_str); -+ free(last_change_str); -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr") -+int -+pe__node_and_op_xml(pcmk__output_t *out, va_list args) { -+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); -+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr); -+ -+ pe_resource_t *rsc = NULL; -+ const char *op_rsc = crm_element_value(xml_op, "resource"); -+ const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS); -+ const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); -+ int status = crm_parse_int(status_s, "0"); -+ time_t last_change = 0; -+ -+ xmlNode *node = pcmk__output_create_xml_node(out, "operation"); -+ -+ rsc = pe_find_resource(data_set->resources, op_rsc); -+ -+ if (rsc) { -+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -+ const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); -+ char *agent_tuple = NULL; -+ -+ agent_tuple = crm_strdup_printf("%s:%s:%s", class, -+ pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) ? crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER) : "", -+ kind); -+ -+ xmlSetProp(node, (pcmkXmlStr) "rsc", (pcmkXmlStr) rsc_printable_id(rsc)); -+ xmlSetProp(node, (pcmkXmlStr) "agent", (pcmkXmlStr) agent_tuple); -+ free(agent_tuple); -+ } -+ -+ xmlSetProp(node, (pcmkXmlStr) "op", (pcmkXmlStr) (op_key ? op_key : ID(xml_op))); -+ xmlSetProp(node, (pcmkXmlStr) "node", (pcmkXmlStr) crm_element_value(xml_op, XML_ATTR_UNAME)); -+ xmlSetProp(node, (pcmkXmlStr) "call", (pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID)); -+ xmlSetProp(node, (pcmkXmlStr) "rc", (pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_RC)); -+ -+ if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, -+ &last_change) == pcmk_ok) { -+ xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE, -+ (pcmkXmlStr) crm_strip_trailing_newline(ctime(&last_change))); -+ xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_EXEC, -+ (pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_EXEC)); -+ } -+ -+ xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) services_lrm_status_str(status)); -+ -+ return pcmk_rc_ok; -+} -+ - PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int") - int - pe__node_attribute_xml(pcmk__output_t *out, va_list args) { -@@ -1878,6 +1981,8 @@ static pcmk__message_entry_t fmt_functions[] = { - { "node", "log", pe__node_text }, - { "node", "text", pe__node_text }, - { "node", "xml", pe__node_xml }, -+ { "node-and-op", "default", pe__node_and_op }, -+ { "node-and-op", "xml", pe__node_and_op_xml }, - { "node-list", "html", pe__node_list_html }, - { "node-list", "log", pe__node_list_text }, - { "node-list", "text", pe__node_list_text }, -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 447c57d..de2202e 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -89,42 +89,23 @@ cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id, - const char *host_uname, bool active, - pe_working_set_t * data_set) - { -- pe_resource_t *rsc = NULL; -- int opts = pe_print_printf | pe_print_rsconly | pe_print_suppres_nl | pe_print_pending; -+ int rc = pcmk_rc_no_output; - GListPtr ops = find_operations(rsc_id, host_uname, active, data_set); -- GListPtr lpc = NULL; -- -- for (lpc = ops; lpc != NULL; lpc = lpc->next) { -- xmlNode *xml_op = (xmlNode *) lpc->data; - -- const char *op_rsc = crm_element_value(xml_op, 
"resource"); -- const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS); -- const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); -- int status = crm_parse_int(status_s, "0"); -- time_t last_change = 0; -+ if (!ops) { -+ return rc; -+ } - -- rsc = pe_find_resource(data_set->resources, op_rsc); -- if(rsc) { -- rsc->fns->print(rsc, "", opts, stdout); -- } else { -- fprintf(stdout, "Unknown resource %s", op_rsc); -- } -+ out->begin_list(out, NULL, NULL, "Resource Operations"); -+ rc = pcmk_rc_ok; - -- fprintf(stdout, ": %s (node=%s, call=%s, rc=%s", -- op_key ? op_key : ID(xml_op), -- crm_element_value(xml_op, XML_ATTR_UNAME), -- crm_element_value(xml_op, XML_LRM_ATTR_CALLID), -- crm_element_value(xml_op, XML_LRM_ATTR_RC)); -- -- if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, -- &last_change) == pcmk_ok) { -- fprintf(stdout, ", " XML_RSC_OP_LAST_CHANGE "=%s, exec=%sms", -- crm_strip_trailing_newline(ctime(&last_change)), -- crm_element_value(xml_op, XML_RSC_OP_T_EXEC)); -- } -- fprintf(stdout, "): %s\n", services_lrm_status_str(status)); -+ for (GListPtr lpc = ops; lpc != NULL; lpc = lpc->next) { -+ xmlNode *xml_op = (xmlNode *) lpc->data; -+ out->message(out, "node-and-op", data_set, xml_op); - } -- return pcmk_rc_ok; -+ -+ out->end_list(out); -+ return rc; - } - - void --- -1.8.3.1 - - -From 768e2ae0efa626fce75e7fb84970c49617ca8448 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 1 Oct 2020 11:21:40 -0400 -Subject: [PATCH 14/19] Feature: tools: Use formatted output for CTS printing - in crm_resource. - -Note that this option only exists for internal use, passing information -to cts. It's not exposed in the command line help. I've implemented it -just using out->info so it will only print for the text output format. -I don't want XML output that someone might start to rely on, despite not -being in the schema. 
---- - tools/crm_resource.c | 2 ++ - tools/crm_resource_print.c | 33 ++++++++++++++++----------------- - 2 files changed, 18 insertions(+), 17 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 0532095..6e32982 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1813,12 +1813,14 @@ main(int argc, char **argv) - - case cmd_cts: - rc = pcmk_rc_ok; -+ - for (GList *lpc = data_set->resources; lpc != NULL; - lpc = lpc->next) { - - rsc = (pe_resource_t *) lpc->data; - cli_resource_print_cts(out, rsc); - } -+ - cli_resource_print_cts_constraints(out, data_set); - break; - -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index de2202e..4b3c42d 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -33,19 +33,18 @@ cli_resource_print_cts_constraints(pcmk__output_t *out, pe_working_set_t * data_ - continue; - } - -- if (pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj), pcmk__str_casei)) { -- printf("Constraint %s %s %s %s %s %s %s\n", -- crm_element_name(xml_obj), -- cons_string(crm_element_value(xml_obj, XML_ATTR_ID)), -- cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE)), -- cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET)), -- cons_string(crm_element_value(xml_obj, XML_RULE_ATTR_SCORE)), -- cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE)), -- cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE))); -- -- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_LOCATION, crm_element_name(xml_obj), pcmk__str_casei)) { -- /* unpack_location(xml_obj, data_set); */ -+ if (!pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj), pcmk__str_casei)) { -+ continue; - } -+ -+ out->info(out, "Constraint %s %s %s %s %s %s %s", -+ crm_element_name(xml_obj), -+ cons_string(crm_element_value(xml_obj, XML_ATTR_ID)), -+ cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE)), -+ cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET)), -+ cons_string(crm_element_value(xml_obj, XML_RULE_ATTR_SCORE)), -+ cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE)), -+ cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE))); - } - } - -@@ -70,11 +69,11 @@ cli_resource_print_cts(pcmk__output_t *out, pe_resource_t * rsc) - host = node->details->uname; - } - -- printf("Resource: %s %s %s %s %s %s %s %s %d %lld 0x%.16llx\n", -- crm_element_name(rsc->xml), rsc->id, -- rsc->clone_name ? rsc->clone_name : rsc->id, rsc->parent ? rsc->parent->id : "NA", -- rprov ? rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags, -- rsc->flags); -+ out->info(out, "Resource: %s %s %s %s %s %s %s %s %d %lld 0x%.16llx", -+ crm_element_name(rsc->xml), rsc->id, -+ rsc->clone_name ? rsc->clone_name : rsc->id, rsc->parent ? rsc->parent->id : "NA", -+ rprov ? rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags, -+ rsc->flags); - - for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { - pe_resource_t *child = (pe_resource_t *) lpc->data; --- -1.8.3.1 - - -From 9ab0635ada9293456eed7e31a8bf2c6e9b44d833 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 1 Oct 2020 14:33:47 -0400 -Subject: [PATCH 15/19] Feature: tools: Use formatted output for crm_resource - checks. 
- ---- - cts/cli/regression.tools.exp | 4 +- - tools/crm_resource.c | 2 +- - tools/crm_resource.h | 20 +++- - tools/crm_resource_print.c | 260 +++++++++++++++++++++++++++++++++++++++++++ - tools/crm_resource_runtime.c | 170 +++++++--------------------- - 5 files changed, 318 insertions(+), 138 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 935dce8..7166714 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -876,9 +876,7 @@ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attribut - * Passed: crm_resource - Create another resource meta attribute - =#=#=#= Begin test: Show why a resource is not running =#=#=#= - Resource dummy is not running -- -- * Configuration specifies 'dummy' should remain stopped -- -+Configuration specifies 'dummy' should remain stopped - =#=#=#= End test: Show why a resource is not running - OK (0) =#=#=#= - * Passed: crm_resource - Show why a resource is not running - =#=#=#= Begin test: Remove another resource meta attribute =#=#=#= -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 6e32982..f551059 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1868,7 +1868,7 @@ main(int argc, char **argv) - goto done; - } - } -- cli_resource_why(out, cib_conn, data_set->resources, rsc, dest); -+ out->message(out, "resource-why", cib_conn, data_set->resources, rsc, dest); - rc = pcmk_rc_ok; - } - break; -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 4fc7c71..377f7aa 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -23,6 +23,20 @@ - #include - #include - -+enum resource_check_flags { -+ rsc_remain_stopped = (1 << 0), -+ rsc_unpromotable = (1 << 1), -+ rsc_unmanaged = (1 << 2) -+}; -+ -+typedef struct resource_checks_s { -+ pe_resource_t *rsc; -+ unsigned int flags; -+ const char *lock_node; -+} resource_checks_t; -+ -+resource_checks_t *cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed); -+ - /* ban */ - int cli_resource_prefer(pcmk__output_t *out, const char *rsc_id, const char *host, - const char *move_lifetime, cib_t * cib_conn, int cib_options, -@@ -51,7 +65,7 @@ int cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id, - pe_working_set_t * data_set); - - /* runtime */ --void cli_resource_check(pcmk__output_t *out, cib_t * cib, pe_resource_t *rsc); -+int cli_resource_check(pcmk__output_t *out, cib_t * cib, pe_resource_t *rsc); - int cli_resource_fail(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - const char *host_uname, const char *rsc_id, - pe_working_set_t *data_set); -@@ -98,7 +112,7 @@ int cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc, - - int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml); - int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib); --void cli_resource_why(pcmk__output_t *out, cib_t *cib_conn, GListPtr resources, -- pe_resource_t *rsc, pe_node_t *node); -+ -+bool resource_is_running_on(pe_resource_t *rsc, const char *host); - - void crm_resource_register_messages(pcmk__output_t *out); -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 4b3c42d..d2a2cc8 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -8,6 +8,7 @@ - */ - - #include -+#include - #include - #include - -@@ -287,6 +288,261 @@ property_text(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - -+PCMK__OUTPUT_ARGS("resource-check", "resource_checks_t *") -+static int 
-+resource_check_default(pcmk__output_t *out, va_list args) { -+ resource_checks_t *checks = va_arg(args, resource_checks_t *); -+ -+ pe_resource_t *parent = uber_parent(checks->rsc); -+ int rc = pcmk_rc_no_output; -+ bool printed = false; -+ -+ if (checks->flags != 0 || checks->lock_node != NULL) { -+ printed = true; -+ out->begin_list(out, NULL, NULL, "Resource Checks"); -+ } -+ -+ if (pcmk_is_set(checks->flags, rsc_remain_stopped)) { -+ out->list_item(out, "check", "Configuration specifies '%s' should remain stopped", -+ parent->id); -+ } -+ -+ if (pcmk_is_set(checks->flags, rsc_unpromotable)) { -+ out->list_item(out, "check", "Configuration specifies '%s' should not be promoted", -+ parent->id); -+ } -+ -+ if (pcmk_is_set(checks->flags, rsc_unmanaged)) { -+ out->list_item(out, "check", "Configuration prevents cluster from stopping or starting unmanaged '%s'", -+ parent->id); -+ } -+ -+ if (checks->lock_node) { -+ out->list_item(out, "check", "'%s' is locked to node %s due to shutdown", -+ parent->id, checks->lock_node); -+ } -+ -+ if (printed) { -+ out->end_list(out); -+ rc = pcmk_rc_ok; -+ } -+ -+ return rc; -+} -+ -+PCMK__OUTPUT_ARGS("resource-check", "resource_checks_t *") -+static int -+resource_check_xml(pcmk__output_t *out, va_list args) { -+ resource_checks_t *checks = va_arg(args, resource_checks_t *); -+ -+ pe_resource_t *parent = uber_parent(checks->rsc); -+ int rc = pcmk_rc_no_output; -+ -+ xmlNode *node = pcmk__output_create_xml_node(out, "check"); -+ -+ xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) parent->id); -+ -+ if (pcmk_is_set(checks->flags, rsc_remain_stopped)) { -+ xmlSetProp(node, (pcmkXmlStr) "remain_stopped", (pcmkXmlStr) "true"); -+ } -+ -+ if (pcmk_is_set(checks->flags, rsc_unpromotable)) { -+ xmlSetProp(node, (pcmkXmlStr) "promotable", (pcmkXmlStr) "false"); -+ } -+ -+ if (pcmk_is_set(checks->flags, rsc_unmanaged)) { -+ xmlSetProp(node, (pcmkXmlStr) "unmanaged", (pcmkXmlStr) "true"); -+ } -+ -+ if (checks->lock_node) { -+ xmlSetProp(node, (pcmkXmlStr) "locked-to", (pcmkXmlStr) checks->lock_node); -+ } -+ -+ return rc; -+} -+ -+PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GListPtr", "pe_resource_t *", -+ "pe_node_t *") -+static int -+resource_why_default(pcmk__output_t *out, va_list args) -+{ -+ cib_t *cib_conn = va_arg(args, cib_t *); -+ GListPtr resources = va_arg(args, GListPtr); -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ pe_node_t *node = va_arg(args, pe_node_t *); -+ -+ const char *host_uname = (node == NULL)? 
NULL : node->details->uname; -+ -+ out->begin_list(out, NULL, NULL, "Resource Reasons"); -+ -+ if ((rsc == NULL) && (host_uname == NULL)) { -+ GListPtr lpc = NULL; -+ GListPtr hosts = NULL; -+ -+ for (lpc = resources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *rsc = (pe_resource_t *) lpc->data; -+ rsc->fns->location(rsc, &hosts, TRUE); -+ -+ if (hosts == NULL) { -+ out->list_item(out, "reason", "Resource %s is not running", rsc->id); -+ } else { -+ out->list_item(out, "reason", "Resource %s is running", rsc->id); -+ } -+ -+ cli_resource_check(out, cib_conn, rsc); -+ g_list_free(hosts); -+ hosts = NULL; -+ } -+ -+ } else if ((rsc != NULL) && (host_uname != NULL)) { -+ if (resource_is_running_on(rsc, host_uname)) { -+ out->list_item(out, "reason", "Resource %s is running on host %s", -+ rsc->id, host_uname); -+ } else { -+ out->list_item(out, "reason", "Resource %s is not running on host %s", -+ rsc->id, host_uname); -+ } -+ -+ cli_resource_check(out, cib_conn, rsc); -+ -+ } else if ((rsc == NULL) && (host_uname != NULL)) { -+ const char* host_uname = node->details->uname; -+ GListPtr allResources = node->details->allocated_rsc; -+ GListPtr activeResources = node->details->running_rsc; -+ GListPtr unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); -+ GListPtr lpc = NULL; -+ -+ for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *rsc = (pe_resource_t *) lpc->data; -+ out->list_item(out, "reason", "Resource %s is running on host %s", -+ rsc->id, host_uname); -+ cli_resource_check(out, cib_conn, rsc); -+ } -+ -+ for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *rsc = (pe_resource_t *) lpc->data; -+ out->list_item(out, "reason", "Resource %s is assigned to host %s but not running", -+ rsc->id, host_uname); -+ cli_resource_check(out, cib_conn, rsc); -+ } -+ -+ g_list_free(allResources); -+ g_list_free(activeResources); -+ g_list_free(unactiveResources); -+ -+ } else if ((rsc != NULL) && (host_uname == NULL)) { -+ GListPtr hosts = NULL; -+ -+ rsc->fns->location(rsc, &hosts, TRUE); -+ out->list_item(out, "reason", "Resource %s is %srunning", -+ rsc->id, (hosts? "" : "not ")); -+ cli_resource_check(out, cib_conn, rsc); -+ g_list_free(hosts); -+ } -+ -+ out->end_list(out); -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GListPtr", "pe_resource_t *", -+ "pe_node_t *") -+static int -+resource_why_xml(pcmk__output_t *out, va_list args) -+{ -+ cib_t *cib_conn = va_arg(args, cib_t *); -+ GListPtr resources = va_arg(args, GListPtr); -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ pe_node_t *node = va_arg(args, pe_node_t *); -+ -+ const char *host_uname = (node == NULL)? 
NULL : node->details->uname; -+ -+ xmlNode *xml_node = pcmk__output_xml_create_parent(out, "reason"); -+ -+ if ((rsc == NULL) && (host_uname == NULL)) { -+ GListPtr lpc = NULL; -+ GListPtr hosts = NULL; -+ -+ pcmk__output_xml_create_parent(out, "resources"); -+ -+ for (lpc = resources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *rsc = (pe_resource_t *) lpc->data; -+ xmlNode *rsc_node = NULL; -+ -+ rsc->fns->location(rsc, &hosts, TRUE); -+ -+ rsc_node = pcmk__output_xml_create_parent(out, "resource"); -+ xmlSetProp(rsc_node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc->id); -+ xmlSetProp(rsc_node, (pcmkXmlStr) "running", -+ (pcmkXmlStr) pcmk__btoa(hosts != NULL)); -+ -+ cli_resource_check(out, cib_conn, rsc); -+ pcmk__output_xml_pop_parent(out); -+ g_list_free(hosts); -+ hosts = NULL; -+ } -+ -+ pcmk__output_xml_pop_parent(out); -+ -+ } else if ((rsc != NULL) && (host_uname != NULL)) { -+ if (resource_is_running_on(rsc, host_uname)) { -+ xmlSetProp(xml_node, (pcmkXmlStr) "running_on", (pcmkXmlStr) host_uname); -+ } -+ -+ cli_resource_check(out, cib_conn, rsc); -+ -+ } else if ((rsc == NULL) && (host_uname != NULL)) { -+ const char* host_uname = node->details->uname; -+ GListPtr allResources = node->details->allocated_rsc; -+ GListPtr activeResources = node->details->running_rsc; -+ GListPtr unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); -+ GListPtr lpc = NULL; -+ -+ pcmk__output_xml_create_parent(out, "resources"); -+ -+ for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *rsc = (pe_resource_t *) lpc->data; -+ xmlNode *rsc_node = NULL; -+ -+ rsc_node = pcmk__output_xml_create_parent(out, "resource"); -+ xmlSetProp(rsc_node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc->id); -+ xmlSetProp(rsc_node, (pcmkXmlStr) "running", (pcmkXmlStr) "true"); -+ xmlSetProp(rsc_node, (pcmkXmlStr) "host", (pcmkXmlStr) host_uname); -+ -+ cli_resource_check(out, cib_conn, rsc); -+ pcmk__output_xml_pop_parent(out); -+ } -+ -+ for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *rsc = (pe_resource_t *) lpc->data; -+ xmlNode *rsc_node = NULL; -+ -+ rsc_node = pcmk__output_xml_create_parent(out, "resource"); -+ xmlSetProp(rsc_node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc->id); -+ xmlSetProp(rsc_node, (pcmkXmlStr) "running", (pcmkXmlStr) "false"); -+ xmlSetProp(rsc_node, (pcmkXmlStr) "host", (pcmkXmlStr) host_uname); -+ -+ cli_resource_check(out, cib_conn, rsc); -+ pcmk__output_xml_pop_parent(out); -+ } -+ -+ pcmk__output_xml_pop_parent(out); -+ g_list_free(allResources); -+ g_list_free(activeResources); -+ g_list_free(unactiveResources); -+ -+ } else if ((rsc != NULL) && (host_uname == NULL)) { -+ GListPtr hosts = NULL; -+ -+ rsc->fns->location(rsc, &hosts, TRUE); -+ xmlSetProp(xml_node, (pcmkXmlStr) "running", -+ (pcmkXmlStr) pcmk__btoa(hosts != NULL)); -+ cli_resource_check(out, cib_conn, rsc); -+ g_list_free(hosts); -+ } -+ -+ return pcmk_rc_ok; -+} -+ - static void - add_resource_name(pcmk__output_t *out, pe_resource_t *rsc) { - if (rsc->children == NULL) { -@@ -325,6 +581,10 @@ static pcmk__message_entry_t fmt_functions[] = { - { "attribute", "text", attribute_text }, - { "property", "default", property_default }, - { "property", "text", property_text }, -+ { "resource-check", "default", resource_check_default }, -+ { "resource-check", "xml", resource_check_xml }, -+ { "resource-why", "default", resource_why_default }, -+ { "resource-why", "xml", resource_why_xml }, - { "resource-names-list", "default", resource_names }, - - { 
NULL, NULL, NULL } -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index bd377a3..4f8287b 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -43,6 +43,35 @@ do_find_resource(pcmk__output_t *out, const char *rsc, pe_resource_t * the_rsc, - return found; - } - -+resource_checks_t * -+cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed) -+{ -+ pe_resource_t *parent = uber_parent(rsc); -+ resource_checks_t *rc = calloc(1, sizeof(resource_checks_t)); -+ -+ if (role_s) { -+ enum rsc_role_e role = text2role(role_s); -+ -+ if (role == RSC_ROLE_STOPPED) { -+ rc->flags |= rsc_remain_stopped; -+ } else if (pcmk_is_set(parent->flags, pe_rsc_promotable) && -+ role == RSC_ROLE_SLAVE) { -+ rc->flags |= rsc_unpromotable; -+ } -+ } -+ -+ if (managed && !crm_is_true(managed)) { -+ rc->flags |= rsc_unmanaged; -+ } -+ -+ if (rsc->lock_node) { -+ rc->lock_node = rsc->lock_node->details->uname; -+ } -+ -+ rc->rsc = rsc; -+ return rc; -+} -+ - int - cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, - pe_working_set_t *data_set) -@@ -878,13 +907,14 @@ cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - return rc; - } - --void -+int - cli_resource_check(pcmk__output_t *out, cib_t * cib_conn, pe_resource_t *rsc) - { -- bool printed = false; - char *role_s = NULL; - char *managed = NULL; - pe_resource_t *parent = uber_parent(rsc); -+ int rc = pcmk_rc_no_output; -+ resource_checks_t *checks = NULL; - - find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, - NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed); -@@ -892,41 +922,16 @@ cli_resource_check(pcmk__output_t *out, cib_t * cib_conn, pe_resource_t *rsc) - find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, - NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s); - -- if(role_s) { -- enum rsc_role_e role = text2role(role_s); -+ checks = cli_check_resource(rsc, role_s, managed); - -- free(role_s); -- if(role == RSC_ROLE_UNKNOWN) { -- // Treated as if unset -- -- } else if(role == RSC_ROLE_STOPPED) { -- printf("\n * Configuration specifies '%s' should remain stopped\n", -- parent->id); -- printed = true; -- -- } else if (pcmk_is_set(parent->flags, pe_rsc_promotable) -- && (role == RSC_ROLE_SLAVE)) { -- printf("\n * Configuration specifies '%s' should not be promoted\n", -- parent->id); -- printed = true; -- } -+ if (checks->flags != 0 || checks->lock_node != NULL) { -+ rc = out->message(out, "resource-check", checks); - } - -- if (managed && !crm_is_true(managed)) { -- printf("%s * Configuration prevents cluster from stopping or starting unmanaged '%s'\n", -- (printed? "" : "\n"), parent->id); -- printed = true; -- } -+ free(role_s); - free(managed); -- -- if (rsc->lock_node) { -- printf("%s * '%s' is locked to node %s due to shutdown\n", -- (printed? 
"" : "\n"), parent->id, rsc->lock_node->details->uname); -- } -- -- if (printed) { -- printf("\n"); -- } -+ free(checks); -+ return rc; - } - - // \return Standard Pacemaker return code -@@ -986,7 +991,7 @@ generate_resource_params(pe_resource_t * rsc, pe_working_set_t * data_set) - return combined; - } - --static bool resource_is_running_on(pe_resource_t *rsc, const char *host) -+bool resource_is_running_on(pe_resource_t *rsc, const char *host) - { - bool found = TRUE; - GListPtr hIter = NULL; -@@ -1977,100 +1982,3 @@ cli_resource_move(pcmk__output_t *out, pe_resource_t *rsc, const char *rsc_id, - - return rc; - } -- --static void --cli_resource_why_without_rsc_and_host(pcmk__output_t *out, cib_t *cib_conn, -- GListPtr resources) --{ -- GListPtr lpc = NULL; -- GListPtr hosts = NULL; -- -- for (lpc = resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *rsc = (pe_resource_t *) lpc->data; -- rsc->fns->location(rsc, &hosts, TRUE); -- -- if (hosts == NULL) { -- printf("Resource %s is not running\n", rsc->id); -- } else { -- printf("Resource %s is running\n", rsc->id); -- } -- -- cli_resource_check(out, cib_conn, rsc); -- g_list_free(hosts); -- hosts = NULL; -- } -- --} -- --static void --cli_resource_why_with_rsc_and_host(pcmk__output_t *out, cib_t *cib_conn, -- GListPtr resources, pe_resource_t *rsc, -- const char *host_uname) --{ -- if (resource_is_running_on(rsc, host_uname)) { -- printf("Resource %s is running on host %s\n",rsc->id,host_uname); -- } else { -- printf("Resource %s is not running on host %s\n", rsc->id, host_uname); -- } -- cli_resource_check(out, cib_conn, rsc); --} -- --static void --cli_resource_why_without_rsc_with_host(pcmk__output_t *out, cib_t *cib_conn, -- GListPtr resources, pe_node_t *node) --{ -- const char* host_uname = node->details->uname; -- GListPtr allResources = node->details->allocated_rsc; -- GListPtr activeResources = node->details->running_rsc; -- GListPtr unactiveResources = pcmk__subtract_lists(allResources,activeResources,(GCompareFunc) strcmp); -- GListPtr lpc = NULL; -- -- for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *rsc = (pe_resource_t *) lpc->data; -- printf("Resource %s is running on host %s\n",rsc->id,host_uname); -- cli_resource_check(out, cib_conn, rsc); -- } -- -- for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *rsc = (pe_resource_t *) lpc->data; -- printf("Resource %s is assigned to host %s but not running\n", -- rsc->id, host_uname); -- cli_resource_check(out, cib_conn, rsc); -- } -- -- g_list_free(allResources); -- g_list_free(activeResources); -- g_list_free(unactiveResources); --} -- --static void --cli_resource_why_with_rsc_without_host(pcmk__output_t *out, cib_t *cib_conn, -- GListPtr resources, pe_resource_t *rsc) --{ -- GListPtr hosts = NULL; -- -- rsc->fns->location(rsc, &hosts, TRUE); -- printf("Resource %s is %srunning\n", rsc->id, (hosts? "" : "not ")); -- cli_resource_check(out, cib_conn, rsc); -- g_list_free(hosts); --} -- --void cli_resource_why(pcmk__output_t *out, cib_t *cib_conn, GListPtr resources, -- pe_resource_t *rsc, pe_node_t *node) --{ -- const char *host_uname = (node == NULL)? 
NULL : node->details->uname; -- -- if ((rsc == NULL) && (host_uname == NULL)) { -- cli_resource_why_without_rsc_and_host(out, cib_conn, resources); -- -- } else if ((rsc != NULL) && (host_uname != NULL)) { -- cli_resource_why_with_rsc_and_host(out, cib_conn, resources, rsc, -- host_uname); -- -- } else if ((rsc == NULL) && (host_uname != NULL)) { -- cli_resource_why_without_rsc_with_host(out, cib_conn, resources, node); -- -- } else if ((rsc != NULL) && (host_uname == NULL)) { -- cli_resource_why_with_rsc_without_host(out, cib_conn, resources, rsc); -- } --} --- -1.8.3.1 - - -From b5ce7803e3b072066458e93802688a1ec15875ce Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 2 Oct 2020 12:29:22 -0400 -Subject: [PATCH 16/19] Feature: tools: Use formatted output for resource - searching. - -This reorganizes the cli_resource_search and do_find_resource functions -into more logical functions. cli_resource_search now just puts together -a list of resources and returns it. This list should not be freed -because it reuses existing lists. Then, there is a formatted output -message to do the actual printing. ---- - tools/crm_resource.c | 7 +++-- - tools/crm_resource.h | 4 +-- - tools/crm_resource_print.c | 72 ++++++++++++++++++++++++++++++++++++++++++++ - tools/crm_resource_runtime.c | 53 +++++++++----------------------- - 4 files changed, 92 insertions(+), 44 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index f551059..b341194 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1844,10 +1844,11 @@ main(int argc, char **argv) - data_set); - break; - -- case cmd_locate: -- cli_resource_search(out, rsc, options.rsc_id, data_set); -- rc = pcmk_rc_ok; -+ case cmd_locate: { -+ GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set); -+ rc = out->message(out, "resource-search", resources, rsc, options.rsc_id); - break; -+ } - - case cmd_query_xml: - rc = cli_resource_print(out, rsc, data_set, TRUE); -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 377f7aa..6de2457 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -69,8 +69,8 @@ int cli_resource_check(pcmk__output_t *out, cib_t * cib, pe_resource_t *rsc); - int cli_resource_fail(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - const char *host_uname, const char *rsc_id, - pe_working_set_t *data_set); --int cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, -- const char *requested_name, pe_working_set_t *data_set); -+GListPtr cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, -+ const char *requested_name, pe_working_set_t *data_set); - int cli_resource_delete(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - const char *host_uname, pe_resource_t *rsc, - const char *operation, const char *interval_spec, -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index d2a2cc8..5ff3e9b 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -361,6 +361,76 @@ resource_check_xml(pcmk__output_t *out, va_list args) { - return rc; - } - -+PCMK__OUTPUT_ARGS("resource-search", "GListPtr", "pe_resource_t *", "gchar *") -+static int -+resource_search_default(pcmk__output_t *out, va_list args) -+{ -+ GListPtr nodes = va_arg(args, GListPtr); -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ gchar *requested_name = va_arg(args, gchar *); -+ -+ bool printed = false; -+ int rc = pcmk_rc_no_output; -+ -+ if (!out->is_quiet(out) && nodes == NULL) { -+ out->err(out, "resource %s is NOT running", 
requested_name); -+ return rc; -+ } -+ -+ for (GListPtr lpc = nodes; lpc != NULL; lpc = lpc->next) { -+ pe_node_t *node = (pe_node_t *) lpc->data; -+ -+ if (!printed) { -+ out->begin_list(out, NULL, NULL, "Nodes"); -+ printed = true; -+ rc = pcmk_rc_ok; -+ } -+ -+ if (out->is_quiet(out)) { -+ out->list_item(out, "node", "%s", node->details->uname); -+ } else { -+ const char *state = ""; -+ -+ if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { -+ state = " Master"; -+ } -+ out->list_item(out, "node", "resource %s is running on: %s%s", -+ requested_name, node->details->uname, state); -+ } -+ } -+ -+ if (printed) { -+ out->end_list(out); -+ } -+ -+ return rc; -+} -+ -+ -+PCMK__OUTPUT_ARGS("resource-search", "GListPtr", "pe_resource_t *", "gchar *") -+static int -+resource_search_xml(pcmk__output_t *out, va_list args) -+{ -+ GListPtr nodes = va_arg(args, GListPtr); -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ gchar *requested_name = va_arg(args, gchar *); -+ -+ xmlNode *xml_node = pcmk__output_xml_create_parent(out, "nodes"); -+ -+ xmlSetProp(xml_node, (pcmkXmlStr) "resource", (pcmkXmlStr) requested_name); -+ -+ for (GListPtr lpc = nodes; lpc != NULL; lpc = lpc->next) { -+ pe_node_t *node = (pe_node_t *) lpc->data; -+ xmlNode *sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname); -+ -+ if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { -+ xmlSetProp(sub_node, (pcmkXmlStr) "state", (pcmkXmlStr) "promoted"); -+ } -+ } -+ -+ return pcmk_rc_ok; -+} -+ - PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GListPtr", "pe_resource_t *", - "pe_node_t *") - static int -@@ -583,6 +653,8 @@ static pcmk__message_entry_t fmt_functions[] = { - { "property", "text", property_text }, - { "resource-check", "default", resource_check_default }, - { "resource-check", "xml", resource_check_xml }, -+ { "resource-search", "default", resource_search_default }, -+ { "resource-search", "xml", resource_search_xml }, - { "resource-why", "default", resource_why_default }, - { "resource-why", "xml", resource_why_xml }, - { "resource-names-list", "default", resource_names }, -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 4f8287b..bbd8bc1 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -12,37 +12,6 @@ - #include - #include - --static int --do_find_resource(pcmk__output_t *out, const char *rsc, pe_resource_t * the_rsc, -- pe_working_set_t * data_set) --{ -- int found = 0; -- GListPtr lpc = NULL; -- -- for (lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) { -- pe_node_t *node = (pe_node_t *) lpc->data; -- -- if (out->is_quiet(out)) { -- out->info(out, "%s", node->details->uname); -- } else { -- const char *state = ""; -- -- if (!pe_rsc_is_clone(the_rsc) && the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER) { -- state = "Master"; -- } -- out->info(out, "resource %s is running on: %s %s", rsc, node->details->uname, state); -- } -- -- found++; -- } -- -- if (!out->is_quiet(out) && found == 0) { -- out->err(out, "resource %s is NOT running", rsc); -- } -- -- return found; --} -- - resource_checks_t * - cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed) - { -@@ -72,16 +41,19 @@ cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed) - return rc; - } - --int -+GListPtr - cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, - pe_working_set_t *data_set) - { -- int found = 0; -+ GListPtr found = NULL; - 
pe_resource_t *parent = uber_parent(rsc); - - if (pe_rsc_is_clone(rsc)) { - for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { -- found += do_find_resource(out, requested_name, iter->data, data_set); -+ GListPtr extra = ((pe_resource_t *) iter->data)->running_on; -+ if (extra != NULL) { -+ found = g_list_concat(found, extra); -+ } - } - - /* The anonymous clone children's common ID is supplied */ -@@ -92,11 +64,14 @@ cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *request - && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) { - - for (GListPtr iter = parent->children; iter; iter = iter->next) { -- found += do_find_resource(out, requested_name, iter->data, data_set); -+ GListPtr extra = ((pe_resource_t *) iter->data)->running_on; -+ if (extra != NULL) { -+ found = g_list_concat(found, extra); -+ } - } - -- } else { -- found += do_find_resource(out, requested_name, rsc, data_set); -+ } else if (rsc->running_on != NULL) { -+ found = g_list_concat(found, rsc->running_on); - } - - return found; -@@ -1828,8 +1803,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, - action = rsc_action+6; - - if(pe_rsc_is_clone(rsc)) { -- int rc = cli_resource_search(out, rsc, requested_name, data_set); -- if(rc > 0 && force == FALSE) { -+ GListPtr rscs = cli_resource_search(out, rsc, requested_name, data_set); -+ if(rscs != NULL && force == FALSE) { - out->err(out, "It is not safe to %s %s here: the cluster claims it is already active", - action, rsc->id); - out->err(out, "Try setting target-role=Stopped first or specifying " --- -1.8.3.1 - - -From a74e1193e005994ec4cfe1991e13bb61fff64de9 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 5 Oct 2020 13:54:11 -0400 -Subject: [PATCH 17/19] Feature: tools: Use formatted output for stacks and - constraints. - -This also changes the format for text output. The new output uses the -underlying text list code for handling indentation and nesting, while -the old code passed around an indentation prefix. However, this does -mean we end up with a little of the text list fanciness as well. -Hopefully that will not be a problem. 
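For a sense of the new shape (sketch only, not part of the patch; rsc, node, score and cons are assumed to be in scope as in the handlers added below), the constraint listing is now built from nested formatted-output lists rather than a hand-built indentation prefix:

    out->begin_list(out, NULL, NULL, "%s", rsc->id);        /* "dummy:"          */
    out->begin_list(out, NULL, NULL, "Locations");          /*   "* Locations:"  */
    out->list_item(out, NULL, "Node %s (score=%s, id=%s)",  /*     "* Node node1 (score=-INFINITY, ...)" */
                   node->details->uname, score, cons->id);
    out->end_list(out);
    out->end_list(out);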
---- - cts/cli/regression.tools.exp | 7 +- - include/pcmki/pcmki_output.h | 7 + - lib/pacemaker/pcmk_output.c | 328 ++++++++++++++++++++++++++++++++++++++++++- - tools/crm_resource.c | 40 +----- - tools/crm_resource.h | 4 - - tools/crm_resource_print.c | 91 ------------ - 6 files changed, 343 insertions(+), 134 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 7166714..221730d 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -2001,12 +2001,13 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score - =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#= - * Passed: crm_resource - Ban dummy from node1 - =#=#=#= Begin test: Show where a resource is running =#=#=#= --resource dummy is running on: node1 -+resource dummy is running on: node1 - =#=#=#= End test: Show where a resource is running - OK (0) =#=#=#= - * Passed: crm_resource - Show where a resource is running - =#=#=#= Begin test: Show constraints on a resource =#=#=#= --* dummy -- : Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1) -+dummy: -+ * Locations: -+ * Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1) - =#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#= - * Passed: crm_resource - Show constraints on a resource - =#=#=#= Begin test: Ban dummy from node2 =#=#=#= -diff --git a/include/pcmki/pcmki_output.h b/include/pcmki/pcmki_output.h -index 2b750fb..0faef35 100644 ---- a/include/pcmki/pcmki_output.h -+++ b/include/pcmki/pcmki_output.h -@@ -21,6 +21,13 @@ extern pcmk__supported_format_t pcmk__out_formats[]; - int pcmk__out_prologue(pcmk__output_t **out, xmlNodePtr *xml); - void pcmk__out_epilogue(pcmk__output_t *out, xmlNodePtr *xml, int retval); - -+/* This function registers only the formatted output messages that are a part -+ * of libpacemaker. It is not to be confused with pcmk__register_messages, -+ * which is a part of formatted output support and registers a whole table of -+ * messages at a time. 
-+ */ -+void pcmk__register_lib_messages(pcmk__output_t *out); -+ - #ifdef __cplusplus - } - #endif -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index adf4c34..306e561 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -13,7 +13,7 @@ - #include - #include - #include --#include -+#include - - pcmk__supported_format_t pcmk__out_formats[] = { - PCMK__SUPPORTED_FORMAT_XML, -@@ -46,3 +46,329 @@ pcmk__out_epilogue(pcmk__output_t *out, xmlNodePtr *xml, int retval) { - - pcmk__output_free(out); - } -+ -+PCMK__OUTPUT_ARGS("colocations-list", "pe_resource_t *", "gboolean", "gboolean") -+static int colocations_list(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ gboolean dependents = va_arg(args, gboolean); -+ gboolean recursive = va_arg(args, gboolean); -+ -+ GListPtr lpc = NULL; -+ GListPtr list = rsc->rsc_cons; -+ bool printed_header = false; -+ -+ if (dependents) { -+ list = rsc->rsc_cons_lhs; -+ } -+ -+ if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -+ return pcmk_rc_no_output; -+ } -+ -+ pe__set_resource_flags(rsc, pe_rsc_allocating); -+ for (lpc = list; lpc != NULL; lpc = lpc->next) { -+ rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ -+ char *score = NULL; -+ pe_resource_t *peer = cons->rsc_rh; -+ -+ if (dependents) { -+ peer = cons->rsc_lh; -+ } -+ -+ if (pcmk_is_set(peer->flags, pe_rsc_allocating)) { -+ if (dependents == FALSE) { -+ if (!printed_header) { -+ out->begin_list(out, NULL, NULL, "Colocations"); -+ printed_header = true; -+ } -+ -+ out->list_item(out, NULL, "%s (id=%s - loop)", peer->id, cons->id); -+ } -+ continue; -+ } -+ -+ if (dependents && recursive) { -+ if (!printed_header) { -+ out->begin_list(out, NULL, NULL, "Colocations"); -+ printed_header = true; -+ } -+ -+ out->message(out, "colocations-list", rsc, dependents, recursive); -+ } -+ -+ if (!printed_header) { -+ out->begin_list(out, NULL, NULL, "Colocations"); -+ printed_header = true; -+ } -+ -+ score = score2char(cons->score); -+ if (cons->role_rh > RSC_ROLE_STARTED) { -+ out->list_item(out, NULL, "%s (score=%s, %s role=%s, id=%s", -+ peer->id, score, dependents ? 
"needs" : "with", -+ role2text(cons->role_rh), cons->id); -+ } else { -+ out->list_item(out, NULL, "%s (score=%s, id=%s", -+ peer->id, score, cons->id); -+ } -+ -+ free(score); -+ out->message(out, "locations-list", peer); -+ -+ if (!dependents && recursive) { -+ out->message(out, "colocations-list", rsc, dependents, recursive); -+ } -+ } -+ -+ if (printed_header) { -+ out->end_list(out); -+ } -+ -+ return pcmk_rc_no_output; -+} -+ -+PCMK__OUTPUT_ARGS("colocations-list", "pe_resource_t *", "gboolean", "gboolean") -+static int colocations_list_xml(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ gboolean dependents = va_arg(args, gboolean); -+ gboolean recursive = va_arg(args, gboolean); -+ -+ GListPtr lpc = NULL; -+ GListPtr list = rsc->rsc_cons; -+ bool printed_header = false; -+ -+ if (dependents) { -+ list = rsc->rsc_cons_lhs; -+ } -+ -+ if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -+ return pcmk_rc_ok; -+ } -+ -+ pe__set_resource_flags(rsc, pe_rsc_allocating); -+ for (lpc = list; lpc != NULL; lpc = lpc->next) { -+ rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ pe_resource_t *peer = cons->rsc_rh; -+ char *score = NULL; -+ -+ if (dependents) { -+ peer = cons->rsc_lh; -+ } -+ -+ if (pcmk_is_set(peer->flags, pe_rsc_allocating)) { -+ if (dependents == FALSE) { -+ xmlNodePtr node; -+ -+ if (!printed_header) { -+ pcmk__output_xml_create_parent(out, "colocations"); -+ printed_header = true; -+ } -+ -+ node = pcmk__output_create_xml_node(out, "colocation"); -+ xmlSetProp(node, (pcmkXmlStr) "peer", (pcmkXmlStr) peer->id); -+ xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) cons->id); -+ } -+ continue; -+ } -+ -+ if (dependents && recursive) { -+ if (!printed_header) { -+ pcmk__output_xml_create_parent(out, "colocations"); -+ printed_header = true; -+ } -+ -+ out->message(out, "colocations-list", rsc, dependents, recursive); -+ } -+ -+ if (!printed_header) { -+ pcmk__output_xml_create_parent(out, "colocations"); -+ printed_header = true; -+ } -+ -+ score = score2char(cons->score); -+ if (cons->role_rh > RSC_ROLE_STARTED) { -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "colocation"); -+ xmlSetProp(node, (pcmkXmlStr) "peer", (pcmkXmlStr) peer->id); -+ xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) cons->id); -+ xmlSetProp(node, (pcmkXmlStr) "score", (pcmkXmlStr) score); -+ xmlSetProp(node, (pcmkXmlStr) "dependents", -+ (pcmkXmlStr) (dependents ? 
"needs" : "with")); -+ xmlSetProp(node, (pcmkXmlStr) "role", (pcmkXmlStr) role2text(cons->role_rh)); -+ } else { -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "colocation"); -+ xmlSetProp(node, (pcmkXmlStr) "peer", (pcmkXmlStr) peer->id); -+ xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) cons->id); -+ xmlSetProp(node, (pcmkXmlStr) "score", (pcmkXmlStr) score); -+ } -+ -+ free(score); -+ out->message(out, "locations-list", peer); -+ -+ if (!dependents && recursive) { -+ out->message(out, "colocations-list", rsc, dependents, recursive); -+ } -+ } -+ -+ if (printed_header) { -+ pcmk__output_xml_pop_parent(out); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") -+static int locations_list(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc G_GNUC_UNUSED = va_arg(args, pe_resource_t *); -+ -+ GListPtr lpc = NULL; -+ GListPtr list = rsc->rsc_location; -+ -+ out->begin_list(out, NULL, NULL, "Locations"); -+ -+ for (lpc = list; lpc != NULL; lpc = lpc->next) { -+ pe__location_t *cons = lpc->data; -+ -+ GListPtr lpc2 = NULL; -+ -+ for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { -+ pe_node_t *node = (pe_node_t *) lpc2->data; -+ char *score = score2char(node->weight); -+ -+ out->list_item(out, NULL, "Node %s (score=%s, id=%s)", -+ node->details->uname, score, cons->id); -+ free(score); -+ } -+ } -+ -+ out->end_list(out); -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") -+static int locations_list_xml(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ -+ GListPtr lpc = NULL; -+ GListPtr list = rsc->rsc_location; -+ -+ pcmk__output_xml_create_parent(out, "locations"); -+ -+ for (lpc = list; lpc != NULL; lpc = lpc->next) { -+ pe__location_t *cons = lpc->data; -+ -+ GListPtr lpc2 = NULL; -+ -+ for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { -+ pe_node_t *node = (pe_node_t *) lpc2->data; -+ char *score = score2char(node->weight); -+ -+ xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "location"); -+ xmlSetProp(xml_node, (pcmkXmlStr) "host", (pcmkXmlStr) node->details->uname); -+ xmlSetProp(xml_node, (pcmkXmlStr) "id", (pcmkXmlStr) cons->id); -+ xmlSetProp(xml_node, (pcmkXmlStr) "score", (pcmkXmlStr) score); -+ -+ free(score); -+ } -+ } -+ -+ pcmk__output_xml_pop_parent(out); -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean") -+static int -+stacks_and_constraints(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc G_GNUC_UNUSED = va_arg(args, pe_resource_t *); -+ pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *); -+ gboolean recursive G_GNUC_UNUSED = va_arg(args, gboolean); -+ -+ GListPtr lpc = NULL; -+ xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, -+ data_set->input); -+ -+ unpack_constraints(cib_constraints, data_set); -+ -+ // Constraints apply to group/clone, not member/instance -+ rsc = uber_parent(rsc); -+ -+ for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *r = (pe_resource_t *) lpc->data; -+ -+ pe__clear_resource_flags(r, pe_rsc_allocating); -+ } -+ -+ out->message(out, "colocations-list", rsc, TRUE, recursive); -+ -+ out->begin_list(out, NULL, NULL, "%s", rsc->id); -+ out->message(out, "locations-list", rsc); -+ out->end_list(out); -+ -+ for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *r = (pe_resource_t *) lpc->data; 
-+ -+ pe__clear_resource_flags(r, pe_rsc_allocating); -+ } -+ -+ out->message(out, "colocations-list", rsc, FALSE, recursive); -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean") -+static int -+stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); -+ gboolean recursive = va_arg(args, gboolean); -+ -+ GListPtr lpc = NULL; -+ xmlNodePtr node = NULL; -+ xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, -+ data_set->input); -+ -+ unpack_constraints(cib_constraints, data_set); -+ -+ // Constraints apply to group/clone, not member/instance -+ rsc = uber_parent(rsc); -+ -+ for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *r = (pe_resource_t *) lpc->data; -+ -+ pe__clear_resource_flags(r, pe_rsc_allocating); -+ } -+ -+ pcmk__output_xml_create_parent(out, "constraints"); -+ -+ out->message(out, "colocations-list", rsc, TRUE, recursive); -+ -+ node = pcmk__output_xml_create_parent(out, "resource"); -+ xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc->id); -+ out->message(out, "locations-list", rsc); -+ pcmk__output_xml_pop_parent(out); -+ -+ for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *r = (pe_resource_t *) lpc->data; -+ -+ pe__clear_resource_flags(r, pe_rsc_allocating); -+ } -+ -+ out->message(out, "colocations-list", rsc, FALSE, recursive); -+ return pcmk_rc_ok; -+} -+ -+static pcmk__message_entry_t fmt_functions[] = { -+ { "colocations-list", "default", colocations_list }, -+ { "colocations-list", "xml", colocations_list_xml }, -+ { "locations-list", "default", locations_list }, -+ { "locations-list", "xml", locations_list_xml }, -+ { "stacks-constraints", "default", stacks_and_constraints }, -+ { "stacks-constraints", "xml", stacks_and_constraints_xml }, -+ -+ { NULL, NULL, NULL } -+}; -+ -+void -+pcmk__register_lib_messages(pcmk__output_t *out) { -+ pcmk__register_messages(out, fmt_functions); -+} -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index b341194..2c62ff6 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1194,38 +1194,6 @@ list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_cod - return rc; - } - --static void --list_stacks_and_constraints(pcmk__output_t *out, pe_resource_t *rsc, bool recursive) --{ -- GListPtr lpc = NULL; -- xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, -- data_set->input); -- -- unpack_constraints(cib_constraints, data_set); -- -- // Constraints apply to group/clone, not member/instance -- rsc = uber_parent(rsc); -- -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *r = (pe_resource_t *) lpc->data; -- -- pe__clear_resource_flags(r, pe_rsc_allocating); -- } -- -- cli_resource_print_colocation(out, rsc, TRUE, recursive, 1); -- -- fprintf(stdout, "* %s\n", rsc->id); -- cli_resource_print_location(out, rsc, NULL); -- -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *r = (pe_resource_t *) lpc->data; -- -- pe__clear_resource_flags(r, pe_rsc_allocating); -- } -- -- cli_resource_print_colocation(out, rsc, FALSE, recursive, 1); --} -- - static int - populate_working_set(xmlNodePtr *cib_xml_copy) - { -@@ -1629,7 +1597,8 @@ main(int argc, char **argv) - pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname()); - } - } else if 
(pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) { -- if (options.rsc_cmd == cmd_list_resources) { -+ if (options.rsc_cmd == cmd_colocations || options.rsc_cmd == cmd_colocations_deep || -+ options.rsc_cmd == cmd_list_resources) { - pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname()); - } - } -@@ -1637,6 +1606,7 @@ main(int argc, char **argv) - pe__register_messages(out); - crm_resource_register_messages(out); - lrmd__register_messages(out); -+ pcmk__register_lib_messages(out); - - if (args->version) { - out->version(out, false); -@@ -1804,11 +1774,11 @@ main(int argc, char **argv) - break; - - case cmd_colocations: -- list_stacks_and_constraints(out, rsc, false); -+ rc = out->message(out, "stacks-constraints", rsc, data_set, false); - break; - - case cmd_colocations_deep: -- list_stacks_and_constraints(out, rsc, true); -+ rc = out->message(out, "stacks-constraints", rsc, data_set, true); - break; - - case cmd_cts: -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 6de2457..5bfadb7 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -53,10 +53,6 @@ int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, int cib_optio - void cli_resource_print_cts(pcmk__output_t *out, pe_resource_t * rsc); - void cli_resource_print_raw(pcmk__output_t *out, pe_resource_t * rsc); - void cli_resource_print_cts_constraints(pcmk__output_t *out, pe_working_set_t * data_set); --void cli_resource_print_location(pcmk__output_t *out, pe_resource_t * rsc, -- const char *prefix); --void cli_resource_print_colocation(pcmk__output_t *out, pe_resource_t * rsc, -- bool dependents, bool recursive, int offset); - - int cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, pe_working_set_t *data_set, - bool expanded); -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 5ff3e9b..6303863 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -108,97 +108,6 @@ cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id, - return rc; - } - --void --cli_resource_print_location(pcmk__output_t *out, pe_resource_t * rsc, const char *prefix) --{ -- GListPtr lpc = NULL; -- GListPtr list = rsc->rsc_location; -- int offset = 0; -- -- if (prefix) { -- offset = strlen(prefix) - 2; -- } -- -- for (lpc = list; lpc != NULL; lpc = lpc->next) { -- pe__location_t *cons = lpc->data; -- -- GListPtr lpc2 = NULL; -- -- for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { -- pe_node_t *node = (pe_node_t *) lpc2->data; -- char *score = score2char(node->weight); -- -- fprintf(stdout, "%s: Node %-*s (score=%s, id=%s)\n", -- prefix ? 
prefix : " ", 71 - offset, node->details->uname, score, cons->id); -- free(score); -- } -- } --} -- --void --cli_resource_print_colocation(pcmk__output_t *out, pe_resource_t * rsc, -- bool dependents, bool recursive, int offset) --{ -- char *prefix = NULL; -- GListPtr lpc = NULL; -- GListPtr list = rsc->rsc_cons; -- -- prefix = calloc(1, (offset * 4) + 1); -- memset(prefix, ' ', offset * 4); -- -- if (dependents) { -- list = rsc->rsc_cons_lhs; -- } -- -- if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -- /* Break colocation loops */ -- printf("loop %s\n", rsc->id); -- free(prefix); -- return; -- } -- -- pe__set_resource_flags(rsc, pe_rsc_allocating); -- for (lpc = list; lpc != NULL; lpc = lpc->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -- -- char *score = NULL; -- pe_resource_t *peer = cons->rsc_rh; -- -- if (dependents) { -- peer = cons->rsc_lh; -- } -- -- if (pcmk_is_set(peer->flags, pe_rsc_allocating)) { -- if (dependents == FALSE) { -- fprintf(stdout, "%s%-*s (id=%s - loop)\n", prefix, 80 - (4 * offset), peer->id, -- cons->id); -- } -- continue; -- } -- -- if (dependents && recursive) { -- cli_resource_print_colocation(out, peer, dependents, recursive, offset + 1); -- } -- -- score = score2char(cons->score); -- if (cons->role_rh > RSC_ROLE_STARTED) { -- fprintf(stdout, "%s%-*s (score=%s, %s role=%s, id=%s)\n", prefix, 80 - (4 * offset), -- peer->id, score, dependents ? "needs" : "with", role2text(cons->role_rh), -- cons->id); -- } else { -- fprintf(stdout, "%s%-*s (score=%s, id=%s)\n", prefix, 80 - (4 * offset), -- peer->id, score, cons->id); -- } -- cli_resource_print_location(out, peer, prefix); -- free(score); -- -- if (!dependents && recursive) { -- cli_resource_print_colocation(out, peer, dependents, recursive, offset + 1); -- } -- } -- free(prefix); --} -- - // \return Standard Pacemaker return code - int - cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, --- -1.8.3.1 - - -From c718c8798254732122771552bef59bf4cd45d24a Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 4 Nov 2020 11:17:43 -0500 -Subject: [PATCH 18/19] Feature: xml: Add a schema for new crm_resource output. - ---- - xml/Makefile.am | 2 +- - xml/api/crm_resource-2.4.rng | 255 +++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 256 insertions(+), 1 deletion(-) - create mode 100644 xml/api/crm_resource-2.4.rng - -diff --git a/xml/Makefile.am b/xml/Makefile.am -index 2f99f1c..a56258d 100644 ---- a/xml/Makefile.am -+++ b/xml/Makefile.am -@@ -50,7 +50,7 @@ version_pairs_last = $(wordlist \ - # problems. 
- - # Names of API schemas that form the choices for pacemaker-result content --API_request_base = command-output crm_mon crmadmin stonith_admin version -+API_request_base = command-output crm_mon crm_resource crmadmin stonith_admin version - - # Names of CIB schemas that form the choices for cib/configuration content - CIB_cfg_base = options nodes resources constraints fencing acls tags alerts -diff --git a/xml/api/crm_resource-2.4.rng b/xml/api/crm_resource-2.4.rng -new file mode 100644 -index 0000000..1bcb969 ---- /dev/null -+++ b/xml/api/crm_resource-2.4.rng -@@ -0,0 +1,255 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ promoted -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ ocf -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ true -+ false -+ -+ -+ -+ true -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ needs -+ with -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - - -From a4bb7ae6a2a9ea3016539f3cdd5002536217b39b Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 21 Oct 2020 13:50:15 -0400 -Subject: [PATCH 19/19] Fix: include: Bump CRM_FEATURE_SET to 3.6.3. - -This is being bumped due to the addition of the --output-as= and ---output-to= arguments to crm_resource for formatted output. In -addition, there are various minor differences in the crm_resource text -output. It is hoped that over time, these differences won't matter as -much because consumers can use the XML output instead. ---- - include/crm/crm.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/include/crm/crm.h b/include/crm/crm.h -index 4eca278..b07152c 100644 ---- a/include/crm/crm.h -+++ b/include/crm/crm.h -@@ -51,7 +51,7 @@ extern "C" { - * >=3.0.13: Fail counts include operation name and interval - * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED - */ --# define CRM_FEATURE_SET "3.6.2" -+# define CRM_FEATURE_SET "3.6.3" - - # define EOS '\0' - # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) --- -1.8.3.1 - diff --git a/SOURCES/007-unfencing-loop.patch b/SOURCES/007-unfencing-loop.patch new file mode 100644 index 0000000..d4950c8 --- /dev/null +++ b/SOURCES/007-unfencing-loop.patch @@ -0,0 +1,733 @@ +From 6dcd6b51d7d3993bc483588d6ed75077518ed600 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 4 Jun 2021 16:30:55 -0500 +Subject: [PATCH 01/11] Low: controller: check whether unfenced node was remote + node + +... so the controller can indicate the node is remote (if known at that point, +which is not guaranteed) when setting unfencing-related node attributes. 
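The check added below is just a presence test against crm_remote_peer_cache, which the hunk treats as a GLib hash table of known Pacemaker Remote peers. A minimal, standalone sketch of that lookup pattern in plain GLib (illustrative names only, not the actual controller or cluster-layer API):

    #include <glib.h>
    #include <stdio.h>

    /* Toy stand-in for a remote-peer cache keyed by node name.  The real
     * cache holds full peer entries; for the purpose of the patch above,
     * all that matters is whether a lookup finds anything at all.
     */
    static gboolean
    is_cached_remote(GHashTable *remote_cache, const char *node_name)
    {
        return g_hash_table_contains(remote_cache, node_name);
    }

    int
    main(void)
    {
        GHashTable *remote_cache = g_hash_table_new_full(g_str_hash, g_str_equal,
                                                         g_free, NULL);

        g_hash_table_insert(remote_cache, g_strdup("remote1"), GINT_TO_POINTER(1));

        /* "remote1" is known to be remote; "node2" is not in the cache, so it
         * is treated as a cluster node until learned otherwise.
         */
        printf("remote1: %s\n", is_cached_remote(remote_cache, "remote1")? "remote" : "assume cluster");
        printf("node2:   %s\n", is_cached_remote(remote_cache, "node2")? "remote" : "assume cluster");

        g_hash_table_destroy(remote_cache);
        return 0;
    }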
+--- + daemons/controld/controld_fencing.c | 21 ++++++++++++++++++--- + 1 file changed, 18 insertions(+), 3 deletions(-) + +diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c +index 23dff28..0fba661 100644 +--- a/daemons/controld/controld_fencing.c ++++ b/daemons/controld/controld_fencing.c +@@ -757,15 +757,30 @@ tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data) + if (pcmk__str_eq("on", op, pcmk__str_casei)) { + const char *value = NULL; + char *now = pcmk__ttoa(time(NULL)); ++ gboolean is_remote_node = FALSE; ++ ++ /* This check is not 100% reliable, since this node is not ++ * guaranteed to have the remote node cached. However, it ++ * doesn't have to be reliable, since the attribute manager can ++ * learn a node's "remoteness" by other means sooner or later. ++ * This allows it to learn more quickly if this node does have ++ * the information. ++ */ ++ if (g_hash_table_lookup(crm_remote_peer_cache, uuid) != NULL) { ++ is_remote_node = TRUE; ++ } + +- update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, FALSE); ++ update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, ++ is_remote_node); + free(now); + + value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_ALL); +- update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, FALSE); ++ update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, ++ is_remote_node); + + value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_SECURE); +- update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, FALSE); ++ update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, ++ is_remote_node); + + } else if (action->sent_update == FALSE) { + send_stonith_update(action, target, uuid); +-- +1.8.3.1 + + +From 3ef6d9403f68ab8559c45cc99f5a8da05ca6420b Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 7 Jun 2021 10:50:36 -0500 +Subject: [PATCH 02/11] Refactor: pacemaker-attrd: functionize adding remote + node to cache + +... for future reuse +--- + daemons/attrd/attrd_commands.c | 34 +++++++++++++++++++++++----------- + 1 file changed, 23 insertions(+), 11 deletions(-) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index 731c243..93a165b 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -102,6 +102,28 @@ free_attribute(gpointer data) + } + } + ++/*! ++ * \internal ++ * \brief Ensure a Pacemaker Remote node is in the correct peer cache ++ * ++ * \param[in] ++ */ ++static void ++cache_remote_node(const char *node_name) ++{ ++ /* If we previously assumed this node was an unseen cluster node, ++ * remove its entry from the cluster peer cache. ++ */ ++ crm_node_t *dup = pcmk__search_cluster_node_cache(0, node_name); ++ ++ if (dup && (dup->uuid == NULL)) { ++ reap_crm_member(0, node_name); ++ } ++ ++ // Ensure node is in the remote peer cache ++ CRM_ASSERT(crm_remote_peer_get(node_name) != NULL); ++} ++ + static xmlNode * + build_attribute_xml( + xmlNode *parent, const char *name, const char *set, const char *uuid, unsigned int timeout_ms, const char *user, +@@ -709,17 +731,7 @@ attrd_lookup_or_create_value(GHashTable *values, const char *host, xmlNode *xml) + + crm_element_value_int(xml, PCMK__XA_ATTR_IS_REMOTE, &is_remote); + if (is_remote) { +- /* If we previously assumed this node was an unseen cluster node, +- * remove its entry from the cluster peer cache. 
+- */ +- crm_node_t *dup = pcmk__search_cluster_node_cache(0, host); +- +- if (dup && (dup->uuid == NULL)) { +- reap_crm_member(0, host); +- } +- +- /* Ensure this host is in the remote peer cache */ +- CRM_ASSERT(crm_remote_peer_get(host) != NULL); ++ cache_remote_node(host); + } + + if (v == NULL) { +-- +1.8.3.1 + + +From 6fac2c71bc2c56870ac828d7cd7b7c799279c47e Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 7 Jun 2021 10:39:34 -0500 +Subject: [PATCH 03/11] Refactor: pacemaker-attrd: don't try to remove votes + for remote nodes + +Remote nodes never vote. + +This has no effect in practice since the removal would simply do nothing, +but we might as well not waste time trying. +--- + daemons/attrd/attrd_commands.c | 11 ++++++----- + 1 file changed, 6 insertions(+), 5 deletions(-) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index 93a165b..dbe777e 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -976,7 +976,8 @@ attrd_election_cb(gpointer user_data) + void + attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) + { +- bool remove_voter = FALSE; ++ bool gone = false; ++ bool is_remote = pcmk_is_set(peer->flags, crm_remote_node); + + switch (kind) { + case crm_status_uname: +@@ -984,7 +985,7 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da + + case crm_status_processes: + if (!pcmk_is_set(peer->processes, crm_get_cluster_proc())) { +- remove_voter = TRUE; ++ gone = true; + } + break; + +@@ -1000,13 +1001,13 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da + } else { + // Remove all attribute values associated with lost nodes + attrd_peer_remove(peer->uname, FALSE, "loss"); +- remove_voter = TRUE; ++ gone = true; + } + break; + } + +- // In case an election is in progress, remove any vote by the node +- if (remove_voter) { ++ // Remove votes from cluster nodes that leave, in case election in progress ++ if (gone && !is_remote) { + attrd_remove_voter(peer); + } + } +-- +1.8.3.1 + + +From 54089fc663d6aaf10ca164c6c94b3b17237788de Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 7 Jun 2021 10:40:06 -0500 +Subject: [PATCH 04/11] Low: pacemaker-attrd: check for remote nodes in peer + update callback + +If a remote node was started before the local cluster node joined the cluster, +the cluster node will assume its node attributes are for a cluster node until +it learns otherwise. Check for remoteness in the peer update callback, to have +another way we can learn it. 
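Together with the previous commit's vote-removal cleanup, the hunk that follows turns the tail of the callback into a two-way decision. Condensed into a standalone helper (illustrative names, not the actual attrd code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Only a cluster node that goes away loses its election vote, and only a
     * remote node that comes up needs to be added to the remote peer cache.
     */
    enum peer_change_action {
        PEER_CHANGE_NONE,
        PEER_CHANGE_REMOVE_VOTE,
        PEER_CHANGE_CACHE_REMOTE,
    };

    static enum peer_change_action
    classify_peer_change(bool gone, bool is_remote)
    {
        if (gone && !is_remote) {
            return PEER_CHANGE_REMOVE_VOTE;     /* cluster peer left */
        }
        if (!gone && is_remote) {
            return PEER_CHANGE_CACHE_REMOTE;    /* remote peer came up */
        }
        return PEER_CHANGE_NONE;
    }

    int
    main(void)
    {
        printf("cluster node left: %d\n", classify_peer_change(true, false));
        printf("remote node up:    %d\n", classify_peer_change(false, true));
        printf("remote node left:  %d\n", classify_peer_change(true, true));
        return 0;
    }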
+--- + daemons/attrd/attrd_commands.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index dbe777e..5f6a754 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -1009,6 +1009,10 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da + // Remove votes from cluster nodes that leave, in case election in progress + if (gone && !is_remote) { + attrd_remove_voter(peer); ++ ++ // Ensure remote nodes that come up are in the remote node cache ++ } else if (!gone && is_remote) { ++ cache_remote_node(peer->uname); + } + } + +-- +1.8.3.1 + + +From 8c048df0312d0d9c857d87b570a352429a710928 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 7 Jun 2021 11:29:12 -0500 +Subject: [PATCH 05/11] Log: pacemaker-attrd: log peer status changes + +--- + daemons/attrd/attrd_commands.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index 5f6a754..d6d179b 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -972,6 +972,7 @@ attrd_election_cb(gpointer user_data) + return FALSE; + } + ++#define state_text(state) ((state)? (const char *)(state) : "in unknown state") + + void + attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) +@@ -981,15 +982,23 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da + + switch (kind) { + case crm_status_uname: ++ crm_debug("%s node %s is now %s", ++ (is_remote? "Remote" : "Cluster"), ++ peer->uname, state_text(peer->state)); + break; + + case crm_status_processes: + if (!pcmk_is_set(peer->processes, crm_get_cluster_proc())) { + gone = true; + } ++ crm_debug("Node %s is %s a peer", ++ peer->uname, (gone? "no longer" : "now")); + break; + + case crm_status_nstate: ++ crm_debug("%s node %s is now %s (was %s)", ++ (is_remote? "Remote" : "Cluster"), ++ peer->uname, state_text(peer->state), state_text(data)); + if (pcmk__str_eq(peer->state, CRM_NODE_MEMBER, pcmk__str_casei)) { + /* If we're the writer, send new peers a list of all attributes + * (unless it's a remote node, which doesn't run its own attrd) +-- +1.8.3.1 + + +From 1dcc8dee4990cf0dbdec0e14db6d9a3ad67a41d5 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 7 Jun 2021 11:13:53 -0500 +Subject: [PATCH 06/11] Low: pacemaker-attrd: ensure node ID is only set for + attributes when known + +In most cases, attribute updates contained the node ID, and the node ID was +used by other code, only if known (i.e. positive). However a couple places did +not check this, so add that. + +I am unsure whether the missing check caused problems in practice, but there +appears to be the possibility that a remote node would wrongly be added to the +cluster node cache. 
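The rule the hunks below enforce is simply "trust a node ID only when it is positive". The same rule applied to parsing an ID out of message text, as a standalone sketch (plain C, hypothetical helper, not Pacemaker's parsing code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Treat 0, negative values, and unparsable strings all as "ID not known
     * yet", so the caller neither writes the ID out nor seeds a cache with it.
     */
    static bool
    parse_known_node_id(const char *text, long *id_out)
    {
        char *end = NULL;
        long value;

        if (text == NULL) {
            return false;
        }
        value = strtol(text, &end, 10);
        if ((end == text) || (*end != '\0') || (value <= 0)) {
            return false;
        }
        *id_out = value;
        return true;
    }

    int
    main(void)
    {
        const char *samples[] = { "3", "0", "-1", "garbage", NULL };

        for (int i = 0; samples[i] != NULL; i++) {
            long id = 0;

            printf("%-8s -> %s\n", samples[i],
                   parse_known_node_id(samples[i], &id)? "use ID" : "leave unset");
        }
        return 0;
    }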
+--- + daemons/attrd/attrd_commands.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index d6d179b..b3f441c 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -136,7 +136,9 @@ build_attribute_xml( + crm_xml_add(xml, PCMK__XA_ATTR_UUID, uuid); + crm_xml_add(xml, PCMK__XA_ATTR_USER, user); + crm_xml_add(xml, PCMK__XA_ATTR_NODE_NAME, peer); +- crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, peerid); ++ if (peerid > 0) { ++ crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, peerid); ++ } + crm_xml_add(xml, PCMK__XA_ATTR_VALUE, value); + crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, timeout_ms/1000); + crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, is_private); +@@ -937,7 +939,7 @@ attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter) + /* If this is a cluster node whose node ID we are learning, remember it */ + if ((v->nodeid == 0) && (v->is_remote == FALSE) + && (crm_element_value_int(xml, PCMK__XA_ATTR_NODE_ID, +- (int*)&v->nodeid) == 0)) { ++ (int*)&v->nodeid) == 0) && (v->nodeid > 0)) { + + crm_node_t *known_peer = crm_get_peer(v->nodeid, host); + +-- +1.8.3.1 + + +From 8d12490e88b558d01db37a38f7d35175c6d2d69a Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 10 Jun 2021 17:25:57 -0500 +Subject: [PATCH 07/11] Refactor: pacemaker-attrd: functionize processing a + sync response + +... for code isolation, and because we need to add more to it +--- + daemons/attrd/attrd_commands.c | 59 ++++++++++++++++++++++++++++-------------- + 1 file changed, 39 insertions(+), 20 deletions(-) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index b3f441c..d02d3e6 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -572,6 +572,43 @@ attrd_peer_clear_failure(crm_node_t *peer, xmlNode *xml) + } + + /*! ++ * \internal ++ * \brief Load attributes from a peer sync response ++ * ++ * \param[in] peer Peer that sent clear request ++ * \param[in] peer_won Whether peer is the attribute writer ++ * \param[in] xml Request XML ++ */ ++static void ++process_peer_sync_response(crm_node_t *peer, bool peer_won, xmlNode *xml) ++{ ++ crm_info("Processing " PCMK__ATTRD_CMD_SYNC_RESPONSE " from %s", ++ peer->uname); ++ ++ if (peer_won) { ++ /* Initialize the "seen" flag for all attributes to cleared, so we can ++ * detect attributes that local node has but the writer doesn't. ++ */ ++ clear_attribute_value_seen(); ++ } ++ ++ // Process each attribute update in the sync response ++ for (xmlNode *child = pcmk__xml_first_child(xml); child != NULL; ++ child = pcmk__xml_next(child)) { ++ attrd_peer_update(peer, child, ++ crm_element_value(child, PCMK__XA_ATTR_NODE_NAME), ++ TRUE); ++ } ++ ++ if (peer_won) { ++ /* If any attributes are still not marked as seen, the writer doesn't ++ * know about them, so send all peers an update with them. ++ */ ++ attrd_current_only_attribute_update(peer, xml); ++ } ++} ++ ++/*! 
+ \internal + \brief Broadcast private attribute for local node with protocol version + */ +@@ -596,7 +633,7 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml) + const char *op = crm_element_value(xml, PCMK__XA_TASK); + const char *election_op = crm_element_value(xml, F_CRM_TASK); + const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); +- bool peer_won = FALSE; ++ bool peer_won = false; + + if (election_op) { + attrd_handle_election_op(peer, xml); +@@ -631,25 +668,7 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml) + + } else if (pcmk__str_eq(op, PCMK__ATTRD_CMD_SYNC_RESPONSE, pcmk__str_casei) + && !pcmk__str_eq(peer->uname, attrd_cluster->uname, pcmk__str_casei)) { +- xmlNode *child = NULL; +- +- crm_info("Processing %s from %s", op, peer->uname); +- +- /* Clear the seen flag for attribute processing held only in the own node. */ +- if (peer_won) { +- clear_attribute_value_seen(); +- } +- +- for (child = pcmk__xml_first_child(xml); child != NULL; +- child = pcmk__xml_next(child)) { +- host = crm_element_value(child, PCMK__XA_ATTR_NODE_NAME); +- attrd_peer_update(peer, child, host, TRUE); +- } +- +- if (peer_won) { +- /* Synchronize if there is an attribute held only by own node that Writer does not have. */ +- attrd_current_only_attribute_update(peer, xml); +- } ++ process_peer_sync_response(peer, peer_won, xml); + + } else if (pcmk__str_eq(op, PCMK__ATTRD_CMD_FLUSH, pcmk__str_casei)) { + /* Ignore. The flush command was removed in 2.0.0 but may be +-- +1.8.3.1 + + +From a890a0e5bbbcabf907f51ed0460868035f72464d Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 11 Jun 2021 14:40:39 -0500 +Subject: [PATCH 08/11] Refactor: pacemaker-attrd: functionize broadcasting + local override + +... for code isolation +--- + daemons/attrd/attrd_commands.c | 42 +++++++++++++++++++++++++++++------------- + 1 file changed, 29 insertions(+), 13 deletions(-) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index d02d3e6..4783427 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -804,6 +804,34 @@ attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml) + free_xml(sync); + } + ++/*! ++ * \internal ++ * \brief Override an attribute sync with a local value ++ * ++ * Broadcast the local node's value for an attribute that's different from the ++ * value provided in a peer's attribute synchronization response. This ensures a ++ * node's values for itself take precedence and all peers are kept in sync. 
++ * ++ * \param[in] a Attribute entry to override ++ * ++ * \return Local instance of attribute value ++ */ ++static attribute_value_t * ++broadcast_local_value(attribute_t *a) ++{ ++ attribute_value_t *v = g_hash_table_lookup(a->values, attrd_cluster->uname); ++ xmlNode *sync = create_xml_node(NULL, __func__); ++ ++ crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); ++ build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, ++ a->user, a->is_private, v->nodename, v->nodeid, ++ v->current, FALSE); ++ attrd_xml_add_writer(sync); ++ send_attrd_message(NULL, sync); ++ free_xml(sync); ++ return v; ++} ++ + void + attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter) + { +@@ -899,21 +927,9 @@ attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter) + if (filter && !pcmk__str_eq(v->current, value, pcmk__str_casei) + && pcmk__str_eq(host, attrd_cluster->uname, pcmk__str_casei)) { + +- xmlNode *sync = create_xml_node(NULL, __func__); +- + crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s", + attr, host, v->current, value, peer->uname); +- +- crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); +- v = g_hash_table_lookup(a->values, host); +- build_attribute_xml(sync, attr, a->set, a->uuid, a->timeout_ms, a->user, +- a->is_private, v->nodename, v->nodeid, v->current, FALSE); +- +- attrd_xml_add_writer(sync); +- +- /* Broadcast in case any other nodes had the inconsistent value */ +- send_attrd_message(NULL, sync); +- free_xml(sync); ++ v = broadcast_local_value(a); + + } else if (!pcmk__str_eq(v->current, value, pcmk__str_casei)) { + crm_notice("Setting %s[%s]: %s -> %s " CRM_XS " from %s", +-- +1.8.3.1 + + +From f6f65e3dab070f1bbdf6d1383f4d6173a8840bc9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 11 Jun 2021 14:50:29 -0500 +Subject: [PATCH 09/11] Log: pacemaker-attrd: improve messages when + broadcasting local-only values + +The traces aren't necessary since build_attribute_xml() already logs the same +info at debug. Also, rename function for clarity, and make static. +--- + daemons/attrd/attrd_commands.c | 35 ++++++++++++++++------------------- + 1 file changed, 16 insertions(+), 19 deletions(-) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index 4783427..356defb 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -51,11 +51,12 @@ GHashTable *attributes = NULL; + + void write_attribute(attribute_t *a, bool ignore_delay); + void write_or_elect_attribute(attribute_t *a); +-void attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml); + void attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter); + void attrd_peer_sync(crm_node_t *peer, xmlNode *xml); + void attrd_peer_remove(const char *host, gboolean uncache, const char *source); + ++static void broadcast_unseen_local_values(crm_node_t *peer, xmlNode *xml); ++ + static gboolean + send_attrd_message(crm_node_t * node, xmlNode * data) + { +@@ -604,7 +605,7 @@ process_peer_sync_response(crm_node_t *peer, bool peer_won, xmlNode *xml) + /* If any attributes are still not marked as seen, the writer doesn't + * know about them, so send all peers an update with them. 
+ */ +- attrd_current_only_attribute_update(peer, xml); ++ broadcast_unseen_local_values(peer, xml); + } + } + +@@ -768,40 +769,36 @@ attrd_lookup_or_create_value(GHashTable *values, const char *host, xmlNode *xml) + return(v); + } + +-void +-attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml) ++void ++broadcast_unseen_local_values(crm_node_t *peer, xmlNode *xml) + { + GHashTableIter aIter; + GHashTableIter vIter; +- attribute_t *a; ++ attribute_t *a = NULL; + attribute_value_t *v = NULL; +- xmlNode *sync = create_xml_node(NULL, __func__); +- gboolean build = FALSE; +- +- crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); ++ xmlNode *sync = NULL; + + g_hash_table_iter_init(&aIter, attributes); + while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { + g_hash_table_iter_init(&vIter, a->values); + while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { +- if (pcmk__str_eq(v->nodename, attrd_cluster->uname, pcmk__str_casei) && v->seen == FALSE) { +- crm_trace("Syncing %s[%s] = %s to everyone.(from local only attributes)", a->id, v->nodename, v->current); +- +- build = TRUE; ++ if (!(v->seen) && pcmk__str_eq(v->nodename, attrd_cluster->uname, ++ pcmk__str_casei)) { ++ if (sync == NULL) { ++ sync = create_xml_node(NULL, __func__); ++ crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); ++ } + build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, + v->nodename, v->nodeid, v->current, (a->timeout_ms && a->timer ? TRUE : FALSE)); +- } else { +- crm_trace("Local attribute(%s[%s] = %s) was ignore.(another host) : [%s]", a->id, v->nodename, v->current, attrd_cluster->uname); +- continue; + } + } + } + +- if (build) { +- crm_debug("Syncing values to everyone.(from local only attributes)"); ++ if (sync != NULL) { ++ crm_debug("Broadcasting local-only values"); + send_attrd_message(NULL, sync); ++ free_xml(sync); + } +- free_xml(sync); + } + + /*! +-- +1.8.3.1 + + +From ab90ffb785ea018556f216b8f540f8c3429a3947 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 11 Jun 2021 15:04:20 -0500 +Subject: [PATCH 10/11] Refactor: pacemaker-attrd: simplify attribute XML + creation function + +... and rename for clarity +--- + daemons/attrd/attrd_commands.c | 48 ++++++++++++++++++++++++------------------ + 1 file changed, 27 insertions(+), 21 deletions(-) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index 356defb..5b32a77 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -125,25 +125,35 @@ cache_remote_node(const char *node_name) + CRM_ASSERT(crm_remote_peer_get(node_name) != NULL); + } + ++/*! 
++ * \internal ++ * \brief Create an XML representation of an attribute for use in peer messages ++ * ++ * \param[in] parent Create attribute XML as child element of this element ++ * \param[in] a Attribute to represent ++ * \param[in] v Attribute value to represent ++ * \param[in] force_write If true, value should be written even if unchanged ++ * ++ * \return XML representation of attribute ++ */ + static xmlNode * +-build_attribute_xml( +- xmlNode *parent, const char *name, const char *set, const char *uuid, unsigned int timeout_ms, const char *user, +- gboolean is_private, const char *peer, uint32_t peerid, const char *value, gboolean is_force_write) ++add_attribute_value_xml(xmlNode *parent, attribute_t *a, attribute_value_t *v, ++ bool force_write) + { + xmlNode *xml = create_xml_node(parent, __func__); + +- crm_xml_add(xml, PCMK__XA_ATTR_NAME, name); +- crm_xml_add(xml, PCMK__XA_ATTR_SET, set); +- crm_xml_add(xml, PCMK__XA_ATTR_UUID, uuid); +- crm_xml_add(xml, PCMK__XA_ATTR_USER, user); +- crm_xml_add(xml, PCMK__XA_ATTR_NODE_NAME, peer); +- if (peerid > 0) { +- crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, peerid); ++ crm_xml_add(xml, PCMK__XA_ATTR_NAME, a->id); ++ crm_xml_add(xml, PCMK__XA_ATTR_SET, a->set); ++ crm_xml_add(xml, PCMK__XA_ATTR_UUID, a->uuid); ++ crm_xml_add(xml, PCMK__XA_ATTR_USER, a->user); ++ crm_xml_add(xml, PCMK__XA_ATTR_NODE_NAME, v->nodename); ++ if (v->nodeid > 0) { ++ crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, v->nodeid); + } +- crm_xml_add(xml, PCMK__XA_ATTR_VALUE, value); +- crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, timeout_ms/1000); +- crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, is_private); +- crm_xml_add_int(xml, PCMK__XA_ATTR_FORCE, is_force_write); ++ crm_xml_add(xml, PCMK__XA_ATTR_VALUE, v->current); ++ crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, a->timeout_ms / 1000); ++ crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, a->is_private); ++ crm_xml_add_int(xml, PCMK__XA_ATTR_FORCE, force_write); + + return xml; + } +@@ -695,8 +705,7 @@ attrd_peer_sync(crm_node_t *peer, xmlNode *xml) + g_hash_table_iter_init(&vIter, a->values); + while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { + crm_debug("Syncing %s[%s] = %s to %s", a->id, v->nodename, v->current, peer?peer->uname:"everyone"); +- build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, +- v->nodename, v->nodeid, v->current, FALSE); ++ add_attribute_value_xml(sync, a, v, false); + } + } + +@@ -788,8 +797,7 @@ broadcast_unseen_local_values(crm_node_t *peer, xmlNode *xml) + sync = create_xml_node(NULL, __func__); + crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); + } +- build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, +- v->nodename, v->nodeid, v->current, (a->timeout_ms && a->timer ? 
TRUE : FALSE)); ++ add_attribute_value_xml(sync, a, v, a->timeout_ms && a->timer); + } + } + } +@@ -820,9 +828,7 @@ broadcast_local_value(attribute_t *a) + xmlNode *sync = create_xml_node(NULL, __func__); + + crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); +- build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, +- a->user, a->is_private, v->nodename, v->nodeid, +- v->current, FALSE); ++ add_attribute_value_xml(sync, a, v, false); + attrd_xml_add_writer(sync); + send_attrd_message(NULL, sync); + free_xml(sync); +-- +1.8.3.1 + + +From 540d74130c5c8d9c626d6c50475e4dc4f64234e7 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 4 Jun 2021 16:34:26 -0500 +Subject: [PATCH 11/11] Fix: pacemaker-attrd: avoid repeated unfencing of + remote nodes + +The attribute manager can't record a remote node's attributes to the CIB until +it knows the node is remote. Normally, this is learned when the remote node +starts, because the controller clears the CRM_OP_PROBED attribute and indicates +that it is for a remote node. + +However, if a cluster node is down when a remote node starts, and later comes +up, it learns the remote node's existing attributes as part of the attribute +sync. Previously, this did not include whether each value is for a cluster or +remote node, so the newly joined attribute manager couldn't write out remote +nodes' attributes until it learned that via some other event -- which might not +happen before the node becomes DC, in which case its scheduler will not see any +unfencing-related node attributes and may wrongly schedule unfencing. + +The sync response handling already calls attrd_lookup_or_create_value(), which +checks PCMK__XA_ATTR_IS_REMOTE, so all we need to do is add that to the sync +response. +--- + daemons/attrd/attrd_commands.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c +index 5b32a77..0142383 100644 +--- a/daemons/attrd/attrd_commands.c ++++ b/daemons/attrd/attrd_commands.c +@@ -43,8 +43,9 @@ + * 1 1.1.15 PCMK__ATTRD_CMD_UPDATE_BOTH, + * PCMK__ATTRD_CMD_UPDATE_DELAY + * 2 1.1.17 PCMK__ATTRD_CMD_CLEAR_FAILURE ++ * 3 2.1.1 PCMK__ATTRD_CMD_SYNC_RESPONSE indicates remote nodes + */ +-#define ATTRD_PROTOCOL_VERSION "2" ++#define ATTRD_PROTOCOL_VERSION "3" + + int last_cib_op_done = 0; + GHashTable *attributes = NULL; +@@ -150,6 +151,9 @@ add_attribute_value_xml(xmlNode *parent, attribute_t *a, attribute_value_t *v, + if (v->nodeid > 0) { + crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, v->nodeid); + } ++ if (v->is_remote != 0) { ++ crm_xml_add_int(xml, PCMK__XA_ATTR_IS_REMOTE, 1); ++ } + crm_xml_add(xml, PCMK__XA_ATTR_VALUE, v->current); + crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, a->timeout_ms / 1000); + crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, a->is_private); +-- +1.8.3.1 + diff --git a/SOURCES/008-digests.patch b/SOURCES/008-digests.patch deleted file mode 100644 index 06c415b..0000000 --- a/SOURCES/008-digests.patch +++ /dev/null @@ -1,1764 +0,0 @@ -From 591324bc6f15c12bf1547c7c20e8e99da219f1fc Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 22 Oct 2020 18:06:26 -0500 -Subject: [PATCH 1/5] Refactor: libpe_status: functionize creating operation - digests better - -Basically, this takes most of rsc_action_digest(), and puts it in a new, -exposed function pe__calculate_digests(), for future reuse. 
The exposed -function creates a new op_digest_cache_t object with calculated digests; -rsc_action_digests() takes that and puts it in a node's digest cache. - -This additionally functionizes most of pe__calculate_digests(), with separate -functions for creating each of the three digests that go into a digest object: -the digest of all parameters, the digest of non-private parameters, and the -digest of reload parameters. - -There are no changes in how the code works. ---- - include/crm/pengine/internal.h | 5 + - lib/pengine/utils.c | 304 +++++++++++++++++++++++++++-------------- - 2 files changed, 208 insertions(+), 101 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 396d707..7f22512 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -528,6 +528,11 @@ typedef struct op_digest_cache_s { - char *digest_restart_calc; - } op_digest_cache_t; - -+op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task, -+ const char *key, pe_node_t *node, -+ xmlNode *xml_op, bool calc_secure, -+ pe_working_set_t *data_set); -+ - op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, - pe_working_set_t * data_set); - -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index a78bd24..c441c71 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -2034,139 +2034,241 @@ append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNo - } - g_hash_table_destroy(hash); - } -+ -+static void -+append_all_versioned_params(pe_resource_t *rsc, pe_node_t *node, -+ pe_action_t *action, xmlNode *xml_op, -+ pe_working_set_t *data_set) -+{ -+ const char *ra_version = NULL; -+ xmlNode *local_versioned_params = NULL; -+ pe_rsc_action_details_t *details = pe_rsc_action_details(action); -+ -+ local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); -+ pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); -+ if (xml_op != NULL) { -+ ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); -+ } -+ append_versioned_params(local_versioned_params, ra_version, -+ data->params_all); -+ append_versioned_params(rsc->versioned_parameters, ra_version, -+ data->params_all); -+ append_versioned_params(details->versioned_parameters, ra_version, -+ data->params_all); -+} - #endif - - /*! 
- * \internal -- * \brief Calculate action digests and store in node's digest cache -+ * \brief Add digest of all parameters to a digest cache entry - * -- * \param[in] rsc Resource that action was for -- * \param[in] task Name of action performed -- * \param[in] key Action's task key -- * \param[in] node Node action was performed on -- * \param[in] xml_op XML of operation in CIB status (if available) -- * \param[in] calc_secure Whether to calculate secure digest -- * \param[in] data_set Cluster working set -- * -- * \return Pointer to node's digest cache entry -+ * \param[out] data Digest cache entry to modify -+ * \param[in] rsc Resource that action was for -+ * \param[in] node Node action was performed on -+ * \param[in] task Name of action performed -+ * \param[in] key Action's task key -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] op_version CRM feature set to use for digest calculation -+ * \param[in] data_set Cluster working set - */ --static op_digest_cache_t * --rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, -- pe_node_t *node, xmlNode *xml_op, bool calc_secure, -- pe_working_set_t *data_set) -+static void -+calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, -+ pe_node_t *node, const char *task, const char *key, -+ xmlNode *xml_op, const char *op_version, -+ pe_working_set_t *data_set) - { -- op_digest_cache_t *data = NULL; -+ pe_action_t *action = NULL; -+ GHashTable *local_rsc_params = crm_str_table_new(); - -- data = g_hash_table_lookup(node->details->digest_cache, key); -- if (data == NULL) { -- GHashTable *local_rsc_params = crm_str_table_new(); -- pe_action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); --#if ENABLE_VERSIONED_ATTRS -- xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); -- const char *ra_version = NULL; --#endif -+ get_rsc_attributes(local_rsc_params, rsc, node, data_set); - -- const char *op_version = NULL; -- const char *restart_list = NULL; -- const char *secure_list = NULL; -+ data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); - -- data = calloc(1, sizeof(op_digest_cache_t)); -- CRM_ASSERT(data != NULL); -+ /* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers -+ * that themselves are Pacemaker Remote nodes -+ */ -+ if (pe__add_bundle_remote_name(rsc, data->params_all, -+ XML_RSC_ATTR_REMOTE_RA_ADDR)) { -+ crm_trace("Set address for bundle connection %s (on %s)", -+ rsc->id, node->details->uname); -+ } -+ -+ action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); -+ g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); -+ g_hash_table_foreach(action->extra, hash2field, data->params_all); -+ g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); -+ g_hash_table_foreach(action->meta, hash2metafield, data->params_all); - -- get_rsc_attributes(local_rsc_params, rsc, node, data_set); - #if ENABLE_VERSIONED_ATTRS -- pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); -+ append_all_versioned_params(rsc, node, action, xml_op, data_set); - #endif - -- data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); -+ pcmk__filter_op_for_digest(data->params_all); - -- // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside -- if (pe__add_bundle_remote_name(rsc, data->params_all, -- XML_RSC_ATTR_REMOTE_RA_ADDR)) { -- crm_trace("Set address for bundle connection %s (on %s)", -- rsc->id, 
node->details->uname); -- } -+ g_hash_table_destroy(local_rsc_params); -+ pe_free_action(action); - -- g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); -- g_hash_table_foreach(action->extra, hash2field, data->params_all); -- g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); -- g_hash_table_foreach(action->meta, hash2metafield, data->params_all); -+ data->digest_all_calc = calculate_operation_digest(data->params_all, -+ op_version); -+} - -- if(xml_op) { -- secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); -- restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); -+/*! -+ * \internal -+ * \brief Add secure digest to a digest cache entry -+ * -+ * \param[out] data Digest cache entry to modify -+ * \param[in] rsc Resource that action was for -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] op_version CRM feature set to use for digest calculation -+ */ -+static void -+calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, -+ xmlNode *xml_op, const char *op_version) -+{ -+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -+ const char *secure_list = NULL; - -- op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); --#if ENABLE_VERSIONED_ATTRS -- ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); --#endif -+ if (xml_op == NULL) { -+ secure_list = " passwd password user "; -+ } else { -+ secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); -+ } - -- } else { -- secure_list = " passwd password user "; -- op_version = CRM_FEATURE_SET; -+ /* The controller doesn't create a digest of *all* non-sensitive -+ * parameters, only those listed in resource agent meta-data. The -+ * equivalent here is rsc->parameters. -+ */ -+ data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS); -+ g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); -+ if (secure_list != NULL) { -+ filter_parameters(data->params_secure, secure_list, FALSE); -+ } -+ if (pcmk_is_set(pcmk_get_ra_caps(class), -+ pcmk_ra_cap_fence_params)) { -+ /* For stonith resources, Pacemaker adds special parameters, -+ * but these are not listed in fence agent meta-data, so the -+ * controller will not hash them. That means we have to filter -+ * them out before calculating our hash for comparison. -+ */ -+ for (xmlAttrPtr iter = data->params_secure->properties; -+ iter != NULL; ) { -+ const char *prop_name = (const char *) iter->name; -+ -+ iter = iter->next; // Grab next now in case we remove current -+ if (pcmk_stonith_param(prop_name)) { -+ xml_remove_prop(data->params_secure, prop_name); -+ } - } -+ } -+ data->digest_secure_calc = calculate_operation_digest(data->params_secure, -+ op_version); -+} - --#if ENABLE_VERSIONED_ATTRS -- append_versioned_params(local_versioned_params, ra_version, data->params_all); -- append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all); -+/*! 
-+ * \internal -+ * \brief Add restart digest to a digest cache entry -+ * -+ * \param[out] data Digest cache entry to modify -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] op_version CRM feature set to use for digest calculation -+ */ -+static void -+calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, -+ const char *op_version) -+{ -+ const char *value = NULL; - -- { -- pe_rsc_action_details_t *details = pe_rsc_action_details(action); -- append_versioned_params(details->versioned_parameters, ra_version, data->params_all); -- } --#endif -+ // We must have XML of resource operation history -+ if (xml_op == NULL) { -+ return; -+ } - -- pcmk__filter_op_for_digest(data->params_all); -+ // And the history must have a restart digest to compare against -+ if (crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) == NULL) { -+ return; -+ } - -- g_hash_table_destroy(local_rsc_params); -- pe_free_action(action); -+ // Start with a copy of all parameters -+ data->params_restart = copy_xml(data->params_all); - -- data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); -+ // Then filter out reloadable parameters, if any -+ value = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); -+ if (value != NULL) { -+ filter_parameters(data->params_restart, value, TRUE); -+ } - -- if (calc_secure) { -- const char *class = crm_element_value(rsc->xml, -- XML_AGENT_ATTR_CLASS); -+ value = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); -+ data->digest_restart_calc = calculate_operation_digest(data->params_restart, -+ value); -+} - -- /* The controller doesn't create a digest of *all* non-sensitive -- * parameters, only those listed in resource agent meta-data. The -- * equivalent here is rsc->parameters. -- */ -- data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS); -- g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); -- if(secure_list) { -- filter_parameters(data->params_secure, secure_list, FALSE); -- } -- if (pcmk_is_set(pcmk_get_ra_caps(class), -- pcmk_ra_cap_fence_params)) { -- /* For stonith resources, Pacemaker adds special parameters, -- * but these are not listed in fence agent meta-data, so the -- * controller will not hash them. That means we have to filter -- * them out before calculating our hash for comparison. -- */ -- for (xmlAttrPtr iter = data->params_secure->properties; -- iter != NULL; ) { -- const char *prop_name = (const char *) iter->name; -+/*! -+ * \internal -+ * \brief Create a new digest cache entry with calculated digests -+ * -+ * \param[in] rsc Resource that action was for -+ * \param[in] task Name of action performed -+ * \param[in] key Action's task key -+ * \param[in] node Node action was performed on -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] calc_secure Whether to calculate secure digest -+ * \param[in] data_set Cluster working set -+ * -+ * \return Pointer to new digest cache entry (or NULL on memory error) -+ * \note It is the caller's responsibility to free the result using -+ * destroy_digest_cache(). 
-+ */ -+op_digest_cache_t * -+pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, -+ pe_node_t *node, xmlNode *xml_op, bool calc_secure, -+ pe_working_set_t *data_set) -+{ -+ op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t)); -+ const char *op_version = CRM_FEATURE_SET; - -- iter = iter->next; // Grab next now in case we remove current -- if (pcmk_stonith_param(prop_name)) { -- xml_remove_prop(data->params_secure, prop_name); -- } -- } -- } -- data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version); -- } -+ if (data == NULL) { -+ return NULL; -+ } -+ if (xml_op != NULL) { -+ op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); -+ } -+ calculate_main_digest(data, rsc, node, task, key, xml_op, op_version, -+ data_set); -+ if (calc_secure) { -+ calculate_secure_digest(data, rsc, xml_op, op_version); -+ } -+ calculate_restart_digest(data, xml_op, op_version); -+ return data; -+} - -- if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) { -- data->params_restart = copy_xml(data->params_all); -- if (restart_list) { -- filter_parameters(data->params_restart, restart_list, TRUE); -- } -- data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version); -- } -+/*! -+ * \internal -+ * \brief Calculate action digests and store in node's digest cache -+ * -+ * \param[in] rsc Resource that action was for -+ * \param[in] task Name of action performed -+ * \param[in] key Action's task key -+ * \param[in] node Node action was performed on -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] calc_secure Whether to calculate secure digest -+ * \param[in] data_set Cluster working set -+ * -+ * \return Pointer to node's digest cache entry -+ */ -+static op_digest_cache_t * -+rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, -+ pe_node_t *node, xmlNode *xml_op, bool calc_secure, -+ pe_working_set_t *data_set) -+{ -+ op_digest_cache_t *data = NULL; - -+ data = g_hash_table_lookup(node->details->digest_cache, key); -+ if (data == NULL) { -+ data = pe__calculate_digests(rsc, task, key, node, xml_op, calc_secure, -+ data_set); -+ CRM_ASSERT(data != NULL); - g_hash_table_insert(node->details->digest_cache, strdup(key), data); - } -- - return data; - } - --- -1.8.3.1 - - -From 5827da6cdd6a49590765c871ce81186af9eead19 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 3 Nov 2020 16:44:09 -0600 -Subject: [PATCH 2/5] Refactor: scheduler: expose digest free function - -... for future reuse. Also rename per current guidelines. 
---- - include/crm/pengine/internal.h | 2 ++ - lib/pengine/unpack.c | 31 +++++++++++++++++++++---------- - lib/pengine/utils.c | 2 +- - 3 files changed, 24 insertions(+), 11 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 7f22512..7a38234 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -533,6 +533,8 @@ op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task, - xmlNode *xml_op, bool calc_secure, - pe_working_set_t *data_set); - -+void pe__free_digests(gpointer ptr); -+ - op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, - pe_working_set_t * data_set); - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 44dba47..4655c7e 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -376,20 +376,31 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) - return TRUE; - } - --static void --destroy_digest_cache(gpointer ptr) -+/*! -+ * \internal -+ * \brief Free an operation digest cache entry -+ * -+ * \param[in] ptr Pointer to cache entry to free -+ * -+ * \note The argument is a gpointer so this can be used as a hash table -+ * free function. -+ */ -+void -+pe__free_digests(gpointer ptr) - { - op_digest_cache_t *data = ptr; - -- free_xml(data->params_all); -- free_xml(data->params_secure); -- free_xml(data->params_restart); -+ if (data != NULL) { -+ free_xml(data->params_all); -+ free_xml(data->params_secure); -+ free_xml(data->params_restart); - -- free(data->digest_all_calc); -- free(data->digest_restart_calc); -- free(data->digest_secure_calc); -+ free(data->digest_all_calc); -+ free(data->digest_restart_calc); -+ free(data->digest_secure_calc); - -- free(data); -+ free(data); -+ } - } - - pe_node_t * -@@ -446,7 +457,7 @@ pe_create_node(const char *id, const char *uname, const char *type, - - new_node->details->digest_cache = g_hash_table_new_full(crm_str_hash, - g_str_equal, free, -- destroy_digest_cache); -+ pe__free_digests); - - data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node, sort_node_uname); - return new_node; -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index c441c71..bded23c 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -2216,7 +2216,7 @@ calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, - * - * \return Pointer to new digest cache entry (or NULL on memory error) - * \note It is the caller's responsibility to free the result using -- * destroy_digest_cache(). -+ * pe__free_digests(). - */ - op_digest_cache_t * - pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, --- -1.8.3.1 - - -From ab184d36cc34776aab992d606c2f4e9ceb49a2ba Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 3 Nov 2020 16:52:50 -0600 -Subject: [PATCH 3/5] Refactor: scheduler: expose fencing digest comparison - function within library - -... for future separation into different file. Also rename per current -guidelines. 
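The "within library" exposure below relies on a library-private header plus G_GNUC_INTERNAL, so the symbol can cross translation units inside libpe_status without becoming part of the installed ABI. A compact sketch of that layout (hypothetical foo_* names, shown as two files):

    /* foo_private.h: shared only among the library's own .c files */
    #ifndef FOO_PRIVATE__H
    #define FOO_PRIVATE__H

    #include <glib.h>   /* for G_GNUC_INTERNAL */

    G_GNUC_INTERNAL
    int foo__compare_digest(const char *calculated, const char *recorded);

    #endif /* FOO_PRIVATE__H */

    /* foo_digest.c: implementation, still internal to the library */
    #include <string.h>
    #include "foo_private.h"

    int
    foo__compare_digest(const char *calculated, const char *recorded)
    {
        /* NULL-safe: a missing digest on either side counts as a mismatch */
        if ((calculated == NULL) || (recorded == NULL)) {
            return -1;
        }
        return strcmp(calculated, recorded);
    }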
---- - lib/pengine/pe_status_private.h | 6 ++++++ - lib/pengine/utils.c | 10 ++++++---- - 2 files changed, 12 insertions(+), 4 deletions(-) - -diff --git a/lib/pengine/pe_status_private.h b/lib/pengine/pe_status_private.h -index 0f3814f..360f280 100644 ---- a/lib/pengine/pe_status_private.h -+++ b/lib/pengine/pe_status_private.h -@@ -65,4 +65,10 @@ gboolean unpack_tags(xmlNode *xml_tags, pe_working_set_t *data_set); - G_GNUC_INTERNAL - gboolean unpack_status(xmlNode *status, pe_working_set_t *data_set); - -+G_GNUC_INTERNAL -+op_digest_cache_t *pe__compare_fencing_digest(pe_resource_t *rsc, -+ const char *agent, -+ pe_node_t *node, -+ pe_working_set_t *data_set); -+ - #endif // PE_STATUS_PRIVATE__H -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index bded23c..30cec9e 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -20,6 +20,8 @@ - #include - #include - -+#include "pe_status_private.h" -+ - extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); - void print_str_str(gpointer key, gpointer value, gpointer user_data); - gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); -@@ -2401,9 +2403,9 @@ unfencing_digest_matches(const char *rsc_id, const char *agent, - * - * \return Node's digest cache entry - */ --static op_digest_cache_t * --fencing_action_digest_cmp(pe_resource_t *rsc, const char *agent, -- pe_node_t *node, pe_working_set_t *data_set) -+op_digest_cache_t * -+pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent, -+ pe_node_t *node, pe_working_set_t *data_set) - { - const char *node_summary = NULL; - -@@ -2613,7 +2615,7 @@ pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, - XML_ATTR_TYPE); - op_digest_cache_t *data = NULL; - -- data = fencing_action_digest_cmp(match, agent, node, data_set); -+ data = pe__compare_fencing_digest(match, agent, node, data_set); - if(data->rc == RSC_DIGEST_ALL) { - optional = FALSE; - crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); --- -1.8.3.1 - - -From 55a5299d815992df9122d4b1e67e3c2f5a969c43 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 23 Oct 2020 11:30:16 -0500 -Subject: [PATCH 4/5] Refactor: libpe_status: separate digest-related code into - own file - -There are no changes in the code. 
- -Before: - 4035 lib/pengine/unpack.c - 2966 lib/pengine/utils.c - 7001 total - -After: - 509 lib/pengine/pe_digest.c - 4008 lib/pengine/unpack.c - 2502 lib/pengine/utils.c - 7019 total ---- - lib/pengine/Makefile.am | 1 + - lib/pengine/pe_digest.c | 509 ++++++++++++++++++++++++++++++++++++++++++++++++ - lib/pengine/unpack.c | 27 --- - lib/pengine/utils.c | 464 ------------------------------------------- - 4 files changed, 510 insertions(+), 491 deletions(-) - create mode 100644 lib/pengine/pe_digest.c - -diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am -index 6289bfc..258f594 100644 ---- a/lib/pengine/Makefile.am -+++ b/lib/pengine/Makefile.am -@@ -39,6 +39,7 @@ libpe_status_la_SOURCES += complex.c - libpe_status_la_SOURCES += failcounts.c - libpe_status_la_SOURCES += group.c - libpe_status_la_SOURCES += native.c -+libpe_status_la_SOURCES += pe_digest.c - libpe_status_la_SOURCES += remote.c - libpe_status_la_SOURCES += rules.c - libpe_status_la_SOURCES += status.c -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -new file mode 100644 -index 0000000..b54210c ---- /dev/null -+++ b/lib/pengine/pe_digest.c -@@ -0,0 +1,509 @@ -+/* -+ * Copyright 2004-2020 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. -+ * -+ * This source code is licensed under the GNU Lesser General Public License -+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. -+ */ -+ -+#include -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include "pe_status_private.h" -+ -+/*! -+ * \internal -+ * \brief Free an operation digest cache entry -+ * -+ * \param[in] ptr Pointer to cache entry to free -+ * -+ * \note The argument is a gpointer so this can be used as a hash table -+ * free function. 
-+ */ -+void -+pe__free_digests(gpointer ptr) -+{ -+ op_digest_cache_t *data = ptr; -+ -+ if (data != NULL) { -+ free_xml(data->params_all); -+ free_xml(data->params_secure); -+ free_xml(data->params_restart); -+ -+ free(data->digest_all_calc); -+ free(data->digest_restart_calc); -+ free(data->digest_secure_calc); -+ -+ free(data); -+ } -+} -+ -+static void -+filter_parameters(xmlNode * param_set, const char *param_string, bool need_present) -+{ -+ if (param_set && param_string) { -+ xmlAttrPtr xIter = param_set->properties; -+ -+ while (xIter) { -+ const char *prop_name = (const char *)xIter->name; -+ char *name = crm_strdup_printf(" %s ", prop_name); -+ char *match = strstr(param_string, name); -+ -+ free(name); -+ -+ // Do now, because current entry might get removed below -+ xIter = xIter->next; -+ -+ if (need_present && match == NULL) { -+ crm_trace("%s not found in %s", prop_name, param_string); -+ xml_remove_prop(param_set, prop_name); -+ -+ } else if (need_present == FALSE && match) { -+ crm_trace("%s found in %s", prop_name, param_string); -+ xml_remove_prop(param_set, prop_name); -+ } -+ } -+ } -+} -+ -+#if ENABLE_VERSIONED_ATTRS -+static void -+append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params) -+{ -+ GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version); -+ char *key = NULL; -+ char *value = NULL; -+ GHashTableIter iter; -+ -+ g_hash_table_iter_init(&iter, hash); -+ while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { -+ crm_xml_add(params, key, value); -+ } -+ g_hash_table_destroy(hash); -+} -+ -+static void -+append_all_versioned_params(pe_resource_t *rsc, pe_node_t *node, -+ pe_action_t *action, xmlNode *xml_op, -+ pe_working_set_t *data_set) -+{ -+ const char *ra_version = NULL; -+ xmlNode *local_versioned_params = NULL; -+ pe_rsc_action_details_t *details = pe_rsc_action_details(action); -+ -+ local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); -+ pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); -+ if (xml_op != NULL) { -+ ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); -+ } -+ append_versioned_params(local_versioned_params, ra_version, -+ data->params_all); -+ append_versioned_params(rsc->versioned_parameters, ra_version, -+ data->params_all); -+ append_versioned_params(details->versioned_parameters, ra_version, -+ data->params_all); -+} -+#endif -+ -+/*! 
-+ * \internal -+ * \brief Add digest of all parameters to a digest cache entry -+ * -+ * \param[out] data Digest cache entry to modify -+ * \param[in] rsc Resource that action was for -+ * \param[in] node Node action was performed on -+ * \param[in] task Name of action performed -+ * \param[in] key Action's task key -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] op_version CRM feature set to use for digest calculation -+ * \param[in] data_set Cluster working set -+ */ -+static void -+calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, -+ pe_node_t *node, const char *task, const char *key, -+ xmlNode *xml_op, const char *op_version, -+ pe_working_set_t *data_set) -+{ -+ pe_action_t *action = NULL; -+ GHashTable *local_rsc_params = crm_str_table_new(); -+ -+ get_rsc_attributes(local_rsc_params, rsc, node, data_set); -+ -+ data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); -+ -+ /* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers -+ * that themselves are Pacemaker Remote nodes -+ */ -+ if (pe__add_bundle_remote_name(rsc, data->params_all, -+ XML_RSC_ATTR_REMOTE_RA_ADDR)) { -+ crm_trace("Set address for bundle connection %s (on %s)", -+ rsc->id, node->details->uname); -+ } -+ -+ action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); -+ g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); -+ g_hash_table_foreach(action->extra, hash2field, data->params_all); -+ g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); -+ g_hash_table_foreach(action->meta, hash2metafield, data->params_all); -+ -+#if ENABLE_VERSIONED_ATTRS -+ append_all_versioned_params(rsc, node, action, xml_op, data_set); -+#endif -+ -+ pcmk__filter_op_for_digest(data->params_all); -+ -+ g_hash_table_destroy(local_rsc_params); -+ pe_free_action(action); -+ -+ data->digest_all_calc = calculate_operation_digest(data->params_all, -+ op_version); -+} -+ -+/*! -+ * \internal -+ * \brief Add secure digest to a digest cache entry -+ * -+ * \param[out] data Digest cache entry to modify -+ * \param[in] rsc Resource that action was for -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] op_version CRM feature set to use for digest calculation -+ */ -+static void -+calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, -+ xmlNode *xml_op, const char *op_version) -+{ -+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -+ const char *secure_list = NULL; -+ -+ if (xml_op == NULL) { -+ secure_list = " passwd password user "; -+ } else { -+ secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); -+ } -+ -+ /* The controller doesn't create a digest of *all* non-sensitive -+ * parameters, only those listed in resource agent meta-data. The -+ * equivalent here is rsc->parameters. -+ */ -+ data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS); -+ g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); -+ if (secure_list != NULL) { -+ filter_parameters(data->params_secure, secure_list, FALSE); -+ } -+ if (pcmk_is_set(pcmk_get_ra_caps(class), -+ pcmk_ra_cap_fence_params)) { -+ /* For stonith resources, Pacemaker adds special parameters, -+ * but these are not listed in fence agent meta-data, so the -+ * controller will not hash them. That means we have to filter -+ * them out before calculating our hash for comparison. 
-+ */ -+ for (xmlAttrPtr iter = data->params_secure->properties; -+ iter != NULL; ) { -+ const char *prop_name = (const char *) iter->name; -+ -+ iter = iter->next; // Grab next now in case we remove current -+ if (pcmk_stonith_param(prop_name)) { -+ xml_remove_prop(data->params_secure, prop_name); -+ } -+ } -+ } -+ data->digest_secure_calc = calculate_operation_digest(data->params_secure, -+ op_version); -+} -+ -+/*! -+ * \internal -+ * \brief Add restart digest to a digest cache entry -+ * -+ * \param[out] data Digest cache entry to modify -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] op_version CRM feature set to use for digest calculation -+ */ -+static void -+calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, -+ const char *op_version) -+{ -+ const char *value = NULL; -+ -+ // We must have XML of resource operation history -+ if (xml_op == NULL) { -+ return; -+ } -+ -+ // And the history must have a restart digest to compare against -+ if (crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) == NULL) { -+ return; -+ } -+ -+ // Start with a copy of all parameters -+ data->params_restart = copy_xml(data->params_all); -+ -+ // Then filter out reloadable parameters, if any -+ value = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); -+ if (value != NULL) { -+ filter_parameters(data->params_restart, value, TRUE); -+ } -+ -+ value = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); -+ data->digest_restart_calc = calculate_operation_digest(data->params_restart, -+ value); -+} -+ -+/*! -+ * \internal -+ * \brief Create a new digest cache entry with calculated digests -+ * -+ * \param[in] rsc Resource that action was for -+ * \param[in] task Name of action performed -+ * \param[in] key Action's task key -+ * \param[in] node Node action was performed on -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] calc_secure Whether to calculate secure digest -+ * \param[in] data_set Cluster working set -+ * -+ * \return Pointer to new digest cache entry (or NULL on memory error) -+ * \note It is the caller's responsibility to free the result using -+ * pe__free_digests(). -+ */ -+op_digest_cache_t * -+pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, -+ pe_node_t *node, xmlNode *xml_op, bool calc_secure, -+ pe_working_set_t *data_set) -+{ -+ op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t)); -+ const char *op_version = CRM_FEATURE_SET; -+ -+ if (data == NULL) { -+ return NULL; -+ } -+ if (xml_op != NULL) { -+ op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); -+ } -+ calculate_main_digest(data, rsc, node, task, key, xml_op, op_version, -+ data_set); -+ if (calc_secure) { -+ calculate_secure_digest(data, rsc, xml_op, op_version); -+ } -+ calculate_restart_digest(data, xml_op, op_version); -+ return data; -+} -+ -+/*! 
-+ * \internal -+ * \brief Calculate action digests and store in node's digest cache -+ * -+ * \param[in] rsc Resource that action was for -+ * \param[in] task Name of action performed -+ * \param[in] key Action's task key -+ * \param[in] node Node action was performed on -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] calc_secure Whether to calculate secure digest -+ * \param[in] data_set Cluster working set -+ * -+ * \return Pointer to node's digest cache entry -+ */ -+static op_digest_cache_t * -+rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, -+ pe_node_t *node, xmlNode *xml_op, bool calc_secure, -+ pe_working_set_t *data_set) -+{ -+ op_digest_cache_t *data = NULL; -+ -+ data = g_hash_table_lookup(node->details->digest_cache, key); -+ if (data == NULL) { -+ data = pe__calculate_digests(rsc, task, key, node, xml_op, calc_secure, -+ data_set); -+ CRM_ASSERT(data != NULL); -+ g_hash_table_insert(node->details->digest_cache, strdup(key), data); -+ } -+ return data; -+} -+ -+op_digest_cache_t * -+rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, -+ pe_working_set_t * data_set) -+{ -+ op_digest_cache_t *data = NULL; -+ -+ char *key = NULL; -+ guint interval_ms = 0; -+ -+ const char *op_version; -+ const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); -+ const char *digest_all; -+ const char *digest_restart; -+ -+ CRM_ASSERT(node != NULL); -+ -+ op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); -+ digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); -+ digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); -+ -+ crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); -+ key = pcmk__op_key(rsc->id, task, interval_ms); -+ data = rsc_action_digest(rsc, task, key, node, xml_op, -+ pcmk_is_set(data_set->flags, pe_flag_sanitized), -+ data_set); -+ -+ data->rc = RSC_DIGEST_MATCH; -+ if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) { -+ pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s", -+ key, node->details->uname, -+ crm_str(digest_restart), data->digest_restart_calc, -+ op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); -+ data->rc = RSC_DIGEST_RESTART; -+ -+ } else if (digest_all == NULL) { -+ /* it is unknown what the previous op digest was */ -+ data->rc = RSC_DIGEST_UNKNOWN; -+ -+ } else if (strcmp(digest_all, data->digest_all_calc) != 0) { -+ pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s", -+ key, node->details->uname, -+ crm_str(digest_all), data->digest_all_calc, -+ (interval_ms > 0)? "reschedule" : "reload", -+ op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); -+ data->rc = RSC_DIGEST_ALL; -+ } -+ -+ free(key); -+ return data; -+} -+ -+/*! -+ * \internal -+ * \brief Create an unfencing summary for use in special node attribute -+ * -+ * Create a string combining a fence device's resource ID, agent type, and -+ * parameter digest (whether for all parameters or just non-private parameters). -+ * This can be stored in a special node attribute, allowing us to detect changes -+ * in either the agent type or parameters, to know whether unfencing must be -+ * redone or can be safely skipped when the device's history is cleaned. 
-+ * -+ * \param[in] rsc_id Fence device resource ID -+ * \param[in] agent_type Fence device agent -+ * \param[in] param_digest Fence device parameter digest -+ * -+ * \return Newly allocated string with unfencing digest -+ * \note The caller is responsible for freeing the result. -+ */ -+static inline char * -+create_unfencing_summary(const char *rsc_id, const char *agent_type, -+ const char *param_digest) -+{ -+ return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest); -+} -+ -+/*! -+ * \internal -+ * \brief Check whether a node can skip unfencing -+ * -+ * Check whether a fence device's current definition matches a node's -+ * stored summary of when it was last unfenced by the device. -+ * -+ * \param[in] rsc_id Fence device's resource ID -+ * \param[in] agent Fence device's agent type -+ * \param[in] digest_calc Fence device's current parameter digest -+ * \param[in] node_summary Value of node's special unfencing node attribute -+ * (a comma-separated list of unfencing summaries for -+ * all devices that have unfenced this node) -+ * -+ * \return TRUE if digest matches, FALSE otherwise -+ */ -+static bool -+unfencing_digest_matches(const char *rsc_id, const char *agent, -+ const char *digest_calc, const char *node_summary) -+{ -+ bool matches = FALSE; -+ -+ if (rsc_id && agent && digest_calc && node_summary) { -+ char *search_secure = create_unfencing_summary(rsc_id, agent, -+ digest_calc); -+ -+ /* The digest was calculated including the device ID and agent, -+ * so there is no risk of collision using strstr(). -+ */ -+ matches = (strstr(node_summary, search_secure) != NULL); -+ crm_trace("Calculated unfencing digest '%s' %sfound in '%s'", -+ search_secure, matches? "" : "not ", node_summary); -+ free(search_secure); -+ } -+ return matches; -+} -+ -+/* Magic string to use as action name for digest cache entries used for -+ * unfencing checks. This is not a real action name (i.e. "on"), so -+ * check_action_definition() won't confuse these entries with real actions. -+ */ -+#define STONITH_DIGEST_TASK "stonith-on" -+ -+/*! 
-+ * \internal -+ * \brief Calculate fence device digests and digest comparison result -+ * -+ * \param[in] rsc Fence device resource -+ * \param[in] agent Fence device's agent type -+ * \param[in] node Node with digest cache to use -+ * \param[in] data_set Cluster working set -+ * -+ * \return Node's digest cache entry -+ */ -+op_digest_cache_t * -+pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent, -+ pe_node_t *node, pe_working_set_t *data_set) -+{ -+ const char *node_summary = NULL; -+ -+ // Calculate device's current parameter digests -+ char *key = pcmk__op_key(rsc->id, STONITH_DIGEST_TASK, 0); -+ op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, -+ node, NULL, TRUE, data_set); -+ -+ free(key); -+ -+ // Check whether node has special unfencing summary node attribute -+ node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL); -+ if (node_summary == NULL) { -+ data->rc = RSC_DIGEST_UNKNOWN; -+ return data; -+ } -+ -+ // Check whether full parameter digest matches -+ if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc, -+ node_summary)) { -+ data->rc = RSC_DIGEST_MATCH; -+ return data; -+ } -+ -+ // Check whether secure parameter digest matches -+ node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE); -+ if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc, -+ node_summary)) { -+ data->rc = RSC_DIGEST_MATCH; -+ if (pcmk_is_set(data_set->flags, pe_flag_stdout)) { -+ printf("Only 'private' parameters to %s for unfencing %s changed\n", -+ rsc->id, node->details->uname); -+ } -+ return data; -+ } -+ -+ // Parameters don't match -+ data->rc = RSC_DIGEST_ALL; -+ if (pcmk_is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout)) -+ && data->digest_secure_calc) { -+ char *digest = create_unfencing_summary(rsc->id, agent, -+ data->digest_secure_calc); -+ -+ printf("Parameters to %s for unfencing %s changed, try '%s'\n", -+ rsc->id, node->details->uname, digest); -+ free(digest); -+ } -+ return data; -+} -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 4655c7e..a15bb92 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -376,33 +376,6 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) - return TRUE; - } - --/*! -- * \internal -- * \brief Free an operation digest cache entry -- * -- * \param[in] ptr Pointer to cache entry to free -- * -- * \note The argument is a gpointer so this can be used as a hash table -- * free function. 
-- */ --void --pe__free_digests(gpointer ptr) --{ -- op_digest_cache_t *data = ptr; -- -- if (data != NULL) { -- free_xml(data->params_all); -- free_xml(data->params_secure); -- free_xml(data->params_restart); -- -- free(data->digest_all_calc); -- free(data->digest_restart_calc); -- free(data->digest_secure_calc); -- -- free(data); -- } --} -- - pe_node_t * - pe_create_node(const char *id, const char *uname, const char *type, - const char *score, pe_working_set_t * data_set) -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index 30cec9e..04110be 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -19,7 +19,6 @@ - - #include - #include -- - #include "pe_status_private.h" - - extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); -@@ -1993,469 +1992,6 @@ ticket_new(const char *ticket_id, pe_working_set_t * data_set) - return ticket; - } - --static void --filter_parameters(xmlNode * param_set, const char *param_string, bool need_present) --{ -- if (param_set && param_string) { -- xmlAttrPtr xIter = param_set->properties; -- -- while (xIter) { -- const char *prop_name = (const char *)xIter->name; -- char *name = crm_strdup_printf(" %s ", prop_name); -- char *match = strstr(param_string, name); -- -- free(name); -- -- // Do now, because current entry might get removed below -- xIter = xIter->next; -- -- if (need_present && match == NULL) { -- crm_trace("%s not found in %s", prop_name, param_string); -- xml_remove_prop(param_set, prop_name); -- -- } else if (need_present == FALSE && match) { -- crm_trace("%s found in %s", prop_name, param_string); -- xml_remove_prop(param_set, prop_name); -- } -- } -- } --} -- --#if ENABLE_VERSIONED_ATTRS --static void --append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params) --{ -- GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version); -- char *key = NULL; -- char *value = NULL; -- GHashTableIter iter; -- -- g_hash_table_iter_init(&iter, hash); -- while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { -- crm_xml_add(params, key, value); -- } -- g_hash_table_destroy(hash); --} -- --static void --append_all_versioned_params(pe_resource_t *rsc, pe_node_t *node, -- pe_action_t *action, xmlNode *xml_op, -- pe_working_set_t *data_set) --{ -- const char *ra_version = NULL; -- xmlNode *local_versioned_params = NULL; -- pe_rsc_action_details_t *details = pe_rsc_action_details(action); -- -- local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); -- pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); -- if (xml_op != NULL) { -- ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); -- } -- append_versioned_params(local_versioned_params, ra_version, -- data->params_all); -- append_versioned_params(rsc->versioned_parameters, ra_version, -- data->params_all); -- append_versioned_params(details->versioned_parameters, ra_version, -- data->params_all); --} --#endif -- --/*! 
-- * \internal -- * \brief Add digest of all parameters to a digest cache entry -- * -- * \param[out] data Digest cache entry to modify -- * \param[in] rsc Resource that action was for -- * \param[in] node Node action was performed on -- * \param[in] task Name of action performed -- * \param[in] key Action's task key -- * \param[in] xml_op XML of operation in CIB status (if available) -- * \param[in] op_version CRM feature set to use for digest calculation -- * \param[in] data_set Cluster working set -- */ --static void --calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, -- pe_node_t *node, const char *task, const char *key, -- xmlNode *xml_op, const char *op_version, -- pe_working_set_t *data_set) --{ -- pe_action_t *action = NULL; -- GHashTable *local_rsc_params = crm_str_table_new(); -- -- get_rsc_attributes(local_rsc_params, rsc, node, data_set); -- -- data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); -- -- /* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers -- * that themselves are Pacemaker Remote nodes -- */ -- if (pe__add_bundle_remote_name(rsc, data->params_all, -- XML_RSC_ATTR_REMOTE_RA_ADDR)) { -- crm_trace("Set address for bundle connection %s (on %s)", -- rsc->id, node->details->uname); -- } -- -- action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); -- g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); -- g_hash_table_foreach(action->extra, hash2field, data->params_all); -- g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); -- g_hash_table_foreach(action->meta, hash2metafield, data->params_all); -- --#if ENABLE_VERSIONED_ATTRS -- append_all_versioned_params(rsc, node, action, xml_op, data_set); --#endif -- -- pcmk__filter_op_for_digest(data->params_all); -- -- g_hash_table_destroy(local_rsc_params); -- pe_free_action(action); -- -- data->digest_all_calc = calculate_operation_digest(data->params_all, -- op_version); --} -- --/*! -- * \internal -- * \brief Add secure digest to a digest cache entry -- * -- * \param[out] data Digest cache entry to modify -- * \param[in] rsc Resource that action was for -- * \param[in] xml_op XML of operation in CIB status (if available) -- * \param[in] op_version CRM feature set to use for digest calculation -- */ --static void --calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, -- xmlNode *xml_op, const char *op_version) --{ -- const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); -- const char *secure_list = NULL; -- -- if (xml_op == NULL) { -- secure_list = " passwd password user "; -- } else { -- secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); -- } -- -- /* The controller doesn't create a digest of *all* non-sensitive -- * parameters, only those listed in resource agent meta-data. The -- * equivalent here is rsc->parameters. -- */ -- data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS); -- g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); -- if (secure_list != NULL) { -- filter_parameters(data->params_secure, secure_list, FALSE); -- } -- if (pcmk_is_set(pcmk_get_ra_caps(class), -- pcmk_ra_cap_fence_params)) { -- /* For stonith resources, Pacemaker adds special parameters, -- * but these are not listed in fence agent meta-data, so the -- * controller will not hash them. That means we have to filter -- * them out before calculating our hash for comparison. 
-- */ -- for (xmlAttrPtr iter = data->params_secure->properties; -- iter != NULL; ) { -- const char *prop_name = (const char *) iter->name; -- -- iter = iter->next; // Grab next now in case we remove current -- if (pcmk_stonith_param(prop_name)) { -- xml_remove_prop(data->params_secure, prop_name); -- } -- } -- } -- data->digest_secure_calc = calculate_operation_digest(data->params_secure, -- op_version); --} -- --/*! -- * \internal -- * \brief Add restart digest to a digest cache entry -- * -- * \param[out] data Digest cache entry to modify -- * \param[in] xml_op XML of operation in CIB status (if available) -- * \param[in] op_version CRM feature set to use for digest calculation -- */ --static void --calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, -- const char *op_version) --{ -- const char *value = NULL; -- -- // We must have XML of resource operation history -- if (xml_op == NULL) { -- return; -- } -- -- // And the history must have a restart digest to compare against -- if (crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) == NULL) { -- return; -- } -- -- // Start with a copy of all parameters -- data->params_restart = copy_xml(data->params_all); -- -- // Then filter out reloadable parameters, if any -- value = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); -- if (value != NULL) { -- filter_parameters(data->params_restart, value, TRUE); -- } -- -- value = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); -- data->digest_restart_calc = calculate_operation_digest(data->params_restart, -- value); --} -- --/*! -- * \internal -- * \brief Create a new digest cache entry with calculated digests -- * -- * \param[in] rsc Resource that action was for -- * \param[in] task Name of action performed -- * \param[in] key Action's task key -- * \param[in] node Node action was performed on -- * \param[in] xml_op XML of operation in CIB status (if available) -- * \param[in] calc_secure Whether to calculate secure digest -- * \param[in] data_set Cluster working set -- * -- * \return Pointer to new digest cache entry (or NULL on memory error) -- * \note It is the caller's responsibility to free the result using -- * pe__free_digests(). -- */ --op_digest_cache_t * --pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, -- pe_node_t *node, xmlNode *xml_op, bool calc_secure, -- pe_working_set_t *data_set) --{ -- op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t)); -- const char *op_version = CRM_FEATURE_SET; -- -- if (data == NULL) { -- return NULL; -- } -- if (xml_op != NULL) { -- op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); -- } -- calculate_main_digest(data, rsc, node, task, key, xml_op, op_version, -- data_set); -- if (calc_secure) { -- calculate_secure_digest(data, rsc, xml_op, op_version); -- } -- calculate_restart_digest(data, xml_op, op_version); -- return data; --} -- --/*! 
-- * \internal -- * \brief Calculate action digests and store in node's digest cache -- * -- * \param[in] rsc Resource that action was for -- * \param[in] task Name of action performed -- * \param[in] key Action's task key -- * \param[in] node Node action was performed on -- * \param[in] xml_op XML of operation in CIB status (if available) -- * \param[in] calc_secure Whether to calculate secure digest -- * \param[in] data_set Cluster working set -- * -- * \return Pointer to node's digest cache entry -- */ --static op_digest_cache_t * --rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, -- pe_node_t *node, xmlNode *xml_op, bool calc_secure, -- pe_working_set_t *data_set) --{ -- op_digest_cache_t *data = NULL; -- -- data = g_hash_table_lookup(node->details->digest_cache, key); -- if (data == NULL) { -- data = pe__calculate_digests(rsc, task, key, node, xml_op, calc_secure, -- data_set); -- CRM_ASSERT(data != NULL); -- g_hash_table_insert(node->details->digest_cache, strdup(key), data); -- } -- return data; --} -- --op_digest_cache_t * --rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, -- pe_working_set_t * data_set) --{ -- op_digest_cache_t *data = NULL; -- -- char *key = NULL; -- guint interval_ms = 0; -- -- const char *op_version; -- const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); -- const char *digest_all; -- const char *digest_restart; -- -- CRM_ASSERT(node != NULL); -- -- op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); -- digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); -- digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); -- -- crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); -- key = pcmk__op_key(rsc->id, task, interval_ms); -- data = rsc_action_digest(rsc, task, key, node, xml_op, -- pcmk_is_set(data_set->flags, pe_flag_sanitized), -- data_set); -- -- data->rc = RSC_DIGEST_MATCH; -- if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) { -- pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s", -- key, node->details->uname, -- crm_str(digest_restart), data->digest_restart_calc, -- op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); -- data->rc = RSC_DIGEST_RESTART; -- -- } else if (digest_all == NULL) { -- /* it is unknown what the previous op digest was */ -- data->rc = RSC_DIGEST_UNKNOWN; -- -- } else if (strcmp(digest_all, data->digest_all_calc) != 0) { -- pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s", -- key, node->details->uname, -- crm_str(digest_all), data->digest_all_calc, -- (interval_ms > 0)? "reschedule" : "reload", -- op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); -- data->rc = RSC_DIGEST_ALL; -- } -- -- free(key); -- return data; --} -- --/*! -- * \internal -- * \brief Create an unfencing summary for use in special node attribute -- * -- * Create a string combining a fence device's resource ID, agent type, and -- * parameter digest (whether for all parameters or just non-private parameters). -- * This can be stored in a special node attribute, allowing us to detect changes -- * in either the agent type or parameters, to know whether unfencing must be -- * redone or can be safely skipped when the device's history is cleaned. 
-- * -- * \param[in] rsc_id Fence device resource ID -- * \param[in] agent_type Fence device agent -- * \param[in] param_digest Fence device parameter digest -- * -- * \return Newly allocated string with unfencing digest -- * \note The caller is responsible for freeing the result. -- */ --static inline char * --create_unfencing_summary(const char *rsc_id, const char *agent_type, -- const char *param_digest) --{ -- return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest); --} -- --/*! -- * \internal -- * \brief Check whether a node can skip unfencing -- * -- * Check whether a fence device's current definition matches a node's -- * stored summary of when it was last unfenced by the device. -- * -- * \param[in] rsc_id Fence device's resource ID -- * \param[in] agent Fence device's agent type -- * \param[in] digest_calc Fence device's current parameter digest -- * \param[in] node_summary Value of node's special unfencing node attribute -- * (a comma-separated list of unfencing summaries for -- * all devices that have unfenced this node) -- * -- * \return TRUE if digest matches, FALSE otherwise -- */ --static bool --unfencing_digest_matches(const char *rsc_id, const char *agent, -- const char *digest_calc, const char *node_summary) --{ -- bool matches = FALSE; -- -- if (rsc_id && agent && digest_calc && node_summary) { -- char *search_secure = create_unfencing_summary(rsc_id, agent, -- digest_calc); -- -- /* The digest was calculated including the device ID and agent, -- * so there is no risk of collision using strstr(). -- */ -- matches = (strstr(node_summary, search_secure) != NULL); -- crm_trace("Calculated unfencing digest '%s' %sfound in '%s'", -- search_secure, matches? "" : "not ", node_summary); -- free(search_secure); -- } -- return matches; --} -- --/* Magic string to use as action name for digest cache entries used for -- * unfencing checks. This is not a real action name (i.e. "on"), so -- * check_action_definition() won't confuse these entries with real actions. -- */ --#define STONITH_DIGEST_TASK "stonith-on" -- --/*! 
-- * \internal -- * \brief Calculate fence device digests and digest comparison result -- * -- * \param[in] rsc Fence device resource -- * \param[in] agent Fence device's agent type -- * \param[in] node Node with digest cache to use -- * \param[in] data_set Cluster working set -- * -- * \return Node's digest cache entry -- */ --op_digest_cache_t * --pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent, -- pe_node_t *node, pe_working_set_t *data_set) --{ -- const char *node_summary = NULL; -- -- // Calculate device's current parameter digests -- char *key = pcmk__op_key(rsc->id, STONITH_DIGEST_TASK, 0); -- op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, -- node, NULL, TRUE, data_set); -- -- free(key); -- -- // Check whether node has special unfencing summary node attribute -- node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL); -- if (node_summary == NULL) { -- data->rc = RSC_DIGEST_UNKNOWN; -- return data; -- } -- -- // Check whether full parameter digest matches -- if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc, -- node_summary)) { -- data->rc = RSC_DIGEST_MATCH; -- return data; -- } -- -- // Check whether secure parameter digest matches -- node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE); -- if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc, -- node_summary)) { -- data->rc = RSC_DIGEST_MATCH; -- if (pcmk_is_set(data_set->flags, pe_flag_stdout)) { -- printf("Only 'private' parameters to %s for unfencing %s changed\n", -- rsc->id, node->details->uname); -- } -- return data; -- } -- -- // Parameters don't match -- data->rc = RSC_DIGEST_ALL; -- if (pcmk_is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout)) -- && data->digest_secure_calc) { -- char *digest = create_unfencing_summary(rsc->id, agent, -- data->digest_secure_calc); -- -- printf("Parameters to %s for unfencing %s changed, try '%s'\n", -- rsc->id, node->details->uname, digest); -- free(digest); -- } -- return data; --} -- - const char *rsc_printable_id(pe_resource_t *rsc) - { - if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { --- -1.8.3.1 - - -From 310ac114613fc6c16f26d95805d934c082d67039 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 23 Oct 2020 12:51:10 -0500 -Subject: [PATCH 5/5] Refactor: libpe_status: add ability to override - configuration for digests - -This will be needed for a new command-line feature ---- - include/crm/pengine/internal.h | 3 ++- - lib/pengine/pe_digest.c | 29 +++++++++++++++++++++-------- - 2 files changed, 23 insertions(+), 9 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 7a38234..c4b28cc 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -530,7 +530,8 @@ typedef struct op_digest_cache_s { - - op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task, - const char *key, pe_node_t *node, -- xmlNode *xml_op, bool calc_secure, -+ xmlNode *xml_op, GHashTable *overrides, -+ bool calc_secure, - pe_working_set_t *data_set); - - void pe__free_digests(gpointer ptr); -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index b54210c..5bcd22b 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -123,13 +123,14 @@ append_all_versioned_params(pe_resource_t *rsc, pe_node_t *node, - * \param[in] key Action's task key - * \param[in] xml_op XML of operation in CIB status (if available) - * \param[in] op_version CRM feature set to use for digest calculation -+ * 
\param[in] overrides Key/value hash table to override resource parameters - * \param[in] data_set Cluster working set - */ - static void - calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - pe_node_t *node, const char *task, const char *key, - xmlNode *xml_op, const char *op_version, -- pe_working_set_t *data_set) -+ GHashTable *overrides, pe_working_set_t *data_set) - { - pe_action_t *action = NULL; - GHashTable *local_rsc_params = crm_str_table_new(); -@@ -148,6 +149,9 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - } - - action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); -+ if (overrides != NULL) { -+ g_hash_table_foreach(overrides, hash2field, data->params_all); -+ } - g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); - g_hash_table_foreach(action->extra, hash2field, data->params_all); - g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); -@@ -174,10 +178,12 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - * \param[in] rsc Resource that action was for - * \param[in] xml_op XML of operation in CIB status (if available) - * \param[in] op_version CRM feature set to use for digest calculation -+ * \param[in] overrides Key/value hash table to override resource parameters - */ - static void - calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, -- xmlNode *xml_op, const char *op_version) -+ xmlNode *xml_op, const char *op_version, -+ GHashTable *overrides) - { - const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); - const char *secure_list = NULL; -@@ -193,6 +199,9 @@ calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, - * equivalent here is rsc->parameters. - */ - data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS); -+ if (overrides != NULL) { -+ g_hash_table_foreach(overrides, hash2field, data->params_secure); -+ } - g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); - if (secure_list != NULL) { - filter_parameters(data->params_secure, secure_list, FALSE); -@@ -225,6 +234,9 @@ calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, - * \param[out] data Digest cache entry to modify - * \param[in] xml_op XML of operation in CIB status (if available) - * \param[in] op_version CRM feature set to use for digest calculation -+ * -+ * \note This function doesn't need to handle overrides because it starts with -+ * data->params_all, which already has overrides applied. 
- */ - static void - calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, -@@ -265,6 +277,7 @@ calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, - * \param[in] key Action's task key - * \param[in] node Node action was performed on - * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] overrides Key/value hash table to override resource parameters - * \param[in] calc_secure Whether to calculate secure digest - * \param[in] data_set Cluster working set - * -@@ -274,8 +287,8 @@ calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, - */ - op_digest_cache_t * - pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, -- pe_node_t *node, xmlNode *xml_op, bool calc_secure, -- pe_working_set_t *data_set) -+ pe_node_t *node, xmlNode *xml_op, GHashTable *overrides, -+ bool calc_secure, pe_working_set_t *data_set) - { - op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t)); - const char *op_version = CRM_FEATURE_SET; -@@ -287,9 +300,9 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, - op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); - } - calculate_main_digest(data, rsc, node, task, key, xml_op, op_version, -- data_set); -+ overrides, data_set); - if (calc_secure) { -- calculate_secure_digest(data, rsc, xml_op, op_version); -+ calculate_secure_digest(data, rsc, xml_op, op_version, overrides); - } - calculate_restart_digest(data, xml_op, op_version); - return data; -@@ -318,8 +331,8 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, - - data = g_hash_table_lookup(node->details->digest_cache, key); - if (data == NULL) { -- data = pe__calculate_digests(rsc, task, key, node, xml_op, calc_secure, -- data_set); -+ data = pe__calculate_digests(rsc, task, key, node, xml_op, NULL, -+ calc_secure, data_set); - CRM_ASSERT(data != NULL); - g_hash_table_insert(node->details->digest_cache, strdup(key), data); - } --- -1.8.3.1 - diff --git a/SOURCES/008-dynamic-list-fencing.patch b/SOURCES/008-dynamic-list-fencing.patch new file mode 100644 index 0000000..4a56117 --- /dev/null +++ b/SOURCES/008-dynamic-list-fencing.patch @@ -0,0 +1,140 @@ +From 2d15fb37525f88ec8d5acb689b698044c4bb69b1 Mon Sep 17 00:00:00 2001 +From: Hideo Yamauchi +Date: Thu, 17 Jun 2021 22:39:12 +0900 +Subject: [PATCH 1/2] Low: fenced: Low: fenced: Remove unnecessary release. + +--- + daemons/fenced/fenced_commands.c | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c +index fee55a7..35aec06 100644 +--- a/daemons/fenced/fenced_commands.c ++++ b/daemons/fenced/fenced_commands.c +@@ -1104,9 +1104,6 @@ dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data) + /* Fall back to status */ + g_hash_table_replace(dev->params, + strdup(PCMK_STONITH_HOST_CHECK), strdup("status")); +- +- g_list_free_full(dev->targets, free); +- dev->targets = NULL; + } else if (!rc) { + crm_info("Refreshing port list for %s", dev->id); + g_list_free_full(dev->targets, free); +-- +1.8.3.1 + + +From a29f88f6020aac5f1ac32072942eb5713d7be50d Mon Sep 17 00:00:00 2001 +From: Hideo Yamauchi +Date: Thu, 17 Jun 2021 22:40:40 +0900 +Subject: [PATCH 2/2] High: fenced: Wrong device may be selected when + "dynamic-list" is specified. 
+ +--- + daemons/fenced/fenced_commands.c | 67 +++++++++++++++++++++++----------------- + 1 file changed, 38 insertions(+), 29 deletions(-) + +diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c +index 35aec06..da076fb 100644 +--- a/daemons/fenced/fenced_commands.c ++++ b/daemons/fenced/fenced_commands.c +@@ -904,6 +904,31 @@ xml2device_params(const char *name, xmlNode *dev) + return params; + } + ++static const char * ++target_list_type(stonith_device_t * dev) ++{ ++ const char *check_type = NULL; ++ ++ check_type = g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK); ++ ++ if (check_type == NULL) { ++ ++ if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) { ++ check_type = "static-list"; ++ } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) { ++ check_type = "static-list"; ++ } else if (pcmk_is_set(dev->flags, st_device_supports_list)) { ++ check_type = "dynamic-list"; ++ } else if (pcmk_is_set(dev->flags, st_device_supports_status)) { ++ check_type = "status"; ++ } else { ++ check_type = "none"; ++ } ++ } ++ ++ return check_type; ++} ++ + static stonith_device_t * + build_device_from_xml(xmlNode * msg) + { +@@ -931,6 +956,12 @@ build_device_from_xml(xmlNode * msg) + value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_MAP); + device->aliases = build_port_aliases(value, &(device->targets)); + ++ value = target_list_type(device); ++ if (!pcmk__str_eq(value, "static-list", pcmk__str_casei) && device->targets) { ++ /* Other than "static-list", dev-> targets is unnecessary. */ ++ g_list_free_full(device->targets, free); ++ device->targets = NULL; ++ } + device->agent_metadata = get_agent_metadata(device->agent); + if (device->agent_metadata) { + read_action_metadata(device); +@@ -971,31 +1002,6 @@ build_device_from_xml(xmlNode * msg) + return device; + } + +-static const char * +-target_list_type(stonith_device_t * dev) +-{ +- const char *check_type = NULL; +- +- check_type = g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK); +- +- if (check_type == NULL) { +- +- if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) { +- check_type = "static-list"; +- } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) { +- check_type = "static-list"; +- } else if (pcmk_is_set(dev->flags, st_device_supports_list)) { +- check_type = "dynamic-list"; +- } else if (pcmk_is_set(dev->flags, st_device_supports_status)) { +- check_type = "status"; +- } else { +- check_type = "none"; +- } +- } +- +- return check_type; +-} +- + static void + schedule_internal_command(const char *origin, + stonith_device_t * device, +@@ -1099,11 +1105,14 @@ dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data) + + /* If we successfully got the targets earlier, don't disable. */ + if (rc != 0 && !dev->targets) { +- crm_notice("Disabling port list queries for %s: %s " +- CRM_XS " rc=%d", dev->id, output, rc); +- /* Fall back to status */ +- g_hash_table_replace(dev->params, ++ if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK) == NULL) { ++ /* ++ If the operation fails if the user does not explicitly specify "dynamic-list", it will fall back to "status". 
++ */ ++ crm_notice("Disabling port list queries for %s (%d): %s", dev->id, rc, output); ++ g_hash_table_replace(dev->params, + strdup(PCMK_STONITH_HOST_CHECK), strdup("status")); ++ } + } else if (!rc) { + crm_info("Refreshing port list for %s", dev->id); + g_list_free_full(dev->targets, free); +-- +1.8.3.1 + diff --git a/SOURCES/009-crm_resource-messages.patch b/SOURCES/009-crm_resource-messages.patch new file mode 100644 index 0000000..bdbcf03 --- /dev/null +++ b/SOURCES/009-crm_resource-messages.patch @@ -0,0 +1,229 @@ +From 5bcab230ad4c647ca78b18bd4a66e30a4bb4417f Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 16 Jun 2021 11:19:03 +0200 +Subject: [PATCH 1/2] Feature: crm_resource: report not supported for --force-* + w/systemd, upstart, nagios and bundled resources + +--- + tools/crm_resource.c | 21 ++++---------- + tools/crm_resource_runtime.c | 67 +++++++++++++++++++++++++++++--------------- + 2 files changed, 51 insertions(+), 37 deletions(-) + +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index 4abdd03..fa7902c 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -660,21 +660,12 @@ attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, G + + gboolean + class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { +- if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) { +- if (!args->quiet) { +- g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, +- "Standard %s does not support parameters\n", optarg); +- } +- return FALSE; +- +- } else { +- if (options.v_class != NULL) { +- free(options.v_class); +- } +- +- options.v_class = strdup(optarg); ++ if (options.v_class != NULL) { ++ free(options.v_class); + } + ++ options.v_class = strdup(optarg); ++ + options.cmdline_config = TRUE; + options.require_resource = FALSE; + return TRUE; +@@ -1422,7 +1413,7 @@ validate_cmdline_config(void) + } else if (options.rsc_cmd != cmd_execute_agent) { + g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, + "--class, --agent, and --provider can only be used with " +- "--validate"); ++ "--validate and --force-*"); + + // Not all of --class, --agent, and --provider need to be given. Not all + // classes support the concept of a provider. 
Check that what we were given +@@ -1841,7 +1832,7 @@ main(int argc, char **argv) + if (options.cmdline_config) { + exit_code = cli_resource_execute_from_params(out, NULL, + options.v_class, options.v_provider, options.v_agent, +- "validate-all", options.cmdline_params, ++ options.operation, options.cmdline_params, + options.override_params, options.timeout_ms, + args->verbosity, options.force, options.check_level); + } else { +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index fe42e60..59e6df5 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -1674,24 +1674,59 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) + return rc; + } + ++static const char * ++get_action(const char *rsc_action) { ++ const char *action = NULL; ++ ++ if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) { ++ action = "validate-all"; ++ ++ } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) { ++ action = "monitor"; ++ ++ } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop", ++ "force-demote", "force-promote", NULL)) { ++ action = rsc_action+6; ++ } else { ++ action = rsc_action; ++ } ++ ++ return action; ++} ++ + crm_exit_t + cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + const char *rsc_class, const char *rsc_prov, +- const char *rsc_type, const char *action, ++ const char *rsc_type, const char *rsc_action, + GHashTable *params, GHashTable *override_hash, + int timeout_ms, int resource_verbose, gboolean force, + int check_level) + { ++ const char *action = NULL; + GHashTable *params_copy = NULL; + crm_exit_t exit_code = CRM_EX_OK; + svc_action_t *op = NULL; + + if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { + out->err(out, "Sorry, the %s option doesn't support %s resources yet", +- action, rsc_class); ++ rsc_action, rsc_class); ++ crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); ++ } else if (pcmk__strcase_any_of(rsc_class, PCMK_RESOURCE_CLASS_SYSTEMD, ++ PCMK_RESOURCE_CLASS_UPSTART, PCMK_RESOURCE_CLASS_NAGIOS, NULL)) { ++ out->err(out, "Sorry, the %s option doesn't support %s resources", ++ rsc_action, rsc_class); ++ crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); ++ } else if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, ++ pcmk__str_casei) && !pcmk__str_eq( ++ resources_find_service_class(rsc_name), PCMK_RESOURCE_CLASS_LSB, ++ pcmk__str_casei)) { ++ out->err(out, "Sorry, the %s option doesn't support %s resources", ++ rsc_action, resources_find_service_class(rsc_name)); + crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); + } + ++ action = get_action(rsc_action); ++ + /* If no timeout was provided, grab the default. */ + if (timeout_ms == 0) { + timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); +@@ -1766,7 +1801,7 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + exit_code = op->rc; + + out->message(out, "resource-agent-action", resource_verbose, rsc_class, +- rsc_prov, rsc_type, rsc_name, action, override_hash, op->rc, ++ rsc_prov, rsc_type, rsc_name, rsc_action, override_hash, op->rc, + op->status, op->stdout_data, op->stderr_data); + } else { + exit_code = op->rc == 0 ? 
CRM_EX_ERROR : op->rc; +@@ -1790,27 +1825,15 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, + const char *rtype = NULL; + const char *rprov = NULL; + const char *rclass = NULL; +- const char *action = NULL; + GHashTable *params = NULL; + +- if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) { +- action = "validate-all"; +- +- } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) { +- action = "monitor"; +- +- } else if (pcmk__str_eq(rsc_action, "force-stop", pcmk__str_casei)) { +- action = rsc_action+6; +- +- } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote", ++ if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote", + "force-promote", NULL)) { +- action = rsc_action+6; +- + if(pe_rsc_is_clone(rsc)) { + GList *nodes = cli_resource_search(rsc, requested_name, data_set); + if(nodes != NULL && force == FALSE) { + out->err(out, "It is not safe to %s %s here: the cluster claims it is already active", +- action, rsc->id); ++ rsc_action, rsc->id); + out->err(out, "Try setting target-role=Stopped first or specifying " + "the force option"); + return CRM_EX_UNSAFE; +@@ -1818,9 +1841,6 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, + + g_list_free_full(nodes, free); + } +- +- } else { +- action = rsc_action; + } + + if(pe_rsc_is_clone(rsc)) { +@@ -1831,6 +1851,9 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, + if(rsc->variant == pe_group) { + out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action); + return CRM_EX_UNIMPLEMENT_FEATURE; ++ } else if (rsc->variant == pe_container || pe_rsc_is_bundled(rsc)) { ++ out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action); ++ return CRM_EX_UNIMPLEMENT_FEATURE; + } + + rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); +@@ -1841,12 +1864,12 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, + data_set); + + if (timeout_ms == 0) { +- timeout_ms = pe_get_configured_timeout(rsc, action, data_set); ++ timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action), data_set); + } + + rid = pe_rsc_is_anon_clone(rsc->parent)? 
requested_name : rsc->id; + +- exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, action, ++ exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, rsc_action, + params, override_hash, timeout_ms, + resource_verbose, force, check_level); + return exit_code; +-- +1.8.3.1 + + +From 289cd231186755d99c1262eb9f968dc852409588 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 16 Jul 2021 13:20:55 +0200 +Subject: [PATCH 2/2] Refactor: crm_resource: remove duplicate Overriding + message that's handled elsewhere + +--- + tools/crm_resource_runtime.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index 59e6df5..ce037c5 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -1791,8 +1791,6 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + + g_hash_table_iter_init(&iter, override_hash); + while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) { +- out->info(out, "Overriding the cluster configuration for '%s' with '%s' = '%s'", +- rsc_name, name, value); + g_hash_table_replace(op->params, strdup(name), strdup(value)); + } + } +-- +1.8.3.1 + diff --git a/SOURCES/009-digests.patch b/SOURCES/009-digests.patch deleted file mode 100644 index 33361b2..0000000 --- a/SOURCES/009-digests.patch +++ /dev/null @@ -1,846 +0,0 @@ -From df587aaec07b4a08364d4024b3d0c73e6dede562 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 9 Nov 2020 14:55:42 -0600 -Subject: [PATCH 1/9] Refactor: scheduler: simplify XML attribute filtering - function - ---- - lib/pengine/pe_digest.c | 52 ++++++++++++++++++++++++++----------------------- - 1 file changed, 28 insertions(+), 24 deletions(-) - -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index 5bcd22b..1e119f9 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -45,30 +45,38 @@ pe__free_digests(gpointer ptr) - } - } - -+/*! 
-+ * \internal -+ * \brief Remove named attributes from an XML element -+ * -+ * \param[in,out] param_set XML to be filtered -+ * \param[in] param_string Space-separated list of attribute names -+ * \param[in] need_present Whether to remove attributes that match, -+ * or those that don't match -+ */ - static void --filter_parameters(xmlNode * param_set, const char *param_string, bool need_present) -+filter_parameters(xmlNode *param_set, const char *param_string, -+ bool need_present) - { -- if (param_set && param_string) { -- xmlAttrPtr xIter = param_set->properties; -- -- while (xIter) { -- const char *prop_name = (const char *)xIter->name; -- char *name = crm_strdup_printf(" %s ", prop_name); -- char *match = strstr(param_string, name); -+ if ((param_set == NULL) || (param_string == NULL)) { -+ return; -+ } -+ for (xmlAttrPtr xIter = param_set->properties; xIter; ) { -+ const char *prop_name = (const char *) xIter->name; -+ char *name = crm_strdup_printf(" %s ", prop_name); -+ char *match = strstr(param_string, name); - -- free(name); -+ free(name); - -- // Do now, because current entry might get removed below -- xIter = xIter->next; -+ // Do now, because current entry might get removed below -+ xIter = xIter->next; - -- if (need_present && match == NULL) { -- crm_trace("%s not found in %s", prop_name, param_string); -- xml_remove_prop(param_set, prop_name); -+ if ((need_present && (match == NULL)) -+ || (!need_present && (match != NULL))) { - -- } else if (need_present == FALSE && match) { -- crm_trace("%s found in %s", prop_name, param_string); -- xml_remove_prop(param_set, prop_name); -- } -+ crm_trace("Filtering %s (%sfound in '%s')", -+ prop_name, (need_present? "not " : ""), param_string); -+ xml_remove_prop(param_set, prop_name); - } - } - } -@@ -203,9 +211,7 @@ calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, - g_hash_table_foreach(overrides, hash2field, data->params_secure); - } - g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); -- if (secure_list != NULL) { -- filter_parameters(data->params_secure, secure_list, FALSE); -- } -+ filter_parameters(data->params_secure, secure_list, FALSE); - if (pcmk_is_set(pcmk_get_ra_caps(class), - pcmk_ra_cap_fence_params)) { - /* For stonith resources, Pacemaker adds special parameters, -@@ -259,9 +265,7 @@ calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, - - // Then filter out reloadable parameters, if any - value = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); -- if (value != NULL) { -- filter_parameters(data->params_restart, value, TRUE); -- } -+ filter_parameters(data->params_restart, value, TRUE); - - value = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); - data->digest_restart_calc = calculate_operation_digest(data->params_restart, --- -1.8.3.1 - - -From f030af8771601d46947ac9276538c46c6c296504 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 9 Nov 2020 18:37:37 -0600 -Subject: [PATCH 2/9] Refactor: scheduler: remember whether action is probe - when unpacking - -... 
to reduce code duplication and improve readability ---- - lib/pengine/utils.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index 04110be..b0922fa 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -995,6 +995,8 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai - { - int timeout_ms = 0; - const char *value = NULL; -+ bool is_probe = pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei) -+ && (interval_ms == 0); - #if ENABLE_VERSIONED_ATTRS - pe_rsc_action_details_t *rsc_details = NULL; - #endif -@@ -1026,8 +1028,7 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai - action->meta, NULL, FALSE, data_set); - - // Determine probe default timeout differently -- if (pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei) -- && (interval_ms == 0)) { -+ if (is_probe) { - xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); - - if (min_interval_mon) { -@@ -1099,8 +1100,7 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai - if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard), - pcmk_ra_cap_fence_params) - && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) -- || (pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei) -- && (interval_ms == 0))) -+ || is_probe) - && action->rsc->parameters) { - - value = g_hash_table_lookup(action->rsc->parameters, --- -1.8.3.1 - - -From 9de547849697cd6a3581db3f83b04f68d0405d9d Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 10 Nov 2020 15:15:01 -0600 -Subject: [PATCH 3/9] Refactor: scheduler: don't include originally unpacked - resource parameters in digest - -Previously, when calculating an operation digest, calculate_main_digest() would -grab the following, in order of highest to lowest precedence: - -* instance attributes evaluated for the appropriate node -* instance attributes specified with the operation -* instance attributes as originally unpacked (without evaluating for any node) -* resource meta-attributes - -Adding the originally unpacked instance attributes was redundant, since -node-evaluated instance attributes would always be a superset of those and -would take precedence. ---- - lib/pengine/pe_digest.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index 1e119f9..dd6b753 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -162,7 +162,6 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - } - g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); - g_hash_table_foreach(action->extra, hash2field, data->params_all); -- g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); - g_hash_table_foreach(action->meta, hash2metafield, data->params_all); - - #if ENABLE_VERSIONED_ATTRS --- -1.8.3.1 - - -From e9921b2ab3e9eeab6227d97cc12b85fa04dfd187 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Nov 2020 09:27:12 -0600 -Subject: [PATCH 4/9] Refactor: scheduler: reuse existing function to check for - remote in XML - -... 
to reduce code duplication and improve readability ---- - lib/pengine/bundle.c | 21 +++------------------ - 1 file changed, 3 insertions(+), 18 deletions(-) - -diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c -index 76730c7..4f6eac3 100644 ---- a/lib/pengine/bundle.c -+++ b/lib/pengine/bundle.c -@@ -959,28 +959,13 @@ pe__bundle_needs_remote_name(pe_resource_t *rsc) - const char *value; - - if (rsc == NULL) { -- return FALSE; -+ return false; - } - - value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR); -- if (!pcmk__str_eq(value, "#uname", pcmk__str_casei)) { -- return FALSE; - -- } else { -- const char *match[3][2] = { -- { XML_ATTR_TYPE, "remote" }, -- { XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF }, -- { XML_AGENT_ATTR_PROVIDER, "pacemaker" }, -- }; -- -- for (int m = 0; m < 3; m++) { -- value = crm_element_value(rsc->xml, match[m][0]); -- if (!pcmk__str_eq(value, match[m][1], pcmk__str_casei)) { -- return FALSE; -- } -- } -- } -- return TRUE; -+ return pcmk__str_eq(value, "#uname", pcmk__str_casei) -+ && xml_contains_remote_node(rsc->xml); - } - - const char * --- -1.8.3.1 - - -From 0f220e1cd25ec095492ac4b346452520f4b71cf1 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 11 Nov 2020 15:29:46 -0600 -Subject: [PATCH 5/9] Refactor: scheduler: remove dead code - -Several internal expression-testing functions were unused. While they might -have had some potential future value, the abundance of similarly named -functions made debugging difficult. Comment blocks were added to the functions -they wrapped, which should add similar value. - -Also, we had an internal wrapper for pe__eval_date_expr() that was used -only in crm_rule, so it was moved there. ---- - include/crm/pengine/rules_internal.h | 10 +-- - lib/pengine/rules.c | 130 +++++++++++------------------------ - tools/crm_rule.c | 27 +++++++- - 3 files changed, 66 insertions(+), 101 deletions(-) - -diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h -index f60263a..7380826 100644 ---- a/include/crm/pengine/rules_internal.h -+++ b/include/crm/pengine/rules_internal.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2015-2019 the Pacemaker project contributors -+ * Copyright 2015-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -28,14 +28,6 @@ gboolean pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data); - gboolean pe__eval_role_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); - gboolean pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data); - --int pe_eval_date_expression(xmlNode *time_expr, -- crm_time_t *now, -- crm_time_t *next_change); --gboolean pe_test_date_expression(xmlNode *time_expr, crm_time_t *now, -- crm_time_t *next_change); - int pe_cron_range_satisfied(crm_time_t * now, xmlNode * cron_spec); --gboolean pe_test_attr_expression(xmlNode *expr, GHashTable *hash, crm_time_t *now, -- pe_match_data_t *match_data); --gboolean pe_test_role_expression(xmlNode * expr, enum rsc_role_e role, crm_time_t * now); - - #endif -diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c -index 28562aa..be30e67 100644 ---- a/lib/pengine/rules.c -+++ b/lib/pengine/rules.c -@@ -140,37 +140,6 @@ find_expression_type(xmlNode * expr) - return attr_expr; - } - --gboolean --pe_test_role_expression(xmlNode *expr, enum rsc_role_e role, crm_time_t *now) --{ -- pe_rule_eval_data_t rule_data = { -- .node_hash = NULL, -- .role = role, -- .now = now, -- .match_data = NULL, -- .rsc_data = NULL, -- .op_data = NULL -- }; -- -- return pe__eval_role_expr(expr, &rule_data); --} -- --gboolean --pe_test_attr_expression(xmlNode *expr, GHashTable *hash, crm_time_t *now, -- pe_match_data_t *match_data) --{ -- pe_rule_eval_data_t rule_data = { -- .node_hash = hash, -- .role = RSC_ROLE_UNKNOWN, -- .now = now, -- .match_data = match_data, -- .rsc_data = NULL, -- .op_data = NULL -- }; -- -- return pe__eval_attr_expr(expr, &rule_data); --} -- - /* As per the nethack rules: - * - * moon period = 29.53058 days ~= 30, year = 365.2422 days -@@ -331,38 +300,6 @@ pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec) - return end; - } - --/*! -- * \internal -- * \brief Test a date expression (pass/fail) for a specific time -- * -- * \param[in] time_expr date_expression XML -- * \param[in] now Time for which to evaluate expression -- * \param[out] next_change If not NULL, set to when evaluation will change -- * -- * \return TRUE if date expression is in effect at given time, FALSE otherwise -- */ --gboolean --pe_test_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) --{ -- pe_rule_eval_data_t rule_data = { -- .node_hash = NULL, -- .role = RSC_ROLE_UNKNOWN, -- .now = now, -- .match_data = NULL, -- .rsc_data = NULL, -- .op_data = NULL -- }; -- -- switch (pe__eval_date_expr(expr, &rule_data, next_change)) { -- case pcmk_rc_within_range: -- case pcmk_rc_ok: -- return TRUE; -- -- default: -- return FALSE; -- } --} -- - // Set next_change to t if t is earlier - static void - crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t) -@@ -375,31 +312,6 @@ crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t) - } - } - --/*! 
-- * \internal -- * \brief Evaluate a date expression for a specific time -- * -- * \param[in] time_expr date_expression XML -- * \param[in] now Time for which to evaluate expression -- * \param[out] next_change If not NULL, set to when evaluation will change -- * -- * \return Standard Pacemaker return code -- */ --int --pe_eval_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) --{ -- pe_rule_eval_data_t rule_data = { -- .node_hash = NULL, -- .role = RSC_ROLE_UNKNOWN, -- .now = now, -- .match_data = NULL, -- .rsc_data = NULL, -- .op_data = NULL -- }; -- -- return pe__eval_date_expr(expr, &rule_data, next_change); --} -- - // Information about a block of nvpair elements - typedef struct sorted_set_s { - int score; // This block's score for sorting -@@ -908,7 +820,16 @@ pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_ - break; - - case time_expr: -- accept = pe_test_date_expression(expr, rule_data->now, next_change); -+ switch (pe__eval_date_expr(expr, rule_data, next_change)) { -+ case pcmk_rc_within_range: -+ case pcmk_rc_ok: -+ accept = TRUE; -+ break; -+ -+ default: -+ accept = FALSE; -+ break; -+ } - break; - - case role_expr: -@@ -1104,6 +1025,16 @@ accept_attr_expr(const char *l_val, const char *r_val, const char *type, - return false; // Should never reach this point - } - -+/*! -+ * \internal -+ * \brief Evaluate a node attribute expression based on #uname, #id, #kind, -+ * or a generic node attribute -+ * -+ * \param[in] expr XML of rule expression -+ * \param[in] rule_data The match_data and node_hash members are used -+ * -+ * \return TRUE if rule_data satisfies the expression, FALSE otherwise -+ */ - gboolean - pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) - { -@@ -1169,8 +1100,16 @@ pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) - return accept_attr_expr(h_val, value, type, op); - } - -- -- -+/*! -+ * \internal -+ * \brief Evaluate a date_expression -+ * -+ * \param[in] expr XML of rule expression -+ * \param[in] rule_data Only the now member is used -+ * \param[out] next_change If not NULL, set to when evaluation will change -+ * -+ * \return Standard Pacemaker return code -+ */ - int - pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) - { -@@ -1285,6 +1224,15 @@ pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) { - return TRUE; - } - -+/*! -+ * \internal -+ * \brief Evaluate a node attribute expression based on #role -+ * -+ * \param[in] expr XML of rule expression -+ * \param[in] rule_data Only the role member is used -+ * -+ * \return TRUE if rule_data->role satisfies the expression, FALSE otherwise -+ */ - gboolean - pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) - { -diff --git a/tools/crm_rule.c b/tools/crm_rule.c -index 0e44828..2871f3d 100644 ---- a/tools/crm_rule.c -+++ b/tools/crm_rule.c -@@ -75,6 +75,31 @@ mode_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **e - return TRUE; - } - -+/*! 
-+ * \internal -+ * \brief Evaluate a date expression for a specific time -+ * -+ * \param[in] time_expr date_expression XML -+ * \param[in] now Time for which to evaluate expression -+ * \param[out] next_change If not NULL, set to when evaluation will change -+ * -+ * \return Standard Pacemaker return code -+ */ -+static int -+eval_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) -+{ -+ pe_rule_eval_data_t rule_data = { -+ .node_hash = NULL, -+ .role = RSC_ROLE_UNKNOWN, -+ .now = now, -+ .match_data = NULL, -+ .rsc_data = NULL, -+ .op_data = NULL -+ }; -+ -+ return pe__eval_date_expr(expr, &rule_data, next_change); -+} -+ - static int - crm_rule_check(pe_working_set_t *data_set, const char *rule_id, crm_time_t *effective_date) - { -@@ -156,7 +181,7 @@ crm_rule_check(pe_working_set_t *data_set, const char *rule_id, crm_time_t *effe - CRM_ASSERT(match != NULL); - CRM_ASSERT(find_expression_type(match) == time_expr); - -- rc = pe_eval_date_expression(match, effective_date, NULL); -+ rc = eval_date_expression(match, effective_date, NULL); - - if (rc == pcmk_rc_within_range) { - printf("Rule %s is still in effect\n", rule_id); --- -1.8.3.1 - - -From 91a6ec4bec86de7389fcd64cb27e315da760f0dd Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 11 Nov 2020 16:44:57 -0600 -Subject: [PATCH 6/9] Refactor: scheduler: make constraint unpacking function - static - -... for linker efficiency and readability. Also change the return type to void -since it was ignored (and the same for some related functions). ---- - include/pcmki/pcmki_sched_allocate.h | 4 +-- - lib/pacemaker/pcmk_sched_constraints.c | 46 ++++++++++++++++------------------ - 2 files changed, 23 insertions(+), 27 deletions(-) - -diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h -index efc0da6..a7f8c11 100644 ---- a/include/pcmki/pcmki_sched_allocate.h -+++ b/include/pcmki/pcmki_sched_allocate.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -143,8 +143,6 @@ extern gboolean unpack_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) - - extern gboolean unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set); - --extern gboolean unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set); -- - extern gboolean unpack_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set); - - void LogNodeActions(pe_working_set_t * data_set, gboolean terminal); -diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c -index 6ed2d8c..121754d 100644 ---- a/lib/pacemaker/pcmk_sched_constraints.c -+++ b/lib/pacemaker/pcmk_sched_constraints.c -@@ -48,6 +48,7 @@ static pe__location_t *generate_location_rule(pe_resource_t *rsc, - crm_time_t *next_change, - pe_working_set_t *data_set, - pe_match_data_t *match_data); -+static void unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set); - - static bool - evaluate_lifetime(xmlNode *lifetime, pe_working_set_t *data_set) -@@ -709,11 +710,13 @@ tag_to_set(xmlNode * xml_obj, xmlNode ** rsc_set, const char * attr, - return TRUE; - } - --static gboolean unpack_rsc_location(xmlNode * xml_obj, pe_resource_t * rsc_lh, const char * role, -- const char * score, pe_working_set_t * data_set, pe_match_data_t * match_data); -+static void unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc_lh, -+ const char *role, const char *score, -+ pe_working_set_t *data_set, -+ pe_match_data_t *match_data); - --static gboolean --unpack_simple_location(xmlNode * xml_obj, pe_working_set_t * data_set) -+static void -+unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set) - { - const char *id = crm_element_value(xml_obj, XML_ATTR_ID); - const char *value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); -@@ -721,7 +724,7 @@ unpack_simple_location(xmlNode * xml_obj, pe_working_set_t * data_set) - if(value) { - pe_resource_t *rsc_lh = pe_find_constraint_resource(data_set->resources, value); - -- return unpack_rsc_location(xml_obj, rsc_lh, NULL, NULL, data_set, NULL); -+ unpack_rsc_location(xml_obj, rsc_lh, NULL, NULL, data_set, NULL); - } - - value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE_PATTERN); -@@ -741,7 +744,7 @@ unpack_simple_location(xmlNode * xml_obj, pe_working_set_t * data_set) - " has invalid value '%s'", id, value); - regfree(r_patt); - free(r_patt); -- return FALSE; -+ return; - } - - for (rIter = data_set->resources; rIter; rIter = rIter->next) { -@@ -787,13 +790,12 @@ unpack_simple_location(xmlNode * xml_obj, pe_working_set_t * data_set) - regfree(r_patt); - free(r_patt); - } -- -- return FALSE; - } - --static gboolean --unpack_rsc_location(xmlNode * xml_obj, pe_resource_t * rsc_lh, const char * role, -- const char * score, pe_working_set_t * data_set, pe_match_data_t * match_data) -+static void -+unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc_lh, const char *role, -+ const char *score, pe_working_set_t *data_set, -+ pe_match_data_t *match_data) - { - pe__location_t *location = NULL; - const char *id_lh = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); -@@ -804,7 +806,7 @@ unpack_rsc_location(xmlNode * xml_obj, pe_resource_t * rsc_lh, const char * role - if (rsc_lh == NULL) { - pcmk__config_warn("Ignoring constraint '%s' because resource '%s' " - "does not exist", id, id_lh); -- return FALSE; -+ return; - } - - if (score == NULL) { -@@ -816,7 +818,7 @@ unpack_rsc_location(xmlNode * xml_obj, pe_resource_t * rsc_lh, const char * role - pe_node_t *match = pe_find_node(data_set->nodes, node); - - if (!match) { -- 
return FALSE; -+ return; - } - location = rsc2node_new(id, rsc_lh, score_i, discovery, match, data_set); - -@@ -850,7 +852,7 @@ unpack_rsc_location(xmlNode * xml_obj, pe_resource_t * rsc_lh, const char * role - pe__update_recheck_time(t, data_set); - } - crm_time_free(next_change); -- return TRUE; -+ return; - } - - if (role == NULL) { -@@ -860,7 +862,7 @@ unpack_rsc_location(xmlNode * xml_obj, pe_resource_t * rsc_lh, const char * role - if (location && role) { - if (text2role(role) == RSC_ROLE_UNKNOWN) { - pe_err("Invalid constraint %s: Bad role %s", id, role); -- return FALSE; -+ return; - - } else { - enum rsc_role_e r = text2role(role); -@@ -877,8 +879,6 @@ unpack_rsc_location(xmlNode * xml_obj, pe_resource_t * rsc_lh, const char * role - } - } - } -- -- return TRUE; - } - - static gboolean -@@ -992,8 +992,8 @@ unpack_location_set(xmlNode * location, xmlNode * set, pe_working_set_t * data_s - return TRUE; - } - --gboolean --unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set) -+static void -+unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set) - { - xmlNode *set = NULL; - gboolean any_sets = FALSE; -@@ -1002,7 +1002,7 @@ unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set) - xmlNode *expanded_xml = NULL; - - if (unpack_location_tags(xml_obj, &expanded_xml, data_set) == FALSE) { -- return FALSE; -+ return; - } - - if (expanded_xml) { -@@ -1020,7 +1020,7 @@ unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set) - if (expanded_xml) { - free_xml(expanded_xml); - } -- return FALSE; -+ return; - } - } - } -@@ -1031,10 +1031,8 @@ unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set) - } - - if (any_sets == FALSE) { -- return unpack_simple_location(xml_obj, data_set); -+ unpack_simple_location(xml_obj, data_set); - } -- -- return TRUE; - } - - static int --- -1.8.3.1 - - -From df842adafca8ba56ddb5b448490ffef54ea785d4 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Nov 2020 09:19:51 -0600 -Subject: [PATCH 7/9] Refactor: scheduler: trivial refactoring of nvpair - evaluation - -... 
for readability ---- - lib/pengine/rules.c | 15 +++++++-------- - 1 file changed, 7 insertions(+), 8 deletions(-) - -diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c -index be30e67..86e7899 100644 ---- a/lib/pengine/rules.c -+++ b/lib/pengine/rules.c -@@ -529,20 +529,19 @@ static GList * - make_pairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, - const char *always_first) - { -- GListPtr unsorted = NULL; -- const char *score = NULL; -- sorted_set_t *pair = NULL; -- xmlNode *attr_set = NULL; -+ GList *unsorted = NULL; - - if (xml_obj == NULL) { - return NULL; - } -- for (attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL; -+ for (xmlNode *attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL; - attr_set = pcmk__xe_next(attr_set)) { - -- /* Uncertain if set_name == NULL check is strictly necessary here */ -- if (pcmk__str_eq(set_name, (const char *)attr_set->name, pcmk__str_null_matches)) { -- pair = NULL; -+ if (pcmk__str_eq(set_name, (const char *) attr_set->name, -+ pcmk__str_null_matches)) { -+ const char *score = NULL; -+ sorted_set_t *pair = NULL; -+ - attr_set = expand_idref(attr_set, top); - if (attr_set == NULL) { - continue; --- -1.8.3.1 - - -From 373c224ac1c41bd47a928a7523744c6c678a6543 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Nov 2020 09:21:35 -0600 -Subject: [PATCH 8/9] Low: scheduler: correctly skip dangling id-ref - -When evaluating XML nvpair blocks, make_pairs() previously reused the -"for" loop variable when expanding id-ref's. However if the id-ref was dangling -(only possible if schema enforcement is turned off), this would make it NULL -and thus exiting the loop instead of continuing. ---- - lib/pengine/rules.c | 11 ++++++----- - 1 file changed, 6 insertions(+), 5 deletions(-) - -diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c -index 86e7899..1bd807f 100644 ---- a/lib/pengine/rules.c -+++ b/lib/pengine/rules.c -@@ -541,18 +541,19 @@ make_pairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, - pcmk__str_null_matches)) { - const char *score = NULL; - sorted_set_t *pair = NULL; -+ xmlNode *expanded_attr_set = expand_idref(attr_set, top); - -- attr_set = expand_idref(attr_set, top); -- if (attr_set == NULL) { -+ if (expanded_attr_set == NULL) { -+ // Schema (if not "none") prevents this - continue; - } - - pair = calloc(1, sizeof(sorted_set_t)); -- pair->name = ID(attr_set); -+ pair->name = ID(expanded_attr_set); - pair->special_name = always_first; -- pair->attr_set = attr_set; -+ pair->attr_set = expanded_attr_set; - -- score = crm_element_value(attr_set, XML_RULE_ATTR_SCORE); -+ score = crm_element_value(expanded_attr_set, XML_RULE_ATTR_SCORE); - pair->score = char2score(score); - - unsorted = g_list_prepend(unsorted, pair); --- -1.8.3.1 - - -From b59717751c168ec745bedbcc5696bee11036d931 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 12 Nov 2020 14:37:27 -0600 -Subject: [PATCH 9/9] Low: scheduler: treat missing parameter as NULL in rules - with value-source - -Previously, if value-source were set to "param" or "meta", and an affected -resource did not have the specified parameter, the parameter name would wrongly -be used as the value to compare against. Now, use NULL as the value to compare -against. 
---- - lib/pengine/rules.c | 11 ++++++++--- - 1 file changed, 8 insertions(+), 3 deletions(-) - -diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c -index 1bd807f..e5d452f 100644 ---- a/lib/pengine/rules.c -+++ b/lib/pengine/rules.c -@@ -1041,6 +1041,7 @@ pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) - gboolean attr_allocated = FALSE; - const char *h_val = NULL; - GHashTable *table = NULL; -+ bool literal = true; - - const char *op = NULL; - const char *type = NULL; -@@ -1071,18 +1072,22 @@ pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) - } - - if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) { -+ literal = false; - table = rule_data->match_data->params; - } else if (pcmk__str_eq(value_source, "meta", pcmk__str_casei)) { -+ literal = false; - table = rule_data->match_data->meta; - } - } - -- if (table) { -+ if (!literal) { - const char *param_name = value; - const char *param_value = NULL; - -- if (param_name && param_name[0]) { -- if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) { -+ value = NULL; -+ if ((table != NULL) && !pcmk__str_empty(param_name)) { -+ param_value = (const char *)g_hash_table_lookup(table, param_name); -+ if (param_value != NULL) { - value = param_value; - } - } --- -1.8.3.1 - diff --git a/SOURCES/010-feature-set.patch b/SOURCES/010-feature-set.patch deleted file mode 100644 index 187d623..0000000 --- a/SOURCES/010-feature-set.patch +++ /dev/null @@ -1,26 +0,0 @@ -From d1b6b6cb5151763888ac8bc55708d2e7cbbf590b Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 19 Nov 2020 13:35:31 -0500 -Subject: [PATCH] Fix: scheduler: Fix output of failed actions without an - operation_key. - ---- - lib/pengine/pe_output.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index 1a3f93d..b91348f 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -975,7 +975,7 @@ pe__failed_action_xml(pcmk__output_t *out, va_list args) { - xmlNodePtr node = pcmk__output_create_xml_node(out, "failure"); - - xmlSetProp(node, (pcmkXmlStr) (op_key ? "op_key" : "id"), -- (pcmkXmlStr) (op_key ? op_key : "id")); -+ (pcmkXmlStr) (op_key ? op_key : ID(xml_op))); - xmlSetProp(node, (pcmkXmlStr) "node", - (pcmkXmlStr) crm_element_value(xml_op, XML_ATTR_UNAME)); - xmlSetProp(node, (pcmkXmlStr) "exitstatus", --- -1.8.3.1 - diff --git a/SOURCES/010-probe-pending.patch b/SOURCES/010-probe-pending.patch new file mode 100644 index 0000000..336c33e --- /dev/null +++ b/SOURCES/010-probe-pending.patch @@ -0,0 +1,715 @@ +From b0347f7b8e609420a7055d5fe537cc40ac0d1bb2 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 16 Jul 2021 11:08:05 -0500 +Subject: [PATCH 1/3] Fix: scheduler: don't schedule probes of unmanaged + resources on pending nodes + +Previously, custom_action() would set an action's optional or runnable flag in +the same, exclusive if-else sequence. This means that if an action should be +optional *and* runnable, only one would be set. In particular, this meant that +if a resource is unmanaged *and* its allocated node is pending, any probe would +be set to optional, but not unrunnable, and the controller could wrongly +attempt the probe before the join completed. + +Now, optional is checked separately. 
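+
+(Illustration only -- a minimal standalone C sketch, not the Pacemaker API;
+the flag names below are invented and the real change is in the hunk that
+follows. It shows why one exclusive if/else chain could never mark an action
+both optional and unrunnable, while two independent checks can.)
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        bool unmanaged = true;     /* resource is unmanaged */
+        bool node_pending = true;  /* allocated node has not completed join */
+        bool optional = false, runnable = true;
+
+        /* Decide "optional" on its own ... */
+        if (unmanaged) {
+            optional = true;
+        }
+        /* ... and "runnable" separately, not in the same else-if chain */
+        if (node_pending) {
+            runnable = false;
+        }
+
+        /* prints optional=1 runnable=0: both conditions now take effect */
+        printf("optional=%d runnable=%d\n", optional, runnable);
+        return 0;
+    }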
+--- + lib/pengine/utils.c | 22 ++++++++++++++-------- + 1 file changed, 14 insertions(+), 8 deletions(-) + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 5ef742e..965824b 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -541,6 +541,20 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + FALSE, data_set); + } + ++ // Make the action optional if its resource is unmanaged ++ if (!pcmk_is_set(action->flags, pe_action_pseudo) ++ && (action->node != NULL) ++ && !pcmk_is_set(action->rsc->flags, pe_rsc_managed) ++ && (g_hash_table_lookup(action->meta, ++ XML_LRM_ATTR_INTERVAL_MS) == NULL)) { ++ pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)", ++ action->uuid, action->node->details->uname, ++ action->rsc->id); ++ pe__set_action_flags(action, pe_action_optional); ++ // We shouldn't clear runnable here because ... something ++ } ++ ++ // Make the action runnable or unrunnable as appropriate + if (pcmk_is_set(action->flags, pe_action_pseudo)) { + /* leave untouched */ + +@@ -549,14 +563,6 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, + action->uuid); + pe__clear_action_flags(action, pe_action_runnable); + +- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed) +- && g_hash_table_lookup(action->meta, +- XML_LRM_ATTR_INTERVAL_MS) == NULL) { +- pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)", +- action->uuid, action->node->details->uname, rsc->id); +- pe__set_action_flags(action, pe_action_optional); +- //pe__clear_action_flags(action, pe_action_runnable); +- + } else if (!pcmk_is_set(action->flags, pe_action_dc) + && !(action->node->details->online) + && (!pe__is_guest_node(action->node) +-- +1.8.3.1 + + +From 520303b90eb707f5b7a9afa9b106e4a38b90f0f9 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 14 Jul 2021 17:18:44 -0500 +Subject: [PATCH 2/3] Test: scheduler: update existing tests for probe + scheduling change + +This is an improvement. Looking at bundle-probe-order-2 for example, +the bundle's first instance has this status to start: + + * Replica[0] + * galera (ocf::heartbeat:galera): Stopped (unmanaged) + * galera-bundle-docker-0 (ocf::heartbeat:docker): Started centos2 (unmanaged) + * galera-bundle-0 (ocf::pacemaker:remote): Started centos2 (unmanaged) + +After the changes, we now schedule recurring monitors for +galera-bundle-docker-0 and galera-bundle-0 on centos2, and a probe of galera:0 +on galera-bundle-0, all of which are possible. 
+--- + cts/scheduler/dot/bundle-probe-order-2.dot | 3 ++ + cts/scheduler/dot/bundle-probe-order-3.dot | 1 + + cts/scheduler/exp/bundle-probe-order-2.exp | 33 ++++++++++++++++++++-- + cts/scheduler/exp/bundle-probe-order-3.exp | 21 ++++++++++---- + cts/scheduler/summary/bundle-probe-order-2.summary | 3 ++ + cts/scheduler/summary/bundle-probe-order-3.summary | 1 + + 6 files changed, 53 insertions(+), 9 deletions(-) + +diff --git a/cts/scheduler/dot/bundle-probe-order-2.dot b/cts/scheduler/dot/bundle-probe-order-2.dot +index 0cce3fd..7706195 100644 +--- a/cts/scheduler/dot/bundle-probe-order-2.dot ++++ b/cts/scheduler/dot/bundle-probe-order-2.dot +@@ -1,6 +1,9 @@ + digraph "g" { ++"galera-bundle-0_monitor_30000 centos2" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-docker-0_monitor_60000 centos2" [ style=bold color="green" fontcolor="black"] + "galera-bundle-docker-1_monitor_0 centos2" [ style=bold color="green" fontcolor="black"] + "galera-bundle-docker-2_monitor_0 centos1" [ style=bold color="green" fontcolor="black"] + "galera-bundle-docker-2_monitor_0 centos2" [ style=bold color="green" fontcolor="black"] + "galera-bundle-docker-2_monitor_0 centos3" [ style=bold color="green" fontcolor="black"] ++"galera:0_monitor_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"] + } +diff --git a/cts/scheduler/dot/bundle-probe-order-3.dot b/cts/scheduler/dot/bundle-probe-order-3.dot +index a4b109f..53a384b 100644 +--- a/cts/scheduler/dot/bundle-probe-order-3.dot ++++ b/cts/scheduler/dot/bundle-probe-order-3.dot +@@ -2,6 +2,7 @@ + "galera-bundle-0_monitor_0 centos1" [ style=bold color="green" fontcolor="black"] + "galera-bundle-0_monitor_0 centos2" [ style=bold color="green" fontcolor="black"] + "galera-bundle-0_monitor_0 centos3" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-docker-0_monitor_60000 centos2" [ style=bold color="green" fontcolor="black"] + "galera-bundle-docker-1_monitor_0 centos2" [ style=bold color="green" fontcolor="black"] + "galera-bundle-docker-2_monitor_0 centos1" [ style=bold color="green" fontcolor="black"] + "galera-bundle-docker-2_monitor_0 centos2" [ style=bold color="green" fontcolor="black"] +diff --git a/cts/scheduler/exp/bundle-probe-order-2.exp b/cts/scheduler/exp/bundle-probe-order-2.exp +index d6174e7..5b28050 100644 +--- a/cts/scheduler/exp/bundle-probe-order-2.exp ++++ b/cts/scheduler/exp/bundle-probe-order-2.exp +@@ -1,6 +1,33 @@ + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + +@@ -8,7 +35,7 @@ + + + +- ++ + + + +@@ -17,7 +44,7 @@ + + + +- ++ + + + +@@ -26,7 +53,7 @@ + + + +- ++ + + + +diff --git a/cts/scheduler/exp/bundle-probe-order-3.exp b/cts/scheduler/exp/bundle-probe-order-3.exp +index e1f60e7..69140a4 100644 +--- a/cts/scheduler/exp/bundle-probe-order-3.exp ++++ b/cts/scheduler/exp/bundle-probe-order-3.exp +@@ -1,6 +1,15 @@ + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + +@@ -8,7 +17,7 @@ + + + +- ++ + + + +@@ -17,7 +26,7 @@ + + + +- ++ + + + +@@ -26,7 +35,7 @@ + + + +- ++ + + + +@@ -35,7 +44,7 @@ + + + +- ++ + + + +@@ -44,7 +53,7 @@ + + + +- ++ + + + +@@ -53,7 +62,7 @@ + + + +- ++ + + + +diff --git a/cts/scheduler/summary/bundle-probe-order-2.summary b/cts/scheduler/summary/bundle-probe-order-2.summary +index 681d607..024c472 100644 +--- a/cts/scheduler/summary/bundle-probe-order-2.summary ++++ b/cts/scheduler/summary/bundle-probe-order-2.summary +@@ -13,6 +13,9 @@ Current cluster status: + Transition Summary: + + Executing Cluster Transition: ++ * Resource action: galera:0 
monitor on galera-bundle-0 ++ * Resource action: galera-bundle-docker-0 monitor=60000 on centos2 ++ * Resource action: galera-bundle-0 monitor=30000 on centos2 + * Resource action: galera-bundle-docker-1 monitor on centos2 + * Resource action: galera-bundle-docker-2 monitor on centos3 + * Resource action: galera-bundle-docker-2 monitor on centos2 +diff --git a/cts/scheduler/summary/bundle-probe-order-3.summary b/cts/scheduler/summary/bundle-probe-order-3.summary +index f089618..331bd87 100644 +--- a/cts/scheduler/summary/bundle-probe-order-3.summary ++++ b/cts/scheduler/summary/bundle-probe-order-3.summary +@@ -12,6 +12,7 @@ Current cluster status: + Transition Summary: + + Executing Cluster Transition: ++ * Resource action: galera-bundle-docker-0 monitor=60000 on centos2 + * Resource action: galera-bundle-0 monitor on centos3 + * Resource action: galera-bundle-0 monitor on centos2 + * Resource action: galera-bundle-0 monitor on centos1 +-- +1.8.3.1 + + +From cb9c294a7ef22916866e0e42e51e88c2b1a61c2e Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 14 Jul 2021 17:23:11 -0500 +Subject: [PATCH 3/3] Test: scheduler: add test for probe of unmanaged resource + on pending node + +No probes should be scheduled in this case +--- + cts/cts-scheduler.in | 1 + + cts/scheduler/dot/probe-pending-node.dot | 2 + + cts/scheduler/exp/probe-pending-node.exp | 1 + + cts/scheduler/scores/probe-pending-node.scores | 61 ++++++ + cts/scheduler/summary/probe-pending-node.summary | 55 +++++ + cts/scheduler/xml/probe-pending-node.xml | 247 +++++++++++++++++++++++ + 6 files changed, 367 insertions(+) + create mode 100644 cts/scheduler/dot/probe-pending-node.dot + create mode 100644 cts/scheduler/exp/probe-pending-node.exp + create mode 100644 cts/scheduler/scores/probe-pending-node.scores + create mode 100644 cts/scheduler/summary/probe-pending-node.summary + create mode 100644 cts/scheduler/xml/probe-pending-node.xml + +diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in +index fc9790b..7ba2415 100644 +--- a/cts/cts-scheduler.in ++++ b/cts/cts-scheduler.in +@@ -110,6 +110,7 @@ TESTS = [ + [ "probe-2", "Correctly re-probe cloned groups" ], + [ "probe-3", "Probe (pending node)" ], + [ "probe-4", "Probe (pending node + stopped resource)" ], ++ [ "probe-pending-node", "Probe (pending node + unmanaged resource)" ], + [ "standby", "Standby" ], + [ "comments", "Comments" ], + ], +diff --git a/cts/scheduler/dot/probe-pending-node.dot b/cts/scheduler/dot/probe-pending-node.dot +new file mode 100644 +index 0000000..d8f1c9f +--- /dev/null ++++ b/cts/scheduler/dot/probe-pending-node.dot +@@ -0,0 +1,2 @@ ++ digraph "g" { ++} +diff --git a/cts/scheduler/exp/probe-pending-node.exp b/cts/scheduler/exp/probe-pending-node.exp +new file mode 100644 +index 0000000..56e315f +--- /dev/null ++++ b/cts/scheduler/exp/probe-pending-node.exp +@@ -0,0 +1 @@ ++ +diff --git a/cts/scheduler/scores/probe-pending-node.scores b/cts/scheduler/scores/probe-pending-node.scores +new file mode 100644 +index 0000000..020a1a0 +--- /dev/null ++++ b/cts/scheduler/scores/probe-pending-node.scores +@@ -0,0 +1,61 @@ ++ ++pcmk__clone_allocate: fs_UC5_SAPMNT-clone allocation score on gcdoubwap01: 0 ++pcmk__clone_allocate: fs_UC5_SAPMNT-clone allocation score on gcdoubwap02: 0 ++pcmk__clone_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap01: 0 ++pcmk__clone_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap02: 0 ++pcmk__clone_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap01: 0 ++pcmk__clone_allocate: fs_UC5_SAPMNT:1 allocation 
score on gcdoubwap02: 0 ++pcmk__clone_allocate: fs_UC5_SYS-clone allocation score on gcdoubwap01: 0 ++pcmk__clone_allocate: fs_UC5_SYS-clone allocation score on gcdoubwap02: 0 ++pcmk__clone_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap01: 0 ++pcmk__clone_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap02: 0 ++pcmk__clone_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap01: 0 ++pcmk__clone_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: fs_UC5_ascs allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: fs_UC5_ascs allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: fs_UC5_ers allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: fs_UC5_ers allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: grp_UC5_ascs allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: grp_UC5_ascs allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: grp_UC5_ers allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: grp_UC5_ers allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap01: INFINITY ++pcmk__group_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: rsc_vip_init_ers allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: rsc_vip_init_ers allocation score on gcdoubwap02: 0 ++pcmk__group_allocate: rsc_vip_int_ascs allocation score on gcdoubwap01: 0 ++pcmk__group_allocate: rsc_vip_int_ascs allocation score on gcdoubwap02: 0 ++pcmk__native_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap01: 0 ++pcmk__native_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap01: 0 ++pcmk__native_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap01: 0 ++pcmk__native_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap01: 0 ++pcmk__native_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: fs_UC5_ascs allocation score on gcdoubwap01: 0 ++pcmk__native_allocate: fs_UC5_ascs allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: fs_UC5_ers allocation score on gcdoubwap01: -INFINITY ++pcmk__native_allocate: fs_UC5_ers allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap01: -INFINITY ++pcmk__native_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap01: -INFINITY ++pcmk__native_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap01: -INFINITY ++pcmk__native_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap01: -INFINITY ++pcmk__native_allocate: rsc_vip_gcp_ers allocation score on 
gcdoubwap02: -INFINITY ++pcmk__native_allocate: rsc_vip_init_ers allocation score on gcdoubwap01: 0 ++pcmk__native_allocate: rsc_vip_init_ers allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: rsc_vip_int_ascs allocation score on gcdoubwap01: INFINITY ++pcmk__native_allocate: rsc_vip_int_ascs allocation score on gcdoubwap02: -INFINITY ++pcmk__native_allocate: stonith_gcdoubwap01 allocation score on gcdoubwap01: -INFINITY ++pcmk__native_allocate: stonith_gcdoubwap01 allocation score on gcdoubwap02: 0 ++pcmk__native_allocate: stonith_gcdoubwap02 allocation score on gcdoubwap01: 0 ++pcmk__native_allocate: stonith_gcdoubwap02 allocation score on gcdoubwap02: -INFINITY +diff --git a/cts/scheduler/summary/probe-pending-node.summary b/cts/scheduler/summary/probe-pending-node.summary +new file mode 100644 +index 0000000..208186b +--- /dev/null ++++ b/cts/scheduler/summary/probe-pending-node.summary +@@ -0,0 +1,55 @@ ++Using the original execution date of: 2021-06-11 13:55:24Z ++ ++ *** Resource management is DISABLED *** ++ The cluster will not attempt to start, stop or recover services ++ ++Current cluster status: ++ * Node List: ++ * Node gcdoubwap02: pending ++ * Online: [ gcdoubwap01 ] ++ ++ * Full List of Resources: ++ * stonith_gcdoubwap01 (stonith:fence_gce): Stopped (unmanaged) ++ * stonith_gcdoubwap02 (stonith:fence_gce): Stopped (unmanaged) ++ * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (unmanaged): ++ * Stopped: [ gcdoubwap01 gcdoubwap02 ] ++ * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (unmanaged): ++ * Stopped: [ gcdoubwap01 gcdoubwap02 ] ++ * Resource Group: grp_UC5_ascs (unmanaged): ++ * rsc_vip_int_ascs (ocf:heartbeat:IPaddr2): Stopped (unmanaged) ++ * rsc_vip_gcp_ascs (ocf:heartbeat:gcp-vpc-move-vip): Started gcdoubwap01 (unmanaged) ++ * fs_UC5_ascs (ocf:heartbeat:Filesystem): Stopped (unmanaged) ++ * rsc_sap_UC5_ASCS11 (ocf:heartbeat:SAPInstance): Stopped (unmanaged) ++ * Resource Group: grp_UC5_ers (unmanaged): ++ * rsc_vip_init_ers (ocf:heartbeat:IPaddr2): Stopped (unmanaged) ++ * rsc_vip_gcp_ers (ocf:heartbeat:gcp-vpc-move-vip): Stopped (unmanaged) ++ * fs_UC5_ers (ocf:heartbeat:Filesystem): Stopped (unmanaged) ++ * rsc_sap_UC5_ERS12 (ocf:heartbeat:SAPInstance): Stopped (unmanaged) ++ ++Transition Summary: ++ ++Executing Cluster Transition: ++Using the original execution date of: 2021-06-11 13:55:24Z ++ ++Revised Cluster Status: ++ * Node List: ++ * Node gcdoubwap02: pending ++ * Online: [ gcdoubwap01 ] ++ ++ * Full List of Resources: ++ * stonith_gcdoubwap01 (stonith:fence_gce): Stopped (unmanaged) ++ * stonith_gcdoubwap02 (stonith:fence_gce): Stopped (unmanaged) ++ * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (unmanaged): ++ * Stopped: [ gcdoubwap01 gcdoubwap02 ] ++ * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (unmanaged): ++ * Stopped: [ gcdoubwap01 gcdoubwap02 ] ++ * Resource Group: grp_UC5_ascs (unmanaged): ++ * rsc_vip_int_ascs (ocf:heartbeat:IPaddr2): Stopped (unmanaged) ++ * rsc_vip_gcp_ascs (ocf:heartbeat:gcp-vpc-move-vip): Started gcdoubwap01 (unmanaged) ++ * fs_UC5_ascs (ocf:heartbeat:Filesystem): Stopped (unmanaged) ++ * rsc_sap_UC5_ASCS11 (ocf:heartbeat:SAPInstance): Stopped (unmanaged) ++ * Resource Group: grp_UC5_ers (unmanaged): ++ * rsc_vip_init_ers (ocf:heartbeat:IPaddr2): Stopped (unmanaged) ++ * rsc_vip_gcp_ers (ocf:heartbeat:gcp-vpc-move-vip): Stopped (unmanaged) ++ * fs_UC5_ers (ocf:heartbeat:Filesystem): Stopped (unmanaged) ++ * rsc_sap_UC5_ERS12 (ocf:heartbeat:SAPInstance): Stopped (unmanaged) +diff --git 
a/cts/scheduler/xml/probe-pending-node.xml b/cts/scheduler/xml/probe-pending-node.xml +new file mode 100644 +index 0000000..9f55c92 +--- /dev/null ++++ b/cts/scheduler/xml/probe-pending-node.xml +@@ -0,0 +1,247 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + diff --git a/SOURCES/011-crm_attribute-regression.patch b/SOURCES/011-crm_attribute-regression.patch new file mode 100644 index 0000000..7263313 --- /dev/null +++ b/SOURCES/011-crm_attribute-regression.patch @@ -0,0 +1,150 @@ +From ea5510dd979bb6d375324cda26925d9e7c4362f5 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 19 Jul 2021 10:04:16 -0400 +Subject: [PATCH 1/2] Low: tools: The --get-value option does not require an + arg. + +Regression in 2.1.0 introduced by 15f5c2901. +--- + tools/crm_attribute.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/crm_attribute.c b/tools/crm_attribute.c +index 2cc8d26..8a5b4e4 100644 +--- a/tools/crm_attribute.c ++++ b/tools/crm_attribute.c +@@ -242,7 +242,7 @@ static GOptionEntry deprecated_entries[] = { + NULL, NULL + }, + +- { "get-value", 0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, value_cb, ++ { "get-value", 0, G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, value_cb, + NULL, NULL + }, + +-- +1.8.3.1 + + +From ef054d943afe8e60017f6adc4e25f88a59ac91a4 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Mon, 19 Jul 2021 11:37:04 -0400 +Subject: [PATCH 2/2] Low: libcrmcommon: Allow negative numbers as cmdline + options. + +The bug here is that negative numbers (for instance, negative scores) +are not supported as command line arguments. Because we break up a +string that starts with a single dash into multiple arguments, "-1000" +becomes "-1", "-0", "-0", and "-0". + +Because we don't have enough information about what is happening on the +command line, the best we can do here is recognize something as a +negative number and pass it on. Any errors will have to be detected at +a later step. + +Also note that we only recognize negative numbers if they start with +1-9. Starting with 0 will be recognized as some sort of string. + +Regression in 2.1.0 caused by a long-standing bug in +pcmk__cmdline_preproc_test. +--- + lib/common/cmdline.c | 29 ++++++++++++++++++++++ + .../tests/cmdline/pcmk__cmdline_preproc_test.c | 24 +++++++++++++++++- + 2 files changed, 52 insertions(+), 1 deletion(-) + +diff --git a/lib/common/cmdline.c b/lib/common/cmdline.c +index 7c95d02..9c1b810 100644 +--- a/lib/common/cmdline.c ++++ b/lib/common/cmdline.c +@@ -9,6 +9,7 @@ + + #include + ++#include + #include + + #include +@@ -189,6 +190,34 @@ pcmk__cmdline_preproc(char **argv, const char *special) { + /* Skip over leading dash */ + char *ch = argv[i]+1; + ++ /* This looks like the start of a number, which means it is a negative ++ * number. 
It's probably the argument to the preceeding option, but ++ * we can't know that here. Copy it over and let whatever handles ++ * arguments next figure it out. ++ */ ++ if (*ch != '\0' && *ch >= '1' && *ch <= '9') { ++ bool is_numeric = true; ++ ++ while (*ch != '\0') { ++ if (!isdigit(*ch)) { ++ is_numeric = false; ++ break; ++ } ++ ++ ch++; ++ } ++ ++ if (is_numeric) { ++ g_ptr_array_add(arr, g_strdup_printf("%s", argv[i])); ++ continue; ++ } else { ++ /* This argument wasn't entirely numeric. Reset ch to the ++ * beginning so we can process it one character at a time. ++ */ ++ ch = argv[i]+1; ++ } ++ } ++ + while (*ch != '\0') { + /* This is a special short argument that takes an option. getopt + * allows values to be interspersed with a list of arguments, but +diff --git a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c +index b8506c6..9a752ef 100644 +--- a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c ++++ b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c +@@ -1,5 +1,5 @@ + /* +- * Copyright 2020 the Pacemaker project contributors ++ * Copyright 2020-2021 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * +@@ -86,6 +86,26 @@ long_arg(void) { + g_strfreev(processed); + } + ++static void ++negative_score(void) { ++ const char *argv[] = { "-v", "-1000", NULL }; ++ const gchar *expected[] = { "-v", "-1000", NULL }; ++ ++ gchar **processed = pcmk__cmdline_preproc((char **) argv, "v"); ++ LISTS_EQ(processed, expected); ++ g_strfreev(processed); ++} ++ ++static void ++negative_score_2(void) { ++ const char *argv[] = { "-1i3", NULL }; ++ const gchar *expected[] = { "-1", "-i", "-3", NULL }; ++ ++ gchar **processed = pcmk__cmdline_preproc((char **) argv, NULL); ++ LISTS_EQ(processed, expected); ++ g_strfreev(processed); ++} ++ + int + main(int argc, char **argv) + { +@@ -98,5 +118,7 @@ main(int argc, char **argv) + g_test_add_func("/common/cmdline/preproc/special_args", special_args); + g_test_add_func("/common/cmdline/preproc/special_arg_at_end", special_arg_at_end); + g_test_add_func("/common/cmdline/preproc/long_arg", long_arg); ++ g_test_add_func("/common/cmdline/preproc/negative_score", negative_score); ++ g_test_add_func("/common/cmdline/preproc/negative_score_2", negative_score_2); + return g_test_run(); + } +-- +1.8.3.1 + diff --git a/SOURCES/011-feature-set.patch b/SOURCES/011-feature-set.patch deleted file mode 100644 index 4cd5834..0000000 --- a/SOURCES/011-feature-set.patch +++ /dev/null @@ -1,1900 +0,0 @@ -From ea78f9a90e35be15482129e1bdb9c6d86a9a5015 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 29 Oct 2020 13:41:41 +0100 -Subject: [PATCH 1/2] Refactor: crmadmin: prepare functions to move to - libpacemaker - ---- - tools/crmadmin.c | 321 +++++++++++++++++++++++++++++++++---------------------- - 1 file changed, 196 insertions(+), 125 deletions(-) - -diff --git a/tools/crmadmin.c b/tools/crmadmin.c -index e61dbf4..ec902df 100644 ---- a/tools/crmadmin.c -+++ b/tools/crmadmin.c -@@ -36,7 +36,6 @@ static guint message_timeout_ms = DEFAULT_MESSAGE_TIMEOUT_MS; - static GMainLoop *mainloop = NULL; - - bool need_controld_api = true; --bool need_pacemakerd_api = false; - - bool do_work(pcmk_ipc_api_t *api); - static char *ipc_name = NULL; -@@ -135,8 +134,6 @@ command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError - - if (!strcmp(option_name, "--pacemakerd") || !strcmp(option_name, "-P")) { - command = 
cmd_pacemakerd_health; -- need_pacemakerd_api = true; -- need_controld_api = false; - } - - if (!strcmp(option_name, "--dc_lookup") || !strcmp(option_name, "-D")) { -@@ -145,7 +142,6 @@ command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError - - if (!strcmp(option_name, "--nodes") || !strcmp(option_name, "-N")) { - command = cmd_list_nodes; -- need_controld_api = false; - } - - if (!strcmp(option_name, "--election") || !strcmp(option_name, "-E")) { -@@ -353,6 +349,16 @@ static pcmk__supported_format_t formats[] = { - }; - - static void -+start_main_loop() -+{ -+ exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects -+ mainloop = g_main_loop_new(NULL, FALSE); -+ message_timer_id = g_timeout_add(message_timeout_ms, -+ admin_message_timeout, NULL); -+ g_main_loop_run(mainloop); -+} -+ -+static void - quit_main_loop(crm_exit_t ec) - { - exit_code = ec; -@@ -366,9 +372,14 @@ quit_main_loop(crm_exit_t ec) - } - - static void --controller_event_cb(pcmk_ipc_api_t *controld_api, -- enum pcmk_ipc_event event_type, crm_exit_t status, -- void *event_data, void *user_data) -+event_done(pcmk_ipc_api_t *api) -+{ -+ pcmk_disconnect_ipc(api); -+ quit_main_loop(exit_code); -+} -+ -+static pcmk_controld_api_reply_t * -+controld_event_reply(pcmk_ipc_api_t *controld_api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data) - { - pcmk_controld_api_reply_t *reply = event_data; - -@@ -377,14 +388,14 @@ controller_event_cb(pcmk_ipc_api_t *controld_api, - if (exit_code == CRM_EX_DISCONNECT) { // Unexpected - out->err(out, "error: Lost connection to controller"); - } -- goto done; -- break; -+ event_done(controld_api); -+ return NULL; - - case pcmk_ipc_event_reply: - break; - - default: -- return; -+ return NULL; - } - - if (message_timer_id != 0) { -@@ -396,39 +407,54 @@ controller_event_cb(pcmk_ipc_api_t *controld_api, - out->err(out, "error: Bad reply from controller: %s", - crm_exit_str(status)); - exit_code = status; -- goto done; -+ event_done(controld_api); -+ return NULL; - } - - if (reply->reply_type != pcmk_controld_reply_ping) { - out->err(out, "error: Unknown reply type %d from controller", - reply->reply_type); -- goto done; -+ event_done(controld_api); -+ return NULL; - } - -- // Parse desired information from reply -- switch (command) { -- case cmd_health: -- out->message(out, "health", -- reply->data.ping.sys_from, -- reply->host_from, -- reply->data.ping.fsa_state, -- reply->data.ping.result); -- exit_code = CRM_EX_OK; -- break; -+ return reply; -+} - -- case cmd_whois_dc: -- out->message(out, "dc", reply->host_from); -- exit_code = CRM_EX_OK; -- break; -+static void -+controller_status_event_cb(pcmk_ipc_api_t *controld_api, -+ enum pcmk_ipc_event event_type, crm_exit_t status, -+ void *event_data, void *user_data) -+{ -+ pcmk_controld_api_reply_t *reply = controld_event_reply(controld_api, -+ event_type, status, event_data); - -- default: // Not really possible here -- exit_code = CRM_EX_SOFTWARE; -- break; -+ if (reply != NULL) { -+ out->message(out, "health", -+ reply->data.ping.sys_from, -+ reply->host_from, -+ reply->data.ping.fsa_state, -+ reply->data.ping.result); -+ exit_code = CRM_EX_OK; - } - --done: -- pcmk_disconnect_ipc(controld_api); -- quit_main_loop(exit_code); -+ event_done(controld_api); -+} -+ -+static void -+designated_controller_event_cb(pcmk_ipc_api_t *controld_api, -+ enum pcmk_ipc_event event_type, crm_exit_t status, -+ void *event_data, void *user_data) -+{ -+ pcmk_controld_api_reply_t *reply = controld_event_reply(controld_api, 
-+ event_type, status, event_data); -+ -+ if (reply != NULL) { -+ out->message(out, "dc", reply->host_from); -+ exit_code = CRM_EX_OK; -+ } -+ -+ event_done(controld_api); - } - - static void -@@ -438,13 +464,16 @@ pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, - { - pcmk_pacemakerd_api_reply_t *reply = event_data; - -+ crm_time_t *crm_when = crm_time_new(NULL); -+ char *pinged_buf = NULL; -+ - switch (event_type) { - case pcmk_ipc_event_disconnect: - if (exit_code == CRM_EX_DISCONNECT) { // Unexpected - out->err(out, "error: Lost connection to pacemakerd"); - } -- goto done; -- break; -+ event_done(pacemakerd_api); -+ return; - - case pcmk_ipc_event_reply: - break; -@@ -461,52 +490,119 @@ pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, - if (status != CRM_EX_OK) { - out->err(out, "error: Bad reply from pacemakerd: %s", - crm_exit_str(status)); -- exit_code = status; -- goto done; -+ event_done(pacemakerd_api); -+ return; - } - - if (reply->reply_type != pcmk_pacemakerd_reply_ping) { - out->err(out, "error: Unknown reply type %d from pacemakerd", - reply->reply_type); -- goto done; -+ event_done(pacemakerd_api); -+ return; - } - - // Parse desired information from reply -- switch (command) { -- case cmd_pacemakerd_health: -- { -- crm_time_t *crm_when = crm_time_new(NULL); -- char *pinged_buf = NULL; -- -- crm_time_set_timet(crm_when, &reply->data.ping.last_good); -- pinged_buf = crm_time_as_string(crm_when, -- crm_time_log_date | crm_time_log_timeofday | -- crm_time_log_with_timezone); -- -- out->message(out, "pacemakerd-health", -- reply->data.ping.sys_from, -- (reply->data.ping.status == pcmk_rc_ok)? -- pcmk_pacemakerd_api_daemon_state_enum2text( -- reply->data.ping.state):"query failed", -- (reply->data.ping.status == pcmk_rc_ok)?pinged_buf:""); -- exit_code = CRM_EX_OK; -- free(pinged_buf); -- } -- break; -+ crm_time_set_timet(crm_when, &reply->data.ping.last_good); -+ pinged_buf = crm_time_as_string(crm_when, -+ crm_time_log_date | crm_time_log_timeofday | -+ crm_time_log_with_timezone); -+ -+ out->message(out, "pacemakerd-health", -+ reply->data.ping.sys_from, -+ (reply->data.ping.status == pcmk_rc_ok)? -+ pcmk_pacemakerd_api_daemon_state_enum2text( -+ reply->data.ping.state):"query failed", -+ (reply->data.ping.status == pcmk_rc_ok)?pinged_buf:""); -+ exit_code = CRM_EX_OK; -+ free(pinged_buf); -+ -+ event_done(pacemakerd_api); -+} - -- default: // Not really possible here -- exit_code = CRM_EX_SOFTWARE; -- break; -+static pcmk_ipc_api_t * -+ipc_connect(enum pcmk_ipc_server server, pcmk_ipc_callback_t cb) -+{ -+ int rc; -+ pcmk_ipc_api_t *api = NULL; -+ -+ rc = pcmk_new_ipc_api(&api, server); -+ if (api == NULL) { -+ out->err(out, "error: Could not connect to %s: %s", -+ (server == pcmk_ipc_controld) ? "controller" : "pacemakerd", -+ pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); -+ return NULL; -+ } -+ pcmk_register_ipc_callback(api, cb, NULL); -+ rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_main); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Could not connect to %s: %s", -+ (server == pcmk_ipc_controld) ? 
"controller" : "pacemakerd", -+ pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); -+ return NULL; - } - --done: -- pcmk_disconnect_ipc(pacemakerd_api); -- quit_main_loop(exit_code); -+ return api; -+} -+ -+static void -+pcmk__controller_status() -+{ -+ pcmk_ipc_api_t *controld_api = ipc_connect(pcmk_ipc_controld, controller_status_event_cb); -+ -+ if (controld_api != NULL) { -+ int rc = pcmk_controld_api_ping(controld_api, dest_node); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); -+ } -+ -+ start_main_loop(); -+ -+ pcmk_free_ipc_api(controld_api); -+ } -+} -+ -+static void -+pcmk__designated_controller() -+{ -+ pcmk_ipc_api_t *controld_api = ipc_connect(pcmk_ipc_controld, designated_controller_event_cb); -+ -+ if (controld_api != NULL) { -+ int rc = pcmk_controld_api_ping(controld_api, dest_node); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); -+ } -+ -+ start_main_loop(); -+ -+ pcmk_free_ipc_api(controld_api); -+ } -+} -+ -+static void -+pcmk__pacemakerd_status() -+{ -+ pcmk_ipc_api_t *pacemakerd_api = ipc_connect(pcmk_ipc_pacemakerd, pacemakerd_event_cb); -+ -+ if (pacemakerd_api != NULL) { -+ int rc = pcmk_pacemakerd_api_ping(pacemakerd_api, ipc_name); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); -+ } -+ -+ start_main_loop(); -+ -+ pcmk_free_ipc_api(pacemakerd_api); -+ } - } - - // \return Standard Pacemaker return code - static int --list_nodes() -+pcmk__list_nodes() - { - cib_t *the_cib = cib_new(); - xmlNode *output = NULL; -@@ -565,7 +661,6 @@ main(int argc, char **argv) - int argerr = 0; - int rc; - pcmk_ipc_api_t *controld_api = NULL; -- pcmk_ipc_api_t *pacemakerd_api = NULL; - - pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); - -@@ -643,51 +738,45 @@ main(int argc, char **argv) - goto done; - } - -- // Connect to the controller if needed -- if (need_controld_api) { -- rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); -- if (controld_api == NULL) { -- out->err(out, "error: Could not connect to controller: %s", -- pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- goto done; -- } -- pcmk_register_ipc_callback(controld_api, controller_event_cb, NULL); -- rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Could not connect to controller: %s", -- pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -+ switch (command) { -+ case cmd_health: -+ pcmk__controller_status(); - goto done; -- } -- } -- -- // Connect to pacemakerd if needed -- if (need_pacemakerd_api) { -- rc = pcmk_new_ipc_api(&pacemakerd_api, pcmk_ipc_pacemakerd); -- if (pacemakerd_api == NULL) { -- out->err(out, "error: Could not connect to pacemakerd: %s", -- pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -+ case cmd_pacemakerd_health: -+ pcmk__pacemakerd_status(); - goto done; -- } -- pcmk_register_ipc_callback(pacemakerd_api, pacemakerd_event_cb, NULL); -- rc = pcmk_connect_ipc(pacemakerd_api, pcmk_ipc_dispatch_main); -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Could not connect to pacemakerd: %s", -- pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -+ case cmd_list_nodes: -+ rc = pcmk__list_nodes(); -+ // might need movink -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); -+ } -+ break; -+ case cmd_whois_dc: -+ 
pcmk__designated_controller(); - goto done; -- } -+ default: -+ rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); -+ if (controld_api == NULL) { -+ out->err(out, "error: Could not connect to controller: %s", -+ pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); -+ goto done; -+ } -+ rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Could not connect to controller: %s", -+ pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); -+ goto done; -+ } -+ break; - } - -- if (do_work(controld_api?controld_api:pacemakerd_api)) { -+ if (do_work(controld_api?controld_api:NULL)) { - // A reply is needed from controller, so run main loop to get it -- exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects -- mainloop = g_main_loop_new(NULL, FALSE); -- message_timer_id = g_timeout_add(message_timeout_ms, -- admin_message_timeout, NULL); -- g_main_loop_run(mainloop); -+ start_main_loop(); - } - - done: -@@ -698,12 +787,6 @@ done: - pcmk_free_ipc_api(capi); - } - -- if (pacemakerd_api != NULL) { -- pcmk_ipc_api_t *capi = pacemakerd_api; -- pacemakerd_api = NULL; // Ensure we can't free this twice -- pcmk_free_ipc_api(capi); -- } -- - if (mainloop != NULL) { - g_main_loop_unref(mainloop); - mainloop = NULL; -@@ -731,26 +814,14 @@ do_work(pcmk_ipc_api_t *api) - rc = pcmk_controld_api_shutdown(api, dest_node); - break; - -- case cmd_health: // dest_node != NULL -- case cmd_whois_dc: // dest_node == NULL -- rc = pcmk_controld_api_ping(api, dest_node); -- need_reply = true; -- break; -- - case cmd_elect_dc: - rc = pcmk_controld_api_start_election(api); - break; - -- case cmd_list_nodes: -- rc = list_nodes(); -- break; -- -- case cmd_pacemakerd_health: -- rc = pcmk_pacemakerd_api_ping(api, ipc_name); -- need_reply = true; -+ case cmd_none: // not actually possible here - break; - -- case cmd_none: // not actually possible here -+ default: - break; - } - if (rc != pcmk_rc_ok) { --- -1.8.3.1 - - -From ac241aec979938175103624f07c8e90c6b550c48 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Fri, 30 Oct 2020 13:09:48 +0100 -Subject: [PATCH 2/2] Refactor: crmadmin: move guts to libpacemaker - ---- - include/pacemaker-internal.h | 1 + - include/pcmki/Makefile.am | 1 + - include/pcmki/pcmki_cluster_queries.h | 15 + - lib/pacemaker/Makefile.am | 1 + - lib/pacemaker/pcmk_cluster_queries.c | 408 +++++++++++++++++++++++ - lib/pacemaker/pcmk_output.c | 177 ++++++++++ - tools/Makefile.am | 3 +- - tools/crmadmin.c | 612 ++-------------------------------- - 8 files changed, 642 insertions(+), 576 deletions(-) - create mode 100644 include/pcmki/pcmki_cluster_queries.h - create mode 100644 lib/pacemaker/pcmk_cluster_queries.c - -diff --git a/include/pacemaker-internal.h b/include/pacemaker-internal.h -index 37399e7..2e75d09 100644 ---- a/include/pacemaker-internal.h -+++ b/include/pacemaker-internal.h -@@ -11,6 +11,7 @@ - # define PACEMAKER_INTERNAL__H - - # include -+# include - # include - # include - # include -diff --git a/include/pcmki/Makefile.am b/include/pcmki/Makefile.am -index 647f2dc..7aa64c7 100644 ---- a/include/pcmki/Makefile.am -+++ b/include/pcmki/Makefile.am -@@ -10,6 +10,7 @@ - MAINTAINERCLEANFILES = Makefile.in - - noinst_HEADERS = pcmki_error.h \ -+ pcmki_cluster_queries.h \ - pcmki_fence.h \ - pcmki_output.h \ - pcmki_sched_allocate.h \ -diff --git a/include/pcmki/pcmki_cluster_queries.h b/include/pcmki/pcmki_cluster_queries.h -new file mode 100644 -index 0000000..eb3b51c ---- /dev/null -+++ 
b/include/pcmki/pcmki_cluster_queries.h -@@ -0,0 +1,15 @@ -+#include // gboolean, GMainLoop, etc. -+ -+#include -+#include -+#include -+#include -+ -+int pcmk__controller_status(pcmk__output_t *out, char *dest_node, guint message_timeout_ms); -+int pcmk__designated_controller(pcmk__output_t *out, guint message_timeout_ms); -+int pcmk__pacemakerd_status(pcmk__output_t *out, char *ipc_name, guint message_timeout_ms); -+int pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT); -+ -+// remove when parameters removed from tools/crmadmin.c -+int pcmk__shutdown_controller(pcmk__output_t *out, char *dest_node); -+int pcmk__start_election(pcmk__output_t *out); -diff --git a/lib/pacemaker/Makefile.am b/lib/pacemaker/Makefile.am -index 51a811a..4129ade 100644 ---- a/lib/pacemaker/Makefile.am -+++ b/lib/pacemaker/Makefile.am -@@ -28,6 +28,7 @@ libpacemaker_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la \ - # -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version - # Use += rather than backlashed continuation lines for parsing by bumplibs.sh - libpacemaker_la_SOURCES = -+libpacemaker_la_SOURCES += pcmk_cluster_queries.c - libpacemaker_la_SOURCES += pcmk_fence.c - libpacemaker_la_SOURCES += pcmk_output.c - libpacemaker_la_SOURCES += pcmk_sched_allocate.c -diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c -new file mode 100644 -index 0000000..8d729eb ---- /dev/null -+++ b/lib/pacemaker/pcmk_cluster_queries.c -@@ -0,0 +1,408 @@ -+#include // gboolean, GMainLoop, etc. -+#include // xmlNode -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DEFAULT_MESSAGE_TIMEOUT_MS 30000 -+ -+ -+typedef struct { -+ pcmk__output_t *out; -+ GMainLoop *mainloop; -+ int rc; -+ guint message_timer_id; -+ guint message_timeout_ms; -+} data_t; -+ -+static void -+quit_main_loop(data_t *data) -+{ -+ if (data->mainloop != NULL) { -+ GMainLoop *mloop = data->mainloop; -+ -+ data->mainloop = NULL; // Don't re-enter this block -+ pcmk_quit_main_loop(mloop, 10); -+ g_main_loop_unref(mloop); -+ } -+} -+ -+static gboolean -+admin_message_timeout(gpointer user_data) -+{ -+ data_t *data = user_data; -+ pcmk__output_t *out = data->out; -+ -+ out->err(out, "error: No reply received from controller before timeout (%dms)", -+ data->message_timeout_ms); -+ data->message_timer_id = 0; -+ data->rc = ETIMEDOUT; -+ quit_main_loop(data); -+ return FALSE; // Tells glib to remove source -+} -+ -+static void -+start_main_loop(data_t *data) -+{ -+ if (data->message_timeout_ms < 1) { -+ data->message_timeout_ms = DEFAULT_MESSAGE_TIMEOUT_MS; -+ } -+ -+ data->rc = ECONNRESET; // For unexpected disconnects -+ data->mainloop = g_main_loop_new(NULL, FALSE); -+ data->message_timer_id = g_timeout_add(data->message_timeout_ms, -+ admin_message_timeout, -+ data); -+ g_main_loop_run(data->mainloop); -+} -+ -+static void -+event_done(data_t *data, pcmk_ipc_api_t *api) -+{ -+ pcmk_disconnect_ipc(api); -+ quit_main_loop(data); -+} -+ -+static pcmk_controld_api_reply_t * -+controld_event_reply(data_t *data, pcmk_ipc_api_t *controld_api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data) -+{ -+ pcmk__output_t *out = data->out; -+ pcmk_controld_api_reply_t *reply = event_data; -+ -+ switch (event_type) { -+ case pcmk_ipc_event_disconnect: -+ if (data->rc == ECONNRESET) { // Unexpected -+ out->err(out, "error: Lost connection to controller"); -+ } -+ event_done(data, controld_api); -+ return NULL; -+ -+ 
case pcmk_ipc_event_reply: -+ break; -+ -+ default: -+ return NULL; -+ } -+ -+ if (data->message_timer_id != 0) { -+ g_source_remove(data->message_timer_id); -+ data->message_timer_id = 0; -+ } -+ -+ if (status != CRM_EX_OK) { -+ out->err(out, "error: Bad reply from controller: %s", -+ crm_exit_str(status)); -+ data->rc = EBADMSG; -+ event_done(data, controld_api); -+ return NULL; -+ } -+ -+ if (reply->reply_type != pcmk_controld_reply_ping) { -+ out->err(out, "error: Unknown reply type %d from controller", -+ reply->reply_type); -+ data->rc = EBADMSG; -+ event_done(data, controld_api); -+ return NULL; -+ } -+ -+ return reply; -+} -+ -+static void -+controller_status_event_cb(pcmk_ipc_api_t *controld_api, -+ enum pcmk_ipc_event event_type, crm_exit_t status, -+ void *event_data, void *user_data) -+{ -+ data_t *data = user_data; -+ pcmk__output_t *out = data->out; -+ pcmk_controld_api_reply_t *reply = controld_event_reply(data, controld_api, -+ event_type, status, event_data); -+ -+ if (reply != NULL) { -+ out->message(out, "health", -+ reply->data.ping.sys_from, -+ reply->host_from, -+ reply->data.ping.fsa_state, -+ reply->data.ping.result); -+ data->rc = pcmk_rc_ok; -+ } -+ -+ event_done(data, controld_api); -+} -+ -+static void -+designated_controller_event_cb(pcmk_ipc_api_t *controld_api, -+ enum pcmk_ipc_event event_type, crm_exit_t status, -+ void *event_data, void *user_data) -+{ -+ data_t *data = user_data; -+ pcmk__output_t *out = data->out; -+ pcmk_controld_api_reply_t *reply = controld_event_reply(data, controld_api, -+ event_type, status, event_data); -+ -+ if (reply != NULL) { -+ out->message(out, "dc", reply->host_from); -+ data->rc = pcmk_rc_ok; -+ } -+ -+ event_done(data, controld_api); -+} -+ -+static void -+pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, -+ enum pcmk_ipc_event event_type, crm_exit_t status, -+ void *event_data, void *user_data) -+{ -+ data_t *data = user_data; -+ pcmk__output_t *out = data->out; -+ pcmk_pacemakerd_api_reply_t *reply = event_data; -+ -+ crm_time_t *crm_when; -+ char *pinged_buf = NULL; -+ -+ switch (event_type) { -+ case pcmk_ipc_event_disconnect: -+ if (data->rc == ECONNRESET) { // Unexpected -+ out->err(out, "error: Lost connection to pacemakerd"); -+ } -+ event_done(data, pacemakerd_api); -+ return; -+ -+ case pcmk_ipc_event_reply: -+ break; -+ -+ default: -+ return; -+ } -+ -+ if (data->message_timer_id != 0) { -+ g_source_remove(data->message_timer_id); -+ data->message_timer_id = 0; -+ } -+ -+ if (status != CRM_EX_OK) { -+ out->err(out, "error: Bad reply from pacemakerd: %s", -+ crm_exit_str(status)); -+ event_done(data, pacemakerd_api); -+ return; -+ } -+ -+ if (reply->reply_type != pcmk_pacemakerd_reply_ping) { -+ out->err(out, "error: Unknown reply type %d from pacemakerd", -+ reply->reply_type); -+ event_done(data, pacemakerd_api); -+ return; -+ } -+ -+ // Parse desired information from reply -+ crm_when = crm_time_new(NULL); -+ crm_time_set_timet(crm_when, &reply->data.ping.last_good); -+ pinged_buf = crm_time_as_string(crm_when, -+ crm_time_log_date | crm_time_log_timeofday | -+ crm_time_log_with_timezone); -+ -+ out->message(out, "pacemakerd-health", -+ reply->data.ping.sys_from, -+ (reply->data.ping.status == pcmk_rc_ok)? 
-+ pcmk_pacemakerd_api_daemon_state_enum2text( -+ reply->data.ping.state):"query failed", -+ (reply->data.ping.status == pcmk_rc_ok)?pinged_buf:""); -+ data->rc = pcmk_rc_ok; -+ crm_time_free(crm_when); -+ free(pinged_buf); -+ -+ event_done(data, pacemakerd_api); -+} -+ -+static pcmk_ipc_api_t * -+ipc_connect(data_t *data, enum pcmk_ipc_server server, pcmk_ipc_callback_t cb) -+{ -+ int rc; -+ pcmk__output_t *out = data->out; -+ pcmk_ipc_api_t *api = NULL; -+ -+ -+ rc = pcmk_new_ipc_api(&api, server); -+ if (api == NULL) { -+ out->err(out, "error: Could not connect to %s: %s", -+ pcmk_ipc_name(api, true), -+ pcmk_rc_str(rc)); -+ data->rc = rc; -+ return NULL; -+ } -+ if (cb != NULL) { -+ pcmk_register_ipc_callback(api, cb, data); -+ } -+ rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_main); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Could not connect to %s: %s", -+ pcmk_ipc_name(api, true), -+ pcmk_rc_str(rc)); -+ data->rc = rc; -+ return NULL; -+ } -+ -+ return api; -+} -+ -+int -+pcmk__controller_status(pcmk__output_t *out, char *dest_node, guint message_timeout_ms) -+{ -+ data_t data = { -+ .out = out, -+ .mainloop = NULL, -+ .rc = pcmk_rc_ok, -+ .message_timer_id = 0, -+ .message_timeout_ms = message_timeout_ms -+ }; -+ pcmk_ipc_api_t *controld_api = ipc_connect(&data, pcmk_ipc_controld, controller_status_event_cb); -+ -+ if (controld_api != NULL) { -+ int rc = pcmk_controld_api_ping(controld_api, dest_node); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ data.rc = rc; -+ } -+ -+ start_main_loop(&data); -+ -+ pcmk_free_ipc_api(controld_api); -+ } -+ -+ return data.rc; -+} -+ -+int -+pcmk__designated_controller(pcmk__output_t *out, guint message_timeout_ms) -+{ -+ data_t data = { -+ .out = out, -+ .mainloop = NULL, -+ .rc = pcmk_rc_ok, -+ .message_timer_id = 0, -+ .message_timeout_ms = message_timeout_ms -+ }; -+ pcmk_ipc_api_t *controld_api = ipc_connect(&data, pcmk_ipc_controld, designated_controller_event_cb); -+ -+ if (controld_api != NULL) { -+ int rc = pcmk_controld_api_ping(controld_api, NULL); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ data.rc = rc; -+ } -+ -+ start_main_loop(&data); -+ -+ pcmk_free_ipc_api(controld_api); -+ } -+ -+ return data.rc; -+} -+ -+int -+pcmk__pacemakerd_status(pcmk__output_t *out, char *ipc_name, guint message_timeout_ms) -+{ -+ data_t data = { -+ .out = out, -+ .mainloop = NULL, -+ .rc = pcmk_rc_ok, -+ .message_timer_id = 0, -+ .message_timeout_ms = message_timeout_ms -+ }; -+ pcmk_ipc_api_t *pacemakerd_api = ipc_connect(&data, pcmk_ipc_pacemakerd, pacemakerd_event_cb); -+ -+ if (pacemakerd_api != NULL) { -+ int rc = pcmk_pacemakerd_api_ping(pacemakerd_api, ipc_name); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ data.rc = rc; -+ } -+ -+ start_main_loop(&data); -+ -+ pcmk_free_ipc_api(pacemakerd_api); -+ } -+ -+ return data.rc; -+} -+ -+// \return Standard Pacemaker return code -+int -+pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT) -+{ -+ cib_t *the_cib = cib_new(); -+ xmlNode *output = NULL; -+ int rc; -+ -+ if (the_cib == NULL) { -+ return ENOMEM; -+ } -+ rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); -+ if (rc != pcmk_ok) { -+ return pcmk_legacy2rc(rc); -+ } -+ -+ rc = the_cib->cmds->query(the_cib, NULL, &output, -+ cib_scope_local | cib_sync_call); -+ if (rc == pcmk_ok) { -+ out->message(out, "crmadmin-node-list", output, BASH_EXPORT); -+ free_xml(output); -+ } -+ 
the_cib->cmds->signoff(the_cib); -+ return pcmk_legacy2rc(rc); -+} -+ -+// remove when parameters removed from tools/crmadmin.c -+int -+pcmk__shutdown_controller(pcmk__output_t *out, char *dest_node) -+{ -+ data_t data = { -+ .out = out, -+ .mainloop = NULL, -+ .rc = pcmk_rc_ok, -+ }; -+ pcmk_ipc_api_t *controld_api = ipc_connect(&data, pcmk_ipc_controld, NULL); -+ -+ if (controld_api != NULL) { -+ int rc = pcmk_controld_api_shutdown(controld_api, dest_node); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ data.rc = rc; -+ } -+ pcmk_free_ipc_api(controld_api); -+ } -+ -+ return data.rc; -+} -+ -+int -+pcmk__start_election(pcmk__output_t *out) -+{ -+ data_t data = { -+ .out = out, -+ .mainloop = NULL, -+ .rc = pcmk_rc_ok, -+ }; -+ pcmk_ipc_api_t *controld_api = ipc_connect(&data, pcmk_ipc_controld, NULL); -+ -+ if (controld_api != NULL) { -+ int rc = pcmk_controld_api_start_election(controld_api); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ data.rc = rc; -+ } -+ -+ pcmk_free_ipc_api(controld_api); -+ } -+ -+ return data.rc; -+} -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 306e561..fd577c6 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -357,6 +357,174 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - -+PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *") -+static int -+health_text(pcmk__output_t *out, va_list args) -+{ -+ char *sys_from = va_arg(args, char *); -+ char *host_from = va_arg(args, char *); -+ char *fsa_state = va_arg(args, char *); -+ char *result = va_arg(args, char *); -+ -+ if (!out->is_quiet(out)) { -+ out->info(out, "Status of %s@%s: %s (%s)", crm_str(sys_from), -+ crm_str(host_from), crm_str(fsa_state), crm_str(result)); -+ } else if (fsa_state != NULL) { -+ out->info(out, "%s", fsa_state); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *") -+static int -+health_xml(pcmk__output_t *out, va_list args) -+{ -+ char *sys_from = va_arg(args, char *); -+ char *host_from = va_arg(args, char *); -+ char *fsa_state = va_arg(args, char *); -+ char *result = va_arg(args, char *); -+ -+ xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -+ xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(host_from)); -+ xmlSetProp(node, (pcmkXmlStr) "state", (pcmkXmlStr) crm_str(fsa_state)); -+ xmlSetProp(node, (pcmkXmlStr) "result", (pcmkXmlStr) crm_str(result)); -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *") -+static int -+pacemakerd_health_text(pcmk__output_t *out, va_list args) -+{ -+ char *sys_from = va_arg(args, char *); -+ char *state = va_arg(args, char *); -+ char *last_updated = va_arg(args, char *); -+ -+ if (!out->is_quiet(out)) { -+ out->info(out, "Status of %s: '%s' %s %s", crm_str(sys_from), -+ crm_str(state), (!pcmk__str_empty(last_updated))? 
-+ "last updated":"", crm_str(last_updated)); -+ } else { -+ out->info(out, "%s", crm_str(state)); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *") -+static int -+pacemakerd_health_xml(pcmk__output_t *out, va_list args) -+{ -+ char *sys_from = va_arg(args, char *); -+ char *state = va_arg(args, char *); -+ char *last_updated = va_arg(args, char *); -+ -+ -+ xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -+ xmlSetProp(node, (pcmkXmlStr) "state", (pcmkXmlStr) crm_str(state)); -+ xmlSetProp(node, (pcmkXmlStr) "last_updated", (pcmkXmlStr) crm_str(last_updated)); -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("dc", "char *") -+static int -+dc_text(pcmk__output_t *out, va_list args) -+{ -+ char *dc = va_arg(args, char *); -+ -+ if (!out->is_quiet(out)) { -+ out->info(out, "Designated Controller is: %s", crm_str(dc)); -+ } else if (dc != NULL) { -+ out->info(out, "%s", dc); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("dc", "char *") -+static int -+dc_xml(pcmk__output_t *out, va_list args) -+{ -+ char *dc = va_arg(args, char *); -+ -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "dc"); -+ xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(dc)); -+ -+ return pcmk_rc_ok; -+} -+ -+ -+PCMK__OUTPUT_ARGS("crmadmin-node-list", "pcmk__output_t *", "xmlNode *") -+static int -+crmadmin_node_list(pcmk__output_t *out, va_list args) -+{ -+ xmlNode *xml_node = va_arg(args, xmlNode *); -+ int found = 0; -+ xmlNode *node = NULL; -+ xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -+ gboolean BASH_EXPORT = va_arg(args, gboolean); -+ -+ out->begin_list(out, NULL, NULL, "nodes"); -+ -+ for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -+ node = crm_next_same_xml(node)) { -+ const char *node_type = BASH_EXPORT ? NULL : -+ crm_element_value(node, XML_ATTR_TYPE); -+ out->message(out, "crmadmin-node", node_type, -+ crm_str(crm_element_value(node, XML_ATTR_UNAME)), -+ crm_str(crm_element_value(node, XML_ATTR_ID)), -+ BASH_EXPORT); -+ -+ found++; -+ } -+ // @TODO List Pacemaker Remote nodes that don't have a entry -+ -+ out->end_list(out); -+ -+ if (found == 0) { -+ out->info(out, "No nodes configured"); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *", "gboolean") -+static int -+crmadmin_node_text(pcmk__output_t *out, va_list args) -+{ -+ char *type = va_arg(args, char *); -+ char *name = va_arg(args, char *); -+ char *id = va_arg(args, char *); -+ gboolean BASH_EXPORT = va_arg(args, gboolean); -+ -+ if (BASH_EXPORT) { -+ out->info(out, "export %s=%s", crm_str(name), crm_str(id)); -+ } else { -+ out->info(out, "%s node: %s (%s)", type ? type : "member", -+ crm_str(name), crm_str(id)); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *", "gboolean") -+static int -+crmadmin_node_xml(pcmk__output_t *out, va_list args) -+{ -+ char *type = va_arg(args, char *); -+ char *name = va_arg(args, char *); -+ char *id = va_arg(args, char *); -+ -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "node"); -+ xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) (type ? 
type : "member")); -+ xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) crm_str(name)); -+ xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) crm_str(id)); -+ -+ return pcmk_rc_ok; -+} -+ - static pcmk__message_entry_t fmt_functions[] = { - { "colocations-list", "default", colocations_list }, - { "colocations-list", "xml", colocations_list_xml }, -@@ -364,6 +532,15 @@ static pcmk__message_entry_t fmt_functions[] = { - { "locations-list", "xml", locations_list_xml }, - { "stacks-constraints", "default", stacks_and_constraints }, - { "stacks-constraints", "xml", stacks_and_constraints_xml }, -+ { "health", "default", health_text }, -+ { "health", "xml", health_xml }, -+ { "pacemakerd-health", "default", pacemakerd_health_text }, -+ { "pacemakerd-health", "xml", pacemakerd_health_xml }, -+ { "dc", "default", dc_text }, -+ { "dc", "xml", dc_xml }, -+ { "crmadmin-node-list", "default", crmadmin_node_list }, -+ { "crmadmin-node", "default", crmadmin_node_text }, -+ { "crmadmin-node", "xml", crmadmin_node_xml }, - - { NULL, NULL, NULL } - }; -diff --git a/tools/Makefile.am b/tools/Makefile.am -index a278fa3..de64c93 100644 ---- a/tools/Makefile.am -+++ b/tools/Makefile.am -@@ -69,7 +69,8 @@ MAN8DEPS = crm_attribute - crmadmin_SOURCES = crmadmin.c - crmadmin_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/cib/libcib.la \ -- $(top_builddir)/lib/common/libcrmcommon.la -+ $(top_builddir)/lib/common/libcrmcommon.la \ -+ $(top_builddir)/lib/pacemaker/libpacemaker.la - - crm_error_SOURCES = crm_error.c - crm_error_LDADD = $(top_builddir)/lib/common/libcrmcommon.la -diff --git a/tools/crmadmin.c b/tools/crmadmin.c -index ec902df..2d9d663 100644 ---- a/tools/crmadmin.c -+++ b/tools/crmadmin.c -@@ -16,32 +16,13 @@ - #include // gboolean, GMainLoop, etc. 
- #include // xmlNode - --#include --#include --#include -+#include -+ - #include - #include --#include --#include --#include --#include --#include - - #define SUMMARY "query and manage the Pacemaker controller" - --#define DEFAULT_MESSAGE_TIMEOUT_MS 30000 -- --static guint message_timer_id = 0; --static guint message_timeout_ms = DEFAULT_MESSAGE_TIMEOUT_MS; --static GMainLoop *mainloop = NULL; -- --bool need_controld_api = true; -- --bool do_work(pcmk_ipc_api_t *api); --static char *ipc_name = NULL; -- --gboolean admin_message_timeout(gpointer data); -- - static enum { - cmd_none, - cmd_shutdown, -@@ -52,17 +33,17 @@ static enum { - cmd_pacemakerd_health, - } command = cmd_none; - --static gboolean BE_VERBOSE = FALSE; --static gboolean BASH_EXPORT = FALSE; --static char *dest_node = NULL; --static crm_exit_t exit_code = CRM_EX_OK; --pcmk__output_t *out = NULL; -- -- - struct { - gboolean health; - gint timeout; --} options; -+ char *dest_node; -+ char *ipc_name; -+ gboolean BASH_EXPORT; -+} options = { -+ .dest_node = NULL, -+ .ipc_name = NULL, -+ .BASH_EXPORT = FALSE -+}; - - gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); - -@@ -112,11 +93,11 @@ static GOptionEntry additional_options[] = { - "\n operation failed", - NULL - }, -- { "bash-export", 'B', 0, G_OPTION_ARG_NONE, &BASH_EXPORT, -+ { "bash-export", 'B', 0, G_OPTION_ARG_NONE, &options.BASH_EXPORT, - "Display nodes as shell commands of the form 'export uname=uuid'" - "\n (valid with -N/--nodes)", - }, -- { "ipc-name", 'i', 0, G_OPTION_ARG_STRING, &ipc_name, -+ { "ipc-name", 'i', 0, G_OPTION_ARG_STRING, &options.ipc_name, - "Name to use for ipc instead of 'crmadmin' (with -P/--pacemakerd).", - NULL - }, -@@ -154,478 +135,21 @@ command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError - } - - if (optarg) { -- if (dest_node != NULL) { -- free(dest_node); -+ if (options.dest_node != NULL) { -+ free(options.dest_node); - } -- dest_node = strdup(optarg); -+ options.dest_node = strdup(optarg); - } - - return TRUE; - } - --PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") --static int --health_text(pcmk__output_t *out, va_list args) --{ -- const char *sys_from = va_arg(args, const char *); -- const char *host_from = va_arg(args, const char *); -- const char *fsa_state = va_arg(args, const char *); -- const char *result = va_arg(args, const char *); -- -- if (!out->is_quiet(out)) { -- out->info(out, "Status of %s@%s: %s (%s)", crm_str(sys_from), -- crm_str(host_from), crm_str(fsa_state), crm_str(result)); -- } else if (fsa_state != NULL) { -- out->info(out, "%s", fsa_state); -- } -- -- return pcmk_rc_ok; --} -- --PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") --static int --health_xml(pcmk__output_t *out, va_list args) --{ -- const char *sys_from = va_arg(args, const char *); -- const char *host_from = va_arg(args, const char *); -- const char *fsa_state = va_arg(args, const char *); -- const char *result = va_arg(args, const char *); -- -- xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -- xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(host_from)); -- xmlSetProp(node, (pcmkXmlStr) "state", (pcmkXmlStr) crm_str(fsa_state)); -- xmlSetProp(node, (pcmkXmlStr) "result", (pcmkXmlStr) crm_str(result)); -- -- return pcmk_rc_ok; --} -- --PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *") --static int 
--pacemakerd_health_text(pcmk__output_t *out, va_list args) --{ -- const char *sys_from = va_arg(args, const char *); -- const char *state = va_arg(args, const char *); -- const char *last_updated = va_arg(args, const char *); -- -- if (!out->is_quiet(out)) { -- out->info(out, "Status of %s: '%s' %s %s", crm_str(sys_from), -- crm_str(state), (!pcmk__str_empty(last_updated))? -- "last updated":"", crm_str(last_updated)); -- } else { -- out->info(out, "%s", crm_str(state)); -- } -- -- return pcmk_rc_ok; --} -- --PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *") --static int --pacemakerd_health_xml(pcmk__output_t *out, va_list args) --{ -- const char *sys_from = va_arg(args, const char *); -- const char *state = va_arg(args, const char *); -- const char *last_updated = va_arg(args, const char *); -- -- -- xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -- xmlSetProp(node, (pcmkXmlStr) "state", (pcmkXmlStr) crm_str(state)); -- xmlSetProp(node, (pcmkXmlStr) "last_updated", (pcmkXmlStr) crm_str(last_updated)); -- -- return pcmk_rc_ok; --} -- --PCMK__OUTPUT_ARGS("dc", "const char *") --static int --dc_text(pcmk__output_t *out, va_list args) --{ -- const char *dc = va_arg(args, const char *); -- -- if (!out->is_quiet(out)) { -- out->info(out, "Designated Controller is: %s", crm_str(dc)); -- } else if (dc != NULL) { -- out->info(out, "%s", dc); -- } -- -- return pcmk_rc_ok; --} -- --PCMK__OUTPUT_ARGS("dc", "const char *") --static int --dc_xml(pcmk__output_t *out, va_list args) --{ -- const char *dc = va_arg(args, const char *); -- -- xmlNodePtr node = pcmk__output_create_xml_node(out, "dc"); -- xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(dc)); -- -- return pcmk_rc_ok; --} -- -- --PCMK__OUTPUT_ARGS("crmadmin-node-list", "struct xmlNode *") --static int --crmadmin_node_list(pcmk__output_t *out, va_list args) --{ -- xmlNode *xml_node = va_arg(args, xmlNode *); -- int found = 0; -- xmlNode *node = NULL; -- xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -- -- out->begin_list(out, NULL, NULL, "nodes"); -- -- for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -- node = crm_next_same_xml(node)) { -- const char *node_type = BASH_EXPORT ? NULL : -- crm_element_value(node, XML_ATTR_TYPE); -- out->message(out, "crmadmin-node", node_type, -- crm_str(crm_element_value(node, XML_ATTR_UNAME)), -- crm_str(crm_element_value(node, XML_ATTR_ID))); -- -- found++; -- } -- // @TODO List Pacemaker Remote nodes that don't have a entry -- -- out->end_list(out); -- -- if (found == 0) { -- out->info(out, "No nodes configured"); -- } -- -- return pcmk_rc_ok; --} -- --PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *") --static int --crmadmin_node_text(pcmk__output_t *out, va_list args) --{ -- const char *type = va_arg(args, const char *); -- const char *name = va_arg(args, const char *); -- const char *id = va_arg(args, const char *); -- -- if (BASH_EXPORT) { -- out->info(out, "export %s=%s", crm_str(name), crm_str(id)); -- } else { -- out->info(out, "%s node: %s (%s)", type ? 
type : "member", -- crm_str(name), crm_str(id)); -- } -- -- return pcmk_rc_ok; --} -- --PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *") --static int --crmadmin_node_xml(pcmk__output_t *out, va_list args) --{ -- const char *type = va_arg(args, const char *); -- const char *name = va_arg(args, const char *); -- const char *id = va_arg(args, const char *); -- -- xmlNodePtr node = pcmk__output_create_xml_node(out, "node"); -- xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) (type ? type : "member")); -- xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) crm_str(name)); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) crm_str(id)); -- -- return pcmk_rc_ok; --} -- --static pcmk__message_entry_t fmt_functions[] = { -- {"health", "default", health_text }, -- {"health", "xml", health_xml }, -- {"pacemakerd-health", "default", pacemakerd_health_text }, -- {"pacemakerd-health", "xml", pacemakerd_health_xml }, -- {"dc", "default", dc_text }, -- {"dc", "xml", dc_xml }, -- {"crmadmin-node-list", "default", crmadmin_node_list }, -- {"crmadmin-node", "default", crmadmin_node_text }, -- {"crmadmin-node", "xml", crmadmin_node_xml }, -- -- { NULL, NULL, NULL } --}; -- - static pcmk__supported_format_t formats[] = { - PCMK__SUPPORTED_FORMAT_TEXT, - PCMK__SUPPORTED_FORMAT_XML, - { NULL, NULL, NULL } - }; - --static void --start_main_loop() --{ -- exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects -- mainloop = g_main_loop_new(NULL, FALSE); -- message_timer_id = g_timeout_add(message_timeout_ms, -- admin_message_timeout, NULL); -- g_main_loop_run(mainloop); --} -- --static void --quit_main_loop(crm_exit_t ec) --{ -- exit_code = ec; -- if (mainloop != NULL) { -- GMainLoop *mloop = mainloop; -- -- mainloop = NULL; // Don't re-enter this block -- pcmk_quit_main_loop(mloop, 10); -- g_main_loop_unref(mloop); -- } --} -- --static void --event_done(pcmk_ipc_api_t *api) --{ -- pcmk_disconnect_ipc(api); -- quit_main_loop(exit_code); --} -- --static pcmk_controld_api_reply_t * --controld_event_reply(pcmk_ipc_api_t *controld_api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data) --{ -- pcmk_controld_api_reply_t *reply = event_data; -- -- switch (event_type) { -- case pcmk_ipc_event_disconnect: -- if (exit_code == CRM_EX_DISCONNECT) { // Unexpected -- out->err(out, "error: Lost connection to controller"); -- } -- event_done(controld_api); -- return NULL; -- -- case pcmk_ipc_event_reply: -- break; -- -- default: -- return NULL; -- } -- -- if (message_timer_id != 0) { -- g_source_remove(message_timer_id); -- message_timer_id = 0; -- } -- -- if (status != CRM_EX_OK) { -- out->err(out, "error: Bad reply from controller: %s", -- crm_exit_str(status)); -- exit_code = status; -- event_done(controld_api); -- return NULL; -- } -- -- if (reply->reply_type != pcmk_controld_reply_ping) { -- out->err(out, "error: Unknown reply type %d from controller", -- reply->reply_type); -- event_done(controld_api); -- return NULL; -- } -- -- return reply; --} -- --static void --controller_status_event_cb(pcmk_ipc_api_t *controld_api, -- enum pcmk_ipc_event event_type, crm_exit_t status, -- void *event_data, void *user_data) --{ -- pcmk_controld_api_reply_t *reply = controld_event_reply(controld_api, -- event_type, status, event_data); -- -- if (reply != NULL) { -- out->message(out, "health", -- reply->data.ping.sys_from, -- reply->host_from, -- reply->data.ping.fsa_state, -- reply->data.ping.result); -- exit_code = CRM_EX_OK; -- } -- -- event_done(controld_api); --} -- --static 
void --designated_controller_event_cb(pcmk_ipc_api_t *controld_api, -- enum pcmk_ipc_event event_type, crm_exit_t status, -- void *event_data, void *user_data) --{ -- pcmk_controld_api_reply_t *reply = controld_event_reply(controld_api, -- event_type, status, event_data); -- -- if (reply != NULL) { -- out->message(out, "dc", reply->host_from); -- exit_code = CRM_EX_OK; -- } -- -- event_done(controld_api); --} -- --static void --pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, -- enum pcmk_ipc_event event_type, crm_exit_t status, -- void *event_data, void *user_data) --{ -- pcmk_pacemakerd_api_reply_t *reply = event_data; -- -- crm_time_t *crm_when = crm_time_new(NULL); -- char *pinged_buf = NULL; -- -- switch (event_type) { -- case pcmk_ipc_event_disconnect: -- if (exit_code == CRM_EX_DISCONNECT) { // Unexpected -- out->err(out, "error: Lost connection to pacemakerd"); -- } -- event_done(pacemakerd_api); -- return; -- -- case pcmk_ipc_event_reply: -- break; -- -- default: -- return; -- } -- -- if (message_timer_id != 0) { -- g_source_remove(message_timer_id); -- message_timer_id = 0; -- } -- -- if (status != CRM_EX_OK) { -- out->err(out, "error: Bad reply from pacemakerd: %s", -- crm_exit_str(status)); -- event_done(pacemakerd_api); -- return; -- } -- -- if (reply->reply_type != pcmk_pacemakerd_reply_ping) { -- out->err(out, "error: Unknown reply type %d from pacemakerd", -- reply->reply_type); -- event_done(pacemakerd_api); -- return; -- } -- -- // Parse desired information from reply -- crm_time_set_timet(crm_when, &reply->data.ping.last_good); -- pinged_buf = crm_time_as_string(crm_when, -- crm_time_log_date | crm_time_log_timeofday | -- crm_time_log_with_timezone); -- -- out->message(out, "pacemakerd-health", -- reply->data.ping.sys_from, -- (reply->data.ping.status == pcmk_rc_ok)? -- pcmk_pacemakerd_api_daemon_state_enum2text( -- reply->data.ping.state):"query failed", -- (reply->data.ping.status == pcmk_rc_ok)?pinged_buf:""); -- exit_code = CRM_EX_OK; -- free(pinged_buf); -- -- event_done(pacemakerd_api); --} -- --static pcmk_ipc_api_t * --ipc_connect(enum pcmk_ipc_server server, pcmk_ipc_callback_t cb) --{ -- int rc; -- pcmk_ipc_api_t *api = NULL; -- -- rc = pcmk_new_ipc_api(&api, server); -- if (api == NULL) { -- out->err(out, "error: Could not connect to %s: %s", -- (server == pcmk_ipc_controld) ? "controller" : "pacemakerd", -- pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- return NULL; -- } -- pcmk_register_ipc_callback(api, cb, NULL); -- rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_main); -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Could not connect to %s: %s", -- (server == pcmk_ipc_controld) ? 
"controller" : "pacemakerd", -- pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- return NULL; -- } -- -- return api; --} -- --static void --pcmk__controller_status() --{ -- pcmk_ipc_api_t *controld_api = ipc_connect(pcmk_ipc_controld, controller_status_event_cb); -- -- if (controld_api != NULL) { -- int rc = pcmk_controld_api_ping(controld_api, dest_node); -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- } -- -- start_main_loop(); -- -- pcmk_free_ipc_api(controld_api); -- } --} -- --static void --pcmk__designated_controller() --{ -- pcmk_ipc_api_t *controld_api = ipc_connect(pcmk_ipc_controld, designated_controller_event_cb); -- -- if (controld_api != NULL) { -- int rc = pcmk_controld_api_ping(controld_api, dest_node); -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- } -- -- start_main_loop(); -- -- pcmk_free_ipc_api(controld_api); -- } --} -- --static void --pcmk__pacemakerd_status() --{ -- pcmk_ipc_api_t *pacemakerd_api = ipc_connect(pcmk_ipc_pacemakerd, pacemakerd_event_cb); -- -- if (pacemakerd_api != NULL) { -- int rc = pcmk_pacemakerd_api_ping(pacemakerd_api, ipc_name); -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- } -- -- start_main_loop(); -- -- pcmk_free_ipc_api(pacemakerd_api); -- } --} -- --// \return Standard Pacemaker return code --static int --pcmk__list_nodes() --{ -- cib_t *the_cib = cib_new(); -- xmlNode *output = NULL; -- int rc; -- -- if (the_cib == NULL) { -- return ENOMEM; -- } -- rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); -- if (rc != pcmk_ok) { -- return pcmk_legacy2rc(rc); -- } -- -- rc = the_cib->cmds->query(the_cib, NULL, &output, -- cib_scope_local | cib_sync_call); -- if (rc == pcmk_ok) { -- out->message(out, "crmadmin-node-list", output); -- free_xml(output); -- } -- the_cib->cmds->signoff(the_cib); -- return pcmk_legacy2rc(rc); --} -- - static GOptionContext * - build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { - GOptionContext *context = NULL; -@@ -658,8 +182,10 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { - int - main(int argc, char **argv) - { -- int argerr = 0; -+ pcmk__output_t *out = NULL; -+ crm_exit_t exit_code = CRM_EX_OK; - int rc; -+ int argerr = 0; - pcmk_ipc_api_t *controld_api = NULL; - - pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); -@@ -683,7 +209,6 @@ main(int argc, char **argv) - } - - for (int i = 0; i < args->verbosity; i++) { -- BE_VERBOSE = TRUE; - crm_bump_log_level(argc, argv); - } - -@@ -697,7 +222,7 @@ main(int argc, char **argv) - - out->quiet = args->quiet; - -- pcmk__register_messages(out, fmt_functions); -+ pcmk__register_lib_messages(out); - - if (!pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname())) { - goto done; -@@ -708,13 +233,6 @@ main(int argc, char **argv) - goto done; - } - -- if (options.timeout) { -- message_timeout_ms = (guint) options.timeout; -- if (message_timeout_ms < 1) { -- message_timeout_ms = DEFAULT_MESSAGE_TIMEOUT_MS; -- } -- } -- - if (options.health) { - out->err(out, "Cluster-wide health option not supported"); - ++argerr; -@@ -740,43 +258,31 @@ main(int argc, char **argv) - - switch (command) { - case cmd_health: -- pcmk__controller_status(); -- goto done; -+ rc = pcmk__controller_status(out, options.dest_node, options.timeout); -+ break; - 
case cmd_pacemakerd_health: -- pcmk__pacemakerd_status(); -- goto done; -+ rc = pcmk__pacemakerd_status(out, options.ipc_name, options.timeout); -+ break; - case cmd_list_nodes: -- rc = pcmk__list_nodes(); -- // might need movink -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- } -+ rc = pcmk__list_nodes(out, options.BASH_EXPORT); - break; - case cmd_whois_dc: -- pcmk__designated_controller(); -- goto done; -- default: -- rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); -- if (controld_api == NULL) { -- out->err(out, "error: Could not connect to controller: %s", -- pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- goto done; -- } -- rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Could not connect to controller: %s", -- pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- goto done; -- } -+ rc = pcmk__designated_controller(out, options.timeout); -+ break; -+ case cmd_shutdown: -+ rc = pcmk__shutdown_controller(out, options.dest_node); -+ break; -+ case cmd_elect_dc: -+ rc = pcmk__start_election(out); -+ break; -+ case cmd_none: -+ rc = pcmk_rc_error; - break; - } - -- if (do_work(controld_api?controld_api:NULL)) { -- // A reply is needed from controller, so run main loop to get it -- start_main_loop(); -+ if (rc != pcmk_rc_ok) { -+ out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -+ exit_code = pcmk_rc2exitc(rc); - } - - done: -@@ -787,10 +293,6 @@ done: - pcmk_free_ipc_api(capi); - } - -- if (mainloop != NULL) { -- g_main_loop_unref(mainloop); -- mainloop = NULL; -- } - g_strfreev(processed_args); - g_clear_error(&error); - pcmk__free_arg_context(context); -@@ -801,43 +303,3 @@ done: - return crm_exit(exit_code); - - } -- --// \return True if reply from controller is needed --bool --do_work(pcmk_ipc_api_t *api) --{ -- bool need_reply = false; -- int rc = pcmk_rc_ok; -- -- switch (command) { -- case cmd_shutdown: -- rc = pcmk_controld_api_shutdown(api, dest_node); -- break; -- -- case cmd_elect_dc: -- rc = pcmk_controld_api_start_election(api); -- break; -- -- case cmd_none: // not actually possible here -- break; -- -- default: -- break; -- } -- if (rc != pcmk_rc_ok) { -- out->err(out, "error: Command failed: %s", pcmk_rc_str(rc)); -- exit_code = pcmk_rc2exitc(rc); -- } -- return need_reply; --} -- --gboolean --admin_message_timeout(gpointer data) --{ -- out->err(out, -- "error: No reply received from controller before timeout (%dms)", -- message_timeout_ms); -- message_timer_id = 0; -- quit_main_loop(CRM_EX_TIMEOUT); -- return FALSE; // Tells glib to remove source --} --- -1.8.3.1 - diff --git a/SOURCES/012-feature-set.patch b/SOURCES/012-feature-set.patch deleted file mode 100644 index a1315b0..0000000 --- a/SOURCES/012-feature-set.patch +++ /dev/null @@ -1,2355 +0,0 @@ -From a2b8e75abf42b28ba0a02de4e67da531556543ab Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 17 Nov 2020 13:53:57 -0500 -Subject: [PATCH 1/6] Refactor: libs: Add a function to set properties on an - xmlNode. - -This takes a va_list so it can be used by other future functions that -will take arguments directly. It then just walks the list of pairs and -sets them as properties on an xmlNode, until a NULL is found. - -Interally, it just uses crm_xml_add so it's useful across the whole -pacemaker code base. 
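[Editor's illustration, not part of the patch series: a minimal standalone sketch of the NULL-terminated name/value-pair pattern this commit message describes. It is written against plain libxml2 rather than Pacemaker's internal crm_xml_add()/pcmk__output_create_xml_node() helpers so it compiles on its own; the helper names set_propv() and create_node_with_props() are hypothetical, chosen only to mirror the approach.]

/* Hypothetical helpers for illustration only (not Pacemaker API).
 * Walk NULL-terminated name/value pairs from a va_list and set each
 * pair as a property on an xmlNode, then wrap that in a variadic
 * "create node with properties" call, as the commit message outlines. */
#include <stdarg.h>
#include <stdbool.h>
#include <libxml/tree.h>

static void
set_propv(xmlNodePtr node, va_list pairs)
{
    while (true) {
        const char *name = va_arg(pairs, const char *);
        const char *value;

        if (name == NULL) {
            return;     /* a NULL name terminates the pair list */
        }
        value = va_arg(pairs, const char *);
        if (value == NULL) {
            return;     /* a NULL value also stops processing */
        }
        xmlSetProp(node, (const xmlChar *) name, (const xmlChar *) value);
    }
}

/* Create an element and set its properties in one variadic call, e.g.
 *   xmlNodePtr dc = create_node_with_props("dc", "node_name", "node1", NULL);
 */
static xmlNodePtr
create_node_with_props(const char *name, ...)
{
    xmlNodePtr node = xmlNewNode(NULL, (const xmlChar *) name);
    va_list args;

    va_start(args, name);
    set_propv(node, args);
    va_end(args);
    return node;
}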
---- - include/crm/common/xml_internal.h | 3 +++ - lib/common/xml.c | 20 ++++++++++++++++++++ - 2 files changed, 23 insertions(+) - -diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h -index 5643be6..c60fa51 100644 ---- a/include/crm/common/xml_internal.h -+++ b/include/crm/common/xml_internal.h -@@ -235,4 +235,7 @@ pcmk__xe_next(const xmlNode *child) - return next; - } - -+void -+pcmk__xe_set_propv(xmlNodePtr node, va_list pairs); -+ - #endif // PCMK__XML_INTERNAL__H -diff --git a/lib/common/xml.c b/lib/common/xml.c -index bed6854..61cac9f 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -2935,6 +2935,26 @@ pcmk__xml_artefact_path(enum pcmk__xml_artefact_ns ns, const char *filespec) - return ret; - } - -+void -+pcmk__xe_set_propv(xmlNodePtr node, va_list pairs) -+{ -+ while (true) { -+ const char *name, *value; -+ -+ name = va_arg(pairs, const char *); -+ if (name == NULL) { -+ return; -+ } -+ -+ value = va_arg(pairs, const char *); -+ if (value == NULL) { -+ return; -+ } -+ -+ crm_xml_add(node, name, value); -+ } -+} -+ - // Deprecated functions kept only for backward API compatibility - - xmlNode *find_entity(xmlNode *parent, const char *node_name, const char *id); --- -1.8.3.1 - - -From b16b24ed049b1a646195e8d8b9bb309c2fa8fe49 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 17 Nov 2020 13:00:44 -0500 -Subject: [PATCH 2/6] Refactor: libs: Set XML properties when creating a node. - -pcmk__output_create_xml_node can take a NULL-terminated list of -properties that should be set on the node at the same time it's created. -This saves a bunch of calls to xmlSetProp and gets rid of some variables -that are otherwise not needed. - -Unfortunately, this can't be used everywhere I'd like but it's a good -start. ---- - include/crm/common/output_internal.h | 4 +- - lib/common/output_html.c | 2 +- - lib/common/output_xml.c | 32 ++++--- - lib/fencing/st_output.c | 34 ++++--- - lib/pacemaker/pcmk_output.c | 77 ++++++++-------- - lib/pengine/bundle.c | 4 +- - lib/pengine/pe_output.c | 174 +++++++++++++++++------------------ - tools/crm_resource_print.c | 6 +- - 8 files changed, 173 insertions(+), 160 deletions(-) - -diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h -index e1bd295..1923c1a 100644 ---- a/include/crm/common/output_internal.h -+++ b/include/crm/common/output_internal.h -@@ -648,9 +648,11 @@ pcmk__output_xml_add_node(pcmk__output_t *out, xmlNodePtr node); - * - * \param[in,out] out The output functions structure. - * \param[in] name The name of the node to be created. -+ * \param[in] ... Name/value pairs to set as XML properties. - */ - xmlNodePtr --pcmk__output_create_xml_node(pcmk__output_t *out, const char *name); -+pcmk__output_create_xml_node(pcmk__output_t *out, const char *name, ...) -+G_GNUC_NULL_TERMINATED; - - /*! 
- * \internal -diff --git a/lib/common/output_html.c b/lib/common/output_html.c -index e354b5d..77a5410 100644 ---- a/lib/common/output_html.c -+++ b/lib/common/output_html.c -@@ -370,7 +370,7 @@ html_is_quiet(pcmk__output_t *out) { - - static void - html_spacer(pcmk__output_t *out) { -- pcmk__output_create_xml_node(out, "br"); -+ pcmk__output_create_xml_node(out, "br", NULL); - } - - pcmk__output_t * -diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c -index 716d10f..80ee2de 100644 ---- a/lib/common/output_xml.c -+++ b/lib/common/output_xml.c -@@ -24,6 +24,7 @@ - - #include - #include -+#include - #include - - static gboolean legacy_xml = FALSE; -@@ -234,16 +235,16 @@ xml_subprocess_output(pcmk__output_t *out, int exit_status, - - static void - xml_version(pcmk__output_t *out, bool extended) { -- xmlNodePtr node; - private_data_t *priv = out->priv; - CRM_ASSERT(priv != NULL); - -- node = pcmk__output_create_xml_node(out, "version"); -- xmlSetProp(node, (pcmkXmlStr) "program", (pcmkXmlStr) "Pacemaker"); -- xmlSetProp(node, (pcmkXmlStr) "version", (pcmkXmlStr) PACEMAKER_VERSION); -- xmlSetProp(node, (pcmkXmlStr) "author", (pcmkXmlStr) "Andrew Beekhof"); -- xmlSetProp(node, (pcmkXmlStr) "build", (pcmkXmlStr) BUILD_VERSION); -- xmlSetProp(node, (pcmkXmlStr) "features", (pcmkXmlStr) CRM_FEATURES); -+ pcmk__output_create_xml_node(out, "version", -+ "program", "Pacemaker", -+ "version", PACEMAKER_VERSION, -+ "author", "Andrew Beekhof", -+ "build", BUILD_VERSION, -+ "features", CRM_FEATURES, -+ NULL); - } - - G_GNUC_PRINTF(2, 3) -@@ -277,7 +278,7 @@ xml_output_xml(pcmk__output_t *out, const char *name, const char *buf) { - - CRM_ASSERT(priv != NULL); - -- parent = pcmk__output_create_xml_node(out, name); -+ parent = pcmk__output_create_xml_node(out, name, NULL); - cdata_node = xmlNewCDataBlock(getDocPtr(parent), (pcmkXmlStr) buf, strlen(buf)); - xmlAddChild(parent, cdata_node); - } -@@ -419,7 +420,7 @@ pcmk__mk_xml_output(char **argv) { - - xmlNodePtr - pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, name); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, name, NULL); - pcmk__output_xml_push_parent(out, node); - return node; - } -@@ -435,17 +436,24 @@ pcmk__output_xml_add_node(pcmk__output_t *out, xmlNodePtr node) { - } - - xmlNodePtr --pcmk__output_create_xml_node(pcmk__output_t *out, const char *name) { -+pcmk__output_create_xml_node(pcmk__output_t *out, const char *name, ...) 
{ -+ xmlNodePtr node = NULL; - private_data_t *priv = out->priv; -+ va_list args; - - CRM_ASSERT(priv != NULL); - -- return create_xml_node(g_queue_peek_tail(priv->parent_q), name); -+ node = create_xml_node(g_queue_peek_tail(priv->parent_q), name); -+ va_start(args, name); -+ pcmk__xe_set_propv(node, args); -+ va_end(args); -+ -+ return node; - } - - xmlNodePtr - pcmk__output_create_xml_text_node(pcmk__output_t *out, const char *name, const char *content) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, name); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, name, NULL); - xmlNodeSetContent(node, (pcmkXmlStr) content); - return node; - } -diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c -index c3b6b41..82520ee 100644 ---- a/lib/fencing/st_output.c -+++ b/lib/fencing/st_output.c -@@ -132,10 +132,11 @@ stonith__full_history_xml(pcmk__output_t *out, va_list args) { - - PCMK__OUTPUT_LIST_FOOTER(out, rc); - } else { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "fence_history"); - char *rc_s = crm_itoa(history_rc); - -- xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) rc_s); -+ pcmk__output_create_xml_node(out, "fence_history", -+ "status", rc_s, -+ NULL); - free(rc_s); - - rc = pcmk_rc_ok; -@@ -182,11 +183,12 @@ stonith__last_fenced_xml(pcmk__output_t *out, va_list args) { - time_t when = va_arg(args, time_t); - - if (when) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "last-fenced"); - char *buf = time_t_string(when); - -- xmlSetProp(node, (pcmkXmlStr) "target", (pcmkXmlStr) target); -- xmlSetProp(node, (pcmkXmlStr) "when", (pcmkXmlStr) buf); -+ pcmk__output_create_xml_node(out, "last-fenced", -+ "target", target, -+ "when", buf, -+ NULL); - - free(buf); - return pcmk_rc_ok; -@@ -313,13 +315,19 @@ stonith__event_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean") - int - stonith__event_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "fence_event"); - stonith_history_t *event = va_arg(args, stonith_history_t *); - gboolean full_history G_GNUC_UNUSED = va_arg(args, gboolean); - gboolean later_succeeded G_GNUC_UNUSED = va_arg(args, gboolean); - - char *buf = NULL; - -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "fence_event", -+ "action", event->action, -+ "target", event->target, -+ "client", event->client, -+ "origin", event->origin, -+ NULL); -+ - switch (event->state) { - case st_failed: - xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) "failed"); -@@ -342,11 +350,6 @@ stonith__event_xml(pcmk__output_t *out, va_list args) { - xmlSetProp(node, (pcmkXmlStr) "delegate", (pcmkXmlStr) event->delegate); - } - -- xmlSetProp(node, (pcmkXmlStr) "action", (pcmkXmlStr) event->action); -- xmlSetProp(node, (pcmkXmlStr) "target", (pcmkXmlStr) event->target); -- xmlSetProp(node, (pcmkXmlStr) "client", (pcmkXmlStr) event->client); -- xmlSetProp(node, (pcmkXmlStr) "origin", (pcmkXmlStr) event->origin); -- - if (event->state == st_failed || event->state == st_done) { - buf = time_t_string(event->completed); - xmlSetProp(node, (pcmkXmlStr) "completed", (pcmkXmlStr) buf); -@@ -412,19 +415,20 @@ stonith__validate_agent_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("validate", "const char *", "const char *", "char *", "char *", "int") - int - stonith__validate_agent_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "validate"); -- - const char *agent = va_arg(args, 
const char *); - const char *device = va_arg(args, const char *); - char *output = va_arg(args, char *); - char *error_output = va_arg(args, char *); - int rc = va_arg(args, int); - -- xmlSetProp(node, (pcmkXmlStr) "agent", (pcmkXmlStr) agent); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "validate", -+ "agent", agent, -+ "valid", pcmk__btoa(rc), -+ NULL); -+ - if (device != NULL) { - xmlSetProp(node, (pcmkXmlStr) "device", (pcmkXmlStr) device); - } -- xmlSetProp(node, (pcmkXmlStr) "valid", (pcmkXmlStr) pcmk__btoa(rc)); - - pcmk__output_xml_push_parent(out, node); - out->subprocess_output(out, rc, output, error_output); -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index fd577c6..08753fb 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -157,16 +157,15 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - - if (pcmk_is_set(peer->flags, pe_rsc_allocating)) { - if (dependents == FALSE) { -- xmlNodePtr node; -- - if (!printed_header) { - pcmk__output_xml_create_parent(out, "colocations"); - printed_header = true; - } - -- node = pcmk__output_create_xml_node(out, "colocation"); -- xmlSetProp(node, (pcmkXmlStr) "peer", (pcmkXmlStr) peer->id); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) cons->id); -+ pcmk__output_create_xml_node(out, "colocation", -+ "peer", peer->id, -+ "id", cons->id, -+ NULL); - } - continue; - } -@@ -187,18 +186,19 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - - score = score2char(cons->score); - if (cons->role_rh > RSC_ROLE_STARTED) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "colocation"); -- xmlSetProp(node, (pcmkXmlStr) "peer", (pcmkXmlStr) peer->id); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) cons->id); -- xmlSetProp(node, (pcmkXmlStr) "score", (pcmkXmlStr) score); -- xmlSetProp(node, (pcmkXmlStr) "dependents", -- (pcmkXmlStr) (dependents ? "needs" : "with")); -- xmlSetProp(node, (pcmkXmlStr) "role", (pcmkXmlStr) role2text(cons->role_rh)); -+ pcmk__output_create_xml_node(out, "colocation", -+ "peer", peer->id, -+ "id", cons->id, -+ "score", score, -+ "dependents", dependents ? 
"needs" : "with", -+ "role", role2text(cons->role_rh), -+ NULL); - } else { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "colocation"); -- xmlSetProp(node, (pcmkXmlStr) "peer", (pcmkXmlStr) peer->id); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) cons->id); -- xmlSetProp(node, (pcmkXmlStr) "score", (pcmkXmlStr) score); -+ pcmk__output_create_xml_node(out, "colocation", -+ "peer", peer->id, -+ "id", cons->id, -+ "score", score, -+ NULL); - } - - free(score); -@@ -263,11 +263,11 @@ static int locations_list_xml(pcmk__output_t *out, va_list args) { - pe_node_t *node = (pe_node_t *) lpc2->data; - char *score = score2char(node->weight); - -- xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "location"); -- xmlSetProp(xml_node, (pcmkXmlStr) "host", (pcmkXmlStr) node->details->uname); -- xmlSetProp(xml_node, (pcmkXmlStr) "id", (pcmkXmlStr) cons->id); -- xmlSetProp(xml_node, (pcmkXmlStr) "score", (pcmkXmlStr) score); -- -+ pcmk__output_create_xml_node(out, "location", -+ "host", node->details->uname, -+ "id", cons->id, -+ "score", score, -+ NULL); - free(score); - } - } -@@ -385,11 +385,11 @@ health_xml(pcmk__output_t *out, va_list args) - char *fsa_state = va_arg(args, char *); - char *result = va_arg(args, char *); - -- xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -- xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(host_from)); -- xmlSetProp(node, (pcmkXmlStr) "state", (pcmkXmlStr) crm_str(fsa_state)); -- xmlSetProp(node, (pcmkXmlStr) "result", (pcmkXmlStr) crm_str(result)); -- -+ pcmk__output_create_xml_node(out, crm_str(sys_from), -+ "node_name", crm_str(host_from), -+ "state", crm_str(fsa_state), -+ "result", crm_str(result), -+ NULL); - return pcmk_rc_ok; - } - -@@ -420,11 +420,10 @@ pacemakerd_health_xml(pcmk__output_t *out, va_list args) - char *state = va_arg(args, char *); - char *last_updated = va_arg(args, char *); - -- -- xmlNodePtr node = pcmk__output_create_xml_node(out, crm_str(sys_from)); -- xmlSetProp(node, (pcmkXmlStr) "state", (pcmkXmlStr) crm_str(state)); -- xmlSetProp(node, (pcmkXmlStr) "last_updated", (pcmkXmlStr) crm_str(last_updated)); -- -+ pcmk__output_create_xml_node(out, crm_str(sys_from), -+ "state", crm_str(state), -+ "last_updated", crm_str(last_updated), -+ NULL); - return pcmk_rc_ok; - } - -@@ -449,9 +448,9 @@ dc_xml(pcmk__output_t *out, va_list args) - { - char *dc = va_arg(args, char *); - -- xmlNodePtr node = pcmk__output_create_xml_node(out, "dc"); -- xmlSetProp(node, (pcmkXmlStr) "node_name", (pcmkXmlStr) crm_str(dc)); -- -+ pcmk__output_create_xml_node(out, "dc", -+ "node_name", crm_str(dc), -+ NULL); - return pcmk_rc_ok; - } - -@@ -517,11 +516,11 @@ crmadmin_node_xml(pcmk__output_t *out, va_list args) - char *name = va_arg(args, char *); - char *id = va_arg(args, char *); - -- xmlNodePtr node = pcmk__output_create_xml_node(out, "node"); -- xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) (type ? type : "member")); -- xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) crm_str(name)); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) crm_str(id)); -- -+ pcmk__output_create_xml_node(out, "node", -+ "type", type ? 
type : "member", -+ "name", crm_str(name), -+ "id", crm_str(id), -+ NULL); - return pcmk_rc_ok; - } - -diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c -index 4f6eac3..8a17aad 100644 ---- a/lib/pengine/bundle.c -+++ b/lib/pengine/bundle.c -@@ -1663,7 +1663,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args) - } - - if (rc == pcmk_rc_no_output) { -- pcmk__output_create_xml_node(out, "br"); -+ pcmk__output_create_xml_node(out, "br", NULL); - } - - PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s", -@@ -1678,7 +1678,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args) - snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset); - xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) buffer); - } -- pcmk__output_create_xml_node(out, "br"); -+ pcmk__output_create_xml_node(out, "br", NULL); - out->begin_list(out, NULL, NULL, NULL); - - if (print_ip) { -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index b91348f..a6f5970 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -465,19 +465,19 @@ pe__ban_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean") - int - pe__ban_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "ban"); - pe_node_t *pe_node = va_arg(args, pe_node_t *); - pe__location_t *location = va_arg(args, pe__location_t *); - gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean); - - char *weight_s = crm_itoa(pe_node->weight); - -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) location->id); -- xmlSetProp(node, (pcmkXmlStr) "resource", (pcmkXmlStr) location->rsc_lh->id); -- xmlSetProp(node, (pcmkXmlStr) "node", (pcmkXmlStr) pe_node->details->uname); -- xmlSetProp(node, (pcmkXmlStr) "weight", (pcmkXmlStr) weight_s); -- xmlSetProp(node, (pcmkXmlStr) "master_only", -- (pcmkXmlStr) pcmk__btoa(location->role_filter == RSC_ROLE_MASTER)); -+ pcmk__output_create_xml_node(out, "ban", -+ "id", location->id, -+ "resource", location->rsc_lh->id, -+ "node", pe_node->details->uname, -+ "weight", weight_s, -+ "master_only", pcmk__btoa(location->role_filter == RSC_ROLE_MASTER), -+ NULL); - - free(weight_s); - return pcmk_rc_ok; -@@ -486,8 +486,8 @@ pe__ban_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") - int - pe__cluster_counts_html(pcmk__output_t *out, va_list args) { -- xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li"); -- xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li"); -+ xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL); -+ xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL); - - unsigned int nnodes = va_arg(args, unsigned int); - int nresources = va_arg(args, int); -@@ -582,8 +582,8 @@ pe__cluster_counts_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") - int - pe__cluster_counts_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured"); -- xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured"); -+ xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL); -+ xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL); - - unsigned int nnodes = va_arg(args, unsigned int); - int nresources = va_arg(args, int); -@@ -612,7 
+612,7 @@ pe__cluster_counts_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *") - int - pe__cluster_dc_html(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "li"); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); - - pe_node_t *dc = va_arg(args, pe_node_t *); - const char *quorum = va_arg(args, const char *); -@@ -665,22 +665,23 @@ pe__cluster_dc_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *") - int - pe__cluster_dc_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "current_dc"); -- - pe_node_t *dc = va_arg(args, pe_node_t *); - const char *quorum = va_arg(args, const char *); - const char *dc_version_s = va_arg(args, const char *); - char *dc_name G_GNUC_UNUSED = va_arg(args, char *); - - if (dc) { -- xmlSetProp(node, (pcmkXmlStr) "present", (pcmkXmlStr) "true"); -- xmlSetProp(node, (pcmkXmlStr) "version", (pcmkXmlStr) (dc_version_s ? dc_version_s : "")); -- xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) dc->details->uname); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) dc->details->id); -- xmlSetProp(node, (pcmkXmlStr) "with_quorum", -- (pcmkXmlStr) pcmk__btoa(crm_is_true(quorum))); -+ pcmk__output_create_xml_node(out, "current_dc", -+ "present", "true", -+ "version", dc_version_s ? dc_version_s : "", -+ "name", dc->details->uname, -+ "id", dc->details->id, -+ "with_quorum", pcmk__btoa(crm_is_true(quorum)), -+ NULL); - } else { -- xmlSetProp(node, (pcmkXmlStr) "present", (pcmkXmlStr) "false"); -+ pcmk__output_create_xml_node(out, "current_dc", -+ "present", "false", -+ NULL); - } - - return pcmk_rc_ok; -@@ -741,14 +742,14 @@ pe__cluster_options_html(pcmk__output_t *out, va_list args) { - } - - if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "li"); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); - - pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: "); - pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED"); - pcmk_create_html_node(node, "span", NULL, NULL, - " (the cluster will not attempt to start, stop, or recover services)"); - } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "li"); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); - - pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: "); - pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED"); -@@ -817,48 +818,45 @@ pe__cluster_options_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *") - int - pe__cluster_options_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "cluster_options"); - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); -- -- xmlSetProp(node, (pcmkXmlStr) "stonith-enabled", -- (pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled))); -- xmlSetProp(node, (pcmkXmlStr) "symmetric-cluster", -- (pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster))); -+ const char *no_quorum_policy = NULL; - - switch (data_set->no_quorum_policy) { - case no_quorum_freeze: -- xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "freeze"); -+ no_quorum_policy = 
"freeze"; - break; - - case no_quorum_stop: -- xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "stop"); -+ no_quorum_policy = "stop"; - break; - - case no_quorum_demote: -- xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "demote"); -+ no_quorum_policy = "demote"; - break; - - case no_quorum_ignore: -- xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "ignore"); -+ no_quorum_policy = "ignore"; - break; - - case no_quorum_suicide: -- xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "suicide"); -+ no_quorum_policy = "suicide"; - break; - } - -- xmlSetProp(node, (pcmkXmlStr) "maintenance-mode", -- (pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode))); -- xmlSetProp(node, (pcmkXmlStr) "stop-all-resources", -- (pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything))); -- -+ pcmk__output_create_xml_node(out, "cluster_options", -+ "stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)), -+ "symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)), -+ "no-quorum-policy", no_quorum_policy, -+ "maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)), -+ "stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)), -+ NULL); - return pcmk_rc_ok; - } - - PCMK__OUTPUT_ARGS("cluster-stack", "const char *") - int - pe__cluster_stack_html(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "li"); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); - const char *stack_s = va_arg(args, const char *); - - pcmk_create_html_node(node, "span", NULL, "bold", "Stack: "); -@@ -878,10 +876,11 @@ pe__cluster_stack_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-stack", "const char *") - int - pe__cluster_stack_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "stack"); - const char *stack_s = va_arg(args, const char *); - -- xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) stack_s); -+ pcmk__output_create_xml_node(out, "stack", -+ "type", stack_s, -+ NULL); - - return pcmk_rc_ok; - } -@@ -889,8 +888,8 @@ pe__cluster_stack_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *") - int - pe__cluster_times_html(pcmk__output_t *out, va_list args) { -- xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li"); -- xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li"); -+ xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL); -+ xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL); - - const char *last_written = va_arg(args, const char *); - const char *user = va_arg(args, const char *); -@@ -913,20 +912,20 @@ pe__cluster_times_html(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *") - int - pe__cluster_times_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "last_update"); -- xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "last_change"); -- - const char *last_written = va_arg(args, const char *); - const char *user = va_arg(args, const char *); - const char *client = va_arg(args, const char *); - const char *origin = va_arg(args, const char *); - -- xmlSetProp(updated_node, (pcmkXmlStr) 
"time", -- (pcmkXmlStr) pcmk__epoch2str(NULL)); -- xmlSetProp(changed_node, (pcmkXmlStr) "time", (pcmkXmlStr) (last_written ? last_written : "")); -- xmlSetProp(changed_node, (pcmkXmlStr) "user", (pcmkXmlStr) (user ? user : "")); -- xmlSetProp(changed_node, (pcmkXmlStr) "client", (pcmkXmlStr) (client ? client : "")); -- xmlSetProp(changed_node, (pcmkXmlStr) "origin", (pcmkXmlStr) (origin ? origin : "")); -+ pcmk__output_create_xml_node(out, "last_update", -+ "time", pcmk__epoch2str(NULL), -+ NULL); -+ pcmk__output_create_xml_node(out, "last_change", -+ "time", last_written ? last_written : "", -+ "user", user ? user : "", -+ "client", client ? client : "", -+ "origin", origin ? origin : "", -+ NULL); - - return pcmk_rc_ok; - } -@@ -972,20 +971,15 @@ pe__failed_action_xml(pcmk__output_t *out, va_list args) { - - char *rc_s = crm_itoa(rc); - char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none"); -- xmlNodePtr node = pcmk__output_create_xml_node(out, "failure"); -- -- xmlSetProp(node, (pcmkXmlStr) (op_key ? "op_key" : "id"), -- (pcmkXmlStr) (op_key ? op_key : ID(xml_op))); -- xmlSetProp(node, (pcmkXmlStr) "node", -- (pcmkXmlStr) crm_element_value(xml_op, XML_ATTR_UNAME)); -- xmlSetProp(node, (pcmkXmlStr) "exitstatus", -- (pcmkXmlStr) services_ocf_exitcode_str(rc)); -- xmlSetProp(node, (pcmkXmlStr) "exitreason", (pcmkXmlStr) reason_s); -- xmlSetProp(node, (pcmkXmlStr) "exitcode", (pcmkXmlStr) rc_s); -- xmlSetProp(node, (pcmkXmlStr) "call", -- (pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID)); -- xmlSetProp(node, (pcmkXmlStr) "status", -- (pcmkXmlStr) services_lrm_status_str(status)); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "failure", -+ op_key ? "op_key" : "id", op_key ? op_key : ID(xml_op), -+ "node", crm_element_value(xml_op, XML_ATTR_UNAME), -+ "exitstatus", services_ocf_exitcode_str(rc), -+ "exitreason", reason_s, -+ "exitcode", rc_s, -+ "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID), -+ "status", services_lrm_status_str(status), -+ NULL); - - if (last) { - guint interval_ms = 0; -@@ -1037,7 +1031,7 @@ pe__node_html(pcmk__output_t *out, va_list args) { - char *buf = crm_strdup_printf("Node: %s", node_name); - - if (full) { -- xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li"); -+ xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL); - - pcmk_create_html_node(item_node, "span", NULL, NULL, buf); - -@@ -1265,7 +1259,7 @@ pe__node_attribute_html(pcmk__output_t *out, va_list args) { - if (add_extra) { - int v = crm_parse_int(value, "0"); - char *s = crm_strdup_printf("%s: %s", name, value); -- xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li"); -+ xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL); - - pcmk_create_html_node(item_node, "span", NULL, NULL, s); - free(s); -@@ -1351,7 +1345,13 @@ pe__node_and_op_xml(pcmk__output_t *out, va_list args) { - int status = crm_parse_int(status_s, "0"); - time_t last_change = 0; - -- xmlNode *node = pcmk__output_create_xml_node(out, "operation"); -+ xmlNode *node = pcmk__output_create_xml_node(out, "operation", -+ "op", op_key ? 
op_key : ID(xml_op), -+ "node", crm_element_value(xml_op, XML_ATTR_UNAME), -+ "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID), -+ "rc", crm_element_value(xml_op, XML_LRM_ATTR_RC), -+ "status", services_lrm_status_str(status), -+ NULL); - - rsc = pe_find_resource(data_set->resources, op_rsc); - -@@ -1382,8 +1382,6 @@ pe__node_and_op_xml(pcmk__output_t *out, va_list args) { - (pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_EXEC)); - } - -- xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) services_lrm_status_str(status)); -- - return pcmk_rc_ok; - } - -@@ -1395,9 +1393,10 @@ pe__node_attribute_xml(pcmk__output_t *out, va_list args) { - gboolean add_extra = va_arg(args, gboolean); - int expected_score = va_arg(args, int); - -- xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute"); -- xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) name); -- xmlSetProp(node, (pcmkXmlStr) "value", (pcmkXmlStr) value); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute", -+ "name", name, -+ "value", value, -+ NULL); - - if (add_extra) { - char *buf = crm_itoa(expected_score); -@@ -1630,13 +1629,14 @@ pe__op_history_xml(pcmk__output_t *out, va_list args) { - int rc = va_arg(args, int); - gboolean print_timing = va_arg(args, gboolean); - -- char *rc_s = NULL; -- -- xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history"); -- -- xmlSetProp(node, (pcmkXmlStr) "call", -- (pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID)); -- xmlSetProp(node, (pcmkXmlStr) "task", (pcmkXmlStr) task); -+ char *rc_s = crm_itoa(rc); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history", -+ "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID), -+ "task", task, -+ "rc", rc_s, -+ "rc_text", services_ocf_exitcode_str(rc), -+ NULL); -+ free(rc_s); - - if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) { - char *s = crm_strdup_printf("%sms", interval_ms_s); -@@ -1921,11 +1921,11 @@ pe__ticket_xml(pcmk__output_t *out, va_list args) { - - pe_ticket_t *ticket = va_arg(args, pe_ticket_t *); - -- node = pcmk__output_create_xml_node(out, "ticket"); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) ticket->id); -- xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) (ticket->granted ? "granted" : "revoked")); -- xmlSetProp(node, (pcmkXmlStr) "standby", -- (pcmkXmlStr) pcmk__btoa(ticket->standby)); -+ node = pcmk__output_create_xml_node(out, "ticket", -+ "id", ticket->id, -+ "status", ticket->granted ? 
"granted" : "revoked", -+ "standby", pcmk__btoa(ticket->standby), -+ NULL); - - if (ticket->last_granted > -1) { - xmlSetProp(node, (pcmkXmlStr) "last-granted", -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 6303863..ca8bee2 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -247,9 +247,9 @@ resource_check_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *parent = uber_parent(checks->rsc); - int rc = pcmk_rc_no_output; - -- xmlNode *node = pcmk__output_create_xml_node(out, "check"); -- -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) parent->id); -+ xmlNode *node = pcmk__output_create_xml_node(out, "check", -+ "id", parent->id, -+ NULL); - - if (pcmk_is_set(checks->flags, rsc_remain_stopped)) { - xmlSetProp(node, (pcmkXmlStr) "remain_stopped", (pcmkXmlStr) "true"); --- -1.8.3.1 - - -From 859b7f6daaddbbbfc102b968c4076dc03e03c100 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 18 Nov 2020 13:42:40 -0500 -Subject: [PATCH 3/6] Refactor: libs: Set multiple XML properties at once. - -This just wraps pcmk__xe_set_propv with another function that takes -arguments directly, and then uses that function throughout the libraries -wherever multiple properties are set at once. ---- - include/crm/common/xml_internal.h | 17 ++++++++++++++++ - lib/common/output_html.c | 6 ++++-- - lib/common/output_xml.c | 5 +++-- - lib/common/xml.c | 9 +++++++++ - lib/fencing/st_output.c | 6 ++++-- - lib/pengine/pe_output.c | 42 +++++++++++++++------------------------ - 6 files changed, 53 insertions(+), 32 deletions(-) - -diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h -index c60fa51..13157c6 100644 ---- a/include/crm/common/xml_internal.h -+++ b/include/crm/common/xml_internal.h -@@ -235,7 +235,24 @@ pcmk__xe_next(const xmlNode *child) - return next; - } - -+/*! -+ * \internal -+ * \brief Like pcmk__xe_set_props, but takes a va_list instead of -+ * arguments directly. -+ */ - void - pcmk__xe_set_propv(xmlNodePtr node, va_list pairs); - -+/*! -+ * \internal -+ * \brief Add a NULL-terminated list of name/value pairs to the given -+ * XML node as properties. -+ * -+ * \param[in,out] node XML node to add properties to -+ * \param[in] ... NULL-terminated list of name/value pairs -+ */ -+void -+pcmk__xe_set_props(xmlNodePtr node, ...) 
-+G_GNUC_NULL_TERMINATED; -+ - #endif // PCMK__XML_INTERNAL__H -diff --git a/lib/common/output_html.c b/lib/common/output_html.c -index 77a5410..5daf831 100644 ---- a/lib/common/output_html.c -+++ b/lib/common/output_html.c -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - - static const char *stylesheet_default = - ".bold { font-weight: bold }\n" -@@ -153,8 +154,9 @@ finish_reset_common(pcmk__output_t *out, crm_exit_t exit_status, bool print) { - - if (stylesheet_link != NULL) { - htmlNodePtr link_node = create_xml_node(head_node, "link"); -- xmlSetProp(link_node, (pcmkXmlStr) "rel", (pcmkXmlStr) "stylesheet"); -- xmlSetProp(link_node, (pcmkXmlStr) "href", (pcmkXmlStr) stylesheet_link); -+ pcmk__xe_set_props(link_node, "rel", "stylesheet", -+ "href", stylesheet_link, -+ NULL); - } - - xmlAddPrevSibling(priv->root->children, head_node); -diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c -index 80ee2de..133b892 100644 ---- a/lib/common/output_xml.c -+++ b/lib/common/output_xml.c -@@ -157,8 +157,9 @@ finish_reset_common(pcmk__output_t *out, crm_exit_t exit_status, bool print) { - char *rc_as_str = crm_itoa(exit_status); - - node = create_xml_node(priv->root, "status"); -- xmlSetProp(node, (pcmkXmlStr) "code", (pcmkXmlStr) rc_as_str); -- xmlSetProp(node, (pcmkXmlStr) "message", (pcmkXmlStr) crm_exit_str(exit_status)); -+ pcmk__xe_set_props(node, "code", rc_as_str, -+ "message", crm_exit_str(exit_status), -+ NULL); - - if (g_slist_length(priv->errors) > 0) { - xmlNodePtr errors_node = create_xml_node(node, "errors"); -diff --git a/lib/common/xml.c b/lib/common/xml.c -index 61cac9f..e5de0d4 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -2955,6 +2955,15 @@ pcmk__xe_set_propv(xmlNodePtr node, va_list pairs) - } - } - -+void -+pcmk__xe_set_props(xmlNodePtr node, ...) 
-+{ -+ va_list pairs; -+ va_start(pairs, node); -+ pcmk__xe_set_propv(node, pairs); -+ va_end(pairs); -+} -+ - // Deprecated functions kept only for backward API compatibility - - xmlNode *find_entity(xmlNode *parent, const char *node_name, const char *id); -diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c -index 82520ee..65f8ec9 100644 ---- a/lib/fencing/st_output.c -+++ b/lib/fencing/st_output.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -339,8 +340,9 @@ stonith__event_xml(pcmk__output_t *out, va_list args) { - - default: { - char *state = crm_itoa(event->state); -- xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) "pending"); -- xmlSetProp(node, (pcmkXmlStr) "extended-status", (pcmkXmlStr) state); -+ pcmk__xe_set_props(node, "status", "pending", -+ "extended-status", state, -+ NULL); - free(state); - break; - } -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index a6f5970..294f6e1 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -9,6 +9,7 @@ - - #include - #include -+#include - #include - #include - -@@ -994,14 +995,12 @@ pe__failed_action_xml(pcmk__output_t *out, va_list args) { - crm_time_set_timet(crm_when, &when); - rc_change = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); - -- xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE, (pcmkXmlStr) rc_change); -- xmlSetProp(node, (pcmkXmlStr) "queued", -- (pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_QUEUE)); -- xmlSetProp(node, (pcmkXmlStr) "exec", -- (pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_EXEC)); -- xmlSetProp(node, (pcmkXmlStr) "interval", (pcmkXmlStr) s); -- xmlSetProp(node, (pcmkXmlStr) "task", -- (pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_TASK)); -+ pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, rc_change, -+ "queued", crm_element_value(xml_op, XML_RSC_OP_T_QUEUE), -+ "exec", crm_element_value(xml_op, XML_RSC_OP_T_EXEC), -+ "interval", s, -+ "task", crm_element_value(xml_op, XML_LRM_ATTR_TASK), -+ NULL); - - free(s); - free(rc_change); -@@ -1364,22 +1363,17 @@ pe__node_and_op_xml(pcmk__output_t *out, va_list args) { - pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) ? crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER) : "", - kind); - -- xmlSetProp(node, (pcmkXmlStr) "rsc", (pcmkXmlStr) rsc_printable_id(rsc)); -- xmlSetProp(node, (pcmkXmlStr) "agent", (pcmkXmlStr) agent_tuple); -+ pcmk__xe_set_props(node, "rsc", rsc_printable_id(rsc), -+ "agent", agent_tuple, -+ NULL); - free(agent_tuple); - } - -- xmlSetProp(node, (pcmkXmlStr) "op", (pcmkXmlStr) (op_key ? 
op_key : ID(xml_op))); -- xmlSetProp(node, (pcmkXmlStr) "node", (pcmkXmlStr) crm_element_value(xml_op, XML_ATTR_UNAME)); -- xmlSetProp(node, (pcmkXmlStr) "call", (pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID)); -- xmlSetProp(node, (pcmkXmlStr) "rc", (pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_RC)); -- - if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, - &last_change) == pcmk_ok) { -- xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE, -- (pcmkXmlStr) crm_strip_trailing_newline(ctime(&last_change))); -- xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_EXEC, -- (pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_EXEC)); -+ pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, crm_strip_trailing_newline(ctime(&last_change)), -+ XML_RSC_OP_T_EXEC, crm_element_value(xml_op, XML_RSC_OP_T_EXEC), -+ NULL); - } - - return pcmk_rc_ok; -@@ -1679,10 +1673,6 @@ pe__op_history_xml(pcmk__output_t *out, va_list args) { - } - } - -- rc_s = crm_itoa(rc); -- xmlSetProp(node, (pcmkXmlStr) "rc", (pcmkXmlStr) rc_s); -- xmlSetProp(node, (pcmkXmlStr) "rc_text", (pcmkXmlStr) services_ocf_exitcode_str(rc)); -- free(rc_s); - return pcmk_rc_ok; - } - -@@ -1746,9 +1736,9 @@ pe__resource_history_xml(pcmk__output_t *out, va_list args) { - } else if (all || failcount || last_failure > 0) { - char *migration_s = crm_itoa(rsc->migration_threshold); - -- xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "false"); -- xmlSetProp(node, (pcmkXmlStr) "migration-threshold", -- (pcmkXmlStr) migration_s); -+ pcmk__xe_set_props(node, "orphan", "false", -+ "migration-threshold", migration_s, -+ NULL); - free(migration_s); - - if (failcount > 0) { --- -1.8.3.1 - - -From 1c9b4af53445091c211a48e6027c102133498fbb Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 17 Nov 2020 14:26:33 -0500 -Subject: [PATCH 4/6] Refactor: libs: Set XML properties when creating a parent - node. - -This is just like what's going on with pcmk__output_create_xml_node, -except for pcmk__output_create_parent. ---- - include/crm/common/output_internal.h | 4 +++- - lib/common/output_html.c | 6 +++--- - lib/common/output_xml.c | 22 ++++++++++++------- - lib/lrmd/lrmd_output.c | 17 ++++++++------- - lib/pacemaker/pcmk_output.c | 16 +++++++------- - lib/pengine/bundle.c | 2 +- - lib/pengine/pe_output.c | 10 +++++---- - tools/crm_resource_print.c | 41 ++++++++++++++++++------------------ - 8 files changed, 64 insertions(+), 54 deletions(-) - -diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h -index 1923c1a..4bf6b3d 100644 ---- a/include/crm/common/output_internal.h -+++ b/include/crm/common/output_internal.h -@@ -626,9 +626,11 @@ pcmk__indented_vprintf(pcmk__output_t *out, const char *format, va_list args) G_ - * - * \param[in,out] out The output functions structure. - * \param[in] name The name of the node to be created. -+ * \param[in] ... Name/value pairs to set as XML properties. - */ - xmlNodePtr --pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name); -+pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name, ...) -+G_GNUC_NULL_TERMINATED; - - /*! 
- * \internal -diff --git a/lib/common/output_html.c b/lib/common/output_html.c -index 5daf831..cf51af4 100644 ---- a/lib/common/output_html.c -+++ b/lib/common/output_html.c -@@ -102,7 +102,7 @@ html_init(pcmk__output_t *out) { - g_queue_push_tail(priv->parent_q, priv->root); - priv->errors = NULL; - -- pcmk__output_xml_create_parent(out, "body"); -+ pcmk__output_xml_create_parent(out, "body", NULL); - - return true; - } -@@ -294,7 +294,7 @@ html_begin_list(pcmk__output_t *out, const char *singular_noun, - */ - q_len = g_queue_get_length(priv->parent_q); - if (q_len > 2) { -- pcmk__output_xml_create_parent(out, "li"); -+ pcmk__output_xml_create_parent(out, "li", NULL); - } - - if (format != NULL) { -@@ -316,7 +316,7 @@ html_begin_list(pcmk__output_t *out, const char *singular_noun, - free(buf); - } - -- node = pcmk__output_xml_create_parent(out, "ul"); -+ node = pcmk__output_xml_create_parent(out, "ul", NULL); - g_queue_push_tail(priv->parent_q, node); - } - -diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c -index 133b892..6d92625 100644 ---- a/lib/common/output_xml.c -+++ b/lib/common/output_xml.c -@@ -217,8 +217,9 @@ xml_subprocess_output(pcmk__output_t *out, int exit_status, - - rc_as_str = crm_itoa(exit_status); - -- node = pcmk__output_xml_create_parent(out, "command"); -- xmlSetProp(node, (pcmkXmlStr) "code", (pcmkXmlStr) rc_as_str); -+ node = pcmk__output_xml_create_parent(out, "command", -+ "code", rc_as_str, -+ NULL); - - if (proc_stdout != NULL) { - child_node = pcmk_create_xml_text_node(node, "output", proc_stdout); -@@ -312,12 +313,11 @@ xml_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plura - } - - if (legacy_xml || simple_list) { -- pcmk__output_xml_create_parent(out, name); -+ pcmk__output_xml_create_parent(out, name, NULL); - } else { -- xmlNodePtr list_node = NULL; -- -- list_node = pcmk__output_xml_create_parent(out, "list"); -- xmlSetProp(list_node, (pcmkXmlStr) "name", (pcmkXmlStr) name); -+ pcmk__output_xml_create_parent(out, "list", -+ "name", name, -+ NULL); - } - - g_free(name); -@@ -420,8 +420,14 @@ pcmk__mk_xml_output(char **argv) { - } - - xmlNodePtr --pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name) { -+pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name, ...) 
{ -+ va_list args; - xmlNodePtr node = pcmk__output_create_xml_node(out, name, NULL); -+ -+ va_start(args, name); -+ pcmk__xe_set_propv(node, args); -+ va_end(args); -+ - pcmk__output_xml_push_parent(out, node); - return node; - } -diff --git a/lib/lrmd/lrmd_output.c b/lib/lrmd/lrmd_output.c -index 7dc0709..c01cc5e 100644 ---- a/lib/lrmd/lrmd_output.c -+++ b/lib/lrmd/lrmd_output.c -@@ -46,9 +46,9 @@ lrmd__alternatives_list_xml(pcmk__output_t *out, va_list args) { - lrmd_list_t *list = va_arg(args, lrmd_list_t *); - const char *agent_spec = va_arg(args, const char *); - -- xmlNodePtr node = pcmk__output_xml_create_parent(out, "providers"); -- -- xmlSetProp(node, (pcmkXmlStr) "for", (pcmkXmlStr) agent_spec); -+ pcmk__output_xml_create_parent(out, "providers", -+ "for", agent_spec, -+ NULL); - return xml_list(out, list, "provider"); - } - -@@ -68,8 +68,9 @@ lrmd__agents_list_xml(pcmk__output_t *out, va_list args) { - const char *agent_spec = va_arg(args, const char *); - char *provider = va_arg(args, char *); - -- xmlNodePtr node = pcmk__output_xml_create_parent(out, "agents"); -- xmlSetProp(node, (pcmkXmlStr) "standard", (pcmkXmlStr) agent_spec); -+ xmlNodePtr node = pcmk__output_xml_create_parent(out, "agents", -+ "standard", agent_spec, -+ NULL); - - if (!pcmk__str_empty(provider)) { - xmlSetProp(node, (pcmkXmlStr) "provider", (pcmkXmlStr) provider); -@@ -99,9 +100,9 @@ lrmd__providers_list_xml(pcmk__output_t *out, va_list args) { - lrmd_list_t *list = va_arg(args, lrmd_list_t *); - const char *agent_spec = va_arg(args, const char *); - -- xmlNodePtr node = pcmk__output_xml_create_parent(out, "providers"); -- -- xmlSetProp(node, (pcmkXmlStr) "standard", (pcmkXmlStr) "ocf"); -+ xmlNodePtr node = pcmk__output_xml_create_parent(out, "providers", -+ "standard", "ocf", -+ NULL); - - if (agent_spec != NULL) { - xmlSetProp(node, (pcmkXmlStr) "agent", (pcmkXmlStr) agent_spec); -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 08753fb..74a7c59 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -158,7 +158,7 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - if (pcmk_is_set(peer->flags, pe_rsc_allocating)) { - if (dependents == FALSE) { - if (!printed_header) { -- pcmk__output_xml_create_parent(out, "colocations"); -+ pcmk__output_xml_create_parent(out, "colocations", NULL); - printed_header = true; - } - -@@ -172,7 +172,7 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - - if (dependents && recursive) { - if (!printed_header) { -- pcmk__output_xml_create_parent(out, "colocations"); -+ pcmk__output_xml_create_parent(out, "colocations", NULL); - printed_header = true; - } - -@@ -180,7 +180,7 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - } - - if (!printed_header) { -- pcmk__output_xml_create_parent(out, "colocations"); -+ pcmk__output_xml_create_parent(out, "colocations", NULL); - printed_header = true; - } - -@@ -252,7 +252,7 @@ static int locations_list_xml(pcmk__output_t *out, va_list args) { - GListPtr lpc = NULL; - GListPtr list = rsc->rsc_location; - -- pcmk__output_xml_create_parent(out, "locations"); -+ pcmk__output_xml_create_parent(out, "locations", NULL); - - for (lpc = list; lpc != NULL; lpc = lpc->next) { - pe__location_t *cons = lpc->data; -@@ -323,7 +323,6 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - gboolean recursive = va_arg(args, gboolean); - - GListPtr lpc = NULL; -- xmlNodePtr node = NULL; - xmlNode *cib_constraints = 
get_object_root(XML_CIB_TAG_CONSTRAINTS, - data_set->input); - -@@ -338,12 +337,13 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - pe__clear_resource_flags(r, pe_rsc_allocating); - } - -- pcmk__output_xml_create_parent(out, "constraints"); -+ pcmk__output_xml_create_parent(out, "constraints", NULL); - - out->message(out, "colocations-list", rsc, TRUE, recursive); - -- node = pcmk__output_xml_create_parent(out, "resource"); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc->id); -+ pcmk__output_xml_create_parent(out, "resource", -+ "id", rsc->id, -+ NULL); - out->message(out, "locations-list", rsc); - pcmk__output_xml_pop_parent(out); - -diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c -index 8a17aad..543f5dc 100644 ---- a/lib/pengine/bundle.c -+++ b/lib/pengine/bundle.c -@@ -1672,7 +1672,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args) - pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", - pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); - -- pcmk__output_xml_create_parent(out, "li"); -+ pcmk__output_xml_create_parent(out, "li", NULL); - - if (pcmk__list_of_multiple(bundle_data->replicas)) { - snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset); -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index 294f6e1..470b025 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -1214,8 +1214,9 @@ pe__node_xml(pcmk__output_t *out, va_list args) { - - out->end_list(out); - } else { -- xmlNodePtr parent = pcmk__output_xml_create_parent(out, "node"); -- xmlSetProp(parent, (pcmkXmlStr) "name", (pcmkXmlStr) node->details->uname); -+ pcmk__output_xml_create_parent(out, "node", -+ "name", node->details->uname, -+ NULL); - } - - return pcmk_rc_ok; -@@ -1728,8 +1729,9 @@ pe__resource_history_xml(pcmk__output_t *out, va_list args) { - time_t last_failure = va_arg(args, int); - gboolean as_header = va_arg(args, gboolean); - -- xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history"); -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc_id); -+ xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history", -+ "id", rsc_id, -+ NULL); - - if (rsc == NULL) { - xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "true"); -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index ca8bee2..a33356f 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -324,9 +324,9 @@ resource_search_xml(pcmk__output_t *out, va_list args) - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gchar *requested_name = va_arg(args, gchar *); - -- xmlNode *xml_node = pcmk__output_xml_create_parent(out, "nodes"); -- -- xmlSetProp(xml_node, (pcmkXmlStr) "resource", (pcmkXmlStr) requested_name); -+ pcmk__output_xml_create_parent(out, "nodes", -+ "resource", requested_name, -+ NULL); - - for (GListPtr lpc = nodes; lpc != NULL; lpc = lpc->next) { - pe_node_t *node = (pe_node_t *) lpc->data; -@@ -435,24 +435,23 @@ resource_why_xml(pcmk__output_t *out, va_list args) - - const char *host_uname = (node == NULL)? 
NULL : node->details->uname; - -- xmlNode *xml_node = pcmk__output_xml_create_parent(out, "reason"); -+ xmlNode *xml_node = pcmk__output_xml_create_parent(out, "reason", NULL); - - if ((rsc == NULL) && (host_uname == NULL)) { - GListPtr lpc = NULL; - GListPtr hosts = NULL; - -- pcmk__output_xml_create_parent(out, "resources"); -+ pcmk__output_xml_create_parent(out, "resources", NULL); - - for (lpc = resources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; -- xmlNode *rsc_node = NULL; - - rsc->fns->location(rsc, &hosts, TRUE); - -- rsc_node = pcmk__output_xml_create_parent(out, "resource"); -- xmlSetProp(rsc_node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc->id); -- xmlSetProp(rsc_node, (pcmkXmlStr) "running", -- (pcmkXmlStr) pcmk__btoa(hosts != NULL)); -+ pcmk__output_xml_create_parent(out, "resource", -+ "id", rsc->id, -+ "running", pcmk__btoa(hosts != NULL), -+ NULL); - - cli_resource_check(out, cib_conn, rsc); - pcmk__output_xml_pop_parent(out); -@@ -476,16 +475,16 @@ resource_why_xml(pcmk__output_t *out, va_list args) - GListPtr unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); - GListPtr lpc = NULL; - -- pcmk__output_xml_create_parent(out, "resources"); -+ pcmk__output_xml_create_parent(out, "resources", NULL); - - for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; -- xmlNode *rsc_node = NULL; - -- rsc_node = pcmk__output_xml_create_parent(out, "resource"); -- xmlSetProp(rsc_node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc->id); -- xmlSetProp(rsc_node, (pcmkXmlStr) "running", (pcmkXmlStr) "true"); -- xmlSetProp(rsc_node, (pcmkXmlStr) "host", (pcmkXmlStr) host_uname); -+ pcmk__output_xml_create_parent(out, "resource", -+ "id", rsc->id, -+ "running", "true", -+ "host", host_uname, -+ NULL); - - cli_resource_check(out, cib_conn, rsc); - pcmk__output_xml_pop_parent(out); -@@ -493,12 +492,12 @@ resource_why_xml(pcmk__output_t *out, va_list args) - - for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; -- xmlNode *rsc_node = NULL; - -- rsc_node = pcmk__output_xml_create_parent(out, "resource"); -- xmlSetProp(rsc_node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc->id); -- xmlSetProp(rsc_node, (pcmkXmlStr) "running", (pcmkXmlStr) "false"); -- xmlSetProp(rsc_node, (pcmkXmlStr) "host", (pcmkXmlStr) host_uname); -+ pcmk__output_xml_create_parent(out, "resource", -+ "id", rsc->id, -+ "running", "false", -+ "host", host_uname, -+ NULL); - - cli_resource_check(out, cib_conn, rsc); - pcmk__output_xml_pop_parent(out); --- -1.8.3.1 - - -From 29cd349a15dff8975499e650d070934aa4c68e3f Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 18 Nov 2020 14:21:11 -0500 -Subject: [PATCH 5/6] Refactor: libs: Remove most uses of xmlSetProp. - -We can use crm_xml_add to do this instead, and that function will take -ACLs in account too (though, none of these call sites actually care -about ACLs). I've left calls in most of the various XML manipulating -functions because those seem more intentional. 
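
For a concrete picture of the conversion (both forms are copied from the
st_output.c hunk below, so no new API is being introduced), a
single-attribute call such as

    xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) "failed");

simply becomes

    crm_xml_add(node, "status", "failed");

dropping the pcmkXmlStr casts and routing the assignment through the
ACL-aware helper instead.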
---- - lib/common/output_html.c | 14 +++++++------- - lib/common/output_xml.c | 14 +++++++------- - lib/common/xml.c | 4 ++-- - lib/fencing/st_output.c | 10 +++++----- - lib/lrmd/lrmd_output.c | 4 ++-- - lib/pengine/pe_output.c | 38 ++++++++++++++++---------------------- - tools/crm_resource_print.c | 15 +++++++-------- - 7 files changed, 46 insertions(+), 53 deletions(-) - -diff --git a/lib/common/output_html.c b/lib/common/output_html.c -index cf51af4..542d863 100644 ---- a/lib/common/output_html.c -+++ b/lib/common/output_html.c -@@ -98,7 +98,7 @@ html_init(pcmk__output_t *out) { - priv->root = create_xml_node(NULL, "html"); - xmlCreateIntSubset(priv->root->doc, (pcmkXmlStr) "html", NULL, NULL); - -- xmlSetProp(priv->root, (pcmkXmlStr) "lang", (pcmkXmlStr) "en"); -+ crm_xml_add(priv->root, "lang", "en"); - g_queue_push_tail(priv->parent_q, priv->root); - priv->errors = NULL; - -@@ -137,7 +137,7 @@ finish_reset_common(pcmk__output_t *out, crm_exit_t exit_status, bool print) { - } - - charset_node = create_xml_node(head_node, "meta"); -- xmlSetProp(charset_node, (pcmkXmlStr) "charset", (pcmkXmlStr) "utf-8"); -+ crm_xml_add(charset_node, "charset", "utf-8"); - - /* Add any extra header nodes the caller might have created. */ - for (int i = 0; i < g_slist_length(extra_headers); i++) { -@@ -275,7 +275,7 @@ html_output_xml(pcmk__output_t *out, const char *name, const char *buf) { - CRM_ASSERT(priv != NULL); - - node = pcmk__output_create_html_node(out, "pre", NULL, NULL, buf); -- xmlSetProp(node, (pcmkXmlStr) "lang", (pcmkXmlStr) "xml"); -+ crm_xml_add(node, "lang", "xml"); - } - - G_GNUC_PRINTF(4, 5) -@@ -340,7 +340,7 @@ html_list_item(pcmk__output_t *out, const char *name, const char *format, ...) { - free(buf); - - if (name != NULL) { -- xmlSetProp(item_node, (pcmkXmlStr) "class", (pcmkXmlStr) name); -+ crm_xml_add(item_node, "class", name); - } - } - -@@ -417,11 +417,11 @@ pcmk__output_create_html_node(pcmk__output_t *out, const char *element_name, con - htmlNodePtr node = pcmk__output_create_xml_text_node(out, element_name, text); - - if (class_name != NULL) { -- xmlSetProp(node, (pcmkXmlStr) "class", (pcmkXmlStr) class_name); -+ crm_xml_add(node, "class", class_name); - } - - if (id != NULL) { -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) id); -+ crm_xml_add(node, "id", id); - } - - return node; -@@ -444,7 +444,7 @@ pcmk__html_add_header(const char *name, ...) 
{ - } - - value = va_arg(ap, char *); -- xmlSetProp(header_node, (pcmkXmlStr) key, (pcmkXmlStr) value); -+ crm_xml_add(header_node, key, value); - } - - extra_headers = g_slist_append(extra_headers, header_node); -diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c -index 6d92625..0053979 100644 ---- a/lib/common/output_xml.c -+++ b/lib/common/output_xml.c -@@ -107,13 +107,13 @@ xml_init(pcmk__output_t *out) { - - if (legacy_xml) { - priv->root = create_xml_node(NULL, "crm_mon"); -- xmlSetProp(priv->root, (pcmkXmlStr) "version", (pcmkXmlStr) VERSION); -+ crm_xml_add(priv->root, "version", VERSION); - } else { - priv->root = create_xml_node(NULL, "pacemaker-result"); -- xmlSetProp(priv->root, (pcmkXmlStr) "api-version", (pcmkXmlStr) PCMK__API_VERSION); -+ crm_xml_add(priv->root, "api-version", PCMK__API_VERSION); - - if (out->request != NULL) { -- xmlSetProp(priv->root, (pcmkXmlStr) "request", (pcmkXmlStr) out->request); -+ crm_xml_add(priv->root, "request", out->request); - } - } - -@@ -223,12 +223,12 @@ xml_subprocess_output(pcmk__output_t *out, int exit_status, - - if (proc_stdout != NULL) { - child_node = pcmk_create_xml_text_node(node, "output", proc_stdout); -- xmlSetProp(child_node, (pcmkXmlStr) "source", (pcmkXmlStr) "stdout"); -+ crm_xml_add(child_node, "source", "stdout"); - } - - if (proc_stderr != NULL) { - child_node = pcmk_create_xml_text_node(node, "output", proc_stderr); -- xmlSetProp(child_node, (pcmkXmlStr) "source", (pcmkXmlStr) "stderr"); -+ crm_xml_add(child_node, "source", "stderr"); - } - - pcmk__output_xml_add_node(out, node); -@@ -343,7 +343,7 @@ xml_list_item(pcmk__output_t *out, const char *name, const char *format, ...) { - item_node = pcmk__output_create_xml_text_node(out, "item", buf); - - if (name != NULL) { -- xmlSetProp(item_node, (pcmkXmlStr) "name", (pcmkXmlStr) name); -+ crm_xml_add(item_node, "name", name); - } - - free(buf); -@@ -368,7 +368,7 @@ xml_end_list(pcmk__output_t *out) { - - node = g_queue_pop_tail(priv->parent_q); - buf = crm_strdup_printf("%lu", xmlChildElementCount(node)); -- xmlSetProp(node, (pcmkXmlStr) "count", (pcmkXmlStr) buf); -+ crm_xml_add(node, "count", buf); - free(buf); - } - } -diff --git a/lib/common/xml.c b/lib/common/xml.c -index e5de0d4..abb120c 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -703,11 +703,11 @@ pcmk_create_html_node(xmlNode * parent, const char *element_name, const char *id - xmlNode *node = pcmk_create_xml_text_node(parent, element_name, text); - - if (class_name != NULL) { -- xmlSetProp(node, (pcmkXmlStr) "class", (pcmkXmlStr) class_name); -+ crm_xml_add(node, "class", class_name); - } - - if (id != NULL) { -- xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) id); -+ crm_xml_add(node, "id", id); - } - - return node; -diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c -index 65f8ec9..04f4b83 100644 ---- a/lib/fencing/st_output.c -+++ b/lib/fencing/st_output.c -@@ -331,11 +331,11 @@ stonith__event_xml(pcmk__output_t *out, va_list args) { - - switch (event->state) { - case st_failed: -- xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) "failed"); -+ crm_xml_add(node, "status", "failed"); - break; - - case st_done: -- xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) "success"); -+ crm_xml_add(node, "status", "success"); - break; - - default: { -@@ -349,12 +349,12 @@ stonith__event_xml(pcmk__output_t *out, va_list args) { - } - - if (event->delegate != NULL) { -- xmlSetProp(node, (pcmkXmlStr) "delegate", (pcmkXmlStr) event->delegate); -+ crm_xml_add(node, "delegate", 
event->delegate); - } - - if (event->state == st_failed || event->state == st_done) { - buf = time_t_string(event->completed); -- xmlSetProp(node, (pcmkXmlStr) "completed", (pcmkXmlStr) buf); -+ crm_xml_add(node, "completed", buf); - free(buf); - } - -@@ -429,7 +429,7 @@ stonith__validate_agent_xml(pcmk__output_t *out, va_list args) { - NULL); - - if (device != NULL) { -- xmlSetProp(node, (pcmkXmlStr) "device", (pcmkXmlStr) device); -+ crm_xml_add(node, "device", device); - } - - pcmk__output_xml_push_parent(out, node); -diff --git a/lib/lrmd/lrmd_output.c b/lib/lrmd/lrmd_output.c -index c01cc5e..dfcf3fa 100644 ---- a/lib/lrmd/lrmd_output.c -+++ b/lib/lrmd/lrmd_output.c -@@ -73,7 +73,7 @@ lrmd__agents_list_xml(pcmk__output_t *out, va_list args) { - NULL); - - if (!pcmk__str_empty(provider)) { -- xmlSetProp(node, (pcmkXmlStr) "provider", (pcmkXmlStr) provider); -+ crm_xml_add(node, "provider", provider); - } - - return xml_list(out, list, "agent"); -@@ -105,7 +105,7 @@ lrmd__providers_list_xml(pcmk__output_t *out, va_list args) { - NULL); - - if (agent_spec != NULL) { -- xmlSetProp(node, (pcmkXmlStr) "agent", (pcmkXmlStr) agent_spec); -+ crm_xml_add(node, "agent", agent_spec); - } - - return xml_list(out, list, "provider"); -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index 470b025..ecb5c2c 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -415,7 +415,7 @@ pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name - const char *param_name = va_arg(args, const char *); - const char *param_value = va_arg(args, const char *); - if (param_name && param_value) { -- xmlSetProp(xml_node, (pcmkXmlStr)param_name, (pcmkXmlStr)param_value); -+ crm_xml_add(xml_node, param_name, param_value); - } - }; - va_end(args); -@@ -592,19 +592,19 @@ pe__cluster_counts_xml(pcmk__output_t *out, va_list args) { - int nblocked = va_arg(args, int); - - char *s = crm_itoa(nnodes); -- xmlSetProp(nodes_node, (pcmkXmlStr) "number", (pcmkXmlStr) s); -+ crm_xml_add(nodes_node, "number", s); - free(s); - - s = crm_itoa(nresources); -- xmlSetProp(resources_node, (pcmkXmlStr) "number", (pcmkXmlStr) s); -+ crm_xml_add(resources_node, "number", s); - free(s); - - s = crm_itoa(ndisabled); -- xmlSetProp(resources_node, (pcmkXmlStr) "disabled", (pcmkXmlStr) s); -+ crm_xml_add(resources_node, "disabled", s); - free(s); - - s = crm_itoa(nblocked); -- xmlSetProp(resources_node, (pcmkXmlStr) "blocked", (pcmkXmlStr) s); -+ crm_xml_add(resources_node, "blocked", s); - free(s); - - return pcmk_rc_ok; -@@ -1196,8 +1196,7 @@ pe__node_xml(pcmk__output_t *out, va_list args) { - - if (pe__is_guest_node(node)) { - xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out); -- xmlSetProp(xml_node, (pcmkXmlStr) "id_as_resource", -- (pcmkXmlStr) node->details->remote_rsc->container->id); -+ crm_xml_add(xml_node, "id_as_resource", node->details->remote_rsc->container->id); - } - - if (group_by_node) { -@@ -1395,7 +1394,7 @@ pe__node_attribute_xml(pcmk__output_t *out, va_list args) { - - if (add_extra) { - char *buf = crm_itoa(expected_score); -- xmlSetProp(node, (pcmkXmlStr) "expected", (pcmkXmlStr) buf); -+ crm_xml_add(node, "expected", buf); - free(buf); - } - -@@ -1635,7 +1634,7 @@ pe__op_history_xml(pcmk__output_t *out, va_list args) { - - if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) { - char *s = crm_strdup_printf("%sms", interval_ms_s); -- xmlSetProp(node, (pcmkXmlStr) "interval", (pcmkXmlStr) s); -+ crm_xml_add(node, "interval", s); - free(s); - } - -@@ 
-1646,8 +1645,7 @@ pe__op_history_xml(pcmk__output_t *out, va_list args) { - if (value) { - time_t int_value = (time_t) crm_parse_int(value, NULL); - if (int_value > 0) { -- xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE, -- (pcmkXmlStr) pcmk__epoch2str(&int_value)); -+ crm_xml_add(node, XML_RSC_OP_LAST_CHANGE, pcmk__epoch2str(&int_value)); - } - } - -@@ -1655,21 +1653,20 @@ pe__op_history_xml(pcmk__output_t *out, va_list args) { - if (value) { - time_t int_value = (time_t) crm_parse_int(value, NULL); - if (int_value > 0) { -- xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_RUN, -- (pcmkXmlStr) pcmk__epoch2str(&int_value)); -+ crm_xml_add(node, XML_RSC_OP_LAST_RUN, pcmk__epoch2str(&int_value)); - } - } - - value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC); - if (value) { - char *s = crm_strdup_printf("%sms", value); -- xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_EXEC, (pcmkXmlStr) s); -+ crm_xml_add(node, XML_RSC_OP_T_EXEC, s); - free(s); - } - value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE); - if (value) { - char *s = crm_strdup_printf("%sms", value); -- xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_QUEUE, (pcmkXmlStr) s); -+ crm_xml_add(node, XML_RSC_OP_T_QUEUE, s); - free(s); - } - } -@@ -1734,7 +1731,7 @@ pe__resource_history_xml(pcmk__output_t *out, va_list args) { - NULL); - - if (rsc == NULL) { -- xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "true"); -+ crm_xml_add(node, "orphan", "true"); - } else if (all || failcount || last_failure > 0) { - char *migration_s = crm_itoa(rsc->migration_threshold); - -@@ -1746,14 +1743,12 @@ pe__resource_history_xml(pcmk__output_t *out, va_list args) { - if (failcount > 0) { - char *s = crm_itoa(failcount); - -- xmlSetProp(node, (pcmkXmlStr) PCMK__FAIL_COUNT_PREFIX, -- (pcmkXmlStr) s); -+ crm_xml_add(node, PCMK__FAIL_COUNT_PREFIX, s); - free(s); - } - - if (last_failure > 0) { -- xmlSetProp(node, (pcmkXmlStr) PCMK__LAST_FAILURE_PREFIX, -- (pcmkXmlStr) pcmk__epoch2str(&last_failure)); -+ crm_xml_add(node, PCMK__LAST_FAILURE_PREFIX, pcmk__epoch2str(&last_failure)); - } - } - -@@ -1920,8 +1915,7 @@ pe__ticket_xml(pcmk__output_t *out, va_list args) { - NULL); - - if (ticket->last_granted > -1) { -- xmlSetProp(node, (pcmkXmlStr) "last-granted", -- (pcmkXmlStr) pcmk__epoch2str(&ticket->last_granted)); -+ crm_xml_add(node, "last-granted", pcmk__epoch2str(&ticket->last_granted)); - } - - return pcmk_rc_ok; -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index a33356f..cb06879 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -252,19 +252,19 @@ resource_check_xml(pcmk__output_t *out, va_list args) { - NULL); - - if (pcmk_is_set(checks->flags, rsc_remain_stopped)) { -- xmlSetProp(node, (pcmkXmlStr) "remain_stopped", (pcmkXmlStr) "true"); -+ crm_xml_add(node, "remain_stopped", "true"); - } - - if (pcmk_is_set(checks->flags, rsc_unpromotable)) { -- xmlSetProp(node, (pcmkXmlStr) "promotable", (pcmkXmlStr) "false"); -+ crm_xml_add(node, "promotable", "false"); - } - - if (pcmk_is_set(checks->flags, rsc_unmanaged)) { -- xmlSetProp(node, (pcmkXmlStr) "unmanaged", (pcmkXmlStr) "true"); -+ crm_xml_add(node, "unmanaged", "true"); - } - - if (checks->lock_node) { -- xmlSetProp(node, (pcmkXmlStr) "locked-to", (pcmkXmlStr) checks->lock_node); -+ crm_xml_add(node, "locked-to", checks->lock_node); - } - - return rc; -@@ -333,7 +333,7 @@ resource_search_xml(pcmk__output_t *out, va_list args) - xmlNode *sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname); - - if 
(!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { -- xmlSetProp(sub_node, (pcmkXmlStr) "state", (pcmkXmlStr) "promoted"); -+ crm_xml_add(sub_node, "state", "promoted"); - } - } - -@@ -463,7 +463,7 @@ resource_why_xml(pcmk__output_t *out, va_list args) - - } else if ((rsc != NULL) && (host_uname != NULL)) { - if (resource_is_running_on(rsc, host_uname)) { -- xmlSetProp(xml_node, (pcmkXmlStr) "running_on", (pcmkXmlStr) host_uname); -+ crm_xml_add(xml_node, "running_on", host_uname); - } - - cli_resource_check(out, cib_conn, rsc); -@@ -512,8 +512,7 @@ resource_why_xml(pcmk__output_t *out, va_list args) - GListPtr hosts = NULL; - - rsc->fns->location(rsc, &hosts, TRUE); -- xmlSetProp(xml_node, (pcmkXmlStr) "running", -- (pcmkXmlStr) pcmk__btoa(hosts != NULL)); -+ crm_xml_add(xml_node, "running", pcmk__btoa(hosts != NULL)); - cli_resource_check(out, cib_conn, rsc); - g_list_free(hosts); - } --- -1.8.3.1 - - -From 55b4f7a17001280fcf1b8dc7bc4c1afd3a6a46d1 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 18 Nov 2020 14:31:31 -0500 -Subject: [PATCH 6/6] Test: cts: Update order of attributes in crm_mon output. - -This has changed due to some of the code reorganization as a result of -trying to set as many attributes as possible when an xmlNode is created. -The resulting crm_mon output should still validate, however, since -attribute order should not be enforced by the schema. ---- - cts/cli/regression.crm_mon.exp | 254 ++++++++++++++++++++--------------------- - 1 file changed, 127 insertions(+), 127 deletions(-) - -diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp -index edcdda6..e9f36ad 100644 ---- a/cts/cli/regression.crm_mon.exp -+++ b/cts/cli/regression.crm_mon.exp -@@ -125,39 +125,39 @@ Active Resources: - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - -- -+ - - -- -- -+ -+ - - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - -- -- -+ -+ - - - -@@ -285,39 +285,39 @@ Active Resources: - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - -- -+ - - -- -- -+ -+ - - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - -- -- -+ -+ - - - -@@ -766,39 +766,39 @@ Negative Location Constraints: - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - -- -+ - - -- -- -+ -+ - - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - -- -- -+ -+ - - - -@@ -928,19 +928,19 @@ Negative Location Constraints: - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - -- -- -+ -+ - - - -@@ -1081,22 +1081,22 @@ Negative Location Constraints: - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - -- -+ - - -- -- -+ -+ - - - -@@ -1171,8 +1171,8 @@ Operations: - - - -- -- -+ -+ - - - -@@ -1371,8 +1371,8 @@ Operations: - - - -- -- -+ -+ - - - -@@ -1452,10 +1452,10 @@ Operations: - - - -- -+ - - -- -+ - - - -@@ -1529,7 +1529,7 @@ Operations: - - - -- -+ - - - -@@ -1611,14 +1611,14 @@ Operations: - - - -- -- -+ -+ - - - - -- -- -+ -+ - - - -@@ -1700,14 +1700,14 @@ Operations: - - - -- -- -+ -+ - - - - -- -- -+ -+ - - - -@@ -1786,14 +1786,14 @@ Operations: - - - -- -- -+ -+ - - - - -- -- -+ -+ - - - -@@ -2257,14 +2257,14 @@ Operations: - - - -- -- -+ -+ - - - - -- -- -+ -+ - - - -@@ -2368,14 +2368,14 @@ Operations: - - - -- -- -+ -+ - - - - -- -- -+ -+ - - - -@@ -2457,14 +2457,14 @@ Operations: - - - -- -- -+ -+ - - - - -- -- -+ -+ - - - -@@ -2568,14 +2568,14 @@ Operations: - - - -- -- -+ -+ - - - - -- -- -+ -+ - - - -@@ -2657,14 +2657,14 @@ Operations: - - - -- -- -+ -+ - - - - -- -- -+ -+ - - - -@@ -2770,46 +2770,46 @@ Active Resources: - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -- -+ -+ - - -- -+ - - - - -- -- -+ 
-+ - - -- -- -+ -+ - - -- -- -+ -+ - - -- -- -+ -+ - - -- -- -+ -+ - - - - -- -+ - - - -@@ -3051,24 +3051,24 @@ Full List of Resources: - - - -- -- -+ -+ - - -- -- -+ -+ - - -- -- -+ -+ - - -- -- -+ -+ - - -- -- -+ -+ - - - --- -1.8.3.1 - diff --git a/SOURCES/012-string-arguments.patch b/SOURCES/012-string-arguments.patch new file mode 100644 index 0000000..6419117 --- /dev/null +++ b/SOURCES/012-string-arguments.patch @@ -0,0 +1,221 @@ +From 2eee93e8f9ea2daa81769bc69843d63ced1a7112 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 20 Jul 2021 16:39:07 -0400 +Subject: [PATCH 1/2] Low: tools: Audit command line options. + +This just goes through and makes sure the command line options that take +arguments are in the special parameter to pcmk__cmdline_preproc, and +that options that do not take arguments are not. +--- + tools/crm_attribute.c | 2 +- + tools/crm_error.c | 2 +- + tools/crm_resource.c | 2 +- + tools/crm_rule.c | 2 +- + tools/crm_simulate.c | 2 +- + tools/crmadmin.c | 2 +- + tools/stonith_admin.c | 2 +- + 7 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/tools/crm_attribute.c b/tools/crm_attribute.c +index 8a5b4e4..6bd4e2a 100644 +--- a/tools/crm_attribute.c ++++ b/tools/crm_attribute.c +@@ -312,7 +312,7 @@ main(int argc, char **argv) + + GOptionGroup *output_group = NULL; + pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); +- gchar **processed_args = pcmk__cmdline_preproc(argv, "DGNPdilnpstv"); ++ gchar **processed_args = pcmk__cmdline_preproc(argv, "NPUdilnpstv"); + GOptionContext *context = build_arg_context(args, &output_group); + + if (!g_option_context_parse_strv(context, &processed_args, &error)) { +diff --git a/tools/crm_error.c b/tools/crm_error.c +index b4328ce..923f393 100644 +--- a/tools/crm_error.c ++++ b/tools/crm_error.c +@@ -79,7 +79,7 @@ main(int argc, char **argv) + + GOptionGroup *output_group = NULL; + pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); +- gchar **processed_args = pcmk__cmdline_preproc(argv, "lrnX"); ++ gchar **processed_args = pcmk__cmdline_preproc(argv, NULL); + GOptionContext *context = build_arg_context(args, &output_group); + + if (!g_option_context_parse_strv(context, &processed_args, &error)) { +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index fa7902c..d8e140f 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -1530,7 +1530,7 @@ main(int argc, char **argv) + */ + + args = pcmk__new_common_args(SUMMARY); +- processed_args = pcmk__cmdline_preproc(argv, "GINSTdginpstuv"); ++ processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx"); + context = build_arg_context(args, &output_group); + + pcmk__register_formats(output_group, formats); +diff --git a/tools/crm_rule.c b/tools/crm_rule.c +index 8b19bcd..30c5155 100644 +--- a/tools/crm_rule.c ++++ b/tools/crm_rule.c +@@ -239,7 +239,7 @@ main(int argc, char **argv) + + pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); + GOptionContext *context = build_arg_context(args); +- gchar **processed_args = pcmk__cmdline_preproc(argv, "nopNO"); ++ gchar **processed_args = pcmk__cmdline_preproc(argv, "drX"); + + if (!g_option_context_parse_strv(context, &processed_args, &error)) { + exit_code = CRM_EX_USAGE; +diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c +index 0406bff..c83b1b1 100644 +--- a/tools/crm_simulate.c ++++ b/tools/crm_simulate.c +@@ -865,7 +865,7 @@ main(int argc, char **argv) + + GOptionGroup *output_group = NULL; + pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); +- gchar 
**processed_args = pcmk__cmdline_preproc(argv, "bdefgiqrtuwxDFGINO"); ++ gchar **processed_args = pcmk__cmdline_preproc(argv, "bdefgiqrtuwxDFGINOP"); + GOptionContext *context = build_arg_context(args, &output_group); + + /* This must come before g_option_context_parse_strv. */ +diff --git a/tools/crmadmin.c b/tools/crmadmin.c +index 5cbde1b..b98f282 100644 +--- a/tools/crmadmin.c ++++ b/tools/crmadmin.c +@@ -188,7 +188,7 @@ main(int argc, char **argv) + + GOptionGroup *output_group = NULL; + pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); +- gchar **processed_args = pcmk__cmdline_preproc(argv, "itBDEHKNPS"); ++ gchar **processed_args = pcmk__cmdline_preproc(argv, "itKNS"); + GOptionContext *context = build_arg_context(args, &output_group); + + pcmk__register_formats(output_group, formats); +diff --git a/tools/stonith_admin.c b/tools/stonith_admin.c +index 6773cea..2d48326 100644 +--- a/tools/stonith_admin.c ++++ b/tools/stonith_admin.c +@@ -349,7 +349,7 @@ main(int argc, char **argv) + + GOptionGroup *output_group = NULL; + pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); +- gchar **processed_args = pcmk__cmdline_preproc(argv, "adehilorstvBCDFHQRTU"); ++ gchar **processed_args = pcmk__cmdline_preproc(argv, "adehilorstvyBCDFHQRTU"); + GOptionContext *context = build_arg_context(args, &output_group); + + pcmk__register_formats(output_group, formats); +-- +1.8.3.1 + + +From 8301678ad1162450814d2fea5288aefe47a67a74 Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Tue, 20 Jul 2021 16:40:58 -0400 +Subject: [PATCH 2/2] Low: libcrmcommon: Also allow string arguments that start + with a dash. + +There's various places where an option to a command line argument could +itself be a valid command line argument. For instance: + + crm_attribute -n crm_mon_options -v "-1i3" + +The previous patching to pcmk__cmdline_preproc did not take this into +account. With this patch, options that are last in a string (or by +themselves) and take an argument will have the next command line option +grabbed and copied straight through without processing. + +Regression in 2.1.0 caused by a long-standing bug in pcmk__cmdline_preproc. +--- + lib/common/cmdline.c | 8 ++++++ + .../tests/cmdline/pcmk__cmdline_preproc_test.c | 33 ++++++++++++++++++++++ + 2 files changed, 41 insertions(+) + +diff --git a/lib/common/cmdline.c b/lib/common/cmdline.c +index 9c1b810..1ca6147 100644 +--- a/lib/common/cmdline.c ++++ b/lib/common/cmdline.c +@@ -146,6 +146,7 @@ gchar ** + pcmk__cmdline_preproc(char **argv, const char *special) { + GPtrArray *arr = NULL; + bool saw_dash_dash = false; ++ bool copy_option = false; + + if (argv == NULL) { + return NULL; +@@ -175,6 +176,12 @@ pcmk__cmdline_preproc(char **argv, const char *special) { + continue; + } + ++ if (copy_option == true) { ++ g_ptr_array_add(arr, g_strdup(argv[i])); ++ copy_option = false; ++ continue; ++ } ++ + /* This is just a dash by itself. That could indicate stdin/stdout, or + * it could be user error. Copy it over and let glib figure it out. 
+ */ +@@ -239,6 +246,7 @@ pcmk__cmdline_preproc(char **argv, const char *special) { + */ + } else { + g_ptr_array_add(arr, g_strdup_printf("-%c", *ch)); ++ copy_option = true; + ch++; + } + +diff --git a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c +index 9a752ef..edc5640 100644 +--- a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c ++++ b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c +@@ -106,6 +106,36 @@ negative_score_2(void) { + g_strfreev(processed); + } + ++static void ++string_arg_with_dash(void) { ++ const char *argv[] = { "-n", "crm_mon_options", "-v", "--opt1 --opt2", NULL }; ++ const gchar *expected[] = { "-n", "crm_mon_options", "-v", "--opt1 --opt2", NULL }; ++ ++ gchar **processed = pcmk__cmdline_preproc((char **) argv, "v"); ++ LISTS_EQ(processed, expected); ++ g_strfreev(processed); ++} ++ ++static void ++string_arg_with_dash_2(void) { ++ const char *argv[] = { "-n", "crm_mon_options", "-v", "-1i3", NULL }; ++ const gchar *expected[] = { "-n", "crm_mon_options", "-v", "-1i3", NULL }; ++ ++ gchar **processed = pcmk__cmdline_preproc((char **) argv, "v"); ++ LISTS_EQ(processed, expected); ++ g_strfreev(processed); ++} ++ ++static void ++string_arg_with_dash_3(void) { ++ const char *argv[] = { "-abc", "-1i3", NULL }; ++ const gchar *expected[] = { "-a", "-b", "-c", "-1i3", NULL }; ++ ++ gchar **processed = pcmk__cmdline_preproc((char **) argv, "c"); ++ LISTS_EQ(processed, expected); ++ g_strfreev(processed); ++} ++ + int + main(int argc, char **argv) + { +@@ -120,5 +150,8 @@ main(int argc, char **argv) + g_test_add_func("/common/cmdline/preproc/long_arg", long_arg); + g_test_add_func("/common/cmdline/preproc/negative_score", negative_score); + g_test_add_func("/common/cmdline/preproc/negative_score_2", negative_score_2); ++ g_test_add_func("/common/cmdline/preproc/string_arg_with_dash", string_arg_with_dash); ++ g_test_add_func("/common/cmdline/preproc/string_arg_with_dash_2", string_arg_with_dash_2); ++ g_test_add_func("/common/cmdline/preproc/string_arg_with_dash_3", string_arg_with_dash_3); + return g_test_run(); + } +-- +1.8.3.1 + diff --git a/SOURCES/013-feature-set.patch b/SOURCES/013-feature-set.patch deleted file mode 100644 index cd32a98..0000000 --- a/SOURCES/013-feature-set.patch +++ /dev/null @@ -1,2808 +0,0 @@ -From cd098b06db263cfb337d4b8eaf2724df66fff060 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 20 Nov 2020 14:00:36 -0500 -Subject: [PATCH 1/9] Fix: libs, tools: Be consistent with GListPtr and - xmlNodePtr in formatted output. - -GListPtr should be removed from all the formatted output messages. It -seems like a GLib type, but it's not. So that's weird and we should -stop spreading uses of it around. - -On the other hand, xmlNodePtr is a libxml type that we are already using -sporadically. We should be more consistent in using it. 
---- - lib/fencing/st_output.c | 20 ++++++------ - lib/pacemaker/pcmk_output.c | 36 ++++++++++----------- - lib/pengine/bundle.c | 18 +++++------ - lib/pengine/clone.c | 44 +++++++++++++------------- - lib/pengine/group.c | 28 ++++++++--------- - lib/pengine/native.c | 20 ++++++------ - lib/pengine/pe_output.c | 76 ++++++++++++++++++++++----------------------- - tools/crm_resource_print.c | 64 +++++++++++++++++++------------------- - 8 files changed, 153 insertions(+), 153 deletions(-) - -diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c -index 04f4b83..7c3ccef 100644 ---- a/lib/fencing/st_output.c -+++ b/lib/fencing/st_output.c -@@ -30,11 +30,11 @@ time_t_string(time_t when) { - return buf; - } - --PCMK__OUTPUT_ARGS("failed-fencing-history", "stonith_history_t *", "GListPtr", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("failed-fencing-history", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__failed_history(pcmk__output_t *out, va_list args) { - stonith_history_t *history = va_arg(args, stonith_history_t *); -- GListPtr only_node = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); - gboolean full_history = va_arg(args, gboolean); - gboolean print_spacer = va_arg(args, gboolean); - -@@ -58,11 +58,11 @@ stonith__failed_history(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("fencing-history", "stonith_history_t *", "GListPtr", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("fencing-history", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__history(pcmk__output_t *out, va_list args) { - stonith_history_t *history = va_arg(args, stonith_history_t *); -- GListPtr only_node = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); - gboolean full_history = va_arg(args, gboolean); - gboolean print_spacer = va_arg(args, gboolean); - -@@ -84,12 +84,12 @@ stonith__history(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("full-fencing-history", "crm_exit_t", "stonith_history_t *", "GListPtr", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("full-fencing-history", "crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__full_history(pcmk__output_t *out, va_list args) { - crm_exit_t history_rc G_GNUC_UNUSED = va_arg(args, crm_exit_t); - stonith_history_t *history = va_arg(args, stonith_history_t *); -- GListPtr only_node = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); - gboolean full_history = va_arg(args, gboolean); - gboolean print_spacer = va_arg(args, gboolean); - -@@ -109,12 +109,12 @@ stonith__full_history(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("full-fencing-history", "crm_exit_t", "stonith_history_t *", "GListPtr", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("full-fencing-history", "crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__full_history_xml(pcmk__output_t *out, va_list args) { - crm_exit_t history_rc = va_arg(args, crm_exit_t); - stonith_history_t *history = va_arg(args, stonith_history_t *); -- GListPtr only_node = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); - gboolean full_history = va_arg(args, gboolean); - gboolean print_spacer G_GNUC_UNUSED = va_arg(args, gboolean); - -@@ -198,11 +198,11 @@ stonith__last_fenced_xml(pcmk__output_t *out, va_list args) { - } - } - --PCMK__OUTPUT_ARGS("pending-fencing-actions", "stonith_history_t *", "GListPtr", "gboolean", "gboolean") 
-+PCMK__OUTPUT_ARGS("pending-fencing-actions", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__pending_actions(pcmk__output_t *out, va_list args) { - stonith_history_t *history = va_arg(args, stonith_history_t *); -- GListPtr only_node = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); - gboolean full_history = va_arg(args, gboolean); - gboolean print_spacer = va_arg(args, gboolean); - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 74a7c59..a0b12b9 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -53,8 +53,8 @@ static int colocations_list(pcmk__output_t *out, va_list args) { - gboolean dependents = va_arg(args, gboolean); - gboolean recursive = va_arg(args, gboolean); - -- GListPtr lpc = NULL; -- GListPtr list = rsc->rsc_cons; -+ GList *lpc = NULL; -+ GList *list = rsc->rsc_cons; - bool printed_header = false; - - if (dependents) { -@@ -133,8 +133,8 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - gboolean dependents = va_arg(args, gboolean); - gboolean recursive = va_arg(args, gboolean); - -- GListPtr lpc = NULL; -- GListPtr list = rsc->rsc_cons; -+ GList *lpc = NULL; -+ GList *list = rsc->rsc_cons; - bool printed_header = false; - - if (dependents) { -@@ -220,15 +220,15 @@ PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") - static int locations_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc G_GNUC_UNUSED = va_arg(args, pe_resource_t *); - -- GListPtr lpc = NULL; -- GListPtr list = rsc->rsc_location; -+ GList *lpc = NULL; -+ GList *list = rsc->rsc_location; - - out->begin_list(out, NULL, NULL, "Locations"); - - for (lpc = list; lpc != NULL; lpc = lpc->next) { - pe__location_t *cons = lpc->data; - -- GListPtr lpc2 = NULL; -+ GList *lpc2 = NULL; - - for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { - pe_node_t *node = (pe_node_t *) lpc2->data; -@@ -249,15 +249,15 @@ PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") - static int locations_list_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - -- GListPtr lpc = NULL; -- GListPtr list = rsc->rsc_location; -+ GList *lpc = NULL; -+ GList *list = rsc->rsc_location; - - pcmk__output_xml_create_parent(out, "locations", NULL); - - for (lpc = list; lpc != NULL; lpc = lpc->next) { - pe__location_t *cons = lpc->data; - -- GListPtr lpc2 = NULL; -+ GList *lpc2 = NULL; - - for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { - pe_node_t *node = (pe_node_t *) lpc2->data; -@@ -284,9 +284,9 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *); - gboolean recursive G_GNUC_UNUSED = va_arg(args, gboolean); - -- GListPtr lpc = NULL; -- xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, -- data_set->input); -+ GList *lpc = NULL; -+ xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, -+ data_set->input); - - unpack_constraints(cib_constraints, data_set); - -@@ -322,9 +322,9 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - gboolean recursive = va_arg(args, gboolean); - -- GListPtr lpc = NULL; -- xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, -- data_set->input); -+ GList *lpc = NULL; -+ xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, -+ data_set->input); - - 
unpack_constraints(cib_constraints, data_set); - -@@ -455,11 +455,11 @@ dc_xml(pcmk__output_t *out, va_list args) - } - - --PCMK__OUTPUT_ARGS("crmadmin-node-list", "pcmk__output_t *", "xmlNode *") -+PCMK__OUTPUT_ARGS("crmadmin-node-list", "pcmk__output_t *", "xmlNodePtr") - static int - crmadmin_node_list(pcmk__output_t *out, va_list args) - { -- xmlNode *xml_node = va_arg(args, xmlNode *); -+ xmlNodePtr xml_node = va_arg(args, xmlNodePtr); - int found = 0; - xmlNode *node = NULL; - xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c -index 543f5dc..7a175a5 100644 ---- a/lib/pengine/bundle.c -+++ b/lib/pengine/bundle.c -@@ -1485,14 +1485,14 @@ bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options, - free(child_text); - } - --PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__bundle_xml(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - pe__bundle_variant_data_t *bundle_data = NULL; - int rc = pcmk_rc_no_output; -@@ -1608,14 +1608,14 @@ pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replic - pe__common_output_html(out, rsc, buffer, node, options); - } - --PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__bundle_html(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - pe__bundle_variant_data_t *bundle_data = NULL; - char buffer[LINE_MAX]; -@@ -1750,14 +1750,14 @@ pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replic - pe__common_output_text(out, rsc, buffer, node, options); - } - --PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__bundle_text(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - pe__bundle_variant_data_t *bundle_data = NULL; - int rc = pcmk_rc_no_output; -diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c -index 0708fdc..9b48d01 100644 ---- a/lib/pengine/clone.c -+++ b/lib/pengine/clone.c -@@ -577,16 +577,16 @@ clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print - free(child_text); - } - --PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__clone_xml(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - 
pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - -- GListPtr gIter = rsc->children; -+ GList *gIter = rsc->children; - int rc = pcmk_rc_no_output; - gboolean printed_header = FALSE; - gboolean print_everything = TRUE; -@@ -635,23 +635,23 @@ pe__clone_xml(pcmk__output_t *out, va_list args) - return rc; - } - --PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__clone_html(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - char *list_text = NULL; - char *stopped_list = NULL; - size_t list_text_len = 0; - size_t stopped_list_len = 0; - -- GListPtr master_list = NULL; -- GListPtr started_list = NULL; -- GListPtr gIter = rsc->children; -+ GList *master_list = NULL; -+ GList *started_list = NULL; -+ GList *gIter = rsc->children; - - clone_variant_data_t *clone_data = NULL; - int active_instances = 0; -@@ -751,7 +751,7 @@ pe__clone_html(pcmk__output_t *out, va_list args) - } - - if (print_full) { -- GListPtr all = NULL; -+ GList *all = NULL; - - /* Print every resource that's a child of this clone. */ - all = g_list_prepend(all, strdup("*")); -@@ -832,8 +832,8 @@ pe__clone_html(pcmk__output_t *out, va_list args) - if (!pcmk_is_set(rsc->flags, pe_rsc_unique) - && (clone_data->clone_max > active_instances)) { - -- GListPtr nIter; -- GListPtr list = g_hash_table_get_values(rsc->allowed_nodes); -+ GList *nIter; -+ GList *list = g_hash_table_get_values(rsc->allowed_nodes); - - /* Custom stopped list for non-unique clones */ - free(stopped_list); -@@ -872,23 +872,23 @@ pe__clone_html(pcmk__output_t *out, va_list args) - return rc; - } - --PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__clone_text(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - char *list_text = NULL; - char *stopped_list = NULL; - size_t list_text_len = 0; - size_t stopped_list_len = 0; - -- GListPtr master_list = NULL; -- GListPtr started_list = NULL; -- GListPtr gIter = rsc->children; -+ GList *master_list = NULL; -+ GList *started_list = NULL; -+ GList *gIter = rsc->children; - - clone_variant_data_t *clone_data = NULL; - int active_instances = 0; -@@ -988,7 +988,7 @@ pe__clone_text(pcmk__output_t *out, va_list args) - } - - if (print_full) { -- GListPtr all = NULL; -+ GList *all = NULL; - - /* Print every resource that's a child of this clone. 
*/ - all = g_list_prepend(all, strdup("*")); -@@ -1067,8 +1067,8 @@ pe__clone_text(pcmk__output_t *out, va_list args) - if (!pcmk_is_set(rsc->flags, pe_rsc_unique) - && (clone_data->clone_max > active_instances)) { - -- GListPtr nIter; -- GListPtr list = g_hash_table_get_values(rsc->allowed_nodes); -+ GList *nIter; -+ GList *list = g_hash_table_get_values(rsc->allowed_nodes); - - /* Custom stopped list for non-unique clones */ - free(stopped_list); -diff --git a/lib/pengine/group.c b/lib/pengine/group.c -index 33aa177..58c9f7c 100644 ---- a/lib/pengine/group.c -+++ b/lib/pengine/group.c -@@ -180,16 +180,16 @@ group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print - free(child_text); - } - --PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__group_xml(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - -- GListPtr gIter = rsc->children; -+ GList *gIter = rsc->children; - char *count = crm_itoa(g_list_length(gIter)); - - int rc = pcmk_rc_no_output; -@@ -231,14 +231,14 @@ pe__group_xml(pcmk__output_t *out, va_list args) - return rc; - } - --PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__group_html(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - int rc = pcmk_rc_no_output; - gboolean print_everything = TRUE; -@@ -251,7 +251,7 @@ pe__group_html(pcmk__output_t *out, va_list args) - (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)); - - if (options & pe_print_brief) { -- GListPtr rscs = pe__filter_rsc_list(rsc->children, only_rsc); -+ GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc); - - if (rscs != NULL) { - out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s", rsc->id, -@@ -265,7 +265,7 @@ pe__group_html(pcmk__output_t *out, va_list args) - } - - } else { -- for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) { -+ for (GList *gIter = rsc->children; gIter; gIter = gIter->next) { - pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; - - if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) { -@@ -286,14 +286,14 @@ pe__group_html(pcmk__output_t *out, va_list args) - return rc; - } - --PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__group_text(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - int rc = pcmk_rc_no_output; - gboolean print_everything = TRUE; -@@ -306,7 
+306,7 @@ pe__group_text(pcmk__output_t *out, va_list args) - (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)); - - if (options & pe_print_brief) { -- GListPtr rscs = pe__filter_rsc_list(rsc->children, only_rsc); -+ GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc); - - if (rscs != NULL) { - out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s", rsc->id, -@@ -320,7 +320,7 @@ pe__group_text(pcmk__output_t *out, va_list args) - } - - } else { -- for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) { -+ for (GList *gIter = rsc->children; gIter; gIter = gIter->next) { - pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; - - if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) { -diff --git a/lib/pengine/native.c b/lib/pengine/native.c -index 38e9350..6f27d7b 100644 ---- a/lib/pengine/native.c -+++ b/lib/pengine/native.c -@@ -920,14 +920,14 @@ native_print(pe_resource_t * rsc, const char *pre_text, long options, void *prin - common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data); - } - --PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__resource_xml(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); - const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); -@@ -977,7 +977,7 @@ pe__resource_xml(pcmk__output_t *out, va_list args) - CRM_ASSERT(rc == pcmk_rc_ok); - - if (rsc->running_on != NULL) { -- GListPtr gIter = rsc->running_on; -+ GList *gIter = rsc->running_on; - - for (; gIter != NULL; gIter = gIter->next) { - pe_node_t *node = (pe_node_t *) gIter->data; -@@ -994,14 +994,14 @@ pe__resource_xml(pcmk__output_t *out, va_list args) - return rc; - } - --PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__resource_html(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - pe_node_t *node = pe__current_node(rsc); - -@@ -1018,14 +1018,14 @@ pe__resource_html(pcmk__output_t *out, va_list args) - return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, options); - } - --PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr") -+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *") - int - pe__resource_text(pcmk__output_t *out, va_list args) - { - unsigned int options = va_arg(args, unsigned int); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - 
- pe_node_t *node = pe__current_node(rsc); - -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index ecb5c2c..3d2fc24 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -1013,7 +1013,7 @@ pe__failed_action_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *", -- "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr") -+ "gboolean", "gboolean", "gboolean", "GList *", "GList *") - int - pe__node_html(pcmk__output_t *out, va_list args) { - pe_node_t *node = va_arg(args, pe_node_t *); -@@ -1023,8 +1023,8 @@ pe__node_html(pcmk__output_t *out, va_list args) { - gboolean print_clone_detail = va_arg(args, gboolean); - gboolean print_brief = va_arg(args, gboolean); - gboolean group_by_node = va_arg(args, gboolean); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - char *node_name = pe__node_display_name(node, print_clone_detail); - char *buf = crm_strdup_printf("Node: %s", node_name); -@@ -1052,7 +1052,7 @@ pe__node_html(pcmk__output_t *out, va_list args) { - pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE"); - } - if (print_brief && group_by_node) { -- GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc); -+ GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc); - - if (rscs != NULL) { - out->begin_list(out, NULL, NULL, NULL); -@@ -1061,7 +1061,7 @@ pe__node_html(pcmk__output_t *out, va_list args) { - } - - } else if (group_by_node) { -- GListPtr lpc2 = NULL; -+ GList *lpc2 = NULL; - - out->begin_list(out, NULL, NULL, NULL); - for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) { -@@ -1081,7 +1081,7 @@ pe__node_html(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *", -- "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr") -+ "gboolean", "gboolean", "gboolean", "GList *", "GList *") - int - pe__node_text(pcmk__output_t *out, va_list args) { - pe_node_t *node = va_arg(args, pe_node_t *); -@@ -1091,8 +1091,8 @@ pe__node_text(pcmk__output_t *out, va_list args) { - gboolean print_clone_detail = va_arg(args, gboolean); - gboolean print_brief = va_arg(args, gboolean); - gboolean group_by_node = va_arg(args, gboolean); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - if (full) { - char *node_name = pe__node_display_name(node, print_clone_detail); -@@ -1110,7 +1110,7 @@ pe__node_text(pcmk__output_t *out, va_list args) { - /* If we're grouping by node, print its resources */ - if (group_by_node) { - if (print_brief) { -- GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc); -+ GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc); - - if (rscs != NULL) { - out->begin_list(out, NULL, NULL, "%s", buf); -@@ -1123,7 +1123,7 @@ pe__node_text(pcmk__output_t *out, va_list args) { - } - - } else { -- GListPtr gIter2 = NULL; -+ GList *gIter2 = NULL; - - out->begin_list(out, NULL, NULL, "%s", buf); - out->begin_list(out, NULL, NULL, "Resources"); -@@ -1151,7 +1151,7 @@ pe__node_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *", -- "gboolean", 
"gboolean", "gboolean", "GListPtr", "GListPtr") -+ "gboolean", "gboolean", "gboolean", "GList *", "GList *") - int - pe__node_xml(pcmk__output_t *out, va_list args) { - pe_node_t *node = va_arg(args, pe_node_t *); -@@ -1161,8 +1161,8 @@ pe__node_xml(pcmk__output_t *out, va_list args) { - gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean); - gboolean print_brief G_GNUC_UNUSED = va_arg(args, gboolean); - gboolean group_by_node = va_arg(args, gboolean); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - - if (full) { - const char *node_type = "unknown"; -@@ -1200,7 +1200,7 @@ pe__node_xml(pcmk__output_t *out, va_list args) { - } - - if (group_by_node) { -- GListPtr lpc = NULL; -+ GList *lpc = NULL; - - for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; -@@ -1401,12 +1401,12 @@ pe__node_attribute_xml(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "unsigned int", "gboolean", "gboolean", "gboolean") - int - pe__node_list_html(pcmk__output_t *out, va_list args) { -- GListPtr nodes = va_arg(args, GListPtr); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *nodes = va_arg(args, GList *); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - unsigned int print_opts = va_arg(args, unsigned int); - gboolean print_clone_detail = va_arg(args, gboolean); - gboolean print_brief = va_arg(args, gboolean); -@@ -1414,7 +1414,7 @@ pe__node_list_html(pcmk__output_t *out, va_list args) { - - int rc = pcmk_rc_no_output; - -- for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) { -+ for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { - pe_node_t *node = (pe_node_t *) gIter->data; - - if (!pcmk__str_in_list(only_node, node->details->uname)) { -@@ -1431,12 +1431,12 @@ pe__node_list_html(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "unsigned int", "gboolean", "gboolean", "gboolean") - int - pe__node_list_text(pcmk__output_t *out, va_list args) { -- GListPtr nodes = va_arg(args, GListPtr); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *nodes = va_arg(args, GList *); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - unsigned int print_opts = va_arg(args, unsigned int); - gboolean print_clone_detail = va_arg(args, gboolean); - gboolean print_brief = va_arg(args, gboolean); -@@ -1457,7 +1457,7 @@ pe__node_list_text(pcmk__output_t *out, va_list args) { - - int rc = pcmk_rc_no_output; - -- for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) { -+ for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { - pe_node_t *node = (pe_node_t *) gIter->data; - const char *node_mode = NULL; - char *node_name = pe__node_display_name(node, print_clone_detail); -@@ -1570,19 +1570,19 @@ pe__node_list_text(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("node-list", 
"GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "unsigned int", "gboolean", "gboolean", "gboolean") - int - pe__node_list_xml(pcmk__output_t *out, va_list args) { -- GListPtr nodes = va_arg(args, GListPtr); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *nodes = va_arg(args, GList *); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - unsigned int print_opts = va_arg(args, unsigned int); - gboolean print_clone_detail = va_arg(args, gboolean); - gboolean print_brief = va_arg(args, gboolean); - gboolean group_by_node = va_arg(args, gboolean); - - out->begin_list(out, NULL, NULL, "nodes"); -- for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) { -+ for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { - pe_node_t *node = (pe_node_t *) gIter->data; - - if (!pcmk__str_in_list(only_node, node->details->uname)) { -@@ -1597,10 +1597,10 @@ pe__node_list_xml(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean") -+PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "gboolean") - int - pe__op_history_text(pcmk__output_t *out, va_list args) { -- xmlNode *xml_op = va_arg(args, xmlNode *); -+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr); - const char *task = va_arg(args, const char *); - const char *interval_ms_s = va_arg(args, const char *); - int rc = va_arg(args, int); -@@ -1614,10 +1614,10 @@ pe__op_history_text(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean") -+PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "gboolean") - int - pe__op_history_xml(pcmk__output_t *out, va_list args) { -- xmlNode *xml_op = va_arg(args, xmlNode *); -+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr); - const char *task = va_arg(args, const char *); - const char *interval_ms_s = va_arg(args, const char *); - int rc = va_arg(args, int); -@@ -1760,7 +1760,7 @@ pe__resource_history_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "unsigned int", "gboolean", -- "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr", "gboolean") -+ "gboolean", "gboolean", "gboolean", "GList *", "GList *", "gboolean") - int - pe__resource_list(pcmk__output_t *out, va_list args) - { -@@ -1770,11 +1770,11 @@ pe__resource_list(pcmk__output_t *out, va_list args) - gboolean inactive_resources = va_arg(args, gboolean); - gboolean brief_output = va_arg(args, gboolean); - gboolean print_summary = va_arg(args, gboolean); -- GListPtr only_node = va_arg(args, GListPtr); -- GListPtr only_rsc = va_arg(args, GListPtr); -+ GList *only_node = va_arg(args, GList *); -+ GList *only_rsc = va_arg(args, GList *); - gboolean print_spacer = va_arg(args, gboolean); - -- GListPtr rsc_iter; -+ GList *rsc_iter; - int rc = pcmk_rc_no_output; - - /* If we already showed active resources by node, and -@@ -1798,7 +1798,7 @@ pe__resource_list(pcmk__output_t *out, va_list args) - /* If we haven't already printed resources grouped by node, - * and brief output was requested, print resource summary */ - if (brief_output && !group_by_node) { -- GListPtr rscs = pe__filter_rsc_list(data_set->resources, 
only_rsc); -+ GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc); - - pe__rscs_brief_output(out, rscs, print_opts, inactive_resources); - g_list_free(rscs); -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index cb06879..4cb78b5 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -247,9 +247,9 @@ resource_check_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *parent = uber_parent(checks->rsc); - int rc = pcmk_rc_no_output; - -- xmlNode *node = pcmk__output_create_xml_node(out, "check", -- "id", parent->id, -- NULL); -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "check", -+ "id", parent->id, -+ NULL); - - if (pcmk_is_set(checks->flags, rsc_remain_stopped)) { - crm_xml_add(node, "remain_stopped", "true"); -@@ -270,11 +270,11 @@ resource_check_xml(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("resource-search", "GListPtr", "pe_resource_t *", "gchar *") -+PCMK__OUTPUT_ARGS("resource-search", "GList *", "pe_resource_t *", "gchar *") - static int - resource_search_default(pcmk__output_t *out, va_list args) - { -- GListPtr nodes = va_arg(args, GListPtr); -+ GList *nodes = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gchar *requested_name = va_arg(args, gchar *); - -@@ -286,7 +286,7 @@ resource_search_default(pcmk__output_t *out, va_list args) - return rc; - } - -- for (GListPtr lpc = nodes; lpc != NULL; lpc = lpc->next) { -+ for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) { - pe_node_t *node = (pe_node_t *) lpc->data; - - if (!printed) { -@@ -316,11 +316,11 @@ resource_search_default(pcmk__output_t *out, va_list args) - } - - --PCMK__OUTPUT_ARGS("resource-search", "GListPtr", "pe_resource_t *", "gchar *") -+PCMK__OUTPUT_ARGS("resource-search", "GList *", "pe_resource_t *", "gchar *") - static int - resource_search_xml(pcmk__output_t *out, va_list args) - { -- GListPtr nodes = va_arg(args, GListPtr); -+ GList *nodes = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gchar *requested_name = va_arg(args, gchar *); - -@@ -328,9 +328,9 @@ resource_search_xml(pcmk__output_t *out, va_list args) - "resource", requested_name, - NULL); - -- for (GListPtr lpc = nodes; lpc != NULL; lpc = lpc->next) { -+ for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) { - pe_node_t *node = (pe_node_t *) lpc->data; -- xmlNode *sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname); -+ xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname); - - if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { - crm_xml_add(sub_node, "state", "promoted"); -@@ -340,13 +340,13 @@ resource_search_xml(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GListPtr", "pe_resource_t *", -+PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GList *", "pe_resource_t *", - "pe_node_t *") - static int - resource_why_default(pcmk__output_t *out, va_list args) - { - cib_t *cib_conn = va_arg(args, cib_t *); -- GListPtr resources = va_arg(args, GListPtr); -+ GList *resources = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - pe_node_t *node = va_arg(args, pe_node_t *); - -@@ -355,8 +355,8 @@ resource_why_default(pcmk__output_t *out, va_list args) - out->begin_list(out, NULL, NULL, "Resource Reasons"); - - if ((rsc == NULL) && (host_uname == NULL)) { -- GListPtr lpc = NULL; -- GListPtr hosts = NULL; -+ GList 
*lpc = NULL; -+ GList *hosts = NULL; - - for (lpc = resources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; -@@ -386,10 +386,10 @@ resource_why_default(pcmk__output_t *out, va_list args) - - } else if ((rsc == NULL) && (host_uname != NULL)) { - const char* host_uname = node->details->uname; -- GListPtr allResources = node->details->allocated_rsc; -- GListPtr activeResources = node->details->running_rsc; -- GListPtr unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); -- GListPtr lpc = NULL; -+ GList *allResources = node->details->allocated_rsc; -+ GList *activeResources = node->details->running_rsc; -+ GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); -+ GList *lpc = NULL; - - for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; -@@ -410,7 +410,7 @@ resource_why_default(pcmk__output_t *out, va_list args) - g_list_free(unactiveResources); - - } else if ((rsc != NULL) && (host_uname == NULL)) { -- GListPtr hosts = NULL; -+ GList *hosts = NULL; - - rsc->fns->location(rsc, &hosts, TRUE); - out->list_item(out, "reason", "Resource %s is %srunning", -@@ -423,23 +423,23 @@ resource_why_default(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GListPtr", "pe_resource_t *", -+PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GList *", "pe_resource_t *", - "pe_node_t *") - static int - resource_why_xml(pcmk__output_t *out, va_list args) - { - cib_t *cib_conn = va_arg(args, cib_t *); -- GListPtr resources = va_arg(args, GListPtr); -+ GList *resources = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - pe_node_t *node = va_arg(args, pe_node_t *); - - const char *host_uname = (node == NULL)? 
NULL : node->details->uname; - -- xmlNode *xml_node = pcmk__output_xml_create_parent(out, "reason", NULL); -+ xmlNodePtr xml_node = pcmk__output_xml_create_parent(out, "reason", NULL); - - if ((rsc == NULL) && (host_uname == NULL)) { -- GListPtr lpc = NULL; -- GListPtr hosts = NULL; -+ GList *lpc = NULL; -+ GList *hosts = NULL; - - pcmk__output_xml_create_parent(out, "resources", NULL); - -@@ -470,10 +470,10 @@ resource_why_xml(pcmk__output_t *out, va_list args) - - } else if ((rsc == NULL) && (host_uname != NULL)) { - const char* host_uname = node->details->uname; -- GListPtr allResources = node->details->allocated_rsc; -- GListPtr activeResources = node->details->running_rsc; -- GListPtr unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); -- GListPtr lpc = NULL; -+ GList *allResources = node->details->allocated_rsc; -+ GList *activeResources = node->details->running_rsc; -+ GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); -+ GList *lpc = NULL; - - pcmk__output_xml_create_parent(out, "resources", NULL); - -@@ -509,7 +509,7 @@ resource_why_xml(pcmk__output_t *out, va_list args) - g_list_free(unactiveResources); - - } else if ((rsc != NULL) && (host_uname == NULL)) { -- GListPtr hosts = NULL; -+ GList *hosts = NULL; - - rsc->fns->location(rsc, &hosts, TRUE); - crm_xml_add(xml_node, "running", pcmk__btoa(hosts != NULL)); -@@ -532,10 +532,10 @@ add_resource_name(pcmk__output_t *out, pe_resource_t *rsc) { - } - } - --PCMK__OUTPUT_ARGS("resource-names-list", "GListPtr") -+PCMK__OUTPUT_ARGS("resource-names-list", "GList *") - static int - resource_names(pcmk__output_t *out, va_list args) { -- GListPtr resources = va_arg(args, GListPtr); -+ GList *resources = va_arg(args, GList *); - - if (resources == NULL) { - out->err(out, "NO resources configured\n"); -@@ -544,7 +544,7 @@ resource_names(pcmk__output_t *out, va_list args) { - - out->begin_list(out, NULL, NULL, "Resource Names"); - -- for (GListPtr lpc = resources; lpc != NULL; lpc = lpc->next) { -+ for (GList *lpc = resources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; - add_resource_name(out, rsc); - } --- -1.8.3.1 - - -From 2075e40607d4a73617a079163cb7c6a2301b7d86 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 9 Nov 2020 10:41:34 -0500 -Subject: [PATCH 2/9] Fix: libpacemaker: Remove some G_GNUC_UNUSED references. - -These variables are, in fact, being used. 
---- - lib/pacemaker/pcmk_output.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index a0b12b9..a2bc931 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -218,7 +218,7 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - - PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") - static int locations_list(pcmk__output_t *out, va_list args) { -- pe_resource_t *rsc G_GNUC_UNUSED = va_arg(args, pe_resource_t *); -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); - - GList *lpc = NULL; - GList *list = rsc->rsc_location; -@@ -280,9 +280,9 @@ static int locations_list_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean") - static int - stacks_and_constraints(pcmk__output_t *out, va_list args) { -- pe_resource_t *rsc G_GNUC_UNUSED = va_arg(args, pe_resource_t *); -- pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *); -- gboolean recursive G_GNUC_UNUSED = va_arg(args, gboolean); -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); -+ gboolean recursive = va_arg(args, gboolean); - - GList *lpc = NULL; - xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, --- -1.8.3.1 - - -From 578f7a22fd75b50fce21de474d6b373146a00666 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 20 Nov 2020 16:20:23 -0500 -Subject: [PATCH 3/9] Fix: libs, tools: Rearrange formatted output args - popping. - -For consistency, move all the va_args stuff to the beginning of each -function, and then declare other variables after that. ---- - lib/pacemaker/pcmk_output.c | 1 + - lib/pengine/pe_output.c | 37 ++++++++++++++++++++----------------- - tools/crm_mon_curses.c | 1 + - 3 files changed, 22 insertions(+), 17 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index a2bc931..deb1fe5 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -460,6 +460,7 @@ static int - crmadmin_node_list(pcmk__output_t *out, va_list args) - { - xmlNodePtr xml_node = va_arg(args, xmlNodePtr); -+ - int found = 0; - xmlNode *node = NULL; - xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index 3d2fc24..a7118a4 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -216,8 +216,8 @@ pe__cluster_summary(pcmk__output_t *out, va_list args) { - gboolean show_times = va_arg(args, gboolean); - gboolean show_counts = va_arg(args, gboolean); - gboolean show_options = va_arg(args, gboolean); -- int rc = pcmk_rc_no_output; - -+ int rc = pcmk_rc_no_output; - const char *stack_s = get_cluster_stack(data_set); - - if (show_stack) { -@@ -282,8 +282,8 @@ pe__cluster_summary_html(pcmk__output_t *out, va_list args) { - gboolean show_times = va_arg(args, gboolean); - gboolean show_counts = va_arg(args, gboolean); - gboolean show_options = va_arg(args, gboolean); -- int rc = pcmk_rc_no_output; - -+ int rc = pcmk_rc_no_output; - const char *stack_s = get_cluster_stack(data_set); - - if (show_stack) { -@@ -487,14 +487,14 @@ pe__ban_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") - int - pe__cluster_counts_html(pcmk__output_t *out, va_list args) { -- xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", 
NULL); -- xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL); -- - unsigned int nnodes = va_arg(args, unsigned int); - int nresources = va_arg(args, int); - int ndisabled = va_arg(args, int); - int nblocked = va_arg(args, int); - -+ xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL); -+ xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL); -+ - char *nnodes_str = crm_strdup_printf("%d node%s configured", - nnodes, pcmk__plural_s(nnodes)); - -@@ -583,14 +583,14 @@ pe__cluster_counts_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") - int - pe__cluster_counts_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL); -- xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL); -- - unsigned int nnodes = va_arg(args, unsigned int); - int nresources = va_arg(args, int); - int ndisabled = va_arg(args, int); - int nblocked = va_arg(args, int); - -+ xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL); -+ xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL); -+ - char *s = crm_itoa(nnodes); - crm_xml_add(nodes_node, "number", s); - free(s); -@@ -613,13 +613,13 @@ pe__cluster_counts_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *") - int - pe__cluster_dc_html(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); -- - pe_node_t *dc = va_arg(args, pe_node_t *); - const char *quorum = va_arg(args, const char *); - const char *dc_version_s = va_arg(args, const char *); - char *dc_name = va_arg(args, char *); - -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); -+ - pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: "); - - if (dc) { -@@ -820,6 +820,7 @@ PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *") - int - pe__cluster_options_xml(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); -+ - const char *no_quorum_policy = NULL; - - switch (data_set->no_quorum_policy) { -@@ -857,9 +858,10 @@ pe__cluster_options_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-stack", "const char *") - int - pe__cluster_stack_html(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); - const char *stack_s = va_arg(args, const char *); - -+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); -+ - pcmk_create_html_node(node, "span", NULL, "bold", "Stack: "); - pcmk_create_html_node(node, "span", NULL, NULL, stack_s); - -@@ -870,6 +872,7 @@ PCMK__OUTPUT_ARGS("cluster-stack", "const char *") - int - pe__cluster_stack_text(pcmk__output_t *out, va_list args) { - const char *stack_s = va_arg(args, const char *); -+ - out->list_item(out, "Stack", "%s", stack_s); - return pcmk_rc_ok; - } -@@ -889,14 +892,14 @@ pe__cluster_stack_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *") - int - pe__cluster_times_html(pcmk__output_t *out, va_list args) { -- xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL); -- xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL); -- - const char *last_written = va_arg(args, 
const char *); - const char *user = va_arg(args, const char *); - const char *client = va_arg(args, const char *); - const char *origin = va_arg(args, const char *); - -+ xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL); -+ xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL); -+ - char *buf = last_changed_string(last_written, user, client, origin); - - pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: "); -@@ -952,6 +955,7 @@ PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr") - int - pe__failed_action_text(pcmk__output_t *out, va_list args) { - xmlNodePtr xml_op = va_arg(args, xmlNodePtr); -+ - char *s = failed_action_string(xml_op); - - out->list_item(out, NULL, "%s", s); -@@ -1229,7 +1233,6 @@ pe__node_attribute_text(pcmk__output_t *out, va_list args) { - gboolean add_extra = va_arg(args, gboolean); - int expected_score = va_arg(args, int); - -- - if (add_extra) { - int v = crm_parse_int(value, "0"); - -@@ -1904,10 +1907,10 @@ pe__ticket_text(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *") - int - pe__ticket_xml(pcmk__output_t *out, va_list args) { -- xmlNodePtr node = NULL; -- - pe_ticket_t *ticket = va_arg(args, pe_ticket_t *); - -+ xmlNodePtr node = NULL; -+ - node = pcmk__output_create_xml_node(out, "ticket", - "id", ticket->id, - "status", ticket->granted ? "granted" : "revoked", -diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c -index 9cf28dc..869c7f3 100644 ---- a/tools/crm_mon_curses.c -+++ b/tools/crm_mon_curses.c -@@ -369,6 +369,7 @@ PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int") - static int - cluster_maint_mode_console(pcmk__output_t *out, va_list args) { - unsigned long long flags = va_arg(args, unsigned long long); -+ - int rc; - - if (pcmk_is_set(flags, pe_flag_maintenance_mode)) { --- -1.8.3.1 - - -From 7a26a80bc7983f79a69c3548a635bf88c031beb7 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 9 Nov 2020 12:30:19 -0500 -Subject: [PATCH 4/9] Fix: libs: Remove extra whitespace in list_item calls. 
- ---- - cts/cli/regression.crm_mon.exp | 50 +++++++++++++++++++++--------------------- - lib/pengine/clone.c | 10 ++++----- - lib/pengine/native.c | 6 ++--- - lib/pengine/pe_output.c | 4 ++-- - 4 files changed, 35 insertions(+), 35 deletions(-) - -diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp -index e9f36ad..cf7d28e 100644 ---- a/cts/cli/regression.crm_mon.exp -+++ b/cts/cli/regression.crm_mon.exp -@@ -480,13 +480,13 @@ Node List: - * Online: [ cluster01 cluster02 ] - - Active Resources: -- * 1 (ocf::pacemaker:Dummy): Active cluster02 -- * 1 (stonith:fence_xvm): Active cluster01 -+ * 1 (ocf::pacemaker:Dummy): Active cluster02 -+ * 1 (stonith:fence_xvm): Active cluster01 - * Clone Set: ping-clone [ping]: - * Started: [ cluster01 cluster02 ] - * Resource Group: exim-group: -- * 1/1 (lsb:exim): Active cluster02 -- * 1/1 (ocf::heartbeat:IPaddr): Active cluster02 -+ * 1/1 (lsb:exim): Active cluster02 -+ * 1/1 (ocf::heartbeat:IPaddr): Active cluster02 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster01 cluster02 ] - -@@ -609,16 +609,16 @@ Cluster Summary: - Node List: - * Node cluster01: online: - * Resources: -- * 1 (lsb:mysql-proxy): Active -- * 1 (ocf::pacemaker:ping): Active -- * 1 (stonith:fence_xvm): Active -+ * 1 (lsb:mysql-proxy): Active -+ * 1 (ocf::pacemaker:ping): Active -+ * 1 (stonith:fence_xvm): Active - * Node cluster02: online: - * Resources: -- * 1 (lsb:exim): Active -- * 1 (lsb:mysql-proxy): Active -- * 1 (ocf::heartbeat:IPaddr): Active -- * 1 (ocf::pacemaker:Dummy): Active -- * 1 (ocf::pacemaker:ping): Active -+ * 1 (lsb:exim): Active -+ * 1 (lsb:mysql-proxy): Active -+ * 1 (ocf::heartbeat:IPaddr): Active -+ * 1 (ocf::pacemaker:Dummy): Active -+ * 1 (ocf::pacemaker:ping): Active - - Node Attributes: - * Node: cluster01: -@@ -2857,7 +2857,7 @@ Node List: - * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] - - Full List of Resources: -- * 1/1 (stonith:fence_xvm): Active cluster01 -+ * 1/1 (stonith:fence_xvm): Active cluster01 - * Clone Set: ping-clone [ping]: - * Started: [ cluster01 ] - * Stopped: [ cluster02 ] -@@ -2865,7 +2865,7 @@ Full List of Resources: - * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02 - * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01 - * Resource Group: partially-active-group: -- * 1/2 (ocf::pacemaker:Dummy): Active cluster02 -+ * 1/2 (ocf::pacemaker:Dummy): Active cluster02 - - Node Attributes: - * Node: cluster01: -@@ -2919,20 +2919,20 @@ Cluster Summary: - Node List: - * Node cluster01: online: - * Resources: -- * 1 (ocf::heartbeat:IPaddr2): Active -- * 1 (ocf::heartbeat:docker): Active -- * 1 (ocf::pacemaker:ping): Active -- * 1 (ocf::pacemaker:remote): Active -- * 1 (stonith:fence_xvm): Active -+ * 1 (ocf::heartbeat:IPaddr2): Active -+ * 1 (ocf::heartbeat:docker): Active -+ * 1 (ocf::pacemaker:ping): Active -+ * 1 (ocf::pacemaker:remote): Active -+ * 1 (stonith:fence_xvm): Active - * Node cluster02: online: - * Resources: -- * 1 (ocf::heartbeat:IPaddr2): Active -- * 1 (ocf::heartbeat:docker): Active -- * 1 (ocf::pacemaker:Dummy): Active -- * 1 (ocf::pacemaker:remote): Active -+ * 1 (ocf::heartbeat:IPaddr2): Active -+ * 1 (ocf::heartbeat:docker): Active -+ * 1 (ocf::pacemaker:Dummy): Active -+ * 1 (ocf::pacemaker:remote): Active - * GuestNode httpd-bundle-0@cluster02: online: - * Resources: -- * 1 (ocf::heartbeat:apache): Active -+ * 1 (ocf::heartbeat:apache): Active - - Inactive Resources: - * Clone Set: ping-clone 
[ping]: -@@ -2942,7 +2942,7 @@ Inactive Resources: - * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02 - * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01 - * Resource Group: partially-active-group: -- * 1/2 (ocf::pacemaker:Dummy): Active cluster02 -+ * 1/2 (ocf::pacemaker:Dummy): Active cluster02 - - Node Attributes: - * Node: cluster01: -diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c -index 9b48d01..9e98178 100644 ---- a/lib/pengine/clone.c -+++ b/lib/pengine/clone.c -@@ -781,7 +781,7 @@ pe__clone_html(pcmk__output_t *out, va_list args) - } - - if (list_text != NULL) { -- out->list_item(out, NULL, " Masters: [ %s ]", list_text); -+ out->list_item(out, NULL, "Masters: [ %s ]", list_text); - g_list_free(master_list); - free(list_text); - list_text = NULL; -@@ -806,13 +806,13 @@ pe__clone_html(pcmk__output_t *out, va_list args) - enum rsc_role_e role = configured_role(rsc); - - if(role == RSC_ROLE_SLAVE) { -- out->list_item(out, NULL, " Slaves (target-role): [ %s ]", list_text); -+ out->list_item(out, NULL, "Slaves (target-role): [ %s ]", list_text); - } else { -- out->list_item(out, NULL, " Slaves: [ %s ]", list_text); -+ out->list_item(out, NULL, "Slaves: [ %s ]", list_text); - } - - } else { -- out->list_item(out, NULL, " Started: [ %s ]", list_text); -+ out->list_item(out, NULL, "Started: [ %s ]", list_text); - } - - g_list_free(started_list); -@@ -861,7 +861,7 @@ pe__clone_html(pcmk__output_t *out, va_list args) - } - - if (stopped_list != NULL) { -- out->list_item(out, NULL, " %s: [ %s ]", state, stopped_list); -+ out->list_item(out, NULL, "%s: [ %s ]", state, stopped_list); - free(stopped_list); - stopped_list_len = 0; - } -diff --git a/lib/pengine/native.c b/lib/pengine/native.c -index 6f27d7b..193be17 100644 ---- a/lib/pengine/native.c -+++ b/lib/pengine/native.c -@@ -1320,12 +1320,12 @@ pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboo - } - - if (print_all) { -- out->list_item(out, NULL, " %d/%d\t(%s):\tActive %s", -+ out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s", - *active_counter, - rsc_counter ? *rsc_counter : 0, type, - (*active_counter > 0) && node_name ? node_name : ""); - } else { -- out->list_item(out, NULL, " %d\t(%s):\tActive %s", -+ out->list_item(out, NULL, "%d\t(%s):\tActive %s", - *active_counter, type, - (*active_counter > 0) && node_name ? node_name : ""); - } -@@ -1334,7 +1334,7 @@ pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboo - } - - if (print_all && active_counter_all == 0) { -- out->list_item(out, NULL, " %d/%d\t(%s):\tActive", -+ out->list_item(out, NULL, "%d/%d\t(%s):\tActive", - active_counter_all, - rsc_counter ? *rsc_counter : 0, type); - rc = pcmk_rc_ok; -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index a7118a4..e26604e 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -1890,13 +1890,13 @@ pe__ticket_text(pcmk__output_t *out, va_list args) { - - if (ticket->last_granted > -1) { - char *time = pcmk_format_named_time("last-granted", ticket->last_granted); -- out->list_item(out, ticket->id, "\t%s%s %s", -+ out->list_item(out, ticket->id, "%s%s %s", - ticket->granted ? "granted" : "revoked", - ticket->standby ? " [standby]" : "", - time); - free(time); - } else { -- out->list_item(out, ticket->id, "\t%s%s", -+ out->list_item(out, ticket->id, "%s%s", - ticket->granted ? "granted" : "revoked", - ticket->standby ? 
" [standby]" : ""); - } --- -1.8.3.1 - - -From 3db514eed9764d6947f114dfe870a88d082db8a3 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 9 Nov 2020 13:53:23 -0500 -Subject: [PATCH 5/9] Fix: libs, tools: Don't use fprintf in formatted output. - -That's what out->info is for. And then having done this, the special -console-specific version of the maint-mode message can go away. It's -now exactly the same as the text version. ---- - lib/pengine/pe_output.c | 10 ++++------ - tools/crm_mon_curses.c | 28 +--------------------------- - 2 files changed, 5 insertions(+), 33 deletions(-) - -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index e26604e..1b57f64 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -694,14 +694,12 @@ pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args) { - unsigned long long flags = va_arg(args, unsigned long long); - - if (pcmk_is_set(flags, pe_flag_maintenance_mode)) { -- fprintf(out->dest, "\n *** Resource management is DISABLED ***"); -- fprintf(out->dest, "\n The cluster will not attempt to start, stop or recover services"); -- fprintf(out->dest, "\n"); -+ out->info(out, "\n *** Resource management is DISABLED ***"); -+ out->info(out, " The cluster will not attempt to start, stop or recover services"); - return pcmk_rc_ok; - } else if (pcmk_is_set(flags, pe_flag_stop_everything)) { -- fprintf(out->dest, "\n *** Resource management is DISABLED ***"); -- fprintf(out->dest, "\n The cluster will keep all resources stopped"); -- fprintf(out->dest, "\n"); -+ out->info(out, "\n *** Resource management is DISABLED ***"); -+ out->info(out, " The cluster will keep all resources stopped"); - return pcmk_rc_ok; - } else { - return pcmk_rc_no_output; -diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c -index 869c7f3..ae22198 100644 ---- a/tools/crm_mon_curses.c -+++ b/tools/crm_mon_curses.c -@@ -365,32 +365,6 @@ stonith_event_console(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int") --static int --cluster_maint_mode_console(pcmk__output_t *out, va_list args) { -- unsigned long long flags = va_arg(args, unsigned long long); -- -- int rc; -- -- if (pcmk_is_set(flags, pe_flag_maintenance_mode)) { -- printw("\n *** Resource management is DISABLED ***"); -- printw("\n The cluster will not attempt to start, stop or recover services"); -- printw("\n"); -- rc = pcmk_rc_ok; -- } else if (pcmk_is_set(flags, pe_flag_stop_everything)) { -- printw("\n *** Resource management is DISABLED ***"); -- printw("\n The cluster will keep all resources stopped"); -- printw("\n"); -- rc = pcmk_rc_ok; -- } else { -- rc = pcmk_rc_no_output; -- } -- -- clrtoeol(); -- refresh(); -- return rc; --} -- - static pcmk__message_entry_t fmt_functions[] = { - { "ban", "console", pe__ban_text }, - { "bundle", "console", pe__bundle_text }, -@@ -406,7 +380,7 @@ static pcmk__message_entry_t fmt_functions[] = { - { "fencing-history", "console", stonith__history }, - { "full-fencing-history", "console", stonith__full_history }, - { "group", "console", pe__group_text }, -- { "maint-mode", "console", cluster_maint_mode_console }, -+ { "maint-mode", "console", pe__cluster_maint_mode_text }, - { "node", "console", pe__node_text }, - { "node-attribute", "console", pe__node_attribute_text }, - { "node-list", "console", pe__node_list_text }, --- -1.8.3.1 - - -From a7bbe968df5617c78e78495406f95f9d258834f1 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 19 Nov 2020 16:04:09 -0500 
-Subject: [PATCH 6/9] Fix: libstonithd: Use subprocess_output directly. - -stonith__validate_agent_text was using puts, when it should really just -be using subprocess_output. The effect is the same but we should be -using the same functions everywhere. ---- - lib/fencing/st_output.c | 9 +-------- - 1 file changed, 1 insertion(+), 8 deletions(-) - -diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c -index 7c3ccef..145dd14 100644 ---- a/lib/fencing/st_output.c -+++ b/lib/fencing/st_output.c -@@ -403,14 +403,7 @@ stonith__validate_agent_text(pcmk__output_t *out, va_list args) { - rc ? "failed" : "succeeded"); - } - -- if (output) { -- puts(output); -- } -- -- if (error_output) { -- puts(error_output); -- } -- -+ out->subprocess_output(out, rc, output, error_output); - return rc; - } - --- -1.8.3.1 - - -From 39062beb1f243078bab71cc26af44019927da112 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 20 Nov 2020 16:22:12 -0500 -Subject: [PATCH 7/9] Fix: libs, tools: Add -list to certain formatted output - message names. - -Basically, if the message creates its own list, the name should end in --list. This is a hint to the caller that they do not need to add an -extra layer of lists. I've further changed the names of these messages -to be a little shorter, too. - -Note that some messages (the resource variants most obviously) create -their own list but don't have -list in the name. Here, the list is more -for organizational purposes rather than indicating that it contains -items. ---- - lib/fencing/st_output.c | 20 +++++++-------- - tools/crm_mon_curses.c | 8 +++--- - tools/crm_mon_print.c | 14 +++++----- - tools/crm_resource.c | 8 +++--- - tools/crm_resource_print.c | 61 ++++++++++++++++++++++---------------------- - tools/crm_resource_runtime.c | 2 +- - 6 files changed, 56 insertions(+), 57 deletions(-) - -diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c -index 145dd14..e1f4830 100644 ---- a/lib/fencing/st_output.c -+++ b/lib/fencing/st_output.c -@@ -30,7 +30,7 @@ time_t_string(time_t when) { - return buf; - } - --PCMK__OUTPUT_ARGS("failed-fencing-history", "stonith_history_t *", "GList *", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("failed-fencing-list", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__failed_history(pcmk__output_t *out, va_list args) { - stonith_history_t *history = va_arg(args, stonith_history_t *); -@@ -58,7 +58,7 @@ stonith__failed_history(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("fencing-history", "stonith_history_t *", "GList *", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("fencing-list", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__history(pcmk__output_t *out, va_list args) { - stonith_history_t *history = va_arg(args, stonith_history_t *); -@@ -84,7 +84,7 @@ stonith__history(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("full-fencing-history", "crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("full-fencing-list", "crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__full_history(pcmk__output_t *out, va_list args) { - crm_exit_t history_rc G_GNUC_UNUSED = va_arg(args, crm_exit_t); -@@ -109,7 +109,7 @@ stonith__full_history(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("full-fencing-history", "crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("full-fencing-list", 
"crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__full_history_xml(pcmk__output_t *out, va_list args) { - crm_exit_t history_rc = va_arg(args, crm_exit_t); -@@ -198,7 +198,7 @@ stonith__last_fenced_xml(pcmk__output_t *out, va_list args) { - } - } - --PCMK__OUTPUT_ARGS("pending-fencing-actions", "stonith_history_t *", "GList *", "gboolean", "gboolean") -+PCMK__OUTPUT_ARGS("pending-fencing-list", "stonith_history_t *", "GList *", "gboolean", "gboolean") - int - stonith__pending_actions(pcmk__output_t *out, va_list args) { - stonith_history_t *history = va_arg(args, stonith_history_t *); -@@ -433,15 +433,15 @@ stonith__validate_agent_xml(pcmk__output_t *out, va_list args) { - } - - static pcmk__message_entry_t fmt_functions[] = { -- { "failed-fencing-history", "default", stonith__failed_history }, -- { "fencing-history", "default", stonith__history }, -- { "full-fencing-history", "default", stonith__full_history }, -- { "full-fencing-history", "xml", stonith__full_history_xml }, -+ { "failed-fencing-list", "default", stonith__failed_history }, -+ { "fencing-list", "default", stonith__history }, -+ { "full-fencing-list", "default", stonith__full_history }, -+ { "full-fencing-list", "xml", stonith__full_history_xml }, - { "last-fenced", "html", stonith__last_fenced_html }, - { "last-fenced", "log", stonith__last_fenced_text }, - { "last-fenced", "text", stonith__last_fenced_text }, - { "last-fenced", "xml", stonith__last_fenced_xml }, -- { "pending-fencing-actions", "default", stonith__pending_actions }, -+ { "pending-fencing-list", "default", stonith__pending_actions }, - { "stonith-event", "html", stonith__event_html }, - { "stonith-event", "log", stonith__event_text }, - { "stonith-event", "text", stonith__event_text }, -diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c -index ae22198..5c79dd2 100644 ---- a/tools/crm_mon_curses.c -+++ b/tools/crm_mon_curses.c -@@ -376,16 +376,16 @@ static pcmk__message_entry_t fmt_functions[] = { - { "cluster-summary", "console", pe__cluster_summary }, - { "cluster-times", "console", pe__cluster_times_text }, - { "failed-action", "console", pe__failed_action_text }, -- { "failed-fencing-history", "console", stonith__failed_history }, -- { "fencing-history", "console", stonith__history }, -- { "full-fencing-history", "console", stonith__full_history }, -+ { "failed-fencing-list", "console", stonith__failed_history }, -+ { "fencing-list", "console", stonith__history }, -+ { "full-fencing-list", "console", stonith__full_history }, - { "group", "console", pe__group_text }, - { "maint-mode", "console", pe__cluster_maint_mode_text }, - { "node", "console", pe__node_text }, - { "node-attribute", "console", pe__node_attribute_text }, - { "node-list", "console", pe__node_list_text }, - { "op-history", "console", pe__op_history_text }, -- { "pending-fencing-actions", "console", stonith__pending_actions }, -+ { "pending-fencing-list", "console", stonith__pending_actions }, - { "primitive", "console", pe__resource_text }, - { "resource-history", "console", pe__resource_history_text }, - { "stonith-event", "console", stonith_event_console }, -diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c -index ce3e47c..06840b7 100644 ---- a/tools/crm_mon_print.c -+++ b/tools/crm_mon_print.c -@@ -735,7 +735,7 @@ print_status(pcmk__output_t *out, pe_working_set_t *data_set, - GINT_TO_POINTER(st_failed)); - - if (hp) { -- CHECK_RC(rc, out->message(out, "failed-fencing-history", stonith_history, unames, -+ CHECK_RC(rc, 
out->message(out, "failed-fencing-list", stonith_history, unames, - pcmk_is_set(mon_ops, mon_op_fence_full_history), - rc == pcmk_rc_ok)); - } -@@ -759,7 +759,7 @@ print_status(pcmk__output_t *out, pe_working_set_t *data_set, - GINT_TO_POINTER(st_failed)); - - if (hp) { -- CHECK_RC(rc, out->message(out, "fencing-history", hp, unames, -+ CHECK_RC(rc, out->message(out, "fencing-list", hp, unames, - pcmk_is_set(mon_ops, mon_op_fence_full_history), - rc == pcmk_rc_ok)); - } -@@ -767,7 +767,7 @@ print_status(pcmk__output_t *out, pe_working_set_t *data_set, - stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); - - if (hp) { -- CHECK_RC(rc, out->message(out, "pending-fencing-actions", hp, unames, -+ CHECK_RC(rc, out->message(out, "pending-fencing-list", hp, unames, - pcmk_is_set(mon_ops, mon_op_fence_full_history), - rc == pcmk_rc_ok)); - } -@@ -853,7 +853,7 @@ print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set, - if (pcmk_is_set(show, mon_show_fencing_all) - && pcmk_is_set(mon_ops, mon_op_fence_history)) { - -- out->message(out, "full-fencing-history", history_rc, stonith_history, -+ out->message(out, "full-fencing-list", history_rc, stonith_history, - unames, pcmk_is_set(mon_ops, mon_op_fence_full_history), - FALSE); - } -@@ -954,7 +954,7 @@ print_html_status(pcmk__output_t *out, pe_working_set_t *data_set, - GINT_TO_POINTER(st_failed)); - - if (hp) { -- out->message(out, "failed-fencing-history", stonith_history, unames, -+ out->message(out, "failed-fencing-list", stonith_history, unames, - pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE); - } - } -@@ -966,7 +966,7 @@ print_html_status(pcmk__output_t *out, pe_working_set_t *data_set, - GINT_TO_POINTER(st_failed)); - - if (hp) { -- out->message(out, "fencing-history", hp, unames, -+ out->message(out, "fencing-list", hp, unames, - pcmk_is_set(mon_ops, mon_op_fence_full_history), - FALSE); - } -@@ -974,7 +974,7 @@ print_html_status(pcmk__output_t *out, pe_working_set_t *data_set, - stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); - - if (hp) { -- out->message(out, "pending-fencing-actions", hp, unames, -+ out->message(out, "pending-fencing-list", hp, unames, - pcmk_is_set(mon_ops, mon_op_fence_full_history), - FALSE); - } -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 2c62ff6..95c72fc 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1816,7 +1816,7 @@ main(int argc, char **argv) - - case cmd_locate: { - GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set); -- rc = out->message(out, "resource-search", resources, rsc, options.rsc_id); -+ rc = out->message(out, "resource-search-list", resources, rsc, options.rsc_id); - break; - } - -@@ -1839,7 +1839,7 @@ main(int argc, char **argv) - goto done; - } - } -- out->message(out, "resource-why", cib_conn, data_set->resources, rsc, dest); -+ out->message(out, "resource-reasons-list", cib_conn, data_set->resources, rsc, dest); - rc = pcmk_rc_ok; - } - break; -@@ -1879,7 +1879,7 @@ main(int argc, char **argv) - break; - - case cmd_get_property: -- rc = out->message(out, "property", rsc, options.prop_name); -+ rc = out->message(out, "property-list", rsc, options.prop_name); - if (rc == pcmk_rc_no_output) { - rc = ENXIO; - } -@@ -1916,7 +1916,7 @@ main(int argc, char **argv) - } - - crm_debug("Looking up %s in %s", options.prop_name, rsc->id); -- rc = out->message(out, "attribute", rsc, options.prop_name, params); -+ 
rc = out->message(out, "attribute-list", rsc, options.prop_name, params); - g_hash_table_destroy(params); - break; - } -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 4cb78b5..89d6172 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -127,9 +127,9 @@ cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc, - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("attribute", "pe_resource_t *", "char *", "GHashTable *") -+PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "char *", "GHashTable *") - static int --attribute_default(pcmk__output_t *out, va_list args) { -+attribute_list_default(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - char *attr = va_arg(args, char *); - GHashTable *params = va_arg(args, GHashTable *); -@@ -147,9 +147,9 @@ attribute_default(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("attribute", "pe_resource_t *", "char *", "GHashTable *") -+PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "char *", "GHashTable *") - static int --attribute_text(pcmk__output_t *out, va_list args) { -+attribute_list_text(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - char *attr = va_arg(args, char *); - GHashTable *params = va_arg(args, GHashTable *); -@@ -165,9 +165,9 @@ attribute_text(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("property", "pe_resource_t *", "char *") -+PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "char *") - static int --property_default(pcmk__output_t *out, va_list args) { -+property_list_default(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - char *attr = va_arg(args, char *); - -@@ -182,9 +182,9 @@ property_default(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("property", "pe_resource_t *", "char *") -+PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "char *") - static int --property_text(pcmk__output_t *out, va_list args) { -+property_list_text(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - char *attr = va_arg(args, char *); - -@@ -197,9 +197,9 @@ property_text(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("resource-check", "resource_checks_t *") -+PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *") - static int --resource_check_default(pcmk__output_t *out, va_list args) { -+resource_check_list_default(pcmk__output_t *out, va_list args) { - resource_checks_t *checks = va_arg(args, resource_checks_t *); - - pe_resource_t *parent = uber_parent(checks->rsc); -@@ -239,9 +239,9 @@ resource_check_default(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("resource-check", "resource_checks_t *") -+PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *") - static int --resource_check_xml(pcmk__output_t *out, va_list args) { -+resource_check_list_xml(pcmk__output_t *out, va_list args) { - resource_checks_t *checks = va_arg(args, resource_checks_t *); - - pe_resource_t *parent = uber_parent(checks->rsc); -@@ -270,9 +270,9 @@ resource_check_xml(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("resource-search", "GList *", "pe_resource_t *", "gchar *") -+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *") - static int --resource_search_default(pcmk__output_t *out, va_list 
args) -+resource_search_list_default(pcmk__output_t *out, va_list args) - { - GList *nodes = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -@@ -315,10 +315,9 @@ resource_search_default(pcmk__output_t *out, va_list args) - return rc; - } - -- --PCMK__OUTPUT_ARGS("resource-search", "GList *", "pe_resource_t *", "gchar *") -+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *") - static int --resource_search_xml(pcmk__output_t *out, va_list args) -+resource_search_list_xml(pcmk__output_t *out, va_list args) - { - GList *nodes = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -@@ -340,10 +339,10 @@ resource_search_xml(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GList *", "pe_resource_t *", -+PCMK__OUTPUT_ARGS("resource-reasons-list", "cib_t *", "GList *", "pe_resource_t *", - "pe_node_t *") - static int --resource_why_default(pcmk__output_t *out, va_list args) -+resource_reasons_list_default(pcmk__output_t *out, va_list args) - { - cib_t *cib_conn = va_arg(args, cib_t *); - GList *resources = va_arg(args, GList *); -@@ -423,10 +422,10 @@ resource_why_default(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GList *", "pe_resource_t *", -+PCMK__OUTPUT_ARGS("resource-reasons-list", "cib_t *", "GList *", "pe_resource_t *", - "pe_node_t *") - static int --resource_why_xml(pcmk__output_t *out, va_list args) -+resource_reasons_list_xml(pcmk__output_t *out, va_list args) - { - cib_t *cib_conn = va_arg(args, cib_t *); - GList *resources = va_arg(args, GList *); -@@ -554,16 +553,16 @@ resource_names(pcmk__output_t *out, va_list args) { - } - - static pcmk__message_entry_t fmt_functions[] = { -- { "attribute", "default", attribute_default }, -- { "attribute", "text", attribute_text }, -- { "property", "default", property_default }, -- { "property", "text", property_text }, -- { "resource-check", "default", resource_check_default }, -- { "resource-check", "xml", resource_check_xml }, -- { "resource-search", "default", resource_search_default }, -- { "resource-search", "xml", resource_search_xml }, -- { "resource-why", "default", resource_why_default }, -- { "resource-why", "xml", resource_why_xml }, -+ { "attribute-list", "default", attribute_list_default }, -+ { "attribute-list", "text", attribute_list_text }, -+ { "property-list", "default", property_list_default }, -+ { "property-list", "text", property_list_text }, -+ { "resource-check-list", "default", resource_check_list_default }, -+ { "resource-check-list", "xml", resource_check_list_xml }, -+ { "resource-search-list", "default", resource_search_list_default }, -+ { "resource-search-list", "xml", resource_search_list_xml }, -+ { "resource-reasons-list", "default", resource_reasons_list_default }, -+ { "resource-reasons-list", "xml", resource_reasons_list_xml }, - { "resource-names-list", "default", resource_names }, - - { NULL, NULL, NULL } -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index bbd8bc1..3a9feac 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -900,7 +900,7 @@ cli_resource_check(pcmk__output_t *out, cib_t * cib_conn, pe_resource_t *rsc) - checks = cli_check_resource(rsc, role_s, managed); - - if (checks->flags != 0 || checks->lock_node != NULL) { -- rc = out->message(out, "resource-check", checks); -+ rc = out->message(out, "resource-check-list", checks); - } - 
- free(role_s); --- -1.8.3.1 - - -From d7322d1c2802c2d65a82a19b2513b5769b25dc30 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 20 Nov 2020 16:23:14 -0500 -Subject: [PATCH 8/9] Fix: libpacemaker: Fix typing on crmadmin formatted - output messages. - -Mostly this is just changing char * into const char *, though the -crm_node_list one is pretty important. ---- - lib/pacemaker/pcmk_output.c | 64 ++++++++++++++++++++++----------------------- - 1 file changed, 32 insertions(+), 32 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index deb1fe5..5c54204 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -357,14 +357,14 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") - static int - health_text(pcmk__output_t *out, va_list args) - { -- char *sys_from = va_arg(args, char *); -- char *host_from = va_arg(args, char *); -- char *fsa_state = va_arg(args, char *); -- char *result = va_arg(args, char *); -+ const char *sys_from = va_arg(args, const char *); -+ const char *host_from = va_arg(args, const char *); -+ const char *fsa_state = va_arg(args, const char *); -+ const char *result = va_arg(args, const char *); - - if (!out->is_quiet(out)) { - out->info(out, "Status of %s@%s: %s (%s)", crm_str(sys_from), -@@ -376,14 +376,14 @@ health_text(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") - static int - health_xml(pcmk__output_t *out, va_list args) - { -- char *sys_from = va_arg(args, char *); -- char *host_from = va_arg(args, char *); -- char *fsa_state = va_arg(args, char *); -- char *result = va_arg(args, char *); -+ const char *sys_from = va_arg(args, const char *); -+ const char *host_from = va_arg(args, const char *); -+ const char *fsa_state = va_arg(args, const char *); -+ const char *result = va_arg(args, const char *); - - pcmk__output_create_xml_node(out, crm_str(sys_from), - "node_name", crm_str(host_from), -@@ -393,13 +393,13 @@ health_xml(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *") - static int - pacemakerd_health_text(pcmk__output_t *out, va_list args) - { -- char *sys_from = va_arg(args, char *); -- char *state = va_arg(args, char *); -- char *last_updated = va_arg(args, char *); -+ const char *sys_from = va_arg(args, const char *); -+ const char *state = va_arg(args, const char *); -+ const char *last_updated = va_arg(args, const char *); - - if (!out->is_quiet(out)) { - out->info(out, "Status of %s: '%s' %s %s", crm_str(sys_from), -@@ -412,13 +412,13 @@ pacemakerd_health_text(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *") -+PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *") - static int - pacemakerd_health_xml(pcmk__output_t *out, va_list args) - { -- char *sys_from = va_arg(args, char *); -- char *state = va_arg(args, char *); -- char *last_updated = va_arg(args, char *); -+ const char *sys_from = va_arg(args, const char *); -+ 
const char *state = va_arg(args, const char *); -+ const char *last_updated = va_arg(args, const char *); - - pcmk__output_create_xml_node(out, crm_str(sys_from), - "state", crm_str(state), -@@ -427,11 +427,11 @@ pacemakerd_health_xml(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("dc", "char *") -+PCMK__OUTPUT_ARGS("dc", "const char *") - static int - dc_text(pcmk__output_t *out, va_list args) - { -- char *dc = va_arg(args, char *); -+ const char *dc = va_arg(args, const char *); - - if (!out->is_quiet(out)) { - out->info(out, "Designated Controller is: %s", crm_str(dc)); -@@ -442,11 +442,11 @@ dc_text(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("dc", "char *") -+PCMK__OUTPUT_ARGS("dc", "const char *") - static int - dc_xml(pcmk__output_t *out, va_list args) - { -- char *dc = va_arg(args, char *); -+ const char *dc = va_arg(args, const char *); - - pcmk__output_create_xml_node(out, "dc", - "node_name", crm_str(dc), -@@ -455,16 +455,16 @@ dc_xml(pcmk__output_t *out, va_list args) - } - - --PCMK__OUTPUT_ARGS("crmadmin-node-list", "pcmk__output_t *", "xmlNodePtr") -+PCMK__OUTPUT_ARGS("crmadmin-node-list", "xmlNodePtr", "gboolean") - static int - crmadmin_node_list(pcmk__output_t *out, va_list args) - { - xmlNodePtr xml_node = va_arg(args, xmlNodePtr); -+ gboolean BASH_EXPORT = va_arg(args, gboolean); - - int found = 0; - xmlNode *node = NULL; - xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -- gboolean BASH_EXPORT = va_arg(args, gboolean); - - out->begin_list(out, NULL, NULL, "nodes"); - -@@ -490,13 +490,13 @@ crmadmin_node_list(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *", "gboolean") -+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "gboolean") - static int - crmadmin_node_text(pcmk__output_t *out, va_list args) - { -- char *type = va_arg(args, char *); -- char *name = va_arg(args, char *); -- char *id = va_arg(args, char *); -+ const char *type = va_arg(args, const char *); -+ const char *name = va_arg(args, const char *); -+ const char *id = va_arg(args, const char *); - gboolean BASH_EXPORT = va_arg(args, gboolean); - - if (BASH_EXPORT) { -@@ -509,13 +509,13 @@ crmadmin_node_text(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - --PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *", "gboolean") -+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "gboolean") - static int - crmadmin_node_xml(pcmk__output_t *out, va_list args) - { -- char *type = va_arg(args, char *); -- char *name = va_arg(args, char *); -- char *id = va_arg(args, char *); -+ const char *type = va_arg(args, const char *); -+ const char *name = va_arg(args, const char *); -+ const char *id = va_arg(args, const char *); - - pcmk__output_create_xml_node(out, "node", - "type", type ? type : "member", --- -1.8.3.1 - - -From 1a123a8d8a8ec22b6166b5cdebcfdfe6b05bcc4e Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 20 Nov 2020 16:23:39 -0500 -Subject: [PATCH 9/9] Fix: libs: Decrease the number of visible formatted - output functions. - -The only ones that need to be exported are the ones that crm_mon needs -to reference for its output. All the others can be static and only -accessed indirectly through calling out->message. - -And then remove the prefixes from these newly static functions. 
This -could potentially be a little confusing if the text version is public -(and therefore has the, say, pe__ prefix) while the xml version is -hidden and is lacking that suffix. But I think it'll be okay. ---- - include/crm/fencing/internal.h | 11 +-- - include/crm/pengine/internal.h | 54 ++++---------- - lib/fencing/st_output.c | 66 ++++++++--------- - lib/pengine/pe_output.c | 161 +++++++++++++++++++++-------------------- - 4 files changed, 129 insertions(+), 163 deletions(-) - -diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h -index 391ab72..dfb1d64 100644 ---- a/include/crm/fencing/internal.h -+++ b/include/crm/fencing/internal.h -@@ -185,20 +185,11 @@ int stonith__rhcs_validate(stonith_t *st, int call_options, const char *target, - const char *agent, GHashTable *params, const char *host_arg, - int timeout, char **output, char **error_output); - -+/* Exported for crm_mon to reference */ - int stonith__failed_history(pcmk__output_t *out, va_list args); - int stonith__history(pcmk__output_t *out, va_list args); - int stonith__full_history(pcmk__output_t *out, va_list args); --int stonith__full_history_xml(pcmk__output_t *out, va_list args); --int stonith__last_fenced_html(pcmk__output_t *out, va_list args); --int stonith__last_fenced_text(pcmk__output_t *out, va_list args); --int stonith__last_fenced_xml(pcmk__output_t *out, va_list args); - int stonith__pending_actions(pcmk__output_t *out, va_list args); --int stonith__event_html(pcmk__output_t *out, va_list args); --int stonith__event_text(pcmk__output_t *out, va_list args); --int stonith__event_xml(pcmk__output_t *out, va_list args); --int stonith__validate_agent_html(pcmk__output_t *out, va_list args); --int stonith__validate_agent_text(pcmk__output_t *out, va_list args); --int stonith__validate_agent_xml(pcmk__output_t *out, va_list args); - - stonith_history_t *stonith__first_matching_event(stonith_history_t *history, - bool (*matching_fn)(stonith_history_t *, void *), -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index c4b28cc..89e17b8 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -256,34 +256,9 @@ pe__rsc_bool_str(pe_resource_t *rsc, uint64_t rsc_flag) - return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag)); - } - --int pe__ban_html(pcmk__output_t *out, va_list args); --int pe__ban_text(pcmk__output_t *out, va_list args); --int pe__ban_xml(pcmk__output_t *out, va_list args); - int pe__clone_xml(pcmk__output_t *out, va_list args); - int pe__clone_html(pcmk__output_t *out, va_list args); - int pe__clone_text(pcmk__output_t *out, va_list args); --int pe__cluster_counts_html(pcmk__output_t *out, va_list args); --int pe__cluster_counts_text(pcmk__output_t *out, va_list args); --int pe__cluster_counts_xml(pcmk__output_t *out, va_list args); --int pe__cluster_dc_html(pcmk__output_t *out, va_list args); --int pe__cluster_dc_text(pcmk__output_t *out, va_list args); --int pe__cluster_dc_xml(pcmk__output_t *out, va_list args); --int pe__cluster_maint_mode_html(pcmk__output_t *out, va_list args); --int pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args); --int pe__cluster_options_html(pcmk__output_t *out, va_list args); --int pe__cluster_options_log(pcmk__output_t *out, va_list args); --int pe__cluster_options_text(pcmk__output_t *out, va_list args); --int pe__cluster_options_xml(pcmk__output_t *out, va_list args); --int pe__cluster_stack_html(pcmk__output_t *out, va_list args); --int pe__cluster_stack_text(pcmk__output_t *out, 
va_list args); --int pe__cluster_stack_xml(pcmk__output_t *out, va_list args); --int pe__cluster_summary(pcmk__output_t *out, va_list args); --int pe__cluster_summary_html(pcmk__output_t *out, va_list args); --int pe__cluster_times_html(pcmk__output_t *out, va_list args); --int pe__cluster_times_xml(pcmk__output_t *out, va_list args); --int pe__cluster_times_text(pcmk__output_t *out, va_list args); --int pe__failed_action_text(pcmk__output_t *out, va_list args); --int pe__failed_action_xml(pcmk__output_t *out, va_list args); - int pe__group_xml(pcmk__output_t *out, va_list args); - int pe__group_html(pcmk__output_t *out, va_list args); - int pe__group_text(pcmk__output_t *out, va_list args); -@@ -293,26 +268,25 @@ int pe__bundle_text(pcmk__output_t *out, va_list args); - int pe__node_html(pcmk__output_t *out, va_list args); - int pe__node_text(pcmk__output_t *out, va_list args); - int pe__node_xml(pcmk__output_t *out, va_list args); --int pe__node_and_op(pcmk__output_t *out, va_list args); --int pe__node_and_op_xml(pcmk__output_t *out, va_list args); --int pe__node_attribute_html(pcmk__output_t *out, va_list args); -+int pe__resource_xml(pcmk__output_t *out, va_list args); -+int pe__resource_html(pcmk__output_t *out, va_list args); -+int pe__resource_text(pcmk__output_t *out, va_list args); -+ -+/* Exported for crm_mon to reference */ -+int pe__ban_text(pcmk__output_t *out, va_list args); -+int pe__cluster_counts_text(pcmk__output_t *out, va_list args); -+int pe__cluster_dc_text(pcmk__output_t *out, va_list args); -+int pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args); -+int pe__cluster_options_text(pcmk__output_t *out, va_list args); -+int pe__cluster_stack_text(pcmk__output_t *out, va_list args); -+int pe__cluster_summary(pcmk__output_t *out, va_list args); -+int pe__cluster_times_text(pcmk__output_t *out, va_list args); -+int pe__failed_action_text(pcmk__output_t *out, va_list args); - int pe__node_attribute_text(pcmk__output_t *out, va_list args); --int pe__node_attribute_xml(pcmk__output_t *out, va_list args); --int pe__node_list_html(pcmk__output_t *out, va_list args); - int pe__node_list_text(pcmk__output_t *out, va_list args); --int pe__node_list_xml(pcmk__output_t *out, va_list args); - int pe__op_history_text(pcmk__output_t *out, va_list args); --int pe__op_history_xml(pcmk__output_t *out, va_list args); --int pe__resource_config(pcmk__output_t *out, va_list args); - int pe__resource_history_text(pcmk__output_t *out, va_list args); --int pe__resource_history_xml(pcmk__output_t *out, va_list args); --int pe__resource_xml(pcmk__output_t *out, va_list args); --int pe__resource_html(pcmk__output_t *out, va_list args); --int pe__resource_text(pcmk__output_t *out, va_list args); --int pe__resource_list(pcmk__output_t *out, va_list args); --int pe__ticket_html(pcmk__output_t *out, va_list args); - int pe__ticket_text(pcmk__output_t *out, va_list args); --int pe__ticket_xml(pcmk__output_t *out, va_list args); - - void native_free(pe_resource_t * rsc); - void group_free(pe_resource_t * rsc); -diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c -index e1f4830..f48fd55 100644 ---- a/lib/fencing/st_output.c -+++ b/lib/fencing/st_output.c -@@ -110,8 +110,8 @@ stonith__full_history(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("full-fencing-list", "crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean") --int --stonith__full_history_xml(pcmk__output_t *out, va_list args) { -+static int -+full_history_xml(pcmk__output_t *out, va_list 
args) { - crm_exit_t history_rc = va_arg(args, crm_exit_t); - stonith_history_t *history = va_arg(args, stonith_history_t *); - GList *only_node = va_arg(args, GList *); -@@ -147,8 +147,8 @@ stonith__full_history_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("last-fenced", "const char *", "time_t") --int --stonith__last_fenced_html(pcmk__output_t *out, va_list args) { -+static int -+last_fenced_html(pcmk__output_t *out, va_list args) { - const char *target = va_arg(args, const char *); - time_t when = va_arg(args, time_t); - -@@ -163,8 +163,8 @@ stonith__last_fenced_html(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("last-fenced", "const char *", "time_t") --int --stonith__last_fenced_text(pcmk__output_t *out, va_list args) { -+static int -+last_fenced_text(pcmk__output_t *out, va_list args) { - const char *target = va_arg(args, const char *); - time_t when = va_arg(args, time_t); - -@@ -178,8 +178,8 @@ stonith__last_fenced_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("last-fenced", "const char *", "time_t") --int --stonith__last_fenced_xml(pcmk__output_t *out, va_list args) { -+static int -+last_fenced_xml(pcmk__output_t *out, va_list args) { - const char *target = va_arg(args, const char *); - time_t when = va_arg(args, time_t); - -@@ -228,8 +228,8 @@ stonith__pending_actions(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean") --int --stonith__event_html(pcmk__output_t *out, va_list args) { -+static int -+stonith_event_html(pcmk__output_t *out, va_list args) { - stonith_history_t *event = va_arg(args, stonith_history_t *); - gboolean full_history = va_arg(args, gboolean); - gboolean later_succeeded = va_arg(args, gboolean); -@@ -276,8 +276,8 @@ stonith__event_html(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean") --int --stonith__event_text(pcmk__output_t *out, va_list args) { -+static int -+stonith_event_text(pcmk__output_t *out, va_list args) { - stonith_history_t *event = va_arg(args, stonith_history_t *); - gboolean full_history = va_arg(args, gboolean); - gboolean later_succeeded = va_arg(args, gboolean); -@@ -314,8 +314,8 @@ stonith__event_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean") --int --stonith__event_xml(pcmk__output_t *out, va_list args) { -+static int -+stonith_event_xml(pcmk__output_t *out, va_list args) { - stonith_history_t *event = va_arg(args, stonith_history_t *); - gboolean full_history G_GNUC_UNUSED = va_arg(args, gboolean); - gboolean later_succeeded G_GNUC_UNUSED = va_arg(args, gboolean); -@@ -362,8 +362,8 @@ stonith__event_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("validate", "const char *", "const char *", "char *", "char *", "int") --int --stonith__validate_agent_html(pcmk__output_t *out, va_list args) { -+static int -+validate_agent_html(pcmk__output_t *out, va_list args) { - const char *agent = va_arg(args, const char *); - const char *device = va_arg(args, const char *); - char *output = va_arg(args, char *); -@@ -387,8 +387,8 @@ stonith__validate_agent_html(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("validate", "const char *", "const char *", "char *", "char *", "int") --int --stonith__validate_agent_text(pcmk__output_t *out, va_list args) { -+static int -+validate_agent_text(pcmk__output_t *out, va_list args) { - 
const char *agent = va_arg(args, const char *); - const char *device = va_arg(args, const char *); - char *output = va_arg(args, char *); -@@ -408,8 +408,8 @@ stonith__validate_agent_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("validate", "const char *", "const char *", "char *", "char *", "int") --int --stonith__validate_agent_xml(pcmk__output_t *out, va_list args) { -+static int -+validate_agent_xml(pcmk__output_t *out, va_list args) { - const char *agent = va_arg(args, const char *); - const char *device = va_arg(args, const char *); - char *output = va_arg(args, char *); -@@ -436,20 +436,20 @@ static pcmk__message_entry_t fmt_functions[] = { - { "failed-fencing-list", "default", stonith__failed_history }, - { "fencing-list", "default", stonith__history }, - { "full-fencing-list", "default", stonith__full_history }, -- { "full-fencing-list", "xml", stonith__full_history_xml }, -- { "last-fenced", "html", stonith__last_fenced_html }, -- { "last-fenced", "log", stonith__last_fenced_text }, -- { "last-fenced", "text", stonith__last_fenced_text }, -- { "last-fenced", "xml", stonith__last_fenced_xml }, -+ { "full-fencing-list", "xml", full_history_xml }, -+ { "last-fenced", "html", last_fenced_html }, -+ { "last-fenced", "log", last_fenced_text }, -+ { "last-fenced", "text", last_fenced_text }, -+ { "last-fenced", "xml", last_fenced_xml }, - { "pending-fencing-list", "default", stonith__pending_actions }, -- { "stonith-event", "html", stonith__event_html }, -- { "stonith-event", "log", stonith__event_text }, -- { "stonith-event", "text", stonith__event_text }, -- { "stonith-event", "xml", stonith__event_xml }, -- { "validate", "html", stonith__validate_agent_html }, -- { "validate", "log", stonith__validate_agent_text }, -- { "validate", "text", stonith__validate_agent_text }, -- { "validate", "xml", stonith__validate_agent_xml }, -+ { "stonith-event", "html", stonith_event_html }, -+ { "stonith-event", "log", stonith_event_text }, -+ { "stonith-event", "text", stonith_event_text }, -+ { "stonith-event", "xml", stonith_event_xml }, -+ { "validate", "html", validate_agent_html }, -+ { "validate", "log", validate_agent_text }, -+ { "validate", "text", validate_agent_text }, -+ { "validate", "xml", validate_agent_xml }, - - { NULL, NULL, NULL } - }; -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index 1b57f64..5562eb6 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -273,8 +273,8 @@ pe__cluster_summary(pcmk__output_t *out, va_list args) { - - PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean", - "gboolean", "gboolean", "gboolean") --int --pe__cluster_summary_html(pcmk__output_t *out, va_list args) { -+static int -+cluster_summary_html(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - gboolean print_clone_detail = va_arg(args, gboolean); - gboolean show_stack = va_arg(args, gboolean); -@@ -427,8 +427,8 @@ pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name - } - - PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean") --int --pe__ban_html(pcmk__output_t *out, va_list args) { -+static int -+ban_html(pcmk__output_t *out, va_list args) { - pe_node_t *pe_node = va_arg(args, pe_node_t *); - pe__location_t *location = va_arg(args, pe__location_t *); - gboolean print_clone_detail = va_arg(args, gboolean); -@@ -464,8 +464,8 @@ pe__ban_text(pcmk__output_t *out, va_list args) { - } - - 
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean") --int --pe__ban_xml(pcmk__output_t *out, va_list args) { -+static int -+ban_xml(pcmk__output_t *out, va_list args) { - pe_node_t *pe_node = va_arg(args, pe_node_t *); - pe__location_t *location = va_arg(args, pe__location_t *); - gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean); -@@ -485,8 +485,8 @@ pe__ban_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") --int --pe__cluster_counts_html(pcmk__output_t *out, va_list args) { -+static int -+cluster_counts_html(pcmk__output_t *out, va_list args) { - unsigned int nnodes = va_arg(args, unsigned int); - int nresources = va_arg(args, int); - int ndisabled = va_arg(args, int); -@@ -581,8 +581,8 @@ pe__cluster_counts_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") --int --pe__cluster_counts_xml(pcmk__output_t *out, va_list args) { -+static int -+cluster_counts_xml(pcmk__output_t *out, va_list args) { - unsigned int nnodes = va_arg(args, unsigned int); - int nresources = va_arg(args, int); - int ndisabled = va_arg(args, int); -@@ -611,8 +611,8 @@ pe__cluster_counts_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *") --int --pe__cluster_dc_html(pcmk__output_t *out, va_list args) { -+static int -+cluster_dc_html(pcmk__output_t *out, va_list args) { - pe_node_t *dc = va_arg(args, pe_node_t *); - const char *quorum = va_arg(args, const char *); - const char *dc_version_s = va_arg(args, const char *); -@@ -664,8 +664,8 @@ pe__cluster_dc_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *") --int --pe__cluster_dc_xml(pcmk__output_t *out, va_list args) { -+static int -+cluster_dc_xml(pcmk__output_t *out, va_list args) { - pe_node_t *dc = va_arg(args, pe_node_t *); - const char *quorum = va_arg(args, const char *); - const char *dc_version_s = va_arg(args, const char *); -@@ -707,8 +707,8 @@ pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *") --int --pe__cluster_options_html(pcmk__output_t *out, va_list args) { -+static int -+cluster_options_html(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - - out->list_item(out, NULL, "STONITH of failed nodes %s", -@@ -762,8 +762,8 @@ pe__cluster_options_html(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *") --int --pe__cluster_options_log(pcmk__output_t *out, va_list args) { -+static int -+cluster_options_log(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - - if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) { -@@ -815,8 +815,8 @@ pe__cluster_options_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *") --int --pe__cluster_options_xml(pcmk__output_t *out, va_list args) { -+static int -+cluster_options_xml(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - - const char *no_quorum_policy = NULL; -@@ -854,8 +854,8 @@ pe__cluster_options_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-stack", "const char *") --int 
--pe__cluster_stack_html(pcmk__output_t *out, va_list args) { -+static int -+cluster_stack_html(pcmk__output_t *out, va_list args) { - const char *stack_s = va_arg(args, const char *); - - xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); -@@ -876,8 +876,8 @@ pe__cluster_stack_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-stack", "const char *") --int --pe__cluster_stack_xml(pcmk__output_t *out, va_list args) { -+static int -+cluster_stack_xml(pcmk__output_t *out, va_list args) { - const char *stack_s = va_arg(args, const char *); - - pcmk__output_create_xml_node(out, "stack", -@@ -888,8 +888,8 @@ pe__cluster_stack_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *") --int --pe__cluster_times_html(pcmk__output_t *out, va_list args) { -+static int -+cluster_times_html(pcmk__output_t *out, va_list args) { - const char *last_written = va_arg(args, const char *); - const char *user = va_arg(args, const char *); - const char *client = va_arg(args, const char *); -@@ -912,8 +912,8 @@ pe__cluster_times_html(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *") --int --pe__cluster_times_xml(pcmk__output_t *out, va_list args) { -+static int -+cluster_times_xml(pcmk__output_t *out, va_list args) { - const char *last_written = va_arg(args, const char *); - const char *user = va_arg(args, const char *); - const char *client = va_arg(args, const char *); -@@ -962,8 +962,8 @@ pe__failed_action_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr") --int --pe__failed_action_xml(pcmk__output_t *out, va_list args) { -+static int -+failed_action_xml(pcmk__output_t *out, va_list args) { - xmlNodePtr xml_op = va_arg(args, xmlNodePtr); - - const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); -@@ -1249,8 +1249,8 @@ pe__node_attribute_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int") --int --pe__node_attribute_html(pcmk__output_t *out, va_list args) { -+static int -+node_attribute_html(pcmk__output_t *out, va_list args) { - const char *name = va_arg(args, const char *); - const char *value = va_arg(args, const char *); - gboolean add_extra = va_arg(args, gboolean); -@@ -1279,8 +1279,8 @@ pe__node_attribute_html(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr") --int --pe__node_and_op(pcmk__output_t *out, va_list args) { -+static int -+node_and_op(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - xmlNodePtr xml_op = va_arg(args, xmlNodePtr); - -@@ -1333,8 +1333,8 @@ pe__node_and_op(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr") --int --pe__node_and_op_xml(pcmk__output_t *out, va_list args) { -+static int -+node_and_op_xml(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - xmlNodePtr xml_op = va_arg(args, xmlNodePtr); - -@@ -1381,8 +1381,8 @@ pe__node_and_op_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int") --int --pe__node_attribute_xml(pcmk__output_t *out, va_list args) { -+static int -+node_attribute_xml(pcmk__output_t 
*out, va_list args) { - const char *name = va_arg(args, const char *); - const char *value = va_arg(args, const char *); - gboolean add_extra = va_arg(args, gboolean); -@@ -1403,8 +1403,8 @@ pe__node_attribute_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "unsigned int", "gboolean", "gboolean", "gboolean") --int --pe__node_list_html(pcmk__output_t *out, va_list args) { -+static int -+node_list_html(pcmk__output_t *out, va_list args) { - GList *nodes = va_arg(args, GList *); - GList *only_node = va_arg(args, GList *); - GList *only_rsc = va_arg(args, GList *); -@@ -1572,8 +1572,8 @@ pe__node_list_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "unsigned int", "gboolean", "gboolean", "gboolean") --int --pe__node_list_xml(pcmk__output_t *out, va_list args) { -+static int -+node_list_xml(pcmk__output_t *out, va_list args) { - GList *nodes = va_arg(args, GList *); - GList *only_node = va_arg(args, GList *); - GList *only_rsc = va_arg(args, GList *); -@@ -1616,8 +1616,8 @@ pe__op_history_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "gboolean") --int --pe__op_history_xml(pcmk__output_t *out, va_list args) { -+static int -+op_history_xml(pcmk__output_t *out, va_list args) { - xmlNodePtr xml_op = va_arg(args, xmlNodePtr); - const char *task = va_arg(args, const char *); - const char *interval_ms_s = va_arg(args, const char *); -@@ -1676,7 +1676,8 @@ pe__op_history_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "gboolean") --int pe__resource_config(pcmk__output_t *out, va_list args) { -+static int -+resource_config(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gboolean raw = va_arg(args, gboolean); - -@@ -1718,8 +1719,8 @@ pe__resource_history_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean") --int --pe__resource_history_xml(pcmk__output_t *out, va_list args) { -+static int -+resource_history_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - const char *rsc_id = va_arg(args, const char *); - gboolean all = va_arg(args, gboolean); -@@ -1762,8 +1763,8 @@ pe__resource_history_xml(pcmk__output_t *out, va_list args) { - - PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "unsigned int", "gboolean", - "gboolean", "gboolean", "gboolean", "GList *", "GList *", "gboolean") --int --pe__resource_list(pcmk__output_t *out, va_list args) -+static int -+resource_list(pcmk__output_t *out, va_list args) - { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - unsigned int print_opts = va_arg(args, unsigned int); -@@ -1861,8 +1862,8 @@ pe__resource_list(pcmk__output_t *out, va_list args) - } - - PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *") --int --pe__ticket_html(pcmk__output_t *out, va_list args) { -+static int -+ticket_html(pcmk__output_t *out, va_list args) { - pe_ticket_t *ticket = va_arg(args, pe_ticket_t *); - - if (ticket->last_granted > -1) { -@@ -1903,8 +1904,8 @@ pe__ticket_text(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *") --int --pe__ticket_xml(pcmk__output_t *out, va_list args) { -+static int -+ticket_xml(pcmk__output_t *out, va_list args) { - pe_ticket_t 
*ticket = va_arg(args, pe_ticket_t *); - - xmlNodePtr node = NULL; -@@ -1923,10 +1924,10 @@ pe__ticket_xml(pcmk__output_t *out, va_list args) { - } - - static pcmk__message_entry_t fmt_functions[] = { -- { "ban", "html", pe__ban_html }, -+ { "ban", "html", ban_html }, - { "ban", "log", pe__ban_text }, - { "ban", "text", pe__ban_text }, -- { "ban", "xml", pe__ban_xml }, -+ { "ban", "xml", ban_xml }, - { "bundle", "xml", pe__bundle_xml }, - { "bundle", "html", pe__bundle_html }, - { "bundle", "text", pe__bundle_text }, -@@ -1935,30 +1936,30 @@ static pcmk__message_entry_t fmt_functions[] = { - { "clone", "html", pe__clone_html }, - { "clone", "text", pe__clone_text }, - { "clone", "log", pe__clone_text }, -- { "cluster-counts", "html", pe__cluster_counts_html }, -+ { "cluster-counts", "html", cluster_counts_html }, - { "cluster-counts", "log", pe__cluster_counts_text }, - { "cluster-counts", "text", pe__cluster_counts_text }, -- { "cluster-counts", "xml", pe__cluster_counts_xml }, -- { "cluster-dc", "html", pe__cluster_dc_html }, -+ { "cluster-counts", "xml", cluster_counts_xml }, -+ { "cluster-dc", "html", cluster_dc_html }, - { "cluster-dc", "log", pe__cluster_dc_text }, - { "cluster-dc", "text", pe__cluster_dc_text }, -- { "cluster-dc", "xml", pe__cluster_dc_xml }, -- { "cluster-options", "html", pe__cluster_options_html }, -- { "cluster-options", "log", pe__cluster_options_log }, -+ { "cluster-dc", "xml", cluster_dc_xml }, -+ { "cluster-options", "html", cluster_options_html }, -+ { "cluster-options", "log", cluster_options_log }, - { "cluster-options", "text", pe__cluster_options_text }, -- { "cluster-options", "xml", pe__cluster_options_xml }, -+ { "cluster-options", "xml", cluster_options_xml }, - { "cluster-summary", "default", pe__cluster_summary }, -- { "cluster-summary", "html", pe__cluster_summary_html }, -- { "cluster-stack", "html", pe__cluster_stack_html }, -+ { "cluster-summary", "html", cluster_summary_html }, -+ { "cluster-stack", "html", cluster_stack_html }, - { "cluster-stack", "log", pe__cluster_stack_text }, - { "cluster-stack", "text", pe__cluster_stack_text }, -- { "cluster-stack", "xml", pe__cluster_stack_xml }, -- { "cluster-times", "html", pe__cluster_times_html }, -+ { "cluster-stack", "xml", cluster_stack_xml }, -+ { "cluster-times", "html", cluster_times_html }, - { "cluster-times", "log", pe__cluster_times_text }, - { "cluster-times", "text", pe__cluster_times_text }, -- { "cluster-times", "xml", pe__cluster_times_xml }, -+ { "cluster-times", "xml", cluster_times_xml }, - { "failed-action", "default", pe__failed_action_text }, -- { "failed-action", "xml", pe__failed_action_xml }, -+ { "failed-action", "xml", failed_action_xml }, - { "group", "xml", pe__group_xml }, - { "group", "html", pe__group_html }, - { "group", "text", pe__group_text }, -@@ -1968,30 +1969,30 @@ static pcmk__message_entry_t fmt_functions[] = { - { "node", "log", pe__node_text }, - { "node", "text", pe__node_text }, - { "node", "xml", pe__node_xml }, -- { "node-and-op", "default", pe__node_and_op }, -- { "node-and-op", "xml", pe__node_and_op_xml }, -- { "node-list", "html", pe__node_list_html }, -+ { "node-and-op", "default", node_and_op }, -+ { "node-and-op", "xml", node_and_op_xml }, -+ { "node-list", "html", node_list_html }, - { "node-list", "log", pe__node_list_text }, - { "node-list", "text", pe__node_list_text }, -- { "node-list", "xml", pe__node_list_xml }, -- { "node-attribute", "html", pe__node_attribute_html }, -+ { "node-list", "xml", node_list_xml }, -+ { "node-attribute", 
"html", node_attribute_html }, - { "node-attribute", "log", pe__node_attribute_text }, - { "node-attribute", "text", pe__node_attribute_text }, -- { "node-attribute", "xml", pe__node_attribute_xml }, -+ { "node-attribute", "xml", node_attribute_xml }, - { "op-history", "default", pe__op_history_text }, -- { "op-history", "xml", pe__op_history_xml }, -+ { "op-history", "xml", op_history_xml }, - { "primitive", "xml", pe__resource_xml }, - { "primitive", "html", pe__resource_html }, - { "primitive", "text", pe__resource_text }, - { "primitive", "log", pe__resource_text }, -- { "resource-config", "default", pe__resource_config }, -+ { "resource-config", "default", resource_config }, - { "resource-history", "default", pe__resource_history_text }, -- { "resource-history", "xml", pe__resource_history_xml }, -- { "resource-list", "default", pe__resource_list }, -- { "ticket", "html", pe__ticket_html }, -+ { "resource-history", "xml", resource_history_xml }, -+ { "resource-list", "default", resource_list }, -+ { "ticket", "html", ticket_html }, - { "ticket", "log", pe__ticket_text }, - { "ticket", "text", pe__ticket_text }, -- { "ticket", "xml", pe__ticket_xml }, -+ { "ticket", "xml", ticket_xml }, - - { NULL, NULL, NULL } - }; --- -1.8.3.1 - diff --git a/SOURCES/013-leaks.patch b/SOURCES/013-leaks.patch new file mode 100644 index 0000000..daa42b8 --- /dev/null +++ b/SOURCES/013-leaks.patch @@ -0,0 +1,241 @@ +From bee54eba4d9c28d3a7907a3e13a5deeee6bc0916 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 27 Jul 2021 11:01:04 -0500 +Subject: [PATCH 1/2] Low: tools: avoid (insignificant) memory leaks + +detected by valgrind +--- + lib/pacemaker/pcmk_cluster_queries.c | 2 ++ + tools/crm_diff.c | 2 +- + tools/crm_resource.c | 33 ++++++++++++++++++++------------- + tools/crm_resource_ban.c | 2 +- + 4 files changed, 24 insertions(+), 15 deletions(-) + +diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c +index c68cf9d..46e5538 100644 +--- a/lib/pacemaker/pcmk_cluster_queries.c ++++ b/lib/pacemaker/pcmk_cluster_queries.c +@@ -440,6 +440,7 @@ pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT) + } + rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); + if (rc != pcmk_ok) { ++ cib_delete(the_cib); + return pcmk_legacy2rc(rc); + } + +@@ -488,6 +489,7 @@ pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT) + free_xml(xml_node); + } + the_cib->cmds->signoff(the_cib); ++ cib_delete(the_cib); + return pcmk_legacy2rc(rc); + } + +diff --git a/tools/crm_diff.c b/tools/crm_diff.c +index b37f0ea..9890c10 100644 +--- a/tools/crm_diff.c ++++ b/tools/crm_diff.c +@@ -383,5 +383,5 @@ done: + free_xml(object_2); + + pcmk__output_and_clear_error(error, NULL); +- return exit_code; ++ crm_exit(exit_code); + } +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index d8e140f..8ca90cb 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -1081,6 +1081,8 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) + g_set_error(&error, PCMK__RC_ERROR, rc, + "Could not get modified CIB: %s\n", pcmk_strerror(rc)); + g_list_free(before); ++ free_xml(*cib_xml_copy); ++ *cib_xml_copy = NULL; + return rc; + } + +@@ -1232,29 +1234,34 @@ populate_working_set(xmlNodePtr *cib_xml_copy) + + if (options.xml_file != NULL) { + *cib_xml_copy = filename2xml(options.xml_file); ++ if (*cib_xml_copy == NULL) { ++ rc = pcmk_rc_cib_corrupt; ++ } + } else { + rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, 
cib_scope_local | cib_sync_call); + rc = pcmk_legacy2rc(rc); + } + +- if(rc != pcmk_rc_ok) { +- return rc; ++ if (rc == pcmk_rc_ok) { ++ data_set = pe_new_working_set(); ++ if (data_set == NULL) { ++ rc = ENOMEM; ++ } else { ++ pe__set_working_set_flags(data_set, ++ pe_flag_no_counts|pe_flag_no_compat); ++ data_set->priv = out; ++ rc = update_working_set_xml(data_set, cib_xml_copy); ++ } + } + +- /* Populate the working set instance */ +- data_set = pe_new_working_set(); +- if (data_set == NULL) { +- rc = ENOMEM; ++ if (rc != pcmk_rc_ok) { ++ free_xml(*cib_xml_copy); ++ *cib_xml_copy = NULL; + return rc; + } + +- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); +- data_set->priv = out; +- rc = update_working_set_xml(data_set, cib_xml_copy); +- if (rc == pcmk_rc_ok) { +- cluster_status(data_set); +- } +- return rc; ++ cluster_status(data_set); ++ return pcmk_rc_ok; + } + + static int +diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c +index a297d49..2c4f48d 100644 +--- a/tools/crm_resource_ban.c ++++ b/tools/crm_resource_ban.c +@@ -292,7 +292,7 @@ resource_clear_node_in_location(const char *rsc_id, const char *host, cib_t * ci + rc = pcmk_legacy2rc(rc); + } + +- free(fragment); ++ free_xml(fragment); + return rc; + } + +-- +1.8.3.1 + + +From a30ff4a87f291a0c9e03c4efb9c9046d2ac594f1 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 27 Jul 2021 11:26:59 -0500 +Subject: [PATCH 2/2] Fix: tools: avoid memory leaks in crm_mon + +could be significant in an interactive session + +regressions introduced in 2.0.4 and 2.0.5 +--- + lib/pengine/bundle.c | 3 ++- + lib/pengine/clone.c | 5 ++--- + lib/pengine/pe_output.c | 3 +++ + 3 files changed, 7 insertions(+), 4 deletions(-) + +diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c +index 6ba786a..7e1d428 100644 +--- a/lib/pengine/bundle.c ++++ b/lib/pengine/bundle.c +@@ -1497,7 +1497,7 @@ pe__bundle_xml(pcmk__output_t *out, va_list args) + for (GList *gIter = bundle_data->replicas; gIter != NULL; + gIter = gIter->next) { + pe__bundle_replica_t *replica = gIter->data; +- char *id = pcmk__itoa(replica->offset); ++ char *id = NULL; + gboolean print_ip, print_child, print_ctnr, print_remote; + + CRM_ASSERT(replica); +@@ -1531,6 +1531,7 @@ pe__bundle_xml(pcmk__output_t *out, va_list args) + CRM_ASSERT(rc == pcmk_rc_ok); + } + ++ id = pcmk__itoa(replica->offset); + rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id); + free(id); + CRM_ASSERT(rc == pcmk_rc_ok); +diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c +index 6323692..ab91fd1 100644 +--- a/lib/pengine/clone.c ++++ b/lib/pengine/clone.c +@@ -807,10 +807,10 @@ pe__clone_html(pcmk__output_t *out, va_list args) + pcmk__add_word(&list_text, &list_text_len, host->details->uname); + active_instances++; + } ++ g_list_free(promoted_list); + + if (list_text != NULL) { + out->list_item(out, NULL, PROMOTED_INSTANCES ": [ %s ]", list_text); +- g_list_free(promoted_list); + free(list_text); + list_text = NULL; + list_text_len = 0; +@@ -828,6 +828,7 @@ pe__clone_html(pcmk__output_t *out, va_list args) + pcmk__add_word(&list_text, &list_text_len, host->details->uname); + active_instances++; + } ++ g_list_free(started_list); + + if (list_text != NULL) { + if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { +@@ -847,7 +848,6 @@ pe__clone_html(pcmk__output_t *out, va_list args) + out->list_item(out, NULL, "Started: [ %s ]", list_text); + } + +- g_list_free(started_list); + free(list_text); + list_text = NULL; + list_text_len = 0; +@@ -1048,10 +1048,10 @@ 
pe__clone_text(pcmk__output_t *out, va_list args) + pcmk__add_word(&list_text, &list_text_len, host->details->uname); + active_instances++; + } ++ g_list_free(promoted_list); + + if (list_text != NULL) { + out->list_item(out, PROMOTED_INSTANCES, "[ %s ]", list_text); +- g_list_free(promoted_list); + free(list_text); + list_text = NULL; + list_text_len = 0; +@@ -1069,6 +1069,7 @@ pe__clone_text(pcmk__output_t *out, va_list args) + pcmk__add_word(&list_text, &list_text_len, host->details->uname); + active_instances++; + } ++ g_list_free(started_list); + + if (list_text != NULL) { + if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { +@@ -1084,7 +1085,6 @@ pe__clone_text(pcmk__output_t *out, va_list args) + out->list_item(out, "Started", "[ %s ]", list_text); + } + +- g_list_free(started_list); + free(list_text); + list_text = NULL; + } +diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c +index b8997c4..20bd1a9 100644 +--- a/lib/pengine/pe_output.c ++++ b/lib/pengine/pe_output.c +@@ -1410,6 +1410,8 @@ node_text(pcmk__output_t *out, va_list args) { + + out->end_list(out); + out->end_list(out); ++ ++ g_list_free(rscs); + } + + } else { +@@ -1739,6 +1741,7 @@ node_attribute_list(pcmk__output_t *out, va_list args) { + } + + if (!pcmk__str_in_list(only_node, node->details->uname)) { ++ g_list_free(attr_list); + continue; + } + +-- +1.8.3.1 + diff --git a/SOURCES/014-feature-set.patch b/SOURCES/014-feature-set.patch deleted file mode 100644 index 6964ca0..0000000 --- a/SOURCES/014-feature-set.patch +++ /dev/null @@ -1,285 +0,0 @@ -From 0700a4814a598d0e2e9bd54f970c6d3ff66184df Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 24 Nov 2020 16:17:34 +0100 -Subject: [PATCH 1/2] Refactor: move stonith__register_messages() call from - pcmk__out_prologue() to the calling functions - ---- - lib/pacemaker/pcmk_fence.c | 14 ++++++++++++++ - lib/pacemaker/pcmk_output.c | 1 - - 2 files changed, 14 insertions(+), 1 deletion(-) - -diff --git a/lib/pacemaker/pcmk_fence.c b/lib/pacemaker/pcmk_fence.c -index 7beedff..d591379 100644 ---- a/lib/pacemaker/pcmk_fence.c -+++ b/lib/pacemaker/pcmk_fence.c -@@ -247,6 +247,8 @@ pcmk_fence_history(xmlNodePtr *xml, stonith_t *st, char *target, unsigned int ti - return rc; - } - -+ stonith__register_messages(out); -+ - out->quiet = quiet; - - rc = pcmk__fence_history(out, st, target, timeout, verbose, broadcast, cleanup); -@@ -287,6 +289,8 @@ pcmk_fence_installed(xmlNodePtr *xml, stonith_t *st, unsigned int timeout) { - return rc; - } - -+ stonith__register_messages(out); -+ - rc = pcmk__fence_installed(out, st, timeout); - pcmk__out_epilogue(out, xml, rc); - return rc; -@@ -321,6 +325,8 @@ pcmk_fence_last(xmlNodePtr *xml, const char *target, bool as_nodeid) { - return rc; - } - -+ stonith__register_messages(out); -+ - rc = pcmk__fence_last(out, target, as_nodeid); - pcmk__out_epilogue(out, xml, rc); - return rc; -@@ -364,6 +370,8 @@ pcmk_fence_list_targets(xmlNodePtr *xml, stonith_t *st, const char *device_id, - return rc; - } - -+ stonith__register_messages(out); -+ - rc = pcmk__fence_list_targets(out, st, device_id, timeout); - pcmk__out_epilogue(out, xml, rc); - return rc; -@@ -398,6 +406,8 @@ pcmk_fence_metadata(xmlNodePtr *xml, stonith_t *st, char *agent, - return rc; - } - -+ stonith__register_messages(out); -+ - rc = pcmk__fence_metadata(out, st, agent, timeout); - pcmk__out_epilogue(out, xml, rc); - return rc; -@@ -442,6 +452,8 @@ pcmk_fence_registered(xmlNodePtr *xml, stonith_t *st, char *target, - return rc; - } - -+ 
stonith__register_messages(out); -+ - rc = pcmk__fence_registered(out, st, target, timeout); - pcmk__out_epilogue(out, xml, rc); - return rc; -@@ -501,6 +513,8 @@ pcmk_fence_validate(xmlNodePtr *xml, stonith_t *st, const char *agent, - return rc; - } - -+ stonith__register_messages(out); -+ - rc = pcmk__fence_validate(out, st, agent, id, params, timeout); - pcmk__out_epilogue(out, xml, rc); - return rc; -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 74a7c59..a637031 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -34,7 +34,6 @@ pcmk__out_prologue(pcmk__output_t **out, xmlNodePtr *xml) { - return rc; - } - -- stonith__register_messages(*out); - return rc; - } - --- -1.8.3.1 - - -From 27677a6d03ba42aeb0d6a971df9d9b8861232903 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 24 Nov 2020 16:19:59 +0100 -Subject: [PATCH 2/2] API: libpacemaker: add public API functions for cluster - queries - ---- - include/pacemaker.h | 45 +++++++++++++++++++++- - lib/pacemaker/pcmk_cluster_queries.c | 75 ++++++++++++++++++++++++++++++++++++ - 2 files changed, 118 insertions(+), 2 deletions(-) - -diff --git a/include/pacemaker.h b/include/pacemaker.h -index a1e76d0..b2a73cd 100644 ---- a/include/pacemaker.h -+++ b/include/pacemaker.h -@@ -14,8 +14,6 @@ - extern "C" { - #endif - --#ifdef BUILD_PUBLIC_LIBPACEMAKER -- - /** - * \file - * \brief High Level API -@@ -26,6 +24,49 @@ extern "C" { - # include - - /*! -+ * \brief Get controller status -+ * -+ * \param[in,out] xml The destination for the result, as an XML tree. -+ * \param[in] dest_node Destination node for request -+ * \param[in] message_timeout_ms Message timeout -+ * -+ * \return Standard Pacemaker return code -+ */ -+int pcmk_controller_status(xmlNodePtr *xml, char *dest_node, unsigned int message_timeout_ms); -+ -+/*! -+ * \brief Get designated controller -+ * -+ * \param[in,out] xml The destination for the result, as an XML tree. -+ * \param[in] message_timeout_ms Message timeout -+ * -+ * \return Standard Pacemaker return code -+ */ -+int pcmk_designated_controller(xmlNodePtr *xml, unsigned int message_timeout_ms); -+ -+/*! -+ * \brief Get pacemakerd status -+ * -+ * \param[in,out] xml The destination for the result, as an XML tree. -+ * \param[in] ipc_name IPC name for request -+ * \param[in] message_timeout_ms Message timeout -+ * -+ * \return Standard Pacemaker return code -+ */ -+int pcmk_pacemakerd_status(xmlNodePtr *xml, char *ipc_name, unsigned int message_timeout_ms); -+ -+#ifdef BUILD_PUBLIC_LIBPACEMAKER -+ -+/*! -+ * \brief Get nodes list -+ * -+ * \param[in,out] xml The destination for the result, as an XML tree. -+ * -+ * \return Standard Pacemaker return code -+ */ -+int pcmk_list_nodes(xmlNodePtr *xml); -+ -+/*! - * \brief Perform a STONITH action. - * - * \param[in] st A connection to the STONITH API. -diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c -index 8d729eb..c705b7f 100644 ---- a/lib/pacemaker/pcmk_cluster_queries.c -+++ b/lib/pacemaker/pcmk_cluster_queries.c -@@ -1,6 +1,7 @@ - #include // gboolean, GMainLoop, etc. 
- #include // xmlNode - -+#include - #include - - #include -@@ -282,6 +283,24 @@ pcmk__controller_status(pcmk__output_t *out, char *dest_node, guint message_time - } - - int -+pcmk_controller_status(xmlNodePtr *xml, char *dest_node, unsigned int message_timeout_ms) -+{ -+ pcmk__output_t *out = NULL; -+ int rc = pcmk_rc_ok; -+ -+ rc = pcmk__out_prologue(&out, xml); -+ if (rc != pcmk_rc_ok) { -+ return rc; -+ } -+ -+ pcmk__register_lib_messages(out); -+ -+ rc = pcmk__controller_status(out, dest_node, (guint) message_timeout_ms); -+ pcmk__out_epilogue(out, xml, rc); -+ return rc; -+} -+ -+int - pcmk__designated_controller(pcmk__output_t *out, guint message_timeout_ms) - { - data_t data = { -@@ -309,6 +328,24 @@ pcmk__designated_controller(pcmk__output_t *out, guint message_timeout_ms) - } - - int -+pcmk_designated_controller(xmlNodePtr *xml, unsigned int message_timeout_ms) -+{ -+ pcmk__output_t *out = NULL; -+ int rc = pcmk_rc_ok; -+ -+ rc = pcmk__out_prologue(&out, xml); -+ if (rc != pcmk_rc_ok) { -+ return rc; -+ } -+ -+ pcmk__register_lib_messages(out); -+ -+ rc = pcmk__designated_controller(out, (guint) message_timeout_ms); -+ pcmk__out_epilogue(out, xml, rc); -+ return rc; -+} -+ -+int - pcmk__pacemakerd_status(pcmk__output_t *out, char *ipc_name, guint message_timeout_ms) - { - data_t data = { -@@ -335,6 +372,24 @@ pcmk__pacemakerd_status(pcmk__output_t *out, char *ipc_name, guint message_timeo - return data.rc; - } - -+int -+pcmk_pacemakerd_status(xmlNodePtr *xml, char *ipc_name, unsigned int message_timeout_ms) -+{ -+ pcmk__output_t *out = NULL; -+ int rc = pcmk_rc_ok; -+ -+ rc = pcmk__out_prologue(&out, xml); -+ if (rc != pcmk_rc_ok) { -+ return rc; -+ } -+ -+ pcmk__register_lib_messages(out); -+ -+ rc = pcmk__pacemakerd_status(out, ipc_name, (guint) message_timeout_ms); -+ pcmk__out_epilogue(out, xml, rc); -+ return rc; -+} -+ - // \return Standard Pacemaker return code - int - pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT) -@@ -361,6 +416,26 @@ pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT) - return pcmk_legacy2rc(rc); - } - -+#ifdef BUILD_PUBLIC_LIBPACEMAKER -+int -+pcmk_list_nodes(xmlNodePtr *xml) -+{ -+ pcmk__output_t *out = NULL; -+ int rc = pcmk_rc_ok; -+ -+ rc = pcmk__out_prologue(&out, xml); -+ if (rc != pcmk_rc_ok) { -+ return rc; -+ } -+ -+ pcmk__register_lib_messages(out); -+ -+ rc = pcmk__list_nodes(out, FALSE); -+ pcmk__out_epilogue(out, xml, rc); -+ return rc; -+} -+#endif -+ - // remove when parameters removed from tools/crmadmin.c - int - pcmk__shutdown_controller(pcmk__output_t *out, char *dest_node) --- -1.8.3.1 - diff --git a/SOURCES/014-str-list.patch b/SOURCES/014-str-list.patch new file mode 100644 index 0000000..e6993ab --- /dev/null +++ b/SOURCES/014-str-list.patch @@ -0,0 +1,465 @@ +From 45813df3eb4c8ad8b1744fa5dd56af86ad0fb3dd Mon Sep 17 00:00:00 2001 +From: Chris Lumens +Date: Thu, 17 Jun 2021 16:07:55 -0400 +Subject: [PATCH] Refactor: libs: pcmk__str_in_list should support pcmk__str_* + flags. 
+ +--- + include/crm/common/strings_internal.h | 2 +- + lib/common/strings.c | 34 +++++++++++++++++++++++---- + lib/fencing/st_output.c | 10 ++++---- + lib/pengine/bundle.c | 8 +++---- + lib/pengine/clone.c | 28 +++++++++++----------- + lib/pengine/group.c | 18 +++++++------- + lib/pengine/native.c | 4 ++-- + lib/pengine/pe_output.c | 22 ++++++++--------- + lib/pengine/utils.c | 6 ++--- + 9 files changed, 79 insertions(+), 53 deletions(-) + +diff --git a/include/crm/common/strings_internal.h b/include/crm/common/strings_internal.h +index 94982cb4e..687079814 100644 +--- a/include/crm/common/strings_internal.h ++++ b/include/crm/common/strings_internal.h +@@ -117,7 +117,7 @@ pcmk__intkey_table_remove(GHashTable *hash_table, int key) + return g_hash_table_remove(hash_table, GINT_TO_POINTER(key)); + } + +-gboolean pcmk__str_in_list(GList *lst, const gchar *s); ++gboolean pcmk__str_in_list(GList *lst, const gchar *s, uint32_t flags); + + bool pcmk__strcase_any_of(const char *s, ...) G_GNUC_NULL_TERMINATED; + bool pcmk__str_any_of(const char *s, ...) G_GNUC_NULL_TERMINATED; +diff --git a/lib/common/strings.c b/lib/common/strings.c +index 3264db5b6..e1e98803b 100644 +--- a/lib/common/strings.c ++++ b/lib/common/strings.c +@@ -872,14 +872,30 @@ pcmk__parse_ll_range(const char *srcstring, long long *start, long long *end) + * Search \p lst for \p s, taking case into account. As a special case, + * if "*" is the only element of \p lst, the search is successful. + * +- * \param[in] lst List to search +- * \param[in] s String to search for ++ * Behavior can be changed with various flags: ++ * ++ * - pcmk__str_casei - By default, comparisons are done taking case into ++ * account. This flag makes comparisons case-insensitive. ++ * - pcmk__str_null_matches - If the input string is NULL, return TRUE. ++ * ++ * \note The special "*" matching rule takes precedence over flags. In ++ * particular, "*" will match a NULL input string even without ++ * pcmk__str_null_matches being specified. ++ * ++ * \note No matter what input string or flags are provided, an empty ++ * list will always return FALSE. 
++ * ++ * \param[in] lst List to search ++ * \param[in] s String to search for ++ * \param[in] flags A bitfield of pcmk__str_flags to modify operation + * + * \return \c TRUE if \p s is in \p lst, or \c FALSE otherwise + */ + gboolean +-pcmk__str_in_list(GList *lst, const gchar *s) ++pcmk__str_in_list(GList *lst, const gchar *s, uint32_t flags) + { ++ GCompareFunc fn; ++ + if (lst == NULL) { + return FALSE; + } +@@ -888,7 +904,17 @@ pcmk__str_in_list(GList *lst, const gchar *s) + return TRUE; + } + +- return g_list_find_custom(lst, s, (GCompareFunc) strcmp) != NULL; ++ if (s == NULL) { ++ return pcmk_is_set(flags, pcmk__str_null_matches); ++ } ++ ++ if (pcmk_is_set(flags, pcmk__str_casei)) { ++ fn = (GCompareFunc) strcasecmp; ++ } else { ++ fn = (GCompareFunc) strcmp; ++ } ++ ++ return g_list_find_custom(lst, s, fn) != NULL; + } + + static bool +diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c +index 568ae46a8..e1ae8ac87 100644 +--- a/lib/fencing/st_output.c ++++ b/lib/fencing/st_output.c +@@ -47,7 +47,7 @@ stonith__failed_history(pcmk__output_t *out, va_list args) { + continue; + } + +- if (!pcmk__str_in_list(only_node, hp->target)) { ++ if (!pcmk__str_in_list(only_node, hp->target, pcmk__str_none)) { + continue; + } + +@@ -72,7 +72,7 @@ stonith__history(pcmk__output_t *out, va_list args) { + int rc = pcmk_rc_no_output; + + for (stonith_history_t *hp = history; hp; hp = hp->next) { +- if (!pcmk__str_in_list(only_node, hp->target)) { ++ if (!pcmk__str_in_list(only_node, hp->target, pcmk__str_none)) { + continue; + } + +@@ -101,7 +101,7 @@ stonith__full_history(pcmk__output_t *out, va_list args) { + int rc = pcmk_rc_no_output; + + for (stonith_history_t *hp = history; hp; hp = hp->next) { +- if (!pcmk__str_in_list(only_node, hp->target)) { ++ if (!pcmk__str_in_list(only_node, hp->target, pcmk__str_none)) { + continue; + } + +@@ -129,7 +129,7 @@ full_history_xml(pcmk__output_t *out, va_list args) { + + if (history_rc == 0) { + for (stonith_history_t *hp = history; hp; hp = hp->next) { +- if (!pcmk__str_in_list(only_node, hp->target)) { ++ if (!pcmk__str_in_list(only_node, hp->target, pcmk__str_none)) { + continue; + } + +@@ -218,7 +218,7 @@ stonith__pending_actions(pcmk__output_t *out, va_list args) { + int rc = pcmk_rc_no_output; + + for (stonith_history_t *hp = history; hp; hp = hp->next) { +- if (!pcmk__str_in_list(only_node, hp->target)) { ++ if (!pcmk__str_in_list(only_node, hp->target, pcmk__str_none)) { + continue; + } + +diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c +index 9237392e4..6ba786ae6 100644 +--- a/lib/pengine/bundle.c ++++ b/lib/pengine/bundle.c +@@ -1492,7 +1492,7 @@ pe__bundle_xml(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc->id); ++ print_everything = pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none); + + for (GList *gIter = bundle_data->replicas; gIter != NULL; + gIter = gIter->next) { +@@ -1614,7 +1614,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc->id); ++ print_everything = pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none); + + for (GList *gIter = bundle_data->replicas; gIter != NULL; + gIter = gIter->next) { +@@ -1742,7 +1742,7 @@ pe__bundle_text(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc->id); ++ print_everything = pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none); + + for (GList *gIter = bundle_data->replicas; gIter != NULL; 
+ gIter = gIter->next) { +@@ -2044,7 +2044,7 @@ pe__bundle_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_paren + gboolean passes = FALSE; + pe__bundle_variant_data_t *bundle_data = NULL; + +- if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) { ++ if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none)) { + passes = TRUE; + } else { + get_bundle_variant_data(bundle_data, rsc); +diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c +index 5662338f3..5a6bfa61f 100644 +--- a/lib/pengine/clone.c ++++ b/lib/pengine/clone.c +@@ -624,8 +624,8 @@ pe__clone_xml(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) || +- (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)); ++ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || ++ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)); + + for (; gIter != NULL; gIter = gIter->next) { + pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; +@@ -693,8 +693,8 @@ pe__clone_html(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) || +- (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)); ++ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || ++ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)); + + out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s%s", + rsc->id, ID(clone_data->xml_obj_child), +@@ -801,7 +801,7 @@ pe__clone_html(pcmk__output_t *out, va_list args) + for (gIter = promoted_list; gIter; gIter = gIter->next) { + pe_node_t *host = gIter->data; + +- if (!pcmk__str_in_list(only_node, host->details->uname)) { ++ if (!pcmk__str_in_list(only_node, host->details->uname, pcmk__str_none)) { + continue; + } + +@@ -822,7 +822,7 @@ pe__clone_html(pcmk__output_t *out, va_list args) + for (gIter = started_list; gIter; gIter = gIter->next) { + pe_node_t *host = gIter->data; + +- if (!pcmk__str_in_list(only_node, host->details->uname)) { ++ if (!pcmk__str_in_list(only_node, host->details->uname, pcmk__str_none)) { + continue; + } + +@@ -884,7 +884,7 @@ pe__clone_html(pcmk__output_t *out, va_list args) + pe_node_t *node = (pe_node_t *)nIter->data; + + if (pe_find_node(rsc->running_on, node->details->uname) == NULL && +- pcmk__str_in_list(only_node, node->details->uname)) { ++ pcmk__str_in_list(only_node, node->details->uname, pcmk__str_none)) { + pcmk__add_word(&stopped_list, &stopped_list_len, + node->details->uname); + } +@@ -933,8 +933,8 @@ pe__clone_text(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) || +- (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)); ++ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || ++ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)); + + out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s%s", + rsc->id, ID(clone_data->xml_obj_child), +@@ -1041,7 +1041,7 @@ pe__clone_text(pcmk__output_t *out, va_list args) + for (gIter = promoted_list; gIter; gIter = gIter->next) { + pe_node_t *host = gIter->data; + +- if (!pcmk__str_in_list(only_node, host->details->uname)) { ++ if (!pcmk__str_in_list(only_node, host->details->uname, pcmk__str_none)) { + continue; + } + +@@ -1062,7 +1062,7 @@ 
pe__clone_text(pcmk__output_t *out, va_list args) + for (gIter = started_list; gIter; gIter = gIter->next) { + pe_node_t *host = gIter->data; + +- if (!pcmk__str_in_list(only_node, host->details->uname)) { ++ if (!pcmk__str_in_list(only_node, host->details->uname, pcmk__str_none)) { + continue; + } + +@@ -1120,7 +1120,7 @@ pe__clone_text(pcmk__output_t *out, va_list args) + pe_node_t *node = (pe_node_t *)nIter->data; + + if (pe_find_node(rsc->running_on, node->details->uname) == NULL && +- pcmk__str_in_list(only_node, node->details->uname)) { ++ pcmk__str_in_list(only_node, node->details->uname, pcmk__str_none)) { + pcmk__add_word(&stopped_list, &stopped_list_len, + node->details->uname); + } +@@ -1220,11 +1220,11 @@ pe__clone_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent + gboolean passes = FALSE; + clone_variant_data_t *clone_data = NULL; + +- if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) { ++ if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none)) { + passes = TRUE; + } else { + get_clone_variant_data(clone_data, rsc); +- passes = pcmk__str_in_list(only_rsc, ID(clone_data->xml_obj_child)); ++ passes = pcmk__str_in_list(only_rsc, ID(clone_data->xml_obj_child), pcmk__str_none); + + if (!passes) { + for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { +diff --git a/lib/pengine/group.c b/lib/pengine/group.c +index 23a72cff7..5f9aa83ce 100644 +--- a/lib/pengine/group.c ++++ b/lib/pengine/group.c +@@ -201,8 +201,8 @@ pe__group_xml(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) || +- (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)); ++ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || ++ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)); + + for (; gIter != NULL; gIter = gIter->next) { + pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; +@@ -248,8 +248,8 @@ pe__group_html(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) || +- (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)); ++ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || ++ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)); + + if (options & pe_print_brief) { + GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc); +@@ -303,8 +303,8 @@ pe__group_text(pcmk__output_t *out, va_list args) + return rc; + } + +- print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) || +- (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)); ++ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || ++ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)); + + if (options & pe_print_brief) { + GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc); +@@ -387,11 +387,11 @@ pe__group_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent + { + gboolean passes = FALSE; + +- if (check_parent && pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) { ++ if (check_parent && pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)), pcmk__str_none)) { + passes = TRUE; +- } else if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) { ++ } else if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), 
pcmk__str_none)) { + passes = TRUE; +- } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)) { ++ } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)) { + passes = TRUE; + } else { + for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { +diff --git a/lib/pengine/native.c b/lib/pengine/native.c +index c2333d0d2..56054fc4a 100644 +--- a/lib/pengine/native.c ++++ b/lib/pengine/native.c +@@ -1338,8 +1338,8 @@ pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int show_op + gboolean + pe__native_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent) + { +- if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) || +- pcmk__str_in_list(only_rsc, rsc->id)) { ++ if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || ++ pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)) { + return FALSE; + } else if (check_parent) { + pe_resource_t *up = uber_parent(rsc); +diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c +index 727475735..a6dc4ade8 100644 +--- a/lib/pengine/pe_output.c ++++ b/lib/pengine/pe_output.c +@@ -670,8 +670,8 @@ ban_list(pcmk__output_t *out, va_list args) { + continue; + } + +- if (!pcmk__str_in_list(only_rsc, rsc_printable_id(location->rsc_lh)) && +- !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(location->rsc_lh)))) { ++ if (!pcmk__str_in_list(only_rsc, rsc_printable_id(location->rsc_lh), pcmk__str_none) && ++ !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(location->rsc_lh)), pcmk__str_none)) { + continue; + } + +@@ -1254,7 +1254,7 @@ failed_action_list(pcmk__output_t *out, va_list args) { + xml_op = pcmk__xml_next(xml_op)) { + char *rsc = NULL; + +- if (!pcmk__str_in_list(only_node, crm_element_value(xml_op, XML_ATTR_UNAME))) { ++ if (!pcmk__str_in_list(only_node, crm_element_value(xml_op, XML_ATTR_UNAME), pcmk__str_none)) { + continue; + } + +@@ -1263,7 +1263,7 @@ failed_action_list(pcmk__output_t *out, va_list args) { + continue; + } + +- if (!pcmk__str_in_list(only_rsc, rsc)) { ++ if (!pcmk__str_in_list(only_rsc, rsc, pcmk__str_none)) { + free(rsc); + continue; + } +@@ -1738,7 +1738,7 @@ node_attribute_list(pcmk__output_t *out, va_list args) { + continue; + } + +- if (!pcmk__str_in_list(only_node, node->details->uname)) { ++ if (!pcmk__str_in_list(only_node, node->details->uname, pcmk__str_none)) { + g_list_free(attr_list); + continue; + } +@@ -1835,8 +1835,8 @@ node_history_list(pcmk__output_t *out, va_list args) { + * For other resource types, is_filtered is okay. 
+ */ + if (uber_parent(rsc)->variant == pe_group) { +- if (!pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) && +- !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) { ++ if (!pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) && ++ !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)), pcmk__str_none)) { + continue; + } + } else { +@@ -1899,7 +1899,7 @@ node_list_html(pcmk__output_t *out, va_list args) { + for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { + pe_node_t *node = (pe_node_t *) gIter->data; + +- if (!pcmk__str_in_list(only_node, node->details->uname)) { ++ if (!pcmk__str_in_list(only_node, node->details->uname, pcmk__str_none)) { + continue; + } + +@@ -1940,7 +1940,7 @@ pe__node_list_text(pcmk__output_t *out, va_list args) { + const char *node_mode = NULL; + char *node_name = pe__node_display_name(node, print_clone_detail); + +- if (!pcmk__str_in_list(only_node, node->details->uname)) { ++ if (!pcmk__str_in_list(only_node, node->details->uname, pcmk__str_none)) { + free(node_name); + continue; + } +@@ -2059,7 +2059,7 @@ node_list_xml(pcmk__output_t *out, va_list args) { + for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { + pe_node_t *node = (pe_node_t *) gIter->data; + +- if (!pcmk__str_in_list(only_node, node->details->uname)) { ++ if (!pcmk__str_in_list(only_node, node->details->uname, pcmk__str_none)) { + continue; + } + +@@ -2097,7 +2097,7 @@ node_summary(pcmk__output_t *out, va_list args) { + continue; + } + +- if (!pcmk__str_in_list(only_node, node->details->uname)) { ++ if (!pcmk__str_in_list(only_node, node->details->uname, pcmk__str_none)) { + continue; + } + +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 450d8348c..d1be9e4ca 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -2394,7 +2394,7 @@ pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GList *node_list) + { + for (GList *ele = rsc->running_on; ele; ele = ele->next) { + pe_node_t *node = (pe_node_t *) ele->data; +- if (pcmk__str_in_list(node_list, node->details->uname)) { ++ if (pcmk__str_in_list(node_list, node->details->uname, pcmk__str_none)) { + return true; + } + } +@@ -2419,8 +2419,8 @@ pe__filter_rsc_list(GList *rscs, GList *filter) + /* I think the second condition is safe here for all callers of this + * function. If not, it needs to move into pe__node_text. + */ +- if (pcmk__str_in_list(filter, rsc_printable_id(rsc)) || +- (rsc->parent && pcmk__str_in_list(filter, rsc_printable_id(rsc->parent)))) { ++ if (pcmk__str_in_list(filter, rsc_printable_id(rsc), pcmk__str_none) || ++ (rsc->parent && pcmk__str_in_list(filter, rsc_printable_id(rsc->parent), pcmk__str_none))) { + retval = g_list_prepend(retval, rsc); + } + } +-- +2.27.0 + diff --git a/SOURCES/015-feature-set.patch b/SOURCES/015-feature-set.patch deleted file mode 100644 index 1f1b2e5..0000000 --- a/SOURCES/015-feature-set.patch +++ /dev/null @@ -1,114 +0,0 @@ -From 12b30c920dd15287a7b295475ce1cc4a6cb1f43f Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 8 Dec 2020 15:48:38 -0500 -Subject: [PATCH] Fix: scheduler: Don't output a resource header with no list. - -If there's no resources to print, don't output just the header with -nothing under it. This potentially comes up if there are only inactive -resources, but inactive_resources is FALSE. 
---- - lib/pengine/pe_output.c | 48 ++++++++++++++++++++++++++++++++++++------------ - 1 file changed, 36 insertions(+), 12 deletions(-) - -diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c -index 5562eb6..f1a6b43 100644 ---- a/lib/pengine/pe_output.c -+++ b/lib/pengine/pe_output.c -@@ -1761,6 +1761,21 @@ resource_history_xml(pcmk__output_t *out, va_list args) { - return pcmk_rc_ok; - } - -+static void -+print_resource_header(pcmk__output_t *out, gboolean group_by_node, -+ gboolean inactive_resources) -+{ -+ if (group_by_node) { -+ /* Active resources have already been printed by node */ -+ out->begin_list(out, NULL, NULL, "Inactive Resources"); -+ } else if (inactive_resources) { -+ out->begin_list(out, NULL, NULL, "Full List of Resources"); -+ } else { -+ out->begin_list(out, NULL, NULL, "Active Resources"); -+ } -+} -+ -+ - PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "unsigned int", "gboolean", - "gboolean", "gboolean", "gboolean", "GList *", "GList *", "gboolean") - static int -@@ -1778,6 +1793,7 @@ resource_list(pcmk__output_t *out, va_list args) - - GList *rsc_iter; - int rc = pcmk_rc_no_output; -+ bool printed_header = false; - - /* If we already showed active resources by node, and - * we're not showing inactive resources, we have nothing to do -@@ -1786,22 +1802,15 @@ resource_list(pcmk__output_t *out, va_list args) - return rc; - } - -- PCMK__OUTPUT_SPACER_IF(out, print_spacer); -- -- if (group_by_node) { -- /* Active resources have already been printed by node */ -- out->begin_list(out, NULL, NULL, "Inactive Resources"); -- } else if (inactive_resources) { -- out->begin_list(out, NULL, NULL, "Full List of Resources"); -- } else { -- out->begin_list(out, NULL, NULL, "Active Resources"); -- } -- - /* If we haven't already printed resources grouped by node, - * and brief output was requested, print resource summary */ - if (brief_output && !group_by_node) { - GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc); - -+ PCMK__OUTPUT_SPACER_IF(out, print_spacer); -+ print_resource_header(out, group_by_node, inactive_resources); -+ printed_header = true; -+ - pe__rscs_brief_output(out, rscs, print_opts, inactive_resources); - g_list_free(rscs); - } -@@ -1839,6 +1848,12 @@ resource_list(pcmk__output_t *out, va_list args) - continue; - } - -+ if (!printed_header) { -+ PCMK__OUTPUT_SPACER_IF(out, print_spacer); -+ print_resource_header(out, group_by_node, inactive_resources); -+ printed_header = true; -+ } -+ - /* Print this resource */ - x = out->message(out, crm_map_element_name(rsc->xml), print_opts, rsc, - only_node, only_rsc); -@@ -1848,6 +1863,12 @@ resource_list(pcmk__output_t *out, va_list args) - } - - if (print_summary && rc != pcmk_rc_ok) { -+ if (!printed_header) { -+ PCMK__OUTPUT_SPACER_IF(out, print_spacer); -+ print_resource_header(out, group_by_node, inactive_resources); -+ printed_header = true; -+ } -+ - if (group_by_node) { - out->list_item(out, NULL, "No inactive resources"); - } else if (inactive_resources) { -@@ -1857,7 +1878,10 @@ resource_list(pcmk__output_t *out, va_list args) - } - } - -- out->end_list(out); -+ if (printed_header) { -+ out->end_list(out); -+ } -+ - return rc; - } - --- -1.8.3.1 - diff --git a/SOURCES/015-sbd.patch b/SOURCES/015-sbd.patch new file mode 100644 index 0000000..9f47c35 --- /dev/null +++ b/SOURCES/015-sbd.patch @@ -0,0 +1,1312 @@ +From b49f49576ef9d801a48ce7a01a78c72e65be7880 Mon Sep 17 00:00:00 2001 +From: Klaus Wenninger +Date: Fri, 30 Jul 2021 18:07:25 +0200 +Subject: [PATCH 1/3] Fix, Refactor: 
fenced: add return value to + get_agent_metadata + +Used to distinguish between empty metadata per design, +case of failed getting metadata that might succeed on a +retry and fatal failure. +Fixes as well regression that leads to endless retries getting +metadata for #watchdog - not superserious as it happens with +delays in between but still undesirable. +--- + daemons/fenced/fenced_commands.c | 92 +++++++++++++++++++------------- + 1 file changed, 55 insertions(+), 37 deletions(-) + +diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c +index a778801b1..cd9968f1a 100644 +--- a/daemons/fenced/fenced_commands.c ++++ b/daemons/fenced/fenced_commands.c +@@ -69,7 +69,7 @@ static void stonith_send_reply(xmlNode * reply, int call_options, const char *re + static void search_devices_record_result(struct device_search_s *search, const char *device, + gboolean can_fence); + +-static xmlNode * get_agent_metadata(const char *agent); ++static int get_agent_metadata(const char *agent, xmlNode **metadata); + static void read_action_metadata(stonith_device_t *device); + + typedef struct async_command_s { +@@ -323,19 +323,26 @@ fork_cb(GPid pid, gpointer user_data) + static int + get_agent_metadata_cb(gpointer data) { + stonith_device_t *device = data; ++ guint period_ms; + +- device->agent_metadata = get_agent_metadata(device->agent); +- if (device->agent_metadata) { +- read_action_metadata(device); +- stonith__device_parameter_flags(&(device->flags), device->id, ++ switch (get_agent_metadata(device->agent, &device->agent_metadata)) { ++ case pcmk_rc_ok: ++ if (device->agent_metadata) { ++ read_action_metadata(device); ++ stonith__device_parameter_flags(&(device->flags), device->id, + device->agent_metadata); +- return G_SOURCE_REMOVE; +- } else { +- guint period_ms = pcmk__mainloop_timer_get_period(device->timer); +- if (period_ms < 160 * 1000) { +- mainloop_timer_set_period(device->timer, 2 * period_ms); +- } +- return G_SOURCE_CONTINUE; ++ } ++ return G_SOURCE_REMOVE; ++ ++ case EAGAIN: ++ period_ms = pcmk__mainloop_timer_get_period(device->timer); ++ if (period_ms < 160 * 1000) { ++ mainloop_timer_set_period(device->timer, 2 * period_ms); ++ } ++ return G_SOURCE_CONTINUE; ++ ++ default: ++ return G_SOURCE_REMOVE; + } + } + +@@ -700,38 +707,41 @@ init_metadata_cache(void) { + } + } + +-static xmlNode * +-get_agent_metadata(const char *agent) ++int ++get_agent_metadata(const char *agent, xmlNode ** metadata) + { +- xmlNode *xml = NULL; + char *buffer = NULL; + ++ if (metadata == NULL) { ++ return EINVAL; ++ } ++ *metadata = NULL; ++ if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT, pcmk__str_none)) { ++ return pcmk_rc_ok; ++ } + init_metadata_cache(); + buffer = g_hash_table_lookup(metadata_cache, agent); +- if(pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT, pcmk__str_casei)) { +- return NULL; +- +- } else if(buffer == NULL) { ++ if (buffer == NULL) { + stonith_t *st = stonith_api_new(); + int rc; + + if (st == NULL) { + crm_warn("Could not get agent meta-data: " + "API memory allocation failed"); +- return NULL; ++ return EAGAIN; + } +- rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10); ++ rc = st->cmds->metadata(st, st_opt_sync_call, agent, ++ NULL, &buffer, 10); + stonith_api_delete(st); + if (rc || !buffer) { + crm_err("Could not retrieve metadata for fencing agent %s", agent); +- return NULL; ++ return EAGAIN; + } + g_hash_table_replace(metadata_cache, strdup(agent), buffer); + } + +- xml = string2xml(buffer); +- +- return xml; ++ *metadata = 
string2xml(buffer); ++ return pcmk_rc_ok; + } + + static gboolean +@@ -962,19 +972,27 @@ build_device_from_xml(xmlNode * msg) + g_list_free_full(device->targets, free); + device->targets = NULL; + } +- device->agent_metadata = get_agent_metadata(device->agent); +- if (device->agent_metadata) { +- read_action_metadata(device); +- stonith__device_parameter_flags(&(device->flags), device->id, +- device->agent_metadata); +- } else { +- if (device->timer == NULL) { +- device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000, ++ switch (get_agent_metadata(device->agent, &device->agent_metadata)) { ++ case pcmk_rc_ok: ++ if (device->agent_metadata) { ++ read_action_metadata(device); ++ stonith__device_parameter_flags(&(device->flags), device->id, ++ device->agent_metadata); ++ } ++ break; ++ ++ case EAGAIN: ++ if (device->timer == NULL) { ++ device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000, + TRUE, get_agent_metadata_cb, device); +- } +- if (!mainloop_timer_running(device->timer)) { +- mainloop_timer_start(device->timer); +- } ++ } ++ if (!mainloop_timer_running(device->timer)) { ++ mainloop_timer_start(device->timer); ++ } ++ break; ++ ++ default: ++ break; + } + + value = g_hash_table_lookup(device->params, "nodeid"); +-- +2.27.0 + + +From 5dd1e4459335764e0adf5fa78d81c875ae2332e9 Mon Sep 17 00:00:00 2001 +From: Klaus Wenninger +Date: Fri, 30 Jul 2021 18:15:10 +0200 +Subject: [PATCH 2/3] feature: watchdog-fencing: allow restriction to certain + nodes + +Bump CRM_FEATURE_SET to 3.11.0 to encourage cluster being +fully upgraded to a version that supports the feature +before explicitly adding a watchdog-fence-device. +--- + configure.ac | 1 + + daemons/controld/controld_control.c | 2 +- + daemons/controld/controld_fencing.c | 14 ++ + daemons/controld/controld_fencing.h | 1 + + daemons/fenced/Makefile.am | 2 +- + daemons/fenced/fence_watchdog.in | 283 ++++++++++++++++++++++++++++ + daemons/fenced/fenced_commands.c | 141 +++++++++++--- + daemons/fenced/fenced_remote.c | 71 ++++--- + daemons/fenced/pacemaker-fenced.c | 131 +++++++++---- + daemons/fenced/pacemaker-fenced.h | 5 +- + include/crm/crm.h | 2 +- + include/crm/fencing/internal.h | 8 +- + lib/fencing/st_client.c | 61 ++++++ + lib/lrmd/lrmd_client.c | 6 +- + rpm/pacemaker.spec.in | 3 + + 16 files changed, 635 insertions(+), 97 deletions(-) + create mode 100755 daemons/fenced/fence_watchdog.in + +diff --git a/configure.ac b/configure.ac +index 436100c81..013562e46 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1972,6 +1972,7 @@ CONFIG_FILES_EXEC([cts/cts-cli], + [cts/support/fence_dummy], + [cts/support/pacemaker-cts-dummyd], + [daemons/fenced/fence_legacy], ++ [daemons/fenced/fence_watchdog], + [doc/abi-check], + [extra/resources/ClusterMon], + [extra/resources/HealthSMART], +diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c +index 45a70bb92..b5da6a46c 100644 +--- a/daemons/controld/controld_control.c ++++ b/daemons/controld/controld_control.c +@@ -615,7 +615,7 @@ static pcmk__cluster_option_t crmd_opts[] = { + }, + { + "stonith-watchdog-timeout", NULL, "time", NULL, +- "0", pcmk__valid_sbd_timeout, ++ "0", controld_verify_stonith_watchdog_timeout, + "How long to wait before we can assume nodes are safely down " + "when watchdog-based self-fencing via SBD is in use", + "If nonzero, along with `have-watchdog=true` automatically set by the " +diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c +index 0fba6613b..6c2a6c550 100644 +--- 
a/daemons/controld/controld_fencing.c ++++ b/daemons/controld/controld_fencing.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -886,6 +887,19 @@ te_fence_node(crm_graph_t *graph, crm_action_t *action) + return TRUE; + } + ++bool ++controld_verify_stonith_watchdog_timeout(const char *value) ++{ ++ gboolean rv = TRUE; ++ ++ if (stonith_api && (stonith_api->state != stonith_disconnected) && ++ stonith__watchdog_fencing_enabled_for_node_api(stonith_api, ++ fsa_our_uname)) { ++ rv = pcmk__valid_sbd_timeout(value); ++ } ++ return rv; ++} ++ + /* end stonith API client functions */ + + +diff --git a/daemons/controld/controld_fencing.h b/daemons/controld/controld_fencing.h +index d0ecc8234..ef68a0c83 100644 +--- a/daemons/controld/controld_fencing.h ++++ b/daemons/controld/controld_fencing.h +@@ -24,6 +24,7 @@ void update_stonith_max_attempts(const char* value); + void controld_trigger_fencer_connect(void); + void controld_disconnect_fencer(bool destroy); + gboolean te_fence_node(crm_graph_t *graph, crm_action_t *action); ++bool controld_verify_stonith_watchdog_timeout(const char *value); + + // stonith cleanup list + void add_stonith_cleanup(const char *target); +diff --git a/daemons/fenced/Makefile.am b/daemons/fenced/Makefile.am +index 43413e11d..2923d7c9b 100644 +--- a/daemons/fenced/Makefile.am ++++ b/daemons/fenced/Makefile.am +@@ -15,7 +15,7 @@ halibdir = $(CRM_DAEMON_DIR) + + halib_PROGRAMS = pacemaker-fenced cts-fence-helper + +-sbin_SCRIPTS = fence_legacy ++sbin_SCRIPTS = fence_legacy fence_watchdog + + noinst_HEADERS = pacemaker-fenced.h + +diff --git a/daemons/fenced/fence_watchdog.in b/daemons/fenced/fence_watchdog.in +new file mode 100755 +index 000000000..c83304f1d +--- /dev/null ++++ b/daemons/fenced/fence_watchdog.in +@@ -0,0 +1,283 @@ ++#!@PYTHON@ ++"""Dummy watchdog fence agent for providing meta-data for the pacemaker internal agent ++""" ++ ++__copyright__ = "Copyright 2012-2021 the Pacemaker project contributors" ++__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" ++ ++import io ++import os ++import re ++import sys ++import atexit ++import getopt ++ ++SHORT_DESC = "Dummy watchdog fence agent" ++LONG_DESC = """fence_watchdog just provides ++meta-data - actual fencing is done by the pacemaker internal watchdog agent.""" ++ ++ALL_OPT = { ++ "version" : { ++ "getopt" : "V", ++ "longopt" : "version", ++ "help" : "-V, --version Display version information and exit", ++ "required" : "0", ++ "shortdesc" : "Display version information and exit", ++ "order" : 53 ++ }, ++ "help" : { ++ "getopt" : "h", ++ "longopt" : "help", ++ "help" : "-h, --help Display this help and exit", ++ "required" : "0", ++ "shortdesc" : "Display help and exit", ++ "order" : 54 ++ }, ++ "action" : { ++ "getopt" : "o:", ++ "longopt" : "action", ++ "help" : "-o, --action=[action] Action: metadata", ++ "required" : "1", ++ "shortdesc" : "Fencing Action", ++ "default" : "metadata", ++ "order" : 1 ++ }, ++ "nodename" : { ++ "getopt" : "N:", ++ "longopt" : "nodename", ++ "help" : "-N, --nodename Node name of fence victim (ignored)", ++ "required" : "0", ++ "shortdesc" : "Ignored", ++ "order" : 2 ++ }, ++ "plug" : { ++ "getopt" : "n:", ++ "longopt" : "plug", ++ "help" : "-n, --plug=[id] Physical plug number on device (ignored)", ++ "required" : "1", ++ "shortdesc" : "Ignored", ++ "order" : 4 ++ } ++} ++ ++ ++def agent(): ++ """ Return name this file was run as. 
""" ++ ++ return os.path.basename(sys.argv[0]) ++ ++ ++def fail_usage(message): ++ """ Print a usage message and exit. """ ++ ++ sys.exit("%s\nPlease use '-h' for usage" % message) ++ ++ ++def show_docs(options): ++ """ Handle informational options (display info and exit). """ ++ ++ device_opt = options["device_opt"] ++ ++ if "-h" in options: ++ usage(device_opt) ++ sys.exit(0) ++ ++ if "-o" in options and options["-o"].lower() == "metadata": ++ metadata(device_opt, options) ++ sys.exit(0) ++ ++ if "-V" in options: ++ print(AGENT_VERSION) ++ sys.exit(0) ++ ++ ++def sorted_options(avail_opt): ++ """ Return a list of all options, in their internally specified order. """ ++ ++ sorted_list = [(key, ALL_OPT[key]) for key in avail_opt] ++ sorted_list.sort(key=lambda x: x[1]["order"]) ++ return sorted_list ++ ++ ++def usage(avail_opt): ++ """ Print a usage message. """ ++ print(LONG_DESC) ++ print() ++ print("Usage:") ++ print("\t" + agent() + " [options]") ++ print("Options:") ++ ++ for dummy, value in sorted_options(avail_opt): ++ if len(value["help"]) != 0: ++ print(" " + value["help"]) ++ ++ ++def metadata(avail_opt, options): ++ """ Print agent metadata. """ ++ ++ print(""" ++ ++%s ++""" % (agent(), SHORT_DESC, LONG_DESC)) ++ ++ for option, dummy in sorted_options(avail_opt): ++ if "shortdesc" in ALL_OPT[option]: ++ print(' ') ++ ++ default = "" ++ default_name_arg = "-" + ALL_OPT[option]["getopt"][:-1] ++ default_name_no_arg = "-" + ALL_OPT[option]["getopt"] ++ ++ if "default" in ALL_OPT[option]: ++ default = 'default="%s"' % str(ALL_OPT[option]["default"]) ++ elif default_name_arg in options: ++ if options[default_name_arg]: ++ try: ++ default = 'default="%s"' % options[default_name_arg] ++ except TypeError: ++ ## @todo/@note: Currently there is no clean way how to handle lists ++ ## we can create a string from it but we can't set it on command line ++ default = 'default="%s"' % str(options[default_name_arg]) ++ elif default_name_no_arg in options: ++ default = 'default="true"' ++ ++ mixed = ALL_OPT[option]["help"] ++ ## split it between option and help text ++ res = re.compile(r"^(.*--\S+)\s+", re.IGNORECASE | re.S).search(mixed) ++ if None != res: ++ mixed = res.group(1) ++ mixed = mixed.replace("<", "<").replace(">", ">") ++ print(' ') ++ ++ if ALL_OPT[option]["getopt"].count(":") > 0: ++ print(' ') ++ else: ++ print(' ') ++ ++ print(' ' + ALL_OPT[option]["shortdesc"] + '') ++ print(' ') ++ ++ print(' \n ') ++ print(' ') ++ print(' ') ++ print(' ') ++ print(' ') ++ print(' ') ++ print(' ') ++ print(' ') ++ print('') ++ ++ ++def option_longopt(option): ++ """ Return the getopt-compatible long-option name of the given option. """ ++ ++ if ALL_OPT[option]["getopt"].endswith(":"): ++ return ALL_OPT[option]["longopt"] + "=" ++ else: ++ return ALL_OPT[option]["longopt"] ++ ++ ++def opts_from_command_line(argv, avail_opt): ++ """ Read options from command-line arguments. 
""" ++ ++ # Prepare list of options for getopt ++ getopt_string = "" ++ longopt_list = [] ++ for k in avail_opt: ++ if k in ALL_OPT: ++ getopt_string += ALL_OPT[k]["getopt"] ++ else: ++ fail_usage("Parse error: unknown option '" + k + "'") ++ ++ if k in ALL_OPT and "longopt" in ALL_OPT[k]: ++ longopt_list.append(option_longopt(k)) ++ ++ try: ++ opt, dummy = getopt.gnu_getopt(argv, getopt_string, longopt_list) ++ except getopt.GetoptError as error: ++ fail_usage("Parse error: " + error.msg) ++ ++ # Transform longopt to short one which are used in fencing agents ++ old_opt = opt ++ opt = {} ++ for old_option in dict(old_opt).keys(): ++ if old_option.startswith("--"): ++ for option in ALL_OPT.keys(): ++ if "longopt" in ALL_OPT[option] and "--" + ALL_OPT[option]["longopt"] == old_option: ++ opt["-" + ALL_OPT[option]["getopt"].rstrip(":")] = dict(old_opt)[old_option] ++ else: ++ opt[old_option] = dict(old_opt)[old_option] ++ ++ return opt ++ ++ ++def opts_from_stdin(avail_opt): ++ """ Read options from standard input. """ ++ ++ opt = {} ++ name = "" ++ for line in sys.stdin.readlines(): ++ line = line.strip() ++ if line.startswith("#") or (len(line) == 0): ++ continue ++ ++ (name, value) = (line + "=").split("=", 1) ++ value = value[:-1] ++ ++ if name not in avail_opt: ++ print("Parse error: Ignoring unknown option '%s'" % line, ++ file=sys.stderr) ++ continue ++ ++ if ALL_OPT[name]["getopt"].endswith(":"): ++ opt["-"+ALL_OPT[name]["getopt"].rstrip(":")] = value ++ elif value.lower() in ["1", "yes", "on", "true"]: ++ opt["-"+ALL_OPT[name]["getopt"]] = "1" ++ ++ return opt ++ ++ ++def process_input(avail_opt): ++ """ Set standard environment variables, and parse all options. """ ++ ++ # Set standard environment ++ os.putenv("LANG", "C") ++ os.putenv("LC_ALL", "C") ++ ++ # Read options from command line or standard input ++ if len(sys.argv) > 1: ++ return opts_from_command_line(sys.argv[1:], avail_opt) ++ else: ++ return opts_from_stdin(avail_opt) ++ ++ ++def atexit_handler(): ++ """ Close stdout on exit. """ ++ ++ try: ++ sys.stdout.close() ++ os.close(1) ++ except IOError: ++ sys.exit("%s failed to close standard output" % agent()) ++ ++ ++def main(): ++ """ Make it so! 
""" ++ ++ device_opt = ALL_OPT.keys() ++ ++ ## Defaults for fence agent ++ atexit.register(atexit_handler) ++ options = process_input(device_opt) ++ options["device_opt"] = device_opt ++ show_docs(options) ++ ++ print("Watchdog fencing may be initiated only by the cluster, not this agent.", ++ file=sys.stderr) ++ ++ sys.exit(1) ++ ++ ++if __name__ == "__main__": ++ main() +diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c +index cd9968f1a..9470ea2c1 100644 +--- a/daemons/fenced/fenced_commands.c ++++ b/daemons/fenced/fenced_commands.c +@@ -397,15 +397,13 @@ stonith_device_execute(stonith_device_t * device) + return TRUE; + } + +- if(pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT, pcmk__str_casei)) { +- if(pcmk__str_eq(cmd->action, "reboot", pcmk__str_casei)) { +- pcmk__panic(__func__); +- goto done; +- +- } else if(pcmk__str_eq(cmd->action, "off", pcmk__str_casei)) { +- pcmk__panic(__func__); +- goto done; +- ++ if (pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, ++ STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { ++ if (pcmk__strcase_any_of(cmd->action, "reboot", "off", NULL)) { ++ if (node_does_watchdog_fencing(stonith_our_uname)) { ++ pcmk__panic(__func__); ++ goto done; ++ } + } else { + crm_info("Faking success for %s watchdog operation", cmd->action); + cmd->done_cb(0, 0, NULL, cmd); +@@ -716,7 +714,7 @@ get_agent_metadata(const char *agent, xmlNode ** metadata) + return EINVAL; + } + *metadata = NULL; +- if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT, pcmk__str_none)) { ++ if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT_INTERNAL, pcmk__str_none)) { + return pcmk_rc_ok; + } + init_metadata_cache(); +@@ -1050,24 +1048,6 @@ schedule_internal_command(const char *origin, + schedule_stonith_command(cmd, device); + } + +-gboolean +-string_in_list(GList *list, const char *item) +-{ +- int lpc = 0; +- int max = g_list_length(list); +- +- for (lpc = 0; lpc < max; lpc++) { +- const char *value = g_list_nth_data(list, lpc); +- +- if (pcmk__str_eq(item, value, pcmk__str_casei)) { +- return TRUE; +- } else { +- crm_trace("%d: '%s' != '%s'", lpc, item, value); +- } +- } +- return FALSE; +-} +- + static void + status_search_cb(GPid pid, int rc, const char *output, gpointer user_data) + { +@@ -1144,7 +1124,7 @@ dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data) + if (!alias) { + alias = search->host; + } +- if (string_in_list(dev->targets, alias)) { ++ if (pcmk__str_in_list(dev->targets, alias, pcmk__str_casei)) { + can_fence = TRUE; + } + } +@@ -1215,9 +1195,62 @@ stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib) + stonith_device_t *dup = NULL; + stonith_device_t *device = build_device_from_xml(msg); + guint ndevices = 0; ++ int rv = pcmk_ok; + + CRM_CHECK(device != NULL, return -ENOMEM); + ++ /* do we have a watchdog-device? 
*/ ++ if (pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none) || ++ pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, ++ STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) do { ++ if (stonith_watchdog_timeout_ms <= 0) { ++ crm_err("Ignoring watchdog fence device without " ++ "stonith-watchdog-timeout set."); ++ rv = -ENODEV; ++ /* fall through to cleanup & return */ ++ } else if (!pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, ++ STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { ++ crm_err("Ignoring watchdog fence device with unknown " ++ "agent '%s' unequal '" STONITH_WATCHDOG_AGENT "'.", ++ device->agent?device->agent:""); ++ rv = -ENODEV; ++ /* fall through to cleanup & return */ ++ } else if (!pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, ++ pcmk__str_none)) { ++ crm_err("Ignoring watchdog fence device " ++ "named %s !='"STONITH_WATCHDOG_ID"'.", ++ device->id?device->id:""); ++ rv = -ENODEV; ++ /* fall through to cleanup & return */ ++ } else { ++ if (pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT, ++ pcmk__str_none)) { ++ /* this either has an empty list or the targets ++ configured for watchdog-fencing ++ */ ++ g_list_free_full(stonith_watchdog_targets, free); ++ stonith_watchdog_targets = device->targets; ++ device->targets = NULL; ++ } ++ if (node_does_watchdog_fencing(stonith_our_uname)) { ++ g_list_free_full(device->targets, free); ++ device->targets = stonith__parse_targets(stonith_our_uname); ++ g_hash_table_replace(device->params, ++ strdup(PCMK_STONITH_HOST_LIST), ++ strdup(stonith_our_uname)); ++ /* proceed as with any other stonith-device */ ++ break; ++ } ++ ++ crm_debug("Skip registration of watchdog fence device on node not in host-list."); ++ /* cleanup and fall through to more cleanup and return */ ++ device->targets = NULL; ++ stonith_device_remove(device->id, from_cib); ++ } ++ free_device(device); ++ return rv; ++ } while (0); ++ + dup = device_has_duplicate(device); + if (dup) { + ndevices = g_hash_table_size(device_list); +@@ -1598,6 +1631,39 @@ stonith_level_remove(xmlNode *msg, char **desc) + * (CIB registration is not sufficient), because monitor should not be + * possible unless the device is "started" (API registered). 
+ */ ++ ++static char * ++list_to_string(GList *list, const char *delim, gboolean terminate_with_delim) ++{ ++ int max = g_list_length(list); ++ size_t delim_len = delim?strlen(delim):0; ++ size_t alloc_size = 1 + (max?((max-1+(terminate_with_delim?1:0))*delim_len):0); ++ char *rv; ++ GList *gIter; ++ ++ for (gIter = list; gIter != NULL; gIter = gIter->next) { ++ const char *value = (const char *) gIter->data; ++ ++ alloc_size += strlen(value); ++ } ++ rv = calloc(alloc_size, sizeof(char)); ++ if (rv) { ++ char *pos = rv; ++ const char *lead_delim = ""; ++ ++ for (gIter = list; gIter != NULL; gIter = gIter->next) { ++ const char *value = (const char *) gIter->data; ++ ++ pos = &pos[sprintf(pos, "%s%s", lead_delim, value)]; ++ lead_delim = delim; ++ } ++ if (max && terminate_with_delim) { ++ sprintf(pos, "%s", delim); ++ } ++ } ++ return rv; ++} ++ + static int + stonith_device_action(xmlNode * msg, char **output) + { +@@ -1615,6 +1681,19 @@ stonith_device_action(xmlNode * msg, char **output) + return -EPROTO; + } + ++ if (pcmk__str_eq(id, STONITH_WATCHDOG_ID, pcmk__str_none)) { ++ if (stonith_watchdog_timeout_ms <= 0) { ++ return -ENODEV; ++ } else { ++ if (pcmk__str_eq(action, "list", pcmk__str_casei)) { ++ *output = list_to_string(stonith_watchdog_targets, "\n", TRUE); ++ return pcmk_ok; ++ } else if (pcmk__str_eq(action, "monitor", pcmk__str_casei)) { ++ return pcmk_ok; ++ } ++ } ++ } ++ + device = g_hash_table_lookup(device_list, id); + if ((device == NULL) + || (!device->api_registered && !strcmp(action, "monitor"))) { +@@ -1742,7 +1821,7 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc + * Only use if all hosts on which the device can be active can always fence all listed hosts + */ + +- if (string_in_list(dev->targets, host)) { ++ if (pcmk__str_in_list(dev->targets, host, pcmk__str_casei)) { + can = TRUE; + } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP) + && g_hash_table_lookup(dev->aliases, host)) { +@@ -1763,7 +1842,7 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc + return; + } + +- if (string_in_list(dev->targets, alias)) { ++ if (pcmk__str_in_list(dev->targets, alias, pcmk__str_casei)) { + can = TRUE; + } + +diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c +index cf91acaed..224f2baba 100644 +--- a/daemons/fenced/fenced_remote.c ++++ b/daemons/fenced/fenced_remote.c +@@ -1522,6 +1522,25 @@ advance_topology_device_in_level(remote_fencing_op_t *op, const char *device, + } + } + ++static gboolean ++check_watchdog_fencing_and_wait(remote_fencing_op_t * op) ++{ ++ if (node_does_watchdog_fencing(op->target)) { ++ ++ crm_notice("Waiting %lds for %s to self-fence (%s) for " ++ "client %s " CRM_XS " id=%.8s", ++ (stonith_watchdog_timeout_ms / 1000), ++ op->target, op->action, op->client_name, op->id); ++ op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, ++ remote_op_watchdog_done, op); ++ return TRUE; ++ } else { ++ crm_debug("Skipping fallback to watchdog-fencing as %s is " ++ "not in host-list", op->target); ++ } ++ return FALSE; ++} ++ + void + call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer, int rc) + { +@@ -1592,26 +1611,33 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer, int rc) + g_source_remove(op->op_timer_one); + } + +- if(stonith_watchdog_timeout_ms > 0 && device && pcmk__str_eq(device, "watchdog", pcmk__str_casei)) { +- crm_notice("Waiting %lds for %s to self-fence (%s) for client %s " +- CRM_XS " 
id=%.8s", (stonith_watchdog_timeout_ms / 1000), +- op->target, op->action, op->client_name, op->id); +- op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op); +- +- /* TODO check devices to verify watchdog will be in use */ +- } else if(stonith_watchdog_timeout_ms > 0 +- && pcmk__str_eq(peer->host, op->target, pcmk__str_casei) +- && !pcmk__str_eq(op->action, "on", pcmk__str_casei)) { +- crm_notice("Waiting %lds for %s to self-fence (%s) for client %s " +- CRM_XS " id=%.8s", (stonith_watchdog_timeout_ms / 1000), +- op->target, op->action, op->client_name, op->id); +- op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op); +- +- } else { ++ if (!(stonith_watchdog_timeout_ms > 0 && ( ++ (pcmk__str_eq(device, STONITH_WATCHDOG_ID, ++ pcmk__str_none)) || ++ (pcmk__str_eq(peer->host, op->target, pcmk__str_casei) ++ && !pcmk__str_eq(op->action, "on", pcmk__str_casei))) && ++ check_watchdog_fencing_and_wait(op))) { ++ ++ /* Some thoughts about self-fencing cases reaching this point: ++ - Actually check in check_watchdog_fencing_and_wait ++ shouldn't fail if STONITH_WATCHDOG_ID is ++ chosen as fencing-device and it being present implies ++ watchdog-fencing is enabled anyway ++ - If watchdog-fencing is disabled either in general or for ++ a specific target - detected in check_watchdog_fencing_and_wait - ++ for some other kind of self-fencing we can't expect ++ a success answer but timeout is fine if the node doesn't ++ come back in between ++ - Delicate might be the case where we have watchdog-fencing ++ enabled for a node but the watchdog-fencing-device isn't ++ explicitly chosen for suicide. Local pe-execution in sbd ++ may detect the node as unclean and lead to timely suicide. ++ Otherwise the selection of stonith-watchdog-timeout at ++ least is questionable. ++ */ + op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op); + } + +- + send_cluster_message(crm_get_peer(0, peer->host), crm_msg_stonith_ng, remote_op, FALSE); + peer->tried = TRUE; + free_xml(remote_op); +@@ -1645,12 +1671,11 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer, int rc) + * but we have all the expected replies, then no devices + * are available to execute the fencing operation. 
*/ + +- if(stonith_watchdog_timeout_ms && pcmk__str_eq(device, "watchdog", pcmk__str_null_matches | pcmk__str_casei)) { +- crm_notice("Waiting %lds for %s to self-fence (%s) for client %s " +- CRM_XS " id=%.8s", (stonith_watchdog_timeout_ms / 1000), +- op->target, op->action, op->client_name, op->id); +- op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op); +- return; ++ if(stonith_watchdog_timeout_ms > 0 && pcmk__str_eq(device, ++ STONITH_WATCHDOG_ID, pcmk__str_null_matches)) { ++ if (check_watchdog_fencing_and_wait(op)) { ++ return; ++ } + } + + if (op->state == st_query) { +diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c +index 39738d8be..7f8b427d9 100644 +--- a/daemons/fenced/pacemaker-fenced.c ++++ b/daemons/fenced/pacemaker-fenced.c +@@ -42,6 +42,7 @@ + + char *stonith_our_uname = NULL; + long stonith_watchdog_timeout_ms = 0; ++GList *stonith_watchdog_targets = NULL; + + static GMainLoop *mainloop = NULL; + +@@ -578,7 +579,44 @@ our_node_allowed_for(pe_resource_t *rsc) + } + + static void +-watchdog_device_update(xmlNode *cib) ++watchdog_device_update(void) ++{ ++ if (stonith_watchdog_timeout_ms > 0) { ++ if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) && ++ !stonith_watchdog_targets) { ++ /* getting here watchdog-fencing enabled, no device there yet ++ and reason isn't stonith_watchdog_targets preventing that ++ */ ++ int rc; ++ xmlNode *xml; ++ ++ xml = create_device_registration_xml( ++ STONITH_WATCHDOG_ID, ++ st_namespace_internal, ++ STONITH_WATCHDOG_AGENT, ++ NULL, /* stonith_device_register will add our ++ own name as PCMK_STONITH_HOST_LIST param ++ so we can skip that here ++ */ ++ NULL); ++ rc = stonith_device_register(xml, NULL, TRUE); ++ free_xml(xml); ++ if (rc != pcmk_ok) { ++ crm_crit("Cannot register watchdog pseudo fence agent"); ++ crm_exit(CRM_EX_FATAL); ++ } ++ } ++ ++ } else { ++ /* be silent if no device - todo parameter to stonith_device_remove */ ++ if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID)) { ++ stonith_device_remove(STONITH_WATCHDOG_ID, TRUE); ++ } ++ } ++} ++ ++static void ++update_stonith_watchdog_timeout_ms(xmlNode *cib) + { + xmlNode *stonith_enabled_xml = NULL; + const char *stonith_enabled_s = NULL; +@@ -608,33 +646,7 @@ watchdog_device_update(xmlNode *cib) + } + } + +- if (timeout_ms != stonith_watchdog_timeout_ms) { +- crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, stonith_watchdog_timeout_ms/1000); +- stonith_watchdog_timeout_ms = timeout_ms; +- +- if (stonith_watchdog_timeout_ms > 0) { +- int rc; +- xmlNode *xml; +- stonith_key_value_t *params = NULL; +- +- params = stonith_key_value_add(params, PCMK_STONITH_HOST_LIST, +- stonith_our_uname); +- +- xml = create_device_registration_xml("watchdog", st_namespace_internal, +- STONITH_WATCHDOG_AGENT, params, +- NULL); +- stonith_key_value_freeall(params, 1, 1); +- rc = stonith_device_register(xml, NULL, FALSE); +- free_xml(xml); +- if (rc != pcmk_ok) { +- crm_crit("Cannot register watchdog pseudo fence agent"); +- crm_exit(CRM_EX_FATAL); +- } +- +- } else { +- stonith_device_remove("watchdog", FALSE); +- } +- } ++ stonith_watchdog_timeout_ms = timeout_ms; + } + + /*! 
+@@ -677,6 +689,16 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) + return; + } + ++ /* if watchdog-fencing is disabled handle any watchdog-fence ++ resource as if it was disabled ++ */ ++ if ((stonith_watchdog_timeout_ms <= 0) && ++ pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) { ++ crm_info("Watchdog-fencing disabled thus handling " ++ "device %s as disabled", rsc->id); ++ return; ++ } ++ + /* Check whether our node is allowed for this resource (and its parent if in a group) */ + node = our_node_allowed_for(rsc); + if (rsc->parent && (rsc->parent->variant == pe_group)) { +@@ -772,6 +794,12 @@ cib_devices_update(void) + } + } + ++ /* have list repopulated if cib has a watchdog-fencing-resource ++ TODO: keep a cached list for queries happening while we are refreshing ++ */ ++ g_list_free_full(stonith_watchdog_targets, free); ++ stonith_watchdog_targets = NULL; ++ + for (gIter = fenced_data_set->resources; gIter != NULL; gIter = gIter->next) { + cib_device_update(gIter->data, fenced_data_set); + } +@@ -825,6 +853,8 @@ update_cib_stonith_devices_v2(const char *event, xmlNode * msg) + if (search != NULL) { + *search = 0; + stonith_device_remove(rsc_id, TRUE); ++ /* watchdog_device_update called afterwards ++ to fall back to implicit definition if needed */ + } else { + crm_warn("Ignoring malformed CIB update (resource deletion)"); + } +@@ -968,6 +998,24 @@ node_has_attr(const char *node, const char *name, const char *value) + return (match != NULL); + } + ++/*! ++ * \internal ++ * \brief Check whether a node does watchdog-fencing ++ * ++ * \param[in] node Name of node to check ++ * ++ * \return TRUE if node found in stonith_watchdog_targets ++ * or stonith_watchdog_targets is empty indicating ++ * all nodes are doing watchdog-fencing ++ */ ++gboolean ++node_does_watchdog_fencing(const char *node) ++{ ++ return ((stonith_watchdog_targets == NULL) || ++ pcmk__str_in_list(stonith_watchdog_targets, node, pcmk__str_casei)); ++} ++ ++ + static void + update_fencing_topology(const char *event, xmlNode * msg) + { +@@ -1073,6 +1121,8 @@ update_cib_cache_cb(const char *event, xmlNode * msg) + xmlNode *stonith_enabled_xml = NULL; + const char *stonith_enabled_s = NULL; + static gboolean stonith_enabled_saved = TRUE; ++ long timeout_ms_saved = stonith_watchdog_timeout_ms; ++ gboolean need_full_refresh = FALSE; + + if(!have_cib_devices) { + crm_trace("Skipping updates until we get a full dump"); +@@ -1127,6 +1177,7 @@ update_cib_cache_cb(const char *event, xmlNode * msg) + } + + pcmk__refresh_node_caches_from_cib(local_cib); ++ update_stonith_watchdog_timeout_ms(local_cib); + + stonith_enabled_xml = get_xpath_object("//nvpair[@name='stonith-enabled']", + local_cib, LOG_NEVER); +@@ -1134,23 +1185,30 @@ update_cib_cache_cb(const char *event, xmlNode * msg) + stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE); + } + +- watchdog_device_update(local_cib); +- + if (stonith_enabled_s && crm_is_true(stonith_enabled_s) == FALSE) { + crm_trace("Ignoring CIB updates while fencing is disabled"); + stonith_enabled_saved = FALSE; +- return; + + } else if (stonith_enabled_saved == FALSE) { + crm_info("Updating fencing device and topology lists " + "now that fencing is enabled"); + stonith_enabled_saved = TRUE; +- fencing_topology_init(); +- cib_devices_update(); ++ need_full_refresh = TRUE; + + } else { +- update_fencing_topology(event, msg); +- update_cib_stonith_devices(event, msg); ++ if (timeout_ms_saved != stonith_watchdog_timeout_ms) { 
++ need_full_refresh = TRUE; ++ } else { ++ update_fencing_topology(event, msg); ++ update_cib_stonith_devices(event, msg); ++ watchdog_device_update(); ++ } ++ } ++ ++ if (need_full_refresh) { ++ fencing_topology_init(); ++ cib_devices_update(); ++ watchdog_device_update(); + } + } + +@@ -1162,10 +1220,11 @@ init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *us + local_cib = copy_xml(output); + + pcmk__refresh_node_caches_from_cib(local_cib); ++ update_stonith_watchdog_timeout_ms(local_cib); + + fencing_topology_init(); +- watchdog_device_update(local_cib); + cib_devices_update(); ++ watchdog_device_update(); + } + + static void +diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h +index d330fda4d..14e085e98 100644 +--- a/daemons/fenced/pacemaker-fenced.h ++++ b/daemons/fenced/pacemaker-fenced.h +@@ -260,14 +260,15 @@ bool fencing_peer_active(crm_node_t *peer); + + int stonith_manual_ack(xmlNode * msg, remote_fencing_op_t * op); + +-gboolean string_in_list(GList *list, const char *item); +- + gboolean node_has_attr(const char *node, const char *name, const char *value); + ++gboolean node_does_watchdog_fencing(const char *node); ++ + extern char *stonith_our_uname; + extern gboolean stand_alone; + extern GHashTable *device_list; + extern GHashTable *topology; + extern long stonith_watchdog_timeout_ms; ++extern GList *stonith_watchdog_targets; + + extern GHashTable *stonith_remote_op_list; +diff --git a/include/crm/crm.h b/include/crm/crm.h +index ee52c3630..7861c160e 100644 +--- a/include/crm/crm.h ++++ b/include/crm/crm.h +@@ -66,7 +66,7 @@ extern "C" { + * >=3.0.13: Fail counts include operation name and interval + * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED + */ +-# define CRM_FEATURE_SET "3.10.2" ++# define CRM_FEATURE_SET "3.11.0" + + /* Pacemaker's CPG protocols use fixed-width binary fields for the sender and + * recipient of a CPG message. 
This imposes an arbitrary limit on cluster node +diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h +index 8bcb544d8..f222edba3 100644 +--- a/include/crm/fencing/internal.h ++++ b/include/crm/fencing/internal.h +@@ -164,7 +164,10 @@ void stonith__device_parameter_flags(uint32_t *device_flags, + # define STONITH_OP_LEVEL_ADD "st_level_add" + # define STONITH_OP_LEVEL_DEL "st_level_remove" + +-# define STONITH_WATCHDOG_AGENT "#watchdog" ++# define STONITH_WATCHDOG_AGENT "fence_watchdog" ++/* Don't change 2 below as it would break rolling upgrade */ ++# define STONITH_WATCHDOG_AGENT_INTERNAL "#watchdog" ++# define STONITH_WATCHDOG_ID "watchdog" + + # ifdef HAVE_STONITH_STONITH_H + // utilities from st_lha.c +@@ -211,4 +214,7 @@ stonith__op_state_pending(enum op_state state) + return state != st_failed && state != st_done; + } + ++gboolean stonith__watchdog_fencing_enabled_for_node(const char *node); ++gboolean stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node); ++ + #endif +diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c +index e285f51e2..0ff98157b 100644 +--- a/lib/fencing/st_client.c ++++ b/lib/fencing/st_client.c +@@ -195,6 +195,67 @@ stonith_get_namespace(const char *agent, const char *namespace_s) + return st_namespace_invalid; + } + ++gboolean ++stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node) ++{ ++ gboolean rv = FALSE; ++ stonith_t *stonith_api = st?st:stonith_api_new(); ++ char *list = NULL; ++ ++ if(stonith_api) { ++ if (stonith_api->state == stonith_disconnected) { ++ int rc = stonith_api->cmds->connect(stonith_api, "stonith-api", NULL); ++ ++ if (rc != pcmk_ok) { ++ crm_err("Failed connecting to Stonith-API for watchdog-fencing-query."); ++ } ++ } ++ ++ if (stonith_api->state != stonith_disconnected) { ++ /* caveat!!! 
++ * this might fail when when stonithd is just updating the device-list ++ * probably something we should fix as well for other api-calls */ ++ int rc = stonith_api->cmds->list(stonith_api, st_opt_sync_call, STONITH_WATCHDOG_ID, &list, 0); ++ if ((rc != pcmk_ok) || (list == NULL)) { ++ /* due to the race described above it can happen that ++ * we drop in here - so as not to make remote nodes ++ * panic on that answer ++ */ ++ crm_warn("watchdog-fencing-query failed"); ++ } else if (list[0] == '\0') { ++ crm_warn("watchdog-fencing-query returned an empty list - any node"); ++ rv = TRUE; ++ } else { ++ GList *targets = stonith__parse_targets(list); ++ rv = pcmk__str_in_list(targets, node, pcmk__str_casei); ++ g_list_free_full(targets, free); ++ } ++ free(list); ++ if (!st) { ++ /* if we're provided the api we still might have done the ++ * connection - but let's assume the caller won't bother ++ */ ++ stonith_api->cmds->disconnect(stonith_api); ++ } ++ } ++ ++ if (!st) { ++ stonith_api_delete(stonith_api); ++ } ++ } else { ++ crm_err("Stonith-API for watchdog-fencing-query couldn't be created."); ++ } ++ crm_trace("Pacemaker assumes node %s %sto do watchdog-fencing.", ++ node, rv?"":"not "); ++ return rv; ++} ++ ++gboolean ++stonith__watchdog_fencing_enabled_for_node(const char *node) ++{ ++ return stonith__watchdog_fencing_enabled_for_node_api(NULL, node); ++} ++ + static void + log_action(stonith_action_t *action, pid_t pid) + { +diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c +index 87d050ed1..bf4bceb42 100644 +--- a/lib/lrmd/lrmd_client.c ++++ b/lib/lrmd/lrmd_client.c +@@ -34,6 +34,7 @@ + #include + + #include ++#include + + #ifdef HAVE_GNUTLS_GNUTLS_H + # undef KEYFILE +@@ -934,7 +935,10 @@ lrmd__validate_remote_settings(lrmd_t *lrmd, GHashTable *hash) + crm_xml_add(data, F_LRMD_ORIGIN, __func__); + + value = g_hash_table_lookup(hash, "stonith-watchdog-timeout"); +- crm_xml_add(data, F_LRMD_WATCHDOG, value); ++ if ((value) && ++ (stonith__watchdog_fencing_enabled_for_node(native->remote_nodename))) { ++ crm_xml_add(data, F_LRMD_WATCHDOG, value); ++ } + + rc = lrmd_send_command(lrmd, LRMD_OP_CHECK, data, NULL, 0, 0, + (native->type == pcmk__client_ipc)); +diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in +index 79e78ede9..f58357a77 100644 +--- a/rpm/pacemaker.spec.in ++++ b/rpm/pacemaker.spec.in +@@ -744,6 +744,7 @@ exit 0 + %doc %{_mandir}/man8/crm_attribute.* + %doc %{_mandir}/man8/crm_master.* + %doc %{_mandir}/man8/fence_legacy.* ++%doc %{_mandir}/man8/fence_watchdog.* + %doc %{_mandir}/man8/pacemakerd.* + + %doc %{_datadir}/pacemaker/alerts +@@ -796,6 +797,7 @@ exit 0 + %{_sbindir}/crm_simulate + %{_sbindir}/crm_report + %{_sbindir}/crm_ticket ++%{_sbindir}/fence_watchdog + %{_sbindir}/stonith_admin + # "dirname" is owned by -schemas, which is a prerequisite + %{_datadir}/pacemaker/report.collector +@@ -822,6 +824,7 @@ exit 0 + %exclude %{_mandir}/man8/crm_attribute.* + %exclude %{_mandir}/man8/crm_master.* + %exclude %{_mandir}/man8/fence_legacy.* ++%exclude %{_mandir}/man8/fence_watchdog.* + %exclude %{_mandir}/man8/pacemakerd.* + %exclude %{_mandir}/man8/pacemaker-remoted.* + +-- +2.27.0 + + +From 53dd360f096e5f005e3221e8d44d82d3654b5172 Mon Sep 17 00:00:00 2001 +From: Klaus Wenninger +Date: Wed, 4 Aug 2021 15:57:23 +0200 +Subject: [PATCH 3/3] Fix: watchdog-fencing: Silence warning without node + restriction + +--- + lib/fencing/st_client.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c +index 
0ff98157b..14fa7b2a6 100644 +--- a/lib/fencing/st_client.c ++++ b/lib/fencing/st_client.c +@@ -223,7 +223,6 @@ stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node) + */ + crm_warn("watchdog-fencing-query failed"); + } else if (list[0] == '\0') { +- crm_warn("watchdog-fencing-query returned an empty list - any node"); + rv = TRUE; + } else { + GList *targets = stonith__parse_targets(list); +-- +2.27.0 + diff --git a/SOURCES/016-cts.patch b/SOURCES/016-cts.patch new file mode 100644 index 0000000..195afc3 --- /dev/null +++ b/SOURCES/016-cts.patch @@ -0,0 +1,59 @@ +From b37391fef92548f31822f9df2a9b5fa2a61b4514 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 23 Jun 2021 15:17:54 -0500 +Subject: [PATCH] Fix: CTS: handle longer Corosync token timeouts + +Previously, startall() would call cluster_stable() immediately after detecting +the "controller successfully started" message. If the Corosync token timeout is +small enough, this will be fine. However with a token timeout of more than +about 1 second, the controllers will not have formed a membership by this +point, causing cluster_stable() to think there are multiple partitions, and +wait for a DC to be elected in each one, when really they will unite into a +single partition in a short time, and only elect a single DC. + +Now, startall() waits until seeing that each node is a cluster member before +calling cluster_stable(). +--- + cts/lab/CTS.py.in | 3 ++- + cts/lab/patterns.py | 2 ++ + 2 files changed, 4 insertions(+), 1 deletion(-) + +diff --git a/cts/lab/CTS.py.in b/cts/lab/CTS.py.in +index abcb9d285..d9924437b 100644 +--- a/cts/lab/CTS.py.in ++++ b/cts/lab/CTS.py.in +@@ -628,9 +628,10 @@ class ClusterManager(UserDict): + watchpats = [ ] + watchpats.append(self.templates["Pat:DC_IDLE"]) + for node in nodelist: +- watchpats.append(self.templates["Pat:Local_started"] % node) + watchpats.append(self.templates["Pat:InfraUp"] % node) + watchpats.append(self.templates["Pat:PacemakerUp"] % node) ++ watchpats.append(self.templates["Pat:Local_started"] % node) ++ watchpats.append(self.templates["Pat:They_up"] % (nodelist[0], node)) + + # Start all the nodes - at about the same time... 
+ watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"]) +diff --git a/cts/lab/patterns.py b/cts/lab/patterns.py +index e21a016ff..400fd3dc8 100644 +--- a/cts/lab/patterns.py ++++ b/cts/lab/patterns.py +@@ -61,6 +61,7 @@ class BasePatterns(object): + "Pat:We_stopped" : "%s\W.*OVERRIDE THIS PATTERN", + "Pat:They_stopped" : "%s\W.*LOST:.* %s ", + "Pat:They_dead" : "node %s.*: is dead", ++ "Pat:They_up" : "%s %s\W.*OVERRIDE THIS PATTERN", + "Pat:TransitionComplete" : "Transition status: Complete: complete", + + "Pat:Fencing_start" : r"Requesting peer fencing .* targeting %s", +@@ -130,6 +131,7 @@ class crm_corosync(BasePatterns): + "Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines", + "Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost", + "Pat:They_dead" : "pacemaker-controld.*Node %s(\[|\s).*state is now lost", ++ "Pat:They_up" : "\W%s\W.*pacemaker-controld.*Node %s state is now member", + + "Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(", + # "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes() +-- +2.27.0 + diff --git a/SOURCES/016-feature-set.patch b/SOURCES/016-feature-set.patch deleted file mode 100644 index 7c61d82..0000000 --- a/SOURCES/016-feature-set.patch +++ /dev/null @@ -1,5133 +0,0 @@ -From a3f2fe109fd8d6a30ac8834ad513be6dba34a4b0 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 10 Dec 2020 15:57:44 -0500 -Subject: [PATCH 01/18] Test: cts: Add regression tests for crm_resource - output. - -This adds a bunch of tests for various configurations of location -constraints and colocation constraints on resources. This also updates -the regex that removes the absolute path from the cmdline request, now -that crm_resource is also using formatted output. 
---- - cts/cli/constraints.xml | 58 ++++ - cts/cli/regression.tools.exp | 782 +++++++++++++++++++++++++++++++++++++++++++ - cts/cts-cli.in | 25 +- - 3 files changed, 864 insertions(+), 1 deletion(-) - create mode 100644 cts/cli/constraints.xml - -diff --git a/cts/cli/constraints.xml b/cts/cli/constraints.xml -new file mode 100644 -index 0000000..1a27aa7 ---- /dev/null -+++ b/cts/cli/constraints.xml -@@ -0,0 +1,58 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 221730d..565cacc 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -3470,3 +3470,785 @@ Removing constraint: cli-prefer-dummy - - =#=#=#= End test: Create an XML patchset - Error occurred (1) =#=#=#= - * Passed: crm_diff - Create an XML patchset -+=#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#= -+prim1: -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim1 -+=#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#= -+prim1: -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim1 -+=#=#=#= Begin test: Check locations and constraints for prim1 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim1 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim1 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim1 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim1 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim1 in XML -+=#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#= -+prim2: -+ * Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) -+Colocations: -+ * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim2 -+=#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#= -+prim2: -+ * Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) -+Colocations: -+ * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim2 -+=#=#=#= Begin test: Check locations and constraints for prim2 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim2 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim2 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim2 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim2 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim2 in XML -+=#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#= 
-+Colocations: -+ * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY -+ * Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) -+prim3: -+ * Locations: -+Colocations: -+ * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+=#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim3 -+=#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#= -+Colocations: -+ * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY -+ * Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) -+prim3: -+ * Locations: -+Colocations: -+ * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+=#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim3 -+=#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim3 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim3 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim3 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim3 in XML -+=#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#= -+Colocations: -+ * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY -+ * Locations: -+ * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY -+ * Locations: -+prim4: -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+Colocations: -+ * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim4 -+=#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#= -+Colocations: -+ * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY -+ * Locations: -+ * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY -+ * Locations: -+prim4: -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+Colocations: -+ * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim4 -+=#=#=#= Begin test: Check locations and constraints for prim4 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim4 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim4 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim4 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim4 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim4 in XML -+=#=#=#= Begin test: Check 
locations and constraints for prim5 =#=#=#= -+Colocations: -+ * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+prim5: -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim5 -+=#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#= -+Colocations: -+ * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+prim5: -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim5 -+=#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim5 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim5 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim5 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim5 in XML -+=#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#= -+prim6: -+ * Locations: -+ * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2) -+=#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim6 -+=#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#= -+prim6: -+ * Locations: -+ * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2) -+=#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim6 -+=#=#=#= Begin test: Check locations and constraints for prim6 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim6 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim6 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim6 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim6 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim6 in XML -+=#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#= -+prim7: -+ * Locations: -+Colocations: -+ * group (score=INFINITY, id=colocation-prim7-group-INFINITY -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim7 -+=#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#= -+prim7: -+ * Locations: -+Colocations: -+ * group (score=INFINITY, id=colocation-prim7-group-INFINITY -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim7 -+=#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim7 in XML - OK (0) =#=#=#= 
-+* Passed: crm_resource - Check locations and constraints for prim7 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim7 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim7 in XML -+=#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#= -+prim8: -+ * Locations: -+Colocations: -+ * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim8 -+=#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#= -+prim8: -+ * Locations: -+Colocations: -+ * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim8 -+=#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim8 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim8 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim8 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim8 in XML -+=#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#= -+prim9: -+ * Locations: -+Colocations: -+ * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim9 -+=#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#= -+prim9: -+ * Locations: -+Colocations: -+ * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim9 -+=#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim9 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim9 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim9 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim9 in XML -+=#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#= -+prim10: -+ * Locations: -+Colocations: -+ * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+=#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim10 -+=#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#= -+prim10: -+ * Locations: -+Colocations: -+ * prim4 (score=INFINITY, 
id=colocation-prim10-prim4-INFINITY -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+=#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim10 -+=#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim10 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim10 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim10 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim10 in XML -+=#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#= -+Colocations: -+ * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY -+ * Locations: -+prim11: -+ * Locations: -+Colocations: -+ * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim11 -+=#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#= -+Colocations: -+ * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY -+ * Locations: -+prim11: -+ * Locations: -+Colocations: -+ * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim11 -+=#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim11 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim11 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim11 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim11 in XML -+=#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#= -+Colocations: -+ * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY -+ * Locations: -+prim12: -+ * Locations: -+Colocations: -+ * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim12 -+=#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#= -+Colocations: -+ * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY -+ * Locations: -+prim12: -+ * Locations: -+Colocations: -+ * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim12 -+=#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim12 in XML 
- OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim12 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim12 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim12 in XML -+=#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#= -+Colocations: -+ * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY -+ * Locations: -+prim13: -+ * Locations: -+Colocations: -+ * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY -+ * Locations: -+=#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim13 -+=#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#= -+Colocations: -+ * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY -+ * Locations: -+prim13: -+ * Locations: -+Colocations: -+ * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim13 -+=#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for prim13 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for prim13 in XML -+=#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for prim13 in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for prim13 in XML -+=#=#=#= Begin test: Check locations and constraints for group =#=#=#= -+Colocations: -+ * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY -+ * Locations: -+group: -+ * Locations: -+=#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for group -+=#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#= -+Colocations: -+ * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY -+ * Locations: -+group: -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for group -+=#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for group in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for group in XML -+=#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for group in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for group in XML -+=#=#=#= Begin test: Check locations and constraints for clone =#=#=#= -+Colocations: -+ * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY -+ * Locations: -+clone: -+ * Locations: -+=#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#= -+* Passed: 
crm_resource - Check locations and constraints for clone -+=#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#= -+Colocations: -+ * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY -+ * Locations: -+clone: -+ * Locations: -+=#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for clone -+=#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Check locations and constraints for clone in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Check locations and constraints for clone in XML -+=#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Recursively check locations and constraints for clone in XML - OK (0) =#=#=#= -+* Passed: crm_resource - Recursively check locations and constraints for clone in XML -diff --git a/cts/cts-cli.in b/cts/cts-cli.in -index 14b4ce9..dfdd3de 100755 ---- a/cts/cts-cli.in -+++ b/cts/cts-cli.in -@@ -768,6 +768,29 @@ function test_tools() { - desc="Create an XML patchset" - cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml" - test_assert $CRM_EX_ERROR 0 -+ -+ export CIB_file="$test_home/cli/constraints.xml" -+ -+ for rsc in prim1 prim2 prim3 prim4 prim5 prim6 prim7 prim8 prim9 \ -+ prim10 prim11 prim12 prim13 group clone; do -+ desc="Check locations and constraints for $rsc" -+ cmd="crm_resource -a -r $rsc" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="Recursively check locations and constraints for $rsc" -+ cmd="crm_resource -A -r $rsc" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="Check locations and constraints for $rsc in XML" -+ cmd="crm_resource -a -r $rsc --output-as=xml" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="Recursively check locations and constraints for $rsc in XML" -+ cmd="crm_resource -A -r $rsc --output-as=xml" -+ test_assert $CRM_EX_OK 0 -+ done -+ -+ unset CIB_file - } - - INVALID_PERIODS=( -@@ -1604,7 +1627,7 @@ for t in $tests; do - -e 's/last_change time=\".*\"/last_change time=\"\"/' \ - -e 's/ api-version=\".*\" / api-version=\"X\" /' \ - -e 's/ version=\".*\" / version=\"\" /' \ -- -e 's/request=\".*crm_mon/request=\"crm_mon/' \ -+ -e 's/request=\".*\(crm_[a-zA-Z0-9]*\)/request=\"\1/' \ - -e 's/crm_feature_set="[^"]*" //'\ - -e 's/validate-with="[^"]*" //'\ - -e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\ --- -1.8.3.1 - - -From c3acf54d012bd113ddc69bd6412f272879408e5d Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 7 Dec 2020 15:43:27 -0500 -Subject: [PATCH 02/18] Refactor: scheduler: Add - pe__clear_resource_flags_on_all. - -This function clears a given flag on all resources in the data set. 
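(A minimal sketch of the helper this message describes, not part of the imported patch itself; it assumes the libpe types and the existing pe__clear_resource_flags_recursive() declared in include/crm/pengine/internal.h. The authoritative change is the lib/pengine/utils.c hunk further below.)

    #include <stdint.h>                 /* uint64_t */
    #include <glib.h>                   /* GList */
    #include <crm/pengine/internal.h>   /* pe_working_set_t, pe_resource_t */

    /* Clear one flag on every resource in the working set, recursing into
     * child resources via the existing pe__clear_resource_flags_recursive(). */
    void
    pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag)
    {
        for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
            pe_resource_t *r = (pe_resource_t *) lpc->data;

            pe__clear_resource_flags_recursive(r, flag);
        }
    }
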
---- - include/crm/pengine/internal.h | 1 + - lib/pacemaker/pcmk_output.c | 26 ++++---------------------- - lib/pengine/utils.c | 9 +++++++++ - tools/crm_resource_runtime.c | 6 +----- - 4 files changed, 15 insertions(+), 27 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 89e17b8..a4f8086 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -529,6 +529,7 @@ void pe_action_set_flag_reason(const char *function, long line, pe_action_t *act - - void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags); - void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags); -+void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag); - - gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref); - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 1f5a25b..d3e93ca 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -283,7 +283,6 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - gboolean recursive = va_arg(args, gboolean); - -- GList *lpc = NULL; - xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, - data_set->input); - -@@ -292,11 +291,7 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - // Constraints apply to group/clone, not member/instance - rsc = uber_parent(rsc); - -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *r = (pe_resource_t *) lpc->data; -- -- pe__clear_resource_flags(r, pe_rsc_allocating); -- } -+ pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - - out->message(out, "colocations-list", rsc, TRUE, recursive); - -@@ -304,11 +299,7 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - out->message(out, "locations-list", rsc); - out->end_list(out); - -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *r = (pe_resource_t *) lpc->data; -- -- pe__clear_resource_flags(r, pe_rsc_allocating); -- } -+ pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - - out->message(out, "colocations-list", rsc, FALSE, recursive); - return pcmk_rc_ok; -@@ -321,7 +312,6 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); - gboolean recursive = va_arg(args, gboolean); - -- GList *lpc = NULL; - xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, - data_set->input); - -@@ -330,11 +320,7 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - // Constraints apply to group/clone, not member/instance - rsc = uber_parent(rsc); - -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *r = (pe_resource_t *) lpc->data; -- -- pe__clear_resource_flags(r, pe_rsc_allocating); -- } -+ pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - - pcmk__output_xml_create_parent(out, "constraints", NULL); - -@@ -346,11 +332,7 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - out->message(out, "locations-list", rsc); - pcmk__output_xml_pop_parent(out); - -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *r = (pe_resource_t *) lpc->data; -- -- pe__clear_resource_flags(r, pe_rsc_allocating); -- } -+ pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - - out->message(out, "colocations-list", rsc, 
FALSE, recursive); - return pcmk_rc_ok; -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index b0922fa..b07afbe 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -2010,6 +2010,15 @@ pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags) - } - - void -+pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag) -+{ -+ for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -+ pe_resource_t *r = (pe_resource_t *) lpc->data; -+ pe__clear_resource_flags_recursive(r, flag); -+ } -+} -+ -+void - pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags) - { - pe__set_resource_flags(rsc, flags); -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 3a9feac..f4500db 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -386,11 +386,7 @@ cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, - need_init = FALSE; - unpack_constraints(cib_constraints, data_set); - -- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { -- pe_resource_t *r = (pe_resource_t *) lpc->data; -- -- pe__clear_resource_flags(r, pe_rsc_allocating); -- } -+ pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - } - - crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs); --- -1.8.3.1 - - -From d7e95983ef14bdc25fdc42f51d7d7d9fb2240ec8 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 7 Dec 2020 15:54:40 -0500 -Subject: [PATCH 03/18] Refactor: libpacemaker: Add colocations_header. - -This moves code that is going to be useful in multiple places into its -own function. - -This also fixes a missing paren at the end of the header that was being -printed out, which means the test output has changed. So also update -the test output. 
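(For readability: the "header" in question is the one-line "<rsc> (score=..., id=...)" summary printed for each colocated resource, and the missing paren mentioned above is the ")" closing that summary. The sketch below mirrors the pcmk_output.c hunk that follows, assuming the score2char(), role2text() and crm_strdup_printf() helpers already used in that file.)

    static char *
    colocations_header(pe_resource_t *rsc, rsc_colocation_t *cons,
                       gboolean dependents)
    {
        char *score = score2char(cons->score);
        char *retval = NULL;

        if (cons->role_rh > RSC_ROLE_STARTED) {
            /* Include the required role when the constraint is role-specific */
            retval = crm_strdup_printf("%s (score=%s, %s role=%s, id=%s)",
                                       rsc->id, score,
                                       dependents ? "needs" : "with",
                                       role2text(cons->role_rh), cons->id);
        } else {
            /* e.g. "prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)" */
            retval = crm_strdup_printf("%s (score=%s, id=%s)",
                                       rsc->id, score, cons->id);
        }

        free(score);
        return retval;
    }
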
---- - cts/cli/regression.tools.exp | 76 ++++++++++++++++++++++---------------------- - lib/pacemaker/pcmk_output.c | 36 ++++++++++++++------- - 2 files changed, 62 insertions(+), 50 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 565cacc..0e69d0d 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -3507,7 +3507,7 @@ prim2: - * Locations: - * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) - Colocations: -- * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY -+ * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) - * Locations: - =#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim2 -@@ -3516,7 +3516,7 @@ prim2: - * Locations: - * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) - Colocations: -- * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY -+ * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) - * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim2 -@@ -3556,26 +3556,26 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim2 in XML - =#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#= - Colocations: -- * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY -+ * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) - * Locations: - * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) - prim3: - * Locations: - Colocations: -- * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY -+ * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - =#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim3 - =#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#= - Colocations: -- * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY -+ * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) - * Locations: - * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) - prim3: - * Locations: - Colocations: -- * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY -+ * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - =#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#= -@@ -3628,29 +3628,29 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim3 in XML - =#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#= - Colocations: -- * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY -+ * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) - * Locations: -- * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY -+ * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) - * Locations: - prim4: - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - Colocations: -- * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY -+ * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - * Locations: - =#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim4 - =#=#=#= Begin test: 
Recursively check locations and constraints for prim4 =#=#=#= - Colocations: -- * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY -+ * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) - * Locations: -- * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY -+ * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) - * Locations: - prim4: - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - Colocations: -- * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY -+ * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim4 -@@ -3702,7 +3702,7 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim4 in XML - =#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#= - Colocations: -- * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY -+ * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - prim5: -@@ -3711,7 +3711,7 @@ prim5: - * Passed: crm_resource - Check locations and constraints for prim5 - =#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#= - Colocations: -- * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY -+ * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - prim5: -@@ -3794,7 +3794,7 @@ prim6: - prim7: - * Locations: - Colocations: -- * group (score=INFINITY, id=colocation-prim7-group-INFINITY -+ * group (score=INFINITY, id=colocation-prim7-group-INFINITY) - * Locations: - =#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim7 -@@ -3802,7 +3802,7 @@ Colocations: - prim7: - * Locations: - Colocations: -- * group (score=INFINITY, id=colocation-prim7-group-INFINITY -+ * group (score=INFINITY, id=colocation-prim7-group-INFINITY) - * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim7 -@@ -3840,7 +3840,7 @@ Colocations: - prim8: - * Locations: - Colocations: -- * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY -+ * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) - * Locations: - =#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim8 -@@ -3848,7 +3848,7 @@ Colocations: - prim8: - * Locations: - Colocations: -- * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY -+ * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) - * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim8 -@@ -3886,7 +3886,7 @@ Colocations: - prim9: - * Locations: - Colocations: -- * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY -+ * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) - * Locations: - =#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim9 -@@ -3894,7 +3894,7 @@ Colocations: - prim9: - * Locations: - Colocations: -- * clone 
(score=INFINITY, id=colocation-prim9-clone-INFINITY -+ * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) - * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim9 -@@ -3932,7 +3932,7 @@ Colocations: - prim10: - * Locations: - Colocations: -- * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY -+ * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - =#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#= -@@ -3941,7 +3941,7 @@ Colocations: - prim10: - * Locations: - Colocations: -- * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY -+ * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - =#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#= -@@ -3982,23 +3982,23 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim10 in XML - =#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#= - Colocations: -- * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY -+ * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) - * Locations: - prim11: - * Locations: - Colocations: -- * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY -+ * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) - * Locations: - =#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim11 - =#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#= - Colocations: -- * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY -+ * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) - * Locations: - prim11: - * Locations: - Colocations: -- * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY -+ * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) - * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim11 -@@ -4042,23 +4042,23 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim11 in XML - =#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#= - Colocations: -- * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY -+ * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) - * Locations: - prim12: - * Locations: - Colocations: -- * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY -+ * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) - * Locations: - =#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim12 - =#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#= - Colocations: -- * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY -+ * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) - * Locations: - prim12: - * Locations: - Colocations: -- * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY -+ * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) - * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim12 
- OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim12 -@@ -4102,23 +4102,23 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim12 in XML - =#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#= - Colocations: -- * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY -+ * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) - * Locations: - prim13: - * Locations: - Colocations: -- * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY -+ * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) - * Locations: - =#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim13 - =#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#= - Colocations: -- * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY -+ * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) - * Locations: - prim13: - * Locations: - Colocations: -- * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY -+ * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) - * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim13 -@@ -4162,7 +4162,7 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim13 in XML - =#=#=#= Begin test: Check locations and constraints for group =#=#=#= - Colocations: -- * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY -+ * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) - * Locations: - group: - * Locations: -@@ -4170,7 +4170,7 @@ group: - * Passed: crm_resource - Check locations and constraints for group - =#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#= - Colocations: -- * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY -+ * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) - * Locations: - group: - * Locations: -@@ -4208,7 +4208,7 @@ group: - * Passed: crm_resource - Recursively check locations and constraints for group in XML - =#=#=#= Begin test: Check locations and constraints for clone =#=#=#= - Colocations: -- * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY -+ * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) - * Locations: - clone: - * Locations: -@@ -4216,7 +4216,7 @@ clone: - * Passed: crm_resource - Check locations and constraints for clone - =#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#= - Colocations: -- * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY -+ * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) - * Locations: - clone: - * Locations: -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index d3e93ca..8ff3e9d 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -46,6 +46,26 @@ pcmk__out_epilogue(pcmk__output_t *out, xmlNodePtr *xml, int retval) { - pcmk__output_free(out); - } - -+static char * -+colocations_header(pe_resource_t *rsc, rsc_colocation_t *cons, -+ gboolean dependents) { -+ char *score = NULL; -+ char *retval = NULL; -+ -+ score = score2char(cons->score); -+ if (cons->role_rh > RSC_ROLE_STARTED) { -+ retval = crm_strdup_printf("%s (score=%s, %s role=%s, id=%s)", -+ rsc->id, score, dependents ? 
"needs" : "with", -+ role2text(cons->role_rh), cons->id); -+ } else { -+ retval = crm_strdup_printf("%s (score=%s, id=%s)", -+ rsc->id, score, cons->id); -+ } -+ -+ free(score); -+ return retval; -+} -+ - PCMK__OUTPUT_ARGS("colocations-list", "pe_resource_t *", "gboolean", "gboolean") - static int colocations_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -@@ -67,9 +87,8 @@ static int colocations_list(pcmk__output_t *out, va_list args) { - pe__set_resource_flags(rsc, pe_rsc_allocating); - for (lpc = list; lpc != NULL; lpc = lpc->next) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -- -- char *score = NULL; - pe_resource_t *peer = cons->rsc_rh; -+ char *hdr = NULL; - - if (dependents) { - peer = cons->rsc_lh; -@@ -101,17 +120,10 @@ static int colocations_list(pcmk__output_t *out, va_list args) { - printed_header = true; - } - -- score = score2char(cons->score); -- if (cons->role_rh > RSC_ROLE_STARTED) { -- out->list_item(out, NULL, "%s (score=%s, %s role=%s, id=%s", -- peer->id, score, dependents ? "needs" : "with", -- role2text(cons->role_rh), cons->id); -- } else { -- out->list_item(out, NULL, "%s (score=%s, id=%s", -- peer->id, score, cons->id); -- } -+ hdr = colocations_header(peer, cons, dependents); -+ out->list_item(out, NULL, "%s", hdr); -+ free(hdr); - -- free(score); - out->message(out, "locations-list", peer); - - if (!dependents && recursive) { --- -1.8.3.1 - - -From 393342ea9a113e5453a4fd490c5f45557636902e Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 10 Dec 2020 11:31:14 -0500 -Subject: [PATCH 04/18] Refactor: libpacemaker: Add colocations_xml_node. - -This is like colocations_header, but for the XML side of things. Also, -use XML_CONS_TAG_RSC_DEPEND for the XML tag name to bring us closer in -line with the CIB. Finally, update the test output for the new code. 
---- - cts/cli/regression.tools.exp | 76 ++++++++++++++++++++++---------------------- - lib/pacemaker/pcmk_output.c | 49 +++++++++++++++++----------- - 2 files changed, 68 insertions(+), 57 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 0e69d0d..98c8f23 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -3529,7 +3529,7 @@ Colocations: - - - -- -+ - - - -@@ -3546,7 +3546,7 @@ Colocations: - - - -- -+ - - - -@@ -3584,7 +3584,7 @@ Colocations: - - - -- -+ - - - -@@ -3593,7 +3593,7 @@ Colocations: - - - -- -+ - - - -@@ -3607,7 +3607,7 @@ Colocations: - - - -- -+ - - - -@@ -3616,7 +3616,7 @@ Colocations: - - - -- -+ - - - -@@ -3658,9 +3658,9 @@ Colocations: - - - -- -+ - -- -+ - - - -@@ -3669,7 +3669,7 @@ Colocations: - - - -- -+ - - - -@@ -3681,9 +3681,9 @@ Colocations: - - - -- -+ - -- -+ - - - -@@ -3692,7 +3692,7 @@ Colocations: - - - -- -+ - - - -@@ -3722,7 +3722,7 @@ prim5: - - - -- -+ - - - -@@ -3739,7 +3739,7 @@ prim5: - - - -- -+ - - - -@@ -3813,7 +3813,7 @@ Colocations: - - - -- -+ - - - -@@ -3828,7 +3828,7 @@ Colocations: - - - -- -+ - - - -@@ -3859,7 +3859,7 @@ Colocations: - - - -- -+ - - - -@@ -3874,7 +3874,7 @@ Colocations: - - - -- -+ - - - -@@ -3905,7 +3905,7 @@ Colocations: - - - -- -+ - - - -@@ -3920,7 +3920,7 @@ Colocations: - - - -- -+ - - - -@@ -3953,7 +3953,7 @@ Colocations: - - - -- -+ - - - -@@ -3970,7 +3970,7 @@ Colocations: - - - -- -+ - - - -@@ -4006,14 +4006,14 @@ Colocations: - - - -- -+ - - - - - - -- -+ - - - -@@ -4025,14 +4025,14 @@ Colocations: - - - -- -+ - - - - - - -- -+ - - - -@@ -4066,14 +4066,14 @@ Colocations: - - - -- -+ - - - - - - -- -+ - - - -@@ -4085,14 +4085,14 @@ Colocations: - - - -- -+ - - - - - - -- -+ - - - -@@ -4126,14 +4126,14 @@ Colocations: - - - -- -+ - - - - - - -- -+ - - - -@@ -4145,14 +4145,14 @@ Colocations: - - - -- -+ - - - - - - -- -+ - - - -@@ -4180,7 +4180,7 @@ group: - - - -- -+ - - - -@@ -4195,7 +4195,7 @@ group: - - - -- -+ - - - -@@ -4226,7 +4226,7 @@ clone: - - - -- -+ - - - -@@ -4241,7 +4241,7 @@ clone: - - - -- -+ - - - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 8ff3e9d..78171d7 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -66,6 +66,35 @@ colocations_header(pe_resource_t *rsc, rsc_colocation_t *cons, - return retval; - } - -+static void -+colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc, -+ rsc_colocation_t *cons) { -+ char *score = NULL; -+ xmlNodePtr node = NULL; -+ -+ score = score2char(cons->score); -+ node = pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_DEPEND, -+ "id", cons->id, -+ "rsc", cons->rsc_lh->id, -+ "with-rsc", cons->rsc_rh->id, -+ "score", score, -+ NULL); -+ -+ if (cons->node_attribute) { -+ xmlSetProp(node, (pcmkXmlStr) "node-attribute", (pcmkXmlStr) cons->node_attribute); -+ } -+ -+ if (cons->role_lh != RSC_ROLE_UNKNOWN) { -+ xmlSetProp(node, (pcmkXmlStr) "rsc-role", (pcmkXmlStr) role2text(cons->role_lh)); -+ } -+ -+ if (cons->role_rh != RSC_ROLE_UNKNOWN) { -+ xmlSetProp(node, (pcmkXmlStr) "with-rsc-role", (pcmkXmlStr) role2text(cons->role_rh)); -+ } -+ -+ free(score); -+} -+ - PCMK__OUTPUT_ARGS("colocations-list", "pe_resource_t *", "gboolean", "gboolean") - static int colocations_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -@@ -160,7 +189,6 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - for (lpc = list; lpc != NULL; lpc = lpc->next) { - rsc_colocation_t 
*cons = (rsc_colocation_t *) lpc->data; - pe_resource_t *peer = cons->rsc_rh; -- char *score = NULL; - - if (dependents) { - peer = cons->rsc_lh; -@@ -195,24 +223,7 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - printed_header = true; - } - -- score = score2char(cons->score); -- if (cons->role_rh > RSC_ROLE_STARTED) { -- pcmk__output_create_xml_node(out, "colocation", -- "peer", peer->id, -- "id", cons->id, -- "score", score, -- "dependents", dependents ? "needs" : "with", -- "role", role2text(cons->role_rh), -- NULL); -- } else { -- pcmk__output_create_xml_node(out, "colocation", -- "peer", peer->id, -- "id", cons->id, -- "score", score, -- NULL); -- } -- -- free(score); -+ colocations_xml_node(out, peer, cons); - out->message(out, "locations-list", peer); - - if (!dependents && recursive) { --- -1.8.3.1 - - -From 1952bdb888fce9a686c60f99340ad904012ac807 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 17 Dec 2020 18:43:31 -0500 -Subject: [PATCH 05/18] Fix: libpacemaker: Don't show an empty locations list. - ---- - cts/cli/regression.tools.exp | 236 ++++++++++--------------------------------- - lib/pacemaker/pcmk_output.c | 19 ++-- - 2 files changed, 61 insertions(+), 194 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 98c8f23..f5de14d 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -3472,20 +3472,16 @@ Removing constraint: cli-prefer-dummy - * Passed: crm_diff - Create an XML patchset - =#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#= - prim1: -- * Locations: - =#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim1 - =#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#= - prim1: -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim1 - =#=#=#= Begin test: Check locations and constraints for prim1 in XML =#=#=#= - - -- -- -- -+ - - - -@@ -3494,9 +3490,7 @@ prim1: - =#=#=#= Begin test: Recursively check locations and constraints for prim1 in XML =#=#=#= - - -- -- -- -+ - - - -@@ -3508,7 +3502,6 @@ prim2: - * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) - Colocations: - * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) -- * Locations: - =#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim2 - =#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#= -@@ -3517,20 +3510,18 @@ prim2: - * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) - Colocations: - * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim2 - =#=#=#= Begin test: Check locations and constraints for prim2 in XML =#=#=#= - - - -- -+ - -- -+ - - - -- - - - -@@ -3541,13 +3532,12 @@ Colocations: - - - -- -+ - -- -+ - - - -- - - - -@@ -3560,7 +3550,6 @@ Colocations: - * Locations: - * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) - prim3: -- * Locations: - Colocations: - * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) - * Locations: -@@ -3573,7 +3562,6 @@ Colocations: - * Locations: - * Node cluster01 
(score=INFINITY, id=prim2-on-cluster1) - prim3: -- * Locations: - Colocations: - * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) - * Locations: -@@ -3585,18 +3573,16 @@ Colocations: - - - -- -+ - -- -+ - -- -- -- -+ - - -- -+ - -- -+ - - - -@@ -3608,18 +3594,16 @@ Colocations: - - - -- -+ - -- -+ - -- -- -- -+ - - -- -+ - -- -+ - - - -@@ -3629,29 +3613,23 @@ Colocations: - =#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#= - Colocations: - * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) -- * Locations: - * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) -- * Locations: - prim4: - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - Colocations: - * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) -- * Locations: - =#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim4 - =#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#= - Colocations: - * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) -- * Locations: - * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) -- * Locations: - prim4: - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - Colocations: - * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim4 - =#=#=#= Begin test: Check locations and constraints for prim4 in XML =#=#=#= -@@ -3659,18 +3637,15 @@ Colocations: - - - -- - -- - - -- -+ - -- -+ - - - -- - - - -@@ -3682,18 +3657,15 @@ Colocations: - - - -- - -- - - -- -+ - -- -+ - - - -- - - - -@@ -3706,7 +3678,6 @@ Colocations: - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - prim5: -- * Locations: - =#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim5 - =#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#= -@@ -3715,7 +3686,6 @@ Colocations: - * Locations: - * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) - prim5: -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim5 - =#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#= -@@ -3723,13 +3693,11 @@ prim5: - - - -- -+ - -- -+ - -- -- -- -+ - - - -@@ -3740,13 +3708,11 @@ prim5: - - - -- -+ - -- -+ - -- -- -- -+ - - - -@@ -3768,9 +3734,9 @@ prim6: - - - -- -+ - -- -+ - - - -@@ -3781,9 +3747,9 @@ prim6: - - - -- -+ - -- -+ - - - -@@ -3792,29 +3758,22 @@ prim6: - * Passed: crm_resource - Recursively check locations and constraints for prim6 in XML - =#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#= - prim7: -- * Locations: - Colocations: - * group (score=INFINITY, id=colocation-prim7-group-INFINITY) -- * Locations: - =#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim7 - =#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#= - prim7: -- * Locations: - Colocations: - * group (score=INFINITY, id=colocation-prim7-group-INFINITY) -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for 
prim7 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim7 - =#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#= - - -- -- -- -+ - - -- - - - -@@ -3824,12 +3783,9 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#= - - -- -- -- -+ - - -- - - - -@@ -3838,29 +3794,22 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim7 in XML - =#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#= - prim8: -- * Locations: - Colocations: - * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) -- * Locations: - =#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim8 - =#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#= - prim8: -- * Locations: - Colocations: - * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim8 - =#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#= - - -- -- -- -+ - - -- - - - -@@ -3870,12 +3819,9 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#= - - -- -- -- -+ - - -- - - - -@@ -3884,29 +3830,22 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim8 in XML - =#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#= - prim9: -- * Locations: - Colocations: - * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) -- * Locations: - =#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim9 - =#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#= - prim9: -- * Locations: - Colocations: - * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim9 - =#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#= - - -- -- -- -+ - - -- - - - -@@ -3916,12 +3855,9 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#= - - -- -- -- -+ - - -- - - - -@@ -3930,7 +3866,6 @@ Colocations: - * Passed: crm_resource - Recursively check locations and constraints for prim9 in XML - =#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#= - prim10: -- * Locations: - Colocations: - * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) - * Locations: -@@ -3939,7 +3874,6 @@ Colocations: - * Passed: crm_resource - Check locations and constraints for prim10 - =#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#= - prim10: -- * Locations: - Colocations: - * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) - * Locations: -@@ -3949,14 +3883,12 @@ Colocations: - =#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#= - - -- -- -- -+ - - -- -+ - -- -+ - - - -@@ -3966,14 +3898,12 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#= - - -- -- -- -+ - - -- -+ - -- -+ - - - -@@ -3983,23 +3913,17 @@ 
Colocations: - =#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#= - Colocations: - * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) -- * Locations: - prim11: -- * Locations: - Colocations: - * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) -- * Locations: - =#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim11 - =#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#= - Colocations: - * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) -- * Locations: - prim11: -- * Locations: - Colocations: - * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim11 - =#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#= -@@ -4007,14 +3931,10 @@ Colocations: - - - -- - -- -- -- -+ - - -- - - - -@@ -4026,14 +3946,10 @@ Colocations: - - - -- - -- -- -- -+ - - -- - - - -@@ -4043,23 +3959,17 @@ Colocations: - =#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#= - Colocations: - * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) -- * Locations: - prim12: -- * Locations: - Colocations: - * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) -- * Locations: - =#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim12 - =#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#= - Colocations: - * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) -- * Locations: - prim12: -- * Locations: - Colocations: - * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim12 - =#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#= -@@ -4067,14 +3977,10 @@ Colocations: - - - -- - -- -- -- -+ - - -- - - - -@@ -4086,14 +3992,10 @@ Colocations: - - - -- - -- -- -- -+ - - -- - - - -@@ -4103,23 +4005,17 @@ Colocations: - =#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#= - Colocations: - * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) -- * Locations: - prim13: -- * Locations: - Colocations: - * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) -- * Locations: - =#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim13 - =#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#= - Colocations: - * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) -- * Locations: - prim13: -- * Locations: - Colocations: - * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim13 - =#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#= -@@ -4127,14 +4023,10 @@ Colocations: - - - -- - -- -- -- -+ - - -- - - - -@@ -4146,14 +4038,10 @@ Colocations: - - - -- - -- -- -- -+ - - -- - - - -@@ -4163,17 
+4051,13 @@ Colocations: - =#=#=#= Begin test: Check locations and constraints for group =#=#=#= - Colocations: - * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) -- * Locations: - group: -- * Locations: - =#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for group - =#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#= - Colocations: - * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) -- * Locations: - group: -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for group - =#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#= -@@ -4181,11 +4065,8 @@ group: - - - -- - -- -- -- -+ - - - -@@ -4196,11 +4077,8 @@ group: - - - -- - -- -- -- -+ - - - -@@ -4209,17 +4087,13 @@ group: - =#=#=#= Begin test: Check locations and constraints for clone =#=#=#= - Colocations: - * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) -- * Locations: - clone: -- * Locations: - =#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for clone - =#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#= - Colocations: - * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) -- * Locations: - clone: -- * Locations: - =#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for clone - =#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#= -@@ -4227,11 +4101,8 @@ clone: - - - -- - -- -- -- -+ - - - -@@ -4242,11 +4113,8 @@ clone: - - - -- - -- -- -- -+ - - - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 78171d7..aab6876 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -244,8 +244,7 @@ static int locations_list(pcmk__output_t *out, va_list args) { - - GList *lpc = NULL; - GList *list = rsc->rsc_location; -- -- out->begin_list(out, NULL, NULL, "Locations"); -+ int rc = pcmk_rc_no_output; - - for (lpc = list; lpc != NULL; lpc = lpc->next) { - pe__location_t *cons = lpc->data; -@@ -256,15 +255,15 @@ static int locations_list(pcmk__output_t *out, va_list args) { - pe_node_t *node = (pe_node_t *) lpc2->data; - char *score = score2char(node->weight); - -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Locations"); - out->list_item(out, NULL, "Node %s (score=%s, id=%s)", - node->details->uname, score, cons->id); - free(score); - } - } - -- out->end_list(out); -- -- return pcmk_rc_ok; -+ PCMK__OUTPUT_LIST_FOOTER(out, rc); -+ return rc; - } - - PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") -@@ -273,8 +272,7 @@ static int locations_list_xml(pcmk__output_t *out, va_list args) { - - GList *lpc = NULL; - GList *list = rsc->rsc_location; -- -- pcmk__output_xml_create_parent(out, "locations", NULL); -+ int rc = pcmk_rc_no_output; - - for (lpc = list; lpc != NULL; lpc = lpc->next) { - pe__location_t *cons = lpc->data; -@@ -285,6 +283,8 @@ static int locations_list_xml(pcmk__output_t *out, va_list args) { - pe_node_t *node = (pe_node_t *) lpc2->data; - char *score = score2char(node->weight); - -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "locations"); -+ - pcmk__output_create_xml_node(out, "location", - "host", node->details->uname, - "id", 
cons->id, -@@ -294,9 +294,8 @@ static int locations_list_xml(pcmk__output_t *out, va_list args) { - } - } - -- pcmk__output_xml_pop_parent(out); -- -- return pcmk_rc_ok; -+ PCMK__OUTPUT_LIST_FOOTER(out, rc); -+ return rc; - } - - PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean") --- -1.8.3.1 - - -From e67b94edb679a62cec5ebc5a3f398c7aec3949f9 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 17 Dec 2020 14:48:22 -0500 -Subject: [PATCH 06/18] Refactor: libpacemaker: Split colocations_list into two - functions. - -The point of this is to make it easier to follow along with what these -functions do by eliminating some of the conditionals. However, the -names are confusing and probably can't be made much less confusing. - -* rscs_colocated_with_list outputs a list of all the resources that are - colocated with the given resource argument. -* rsc_is_colocated_with_list outputs a list of all the resources that - the given resource argument is colocated with. ---- - lib/pacemaker/pcmk_output.c | 190 ++++++++++++++++++++++++++++---------------- - 1 file changed, 121 insertions(+), 69 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index aab6876..dcb024c 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -95,53 +95,133 @@ colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc, - free(score); - } - --PCMK__OUTPUT_ARGS("colocations-list", "pe_resource_t *", "gboolean", "gboolean") --static int colocations_list(pcmk__output_t *out, va_list args) { -+PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "gboolean") -+static int -+rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- gboolean dependents = va_arg(args, gboolean); - gboolean recursive = va_arg(args, gboolean); - -- GList *lpc = NULL; -- GList *list = rsc->rsc_cons; - bool printed_header = false; - -- if (dependents) { -- list = rsc->rsc_cons_lhs; -- } -- - if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { - return pcmk_rc_no_output; - } - - pe__set_resource_flags(rsc, pe_rsc_allocating); -- for (lpc = list; lpc != NULL; lpc = lpc->next) { -+ for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -- pe_resource_t *peer = cons->rsc_rh; - char *hdr = NULL; - -- if (dependents) { -- peer = cons->rsc_lh; -+ if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { -+ if (!printed_header) { -+ out->begin_list(out, NULL, NULL, "Colocations"); -+ printed_header = true; -+ } -+ -+ out->list_item(out, NULL, "%s (id=%s - loop)", cons->rsc_rh->id, cons->id); -+ continue; -+ } -+ -+ if (!printed_header) { -+ out->begin_list(out, NULL, NULL, "Colocations"); -+ printed_header = true; - } - -- if (pcmk_is_set(peer->flags, pe_rsc_allocating)) { -- if (dependents == FALSE) { -- if (!printed_header) { -- out->begin_list(out, NULL, NULL, "Colocations"); -- printed_header = true; -- } -+ hdr = colocations_header(cons->rsc_rh, cons, FALSE); -+ out->list_item(out, NULL, "%s", hdr); -+ free(hdr); -+ -+ out->message(out, "locations-list", cons->rsc_rh); -+ -+ if (recursive) { -+ out->message(out, "rsc-is-colocated-with-list", rsc, recursive); -+ } -+ } - -- out->list_item(out, NULL, "%s (id=%s - loop)", peer->id, cons->id); -+ if (printed_header) { -+ out->end_list(out); -+ } -+ -+ return pcmk_rc_no_output; -+} -+ -+PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "gboolean") 
-+static int -+rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ gboolean recursive = va_arg(args, gboolean); -+ -+ bool printed_header = false; -+ -+ if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -+ return pcmk_rc_ok; -+ } -+ -+ pe__set_resource_flags(rsc, pe_rsc_allocating); -+ for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { -+ rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ -+ if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { -+ if (!printed_header) { -+ pcmk__output_xml_create_parent(out, "colocations", NULL); -+ printed_header = true; - } -+ -+ pcmk__output_create_xml_node(out, "colocation", -+ "peer", cons->rsc_rh->id, -+ "id", cons->id, -+ NULL); -+ continue; -+ } -+ -+ if (!printed_header) { -+ pcmk__output_xml_create_parent(out, "colocations", NULL); -+ printed_header = true; -+ } -+ -+ colocations_xml_node(out, cons->rsc_rh, cons); -+ out->message(out, "locations-list", cons->rsc_rh); -+ -+ if (recursive) { -+ out->message(out, "rsc-is-colocated-with-list", rsc, recursive); -+ } -+ } -+ -+ if (printed_header) { -+ pcmk__output_xml_pop_parent(out); -+ } -+ -+ return pcmk_rc_ok; -+} -+ -+PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "gboolean") -+static int -+rscs_colocated_with_list(pcmk__output_t *out, va_list args) { -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ gboolean recursive = va_arg(args, gboolean); -+ -+ bool printed_header = false; -+ -+ if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -+ return pcmk_rc_no_output; -+ } -+ -+ pe__set_resource_flags(rsc, pe_rsc_allocating); -+ for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { -+ rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ char *hdr = NULL; -+ -+ if (pcmk_is_set(cons->rsc_lh->flags, pe_rsc_allocating)) { - continue; - } - -- if (dependents && recursive) { -+ if (recursive) { - if (!printed_header) { - out->begin_list(out, NULL, NULL, "Colocations"); - printed_header = true; - } - -- out->message(out, "colocations-list", rsc, dependents, recursive); -+ out->message(out, "rscs-colocated-with-list", rsc, recursive); - } - - if (!printed_header) { -@@ -149,15 +229,11 @@ static int colocations_list(pcmk__output_t *out, va_list args) { - printed_header = true; - } - -- hdr = colocations_header(peer, cons, dependents); -+ hdr = colocations_header(cons->rsc_lh, cons, TRUE); - out->list_item(out, NULL, "%s", hdr); - free(hdr); - -- out->message(out, "locations-list", peer); -- -- if (!dependents && recursive) { -- out->message(out, "colocations-list", rsc, dependents, recursive); -- } -+ out->message(out, "locations-list", cons->rsc_lh); - } - - if (printed_header) { -@@ -167,55 +243,33 @@ static int colocations_list(pcmk__output_t *out, va_list args) { - return pcmk_rc_no_output; - } - --PCMK__OUTPUT_ARGS("colocations-list", "pe_resource_t *", "gboolean", "gboolean") --static int colocations_list_xml(pcmk__output_t *out, va_list args) { -+PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "gboolean") -+static int -+rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- gboolean dependents = va_arg(args, gboolean); - gboolean recursive = va_arg(args, gboolean); - -- GList *lpc = NULL; -- GList *list = rsc->rsc_cons; - bool printed_header = false; - -- if (dependents) { -- list = rsc->rsc_cons_lhs; -- } -- - if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { - return 
pcmk_rc_ok; - } - - pe__set_resource_flags(rsc, pe_rsc_allocating); -- for (lpc = list; lpc != NULL; lpc = lpc->next) { -+ for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -- pe_resource_t *peer = cons->rsc_rh; - -- if (dependents) { -- peer = cons->rsc_lh; -- } -- -- if (pcmk_is_set(peer->flags, pe_rsc_allocating)) { -- if (dependents == FALSE) { -- if (!printed_header) { -- pcmk__output_xml_create_parent(out, "colocations", NULL); -- printed_header = true; -- } -- -- pcmk__output_create_xml_node(out, "colocation", -- "peer", peer->id, -- "id", cons->id, -- NULL); -- } -+ if (pcmk_is_set(cons->rsc_lh->flags, pe_rsc_allocating)) { - continue; - } - -- if (dependents && recursive) { -+ if (recursive) { - if (!printed_header) { - pcmk__output_xml_create_parent(out, "colocations", NULL); - printed_header = true; - } - -- out->message(out, "colocations-list", rsc, dependents, recursive); -+ out->message(out, "rscs-colocated-with-list", rsc, recursive); - } - - if (!printed_header) { -@@ -223,12 +277,8 @@ static int colocations_list_xml(pcmk__output_t *out, va_list args) { - printed_header = true; - } - -- colocations_xml_node(out, peer, cons); -- out->message(out, "locations-list", peer); -- -- if (!dependents && recursive) { -- out->message(out, "colocations-list", rsc, dependents, recursive); -- } -+ colocations_xml_node(out, cons->rsc_lh, cons); -+ out->message(out, "locations-list", cons->rsc_lh); - } - - if (printed_header) { -@@ -315,7 +365,7 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - -- out->message(out, "colocations-list", rsc, TRUE, recursive); -+ out->message(out, "rscs-colocated-with-list", rsc, recursive); - - out->begin_list(out, NULL, NULL, "%s", rsc->id); - out->message(out, "locations-list", rsc); -@@ -323,7 +373,7 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - -- out->message(out, "colocations-list", rsc, FALSE, recursive); -+ out->message(out, "rsc-is-colocated-with-list", rsc, recursive); - return pcmk_rc_ok; - } - -@@ -346,7 +396,7 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - - pcmk__output_xml_create_parent(out, "constraints", NULL); - -- out->message(out, "colocations-list", rsc, TRUE, recursive); -+ out->message(out, "rscs-colocated-with-list", rsc, recursive); - - pcmk__output_xml_create_parent(out, "resource", - "id", rsc->id, -@@ -356,7 +406,7 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - -- out->message(out, "colocations-list", rsc, FALSE, recursive); -+ out->message(out, "rsc-is-colocated-with-list", rsc, recursive); - return pcmk_rc_ok; - } - -@@ -529,8 +579,10 @@ crmadmin_node_xml(pcmk__output_t *out, va_list args) - } - - static pcmk__message_entry_t fmt_functions[] = { -- { "colocations-list", "default", colocations_list }, -- { "colocations-list", "xml", colocations_list_xml }, -+ { "rsc-is-colocated-with-list", "default", rsc_is_colocated_with_list }, -+ { "rsc-is-colocated-with-list", "xml", rsc_is_colocated_with_list_xml }, -+ { "rscs-colocated-with-list", "default", rscs_colocated_with_list }, -+ { "rscs-colocated-with-list", "xml", rscs_colocated_with_list_xml }, - { "locations-list", "default", locations_list }, - { "locations-list", "xml", locations_list_xml }, - { "stacks-constraints", "default", 
stacks_and_constraints }, --- -1.8.3.1 - - -From 75c45fef1f292d3d6428a10a1a25a4a151d3633a Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 17 Dec 2020 14:55:35 -0500 -Subject: [PATCH 07/18] Refactor: libpacemaker: Use list macros in constraint - formatted output. - -This just gets rid of all the uses of printed_header in favor of -PCMK__OUTPUT_LIST_HEADER and PCMK__OUTPUT_LIST_FOOTER. ---- - lib/pacemaker/pcmk_output.c | 99 ++++++++++++--------------------------------- - 1 file changed, 25 insertions(+), 74 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index dcb024c..2eb3ced 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -101,10 +101,10 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gboolean recursive = va_arg(args, gboolean); - -- bool printed_header = false; -+ int rc = pcmk_rc_no_output; - - if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -- return pcmk_rc_no_output; -+ return rc; - } - - pe__set_resource_flags(rsc, pe_rsc_allocating); -@@ -112,21 +112,13 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; - char *hdr = NULL; - -- if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { -- if (!printed_header) { -- out->begin_list(out, NULL, NULL, "Colocations"); -- printed_header = true; -- } -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Colocations") - -+ if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { - out->list_item(out, NULL, "%s (id=%s - loop)", cons->rsc_rh->id, cons->id); - continue; - } - -- if (!printed_header) { -- out->begin_list(out, NULL, NULL, "Colocations"); -- printed_header = true; -- } -- - hdr = colocations_header(cons->rsc_rh, cons, FALSE); - out->list_item(out, NULL, "%s", hdr); - free(hdr); -@@ -138,11 +130,8 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { - } - } - -- if (printed_header) { -- out->end_list(out); -- } -- -- return pcmk_rc_no_output; -+ PCMK__OUTPUT_LIST_FOOTER(out, rc); -+ return rc; - } - - PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "gboolean") -@@ -151,22 +140,19 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gboolean recursive = va_arg(args, gboolean); - -- bool printed_header = false; -+ int rc = pcmk_rc_no_output; - - if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -- return pcmk_rc_ok; -+ return rc; - } - - pe__set_resource_flags(rsc, pe_rsc_allocating); - for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; - -- if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { -- if (!printed_header) { -- pcmk__output_xml_create_parent(out, "colocations", NULL); -- printed_header = true; -- } -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Colocations"); - -+ if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { - pcmk__output_create_xml_node(out, "colocation", - "peer", cons->rsc_rh->id, - "id", cons->id, -@@ -174,11 +160,6 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - continue; - } - -- if (!printed_header) { -- pcmk__output_xml_create_parent(out, "colocations", NULL); -- printed_header = true; -- } -- - colocations_xml_node(out, cons->rsc_rh, cons); - out->message(out, "locations-list", cons->rsc_rh); - -@@ -187,11 +168,8 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, 
va_list args) { - } - } - -- if (printed_header) { -- pcmk__output_xml_pop_parent(out); -- } -- -- return pcmk_rc_ok; -+ PCMK__OUTPUT_LIST_FOOTER(out, rc); -+ return rc; - } - - PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "gboolean") -@@ -200,10 +178,10 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gboolean recursive = va_arg(args, gboolean); - -- bool printed_header = false; -+ int rc = pcmk_rc_no_output; - - if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -- return pcmk_rc_no_output; -+ return rc; - } - - pe__set_resource_flags(rsc, pe_rsc_allocating); -@@ -215,20 +193,12 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - continue; - } - -- if (recursive) { -- if (!printed_header) { -- out->begin_list(out, NULL, NULL, "Colocations"); -- printed_header = true; -- } -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Colocations"); - -+ if (recursive) { - out->message(out, "rscs-colocated-with-list", rsc, recursive); - } - -- if (!printed_header) { -- out->begin_list(out, NULL, NULL, "Colocations"); -- printed_header = true; -- } -- - hdr = colocations_header(cons->rsc_lh, cons, TRUE); - out->list_item(out, NULL, "%s", hdr); - free(hdr); -@@ -236,11 +206,8 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - out->message(out, "locations-list", cons->rsc_lh); - } - -- if (printed_header) { -- out->end_list(out); -- } -- -- return pcmk_rc_no_output; -+ PCMK__OUTPUT_LIST_FOOTER(out, rc); -+ return rc; - } - - PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "gboolean") -@@ -249,10 +216,10 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gboolean recursive = va_arg(args, gboolean); - -- bool printed_header = false; -+ int rc = pcmk_rc_no_output; - - if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { -- return pcmk_rc_ok; -+ return rc; - } - - pe__set_resource_flags(rsc, pe_rsc_allocating); -@@ -263,29 +230,18 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - continue; - } - -- if (recursive) { -- if (!printed_header) { -- pcmk__output_xml_create_parent(out, "colocations", NULL); -- printed_header = true; -- } -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Colocations"); - -+ if (recursive) { - out->message(out, "rscs-colocated-with-list", rsc, recursive); - } - -- if (!printed_header) { -- pcmk__output_xml_create_parent(out, "colocations", NULL); -- printed_header = true; -- } -- - colocations_xml_node(out, cons->rsc_lh, cons); - out->message(out, "locations-list", cons->rsc_lh); - } - -- if (printed_header) { -- pcmk__output_xml_pop_parent(out); -- } -- -- return pcmk_rc_ok; -+ PCMK__OUTPUT_LIST_FOOTER(out, rc); -+ return rc; - } - - PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") -@@ -364,7 +320,6 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - rsc = uber_parent(rsc); - - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); -- - out->message(out, "rscs-colocated-with-list", rsc, recursive); - - out->begin_list(out, NULL, NULL, "%s", rsc->id); -@@ -372,7 +327,6 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - out->end_list(out); - - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); -- - out->message(out, "rsc-is-colocated-with-list", rsc, recursive); - return pcmk_rc_ok; - } -@@ -393,19 +347,16 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - rsc = uber_parent(rsc); - - 
pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); -- - pcmk__output_xml_create_parent(out, "constraints", NULL); -- - out->message(out, "rscs-colocated-with-list", rsc, recursive); - - pcmk__output_xml_create_parent(out, "resource", - "id", rsc->id, - NULL); - out->message(out, "locations-list", rsc); -- pcmk__output_xml_pop_parent(out); - -+ pcmk__output_xml_pop_parent(out); - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); -- - out->message(out, "rsc-is-colocated-with-list", rsc, recursive); - return pcmk_rc_ok; - } --- -1.8.3.1 - - -From c84ffba9b9d5dd22167215d6e93d8f51ec232d78 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 16 Dec 2020 14:26:27 -0500 -Subject: [PATCH 08/18] Fix: libpacemaker: Change the colocation list headings. - -This should make it a lot more clear what these lists are trying to tell -you. ---- - lib/pacemaker/pcmk_output.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 2eb3ced..b054848 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -112,7 +112,7 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; - char *hdr = NULL; - -- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Colocations") -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources %s is colocated with", rsc->id); - - if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { - out->list_item(out, NULL, "%s (id=%s - loop)", cons->rsc_rh->id, cons->id); -@@ -150,7 +150,7 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; - -- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Colocations"); -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "rsc-is-colocated-with"); - - if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { - pcmk__output_create_xml_node(out, "colocation", -@@ -193,7 +193,7 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - continue; - } - -- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Colocations"); -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources colocated with %s", rsc->id); - - if (recursive) { - out->message(out, "rscs-colocated-with-list", rsc, recursive); -@@ -230,7 +230,7 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - continue; - } - -- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Colocations"); -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "rscs-colocated-with"); - - if (recursive) { - out->message(out, "rscs-colocated-with-list", rsc, recursive); --- -1.8.3.1 - - -From 9570877703f98fd6e3de56df6e2648689650d81e Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 14 Dec 2020 14:01:18 -0500 -Subject: [PATCH 09/18] Fix: tools: Don't display generic lists for colocations - and constraints. - ---- - tools/crm_resource.c | 20 ++++++++++++++------ - 1 file changed, 14 insertions(+), 6 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 2afb0d6..b028c40 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1589,12 +1589,20 @@ main(int argc, char **argv) - /* Kind of a hack to display XML lists using a real tag instead of . 
This just - * saves from having to write custom messages to build the lists around all these things - */ -- if (options.rsc_cmd == cmd_list_resources || options.rsc_cmd == cmd_query_xml || -- options.rsc_cmd == cmd_query_raw_xml || options.rsc_cmd == cmd_list_active_ops || -- options.rsc_cmd == cmd_list_all_ops) { -- pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname()); -- } else { -- pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname()); -+ switch (options.rsc_cmd) { -+ case cmd_list_resources: -+ case cmd_query_xml: -+ case cmd_query_raw_xml: -+ case cmd_list_active_ops: -+ case cmd_list_all_ops: -+ case cmd_colocations: -+ case cmd_colocations_deep: -+ pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname()); -+ break; -+ -+ default: -+ pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname()); -+ break; - } - } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) { - if (options.rsc_cmd == cmd_colocations || options.rsc_cmd == cmd_colocations_deep || --- -1.8.3.1 - - -From 0d845340d5c91e177ba4321b62e6b79bb0e00b81 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 17 Dec 2020 14:56:55 -0500 -Subject: [PATCH 10/18] Fix: libpacemaker: Pass the correct resource to - recursive calls. - -Previously, we were just passing the same resource into the recursive -call, which would immediately return due to pe_rscs_allocating being set -on it. This gets us the recursive output we're expecting. ---- - lib/pacemaker/pcmk_output.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index b054848..4003b4d 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -126,7 +126,7 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { - out->message(out, "locations-list", cons->rsc_rh); - - if (recursive) { -- out->message(out, "rsc-is-colocated-with-list", rsc, recursive); -+ out->message(out, "rsc-is-colocated-with-list", cons->rsc_rh, recursive); - } - } - -@@ -164,7 +164,7 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - out->message(out, "locations-list", cons->rsc_rh); - - if (recursive) { -- out->message(out, "rsc-is-colocated-with-list", rsc, recursive); -+ out->message(out, "rsc-is-colocated-with-list", cons->rsc_rh, recursive); - } - } - -@@ -196,7 +196,7 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources colocated with %s", rsc->id); - - if (recursive) { -- out->message(out, "rscs-colocated-with-list", rsc, recursive); -+ out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive); - } - - hdr = colocations_header(cons->rsc_lh, cons, TRUE); -@@ -233,7 +233,7 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "rscs-colocated-with"); - - if (recursive) { -- out->message(out, "rscs-colocated-with-list", rsc, recursive); -+ out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive); - } - - colocations_xml_node(out, cons->rsc_lh, cons); --- -1.8.3.1 - - -From bb11c15d8ed9a2fac743d095f1caaf070cf4d148 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 16 Dec 2020 15:02:57 -0500 -Subject: [PATCH 11/18] Fix: libpacemaker: Flatten XML output for colocations. 
- -The XML nodes contain enough information to determine the structure, so -there's no need to build up a recursive tree of tags. ---- - lib/pacemaker/pcmk_output.c | 15 ++++----------- - 1 file changed, 4 insertions(+), 11 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 4003b4d..e2a4dbb 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -150,13 +150,8 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; - -- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "rsc-is-colocated-with"); -- - if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { -- pcmk__output_create_xml_node(out, "colocation", -- "peer", cons->rsc_rh->id, -- "id", cons->id, -- NULL); -+ colocations_xml_node(out, cons->rsc_rh, cons); - continue; - } - -@@ -168,7 +163,6 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - } - } - -- PCMK__OUTPUT_LIST_FOOTER(out, rc); - return rc; - } - -@@ -227,11 +221,10 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; - - if (pcmk_is_set(cons->rsc_lh->flags, pe_rsc_allocating)) { -+ colocations_xml_node(out, cons->rsc_lh, cons); - continue; - } - -- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "rscs-colocated-with"); -- - if (recursive) { - out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive); - } -@@ -240,7 +233,6 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - out->message(out, "locations-list", cons->rsc_lh); - } - -- PCMK__OUTPUT_LIST_FOOTER(out, rc); - return rc; - } - -@@ -355,9 +347,10 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - NULL); - out->message(out, "locations-list", rsc); - -- pcmk__output_xml_pop_parent(out); - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - out->message(out, "rsc-is-colocated-with-list", rsc, recursive); -+ -+ pcmk__output_xml_pop_parent(out); - return pcmk_rc_ok; - } - --- -1.8.3.1 - - -From 78563138db4214db7ccd49da0ba11d880de36267 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 16 Dec 2020 15:04:36 -0500 -Subject: [PATCH 12/18] Fix: libpacemaker: Correct loop detection in - rscs_colocated_with_list. - -If we hit a loop, print something out and continue instead of just -continuing silently. 
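Before the diff, a short illustration of the traversal idiom involved (a sketch with a made-up function name, assuming the same pacemaker-internal headers as pcmk_output.c): each visited resource is marked with pe_rsc_allocating, and a peer that is already marked means the colocation chain has looped back, so the entry is reported explicitly instead of being dropped.

/* Sketch only, not the patch: report loops instead of skipping them */
static void
list_dependents_once(pcmk__output_t *out, pe_resource_t *rsc)
{
    pe__set_resource_flags(rsc, pe_rsc_allocating);     // mark as visited

    for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
        rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;

        if (pcmk_is_set(cons->rsc_lh->flags, pe_rsc_allocating)) {
            /* Already visited: this is a colocation loop, so say so */
            out->list_item(out, NULL, "%s (id=%s - loop)",
                           cons->rsc_lh->id, cons->id);
            continue;
        }
        out->list_item(out, NULL, "%s (id=%s)", cons->rsc_lh->id, cons->id);
    }
}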
---- - lib/pacemaker/pcmk_output.c | 7 ++++--- - 1 file changed, 4 insertions(+), 3 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index e2a4dbb..9930a5f 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -170,7 +170,7 @@ PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "gboolean") - static int - rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- gboolean recursive = va_arg(args, gboolean); -+ gboolean recursive G_GNUC_UNUSED = va_arg(args, gboolean); - - int rc = pcmk_rc_no_output; - -@@ -183,12 +183,13 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; - char *hdr = NULL; - -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources colocated with %s", rsc->id); -+ - if (pcmk_is_set(cons->rsc_lh->flags, pe_rsc_allocating)) { -+ out->list_item(out, NULL, "%s (id=%s - loop)", cons->rsc_lh->id, cons->id); - continue; - } - -- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources colocated with %s", rsc->id); -- - if (recursive) { - out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive); - } --- -1.8.3.1 - - -From 12f26406247950cd857e498785be860225027629 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 15 Dec 2020 16:23:19 -0500 -Subject: [PATCH 13/18] Fix: libpacemaker: Fix printing out location - constraints. - -In the text output, put the location constraints inside empty lists. -This will indent it under the previous line of text, making it more -apparent what the location information applies to. - -In both text and XML output, remove the print out of the initial -resource. There's plenty of context for what is happening already. We -don't need an extra level. ---- - lib/pacemaker/pcmk_output.c | 35 +++++++++++++++++++---------------- - 1 file changed, 19 insertions(+), 16 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 9930a5f..07f7475 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -123,11 +123,15 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { - out->list_item(out, NULL, "%s", hdr); - free(hdr); - -- out->message(out, "locations-list", cons->rsc_rh); -+ /* Empty list header just for indentation of information about this resource. */ -+ out->begin_list(out, NULL, NULL, NULL); - -+ out->message(out, "locations-list", cons->rsc_rh); - if (recursive) { - out->message(out, "rsc-is-colocated-with-list", cons->rsc_rh, recursive); - } -+ -+ out->end_list(out); - } - - PCMK__OUTPUT_LIST_FOOTER(out, rc); -@@ -170,7 +174,7 @@ PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "gboolean") - static int - rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- gboolean recursive G_GNUC_UNUSED = va_arg(args, gboolean); -+ gboolean recursive = va_arg(args, gboolean); - - int rc = pcmk_rc_no_output; - -@@ -190,15 +194,19 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - continue; - } - -- if (recursive) { -- out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive); -- } -- - hdr = colocations_header(cons->rsc_lh, cons, TRUE); - out->list_item(out, NULL, "%s", hdr); - free(hdr); - -+ /* Empty list header just for indentation of information about this resource. 
*/ -+ out->begin_list(out, NULL, NULL, NULL); -+ - out->message(out, "locations-list", cons->rsc_lh); -+ if (recursive) { -+ out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive); -+ } -+ -+ out->end_list(out); - } - - PCMK__OUTPUT_LIST_FOOTER(out, rc); -@@ -312,13 +320,11 @@ stacks_and_constraints(pcmk__output_t *out, va_list args) { - // Constraints apply to group/clone, not member/instance - rsc = uber_parent(rsc); - -+ out->message(out, "locations-list", rsc); -+ - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - out->message(out, "rscs-colocated-with-list", rsc, recursive); - -- out->begin_list(out, NULL, NULL, "%s", rsc->id); -- out->message(out, "locations-list", rsc); -- out->end_list(out); -- - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - out->message(out, "rsc-is-colocated-with-list", rsc, recursive); - return pcmk_rc_ok; -@@ -339,16 +345,13 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - // Constraints apply to group/clone, not member/instance - rsc = uber_parent(rsc); - -- pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - pcmk__output_xml_create_parent(out, "constraints", NULL); -- out->message(out, "rscs-colocated-with-list", rsc, recursive); -- -- pcmk__output_xml_create_parent(out, "resource", -- "id", rsc->id, -- NULL); - out->message(out, "locations-list", rsc); - - pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); -+ out->message(out, "rscs-colocated-with-list", rsc, recursive); -+ -+ pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); - out->message(out, "rsc-is-colocated-with-list", rsc, recursive); - - pcmk__output_xml_pop_parent(out); --- -1.8.3.1 - - -From 8b49f68aa4c881021a50fddb25e4d657294d8142 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 16 Dec 2020 15:21:42 -0500 -Subject: [PATCH 14/18] Fix: libpacemaker: Various small fixes to - location-list. - -* Output a "node" attribute instead of a "host" attribute, bringing - crm_resource closer in line to what's used in the CIB. - -* Correct indentation on the function prototypes. - -* Add a "rsc" attribute. This was previously absent (and unnecessary) - because the location and colocation constraint output was structured - in such a way that you could figure out the resource based on the - context. Now that we are just flattening the XML output, this is no - longer the case. We need the attribute to make sense of it. - -* Also add "rsc" to the text output. It's not strictly necessary, but - it does make things look a little clearer. 
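As a quick illustration of the attribute changes described above (a sketch with a made-up helper name, assuming the same internal headers as pcmk_output.c), one flattened location entry now names both the node and the resource it applies to:

/* Sketch only: one location entry after this change */
static void
emit_one_location(pcmk__output_t *out, pe_resource_t *rsc,
                  pe__location_t *cons, pe_node_t *node)
{
    char *score = score2char(node->weight);     // caller must free

    pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_LOCATION,
                                 "node", node->details->uname,   // was "host"
                                 "rsc", rsc->id,                 // newly added
                                 "id", cons->id,
                                 "score", score,
                                 NULL);
    free(score);
}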
---- - lib/pacemaker/pcmk_output.c | 15 +++++++++------ - 1 file changed, 9 insertions(+), 6 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 07f7475..63a6f25 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -246,7 +246,8 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") --static int locations_list(pcmk__output_t *out, va_list args) { -+static int -+locations_list(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - - GList *lpc = NULL; -@@ -263,8 +264,8 @@ static int locations_list(pcmk__output_t *out, va_list args) { - char *score = score2char(node->weight); - - PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Locations"); -- out->list_item(out, NULL, "Node %s (score=%s, id=%s)", -- node->details->uname, score, cons->id); -+ out->list_item(out, NULL, "Node %s (score=%s, id=%s, rsc=%s)", -+ node->details->uname, score, cons->id, rsc->id); - free(score); - } - } -@@ -274,7 +275,8 @@ static int locations_list(pcmk__output_t *out, va_list args) { - } - - PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") --static int locations_list_xml(pcmk__output_t *out, va_list args) { -+static int -+locations_list_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - - GList *lpc = NULL; -@@ -292,8 +294,9 @@ static int locations_list_xml(pcmk__output_t *out, va_list args) { - - PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "locations"); - -- pcmk__output_create_xml_node(out, "location", -- "host", node->details->uname, -+ pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_LOCATION, -+ "node", node->details->uname, -+ "rsc", rsc->id, - "id", cons->id, - "score", score, - NULL); --- -1.8.3.1 - - -From 0b2a8b80ec614e7c8c7e31dd49af1b0dcdc7fbcb Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 17 Dec 2020 15:02:42 -0500 -Subject: [PATCH 15/18] Fix: libpacemaker: Also flatten XML location constraint - output. - -There's enough information in the constraint output to put these into -the same single flat list as all the colocation constraints. 
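A usage sketch of the add_header pattern this patch introduces (illustrative only; the two wrapper names here are made up, while do_locations_list_xml(), pcmk__output_xml_create_parent() and pcmk__output_xml_pop_parent() are the calls used in the hunks below): the same worker serves both the standalone locations-list message and the flattened constraints output, differing only in whether it opens its own list.

/* Sketch only: two callers of the shared worker */
static int
locations_message_xml(pcmk__output_t *out, pe_resource_t *rsc)
{
    /* Standalone message: the worker opens/closes its own "locations" list */
    return do_locations_list_xml(out, rsc, true);
}

static void
constraints_tree_xml(pcmk__output_t *out, pe_resource_t *rsc)
{
    /* Flattened view: entries land directly under <constraints> */
    pcmk__output_xml_create_parent(out, "constraints", NULL);
    do_locations_list_xml(out, rsc, false);
    pcmk__output_xml_pop_parent(out);
}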
---- - lib/pacemaker/pcmk_output.c | 76 +++++++++++++++++++++++++-------------------- - 1 file changed, 43 insertions(+), 33 deletions(-) - -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 63a6f25..0d20a54 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -95,6 +95,43 @@ colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc, - free(score); - } - -+static int -+do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header) -+{ -+ GList *lpc = NULL; -+ GList *list = rsc->rsc_location; -+ int rc = pcmk_rc_no_output; -+ -+ for (lpc = list; lpc != NULL; lpc = lpc->next) { -+ pe__location_t *cons = lpc->data; -+ -+ GList *lpc2 = NULL; -+ -+ for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { -+ pe_node_t *node = (pe_node_t *) lpc2->data; -+ char *score = score2char(node->weight); -+ -+ if (add_header) { -+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "locations"); -+ } -+ -+ pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_LOCATION, -+ "node", node->details->uname, -+ "rsc", rsc->id, -+ "id", cons->id, -+ "score", score, -+ NULL); -+ free(score); -+ } -+ } -+ -+ if (add_header) { -+ PCMK__OUTPUT_LIST_FOOTER(out, rc); -+ } -+ -+ return rc; -+} -+ - PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "gboolean") - static int - rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { -@@ -160,7 +197,7 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - } - - colocations_xml_node(out, cons->rsc_rh, cons); -- out->message(out, "locations-list", cons->rsc_rh); -+ do_locations_list_xml(out, cons->rsc_rh, false); - - if (recursive) { - out->message(out, "rsc-is-colocated-with-list", cons->rsc_rh, recursive); -@@ -234,12 +271,12 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - continue; - } - -+ colocations_xml_node(out, cons->rsc_lh, cons); -+ do_locations_list_xml(out, cons->rsc_lh, false); -+ - if (recursive) { - out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive); - } -- -- colocations_xml_node(out, cons->rsc_lh, cons); -- out->message(out, "locations-list", cons->rsc_lh); - } - - return rc; -@@ -278,34 +315,7 @@ PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") - static int - locations_list_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); -- -- GList *lpc = NULL; -- GList *list = rsc->rsc_location; -- int rc = pcmk_rc_no_output; -- -- for (lpc = list; lpc != NULL; lpc = lpc->next) { -- pe__location_t *cons = lpc->data; -- -- GList *lpc2 = NULL; -- -- for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { -- pe_node_t *node = (pe_node_t *) lpc2->data; -- char *score = score2char(node->weight); -- -- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "locations"); -- -- pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_LOCATION, -- "node", node->details->uname, -- "rsc", rsc->id, -- "id", cons->id, -- "score", score, -- NULL); -- free(score); -- } -- } -- -- PCMK__OUTPUT_LIST_FOOTER(out, rc); -- return rc; -+ return do_locations_list_xml(out, rsc, true); - } - - PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean") -@@ -349,7 +359,7 @@ stacks_and_constraints_xml(pcmk__output_t *out, va_list args) { - rsc = uber_parent(rsc); - - pcmk__output_xml_create_parent(out, "constraints", NULL); -- out->message(out, "locations-list", rsc); -+ do_locations_list_xml(out, rsc, false); - - pe__clear_resource_flags_on_all(data_set, 
pe_rsc_allocating); - out->message(out, "rscs-colocated-with-list", rsc, recursive); --- -1.8.3.1 - - -From e5c98e481273d1bd365b8094df1a00d70e776504 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 17 Dec 2020 18:51:10 -0500 -Subject: [PATCH 16/18] Test: cts: Update expected crm_resource test output. - ---- - cts/cli/regression.tools.exp | 477 +++++++++++++++++-------------------------- - 1 file changed, 188 insertions(+), 289 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index f5de14d..a85b7d6 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -2005,9 +2005,8 @@ resource dummy is running on: node1 - =#=#=#= End test: Show where a resource is running - OK (0) =#=#=#= - * Passed: crm_resource - Show where a resource is running - =#=#=#= Begin test: Show constraints on a resource =#=#=#= --dummy: -- * Locations: -- * Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1) -+Locations: -+ * Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy) - =#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#= - * Passed: crm_resource - Show constraints on a resource - =#=#=#= Begin test: Ban dummy from node2 =#=#=#= -@@ -3471,58 +3470,50 @@ Removing constraint: cli-prefer-dummy - =#=#=#= End test: Create an XML patchset - Error occurred (1) =#=#=#= - * Passed: crm_diff - Create an XML patchset - =#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#= --prim1: - =#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim1 - =#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#= --prim1: - =#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim1 - =#=#=#= Begin test: Check locations and constraints for prim1 in XML =#=#=#= - -- -- -- -+ - - - =#=#=#= End test: Check locations and constraints for prim1 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim1 in XML - =#=#=#= Begin test: Recursively check locations and constraints for prim1 in XML =#=#=#= - -- -- -- -+ - - - =#=#=#= End test: Recursively check locations and constraints for prim1 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim1 in XML - =#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#= --prim2: -- * Locations: -- * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) --Colocations: -+Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) -+Resources prim2 is colocated with: - * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) - =#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim2 - =#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#= --prim2: -- * Locations: -- * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) --Colocations: -+Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) -+Resources prim2 is colocated with: - * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) -+ * Resources prim3 is colocated with: -+ * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) -+ * Resources prim4 is colocated 
with: -+ * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - =#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim2 - =#=#=#= Begin test: Check locations and constraints for prim2 in XML =#=#=#= - - -- -- -- -- -- -- -- -- -+ -+ - - - -@@ -3531,59 +3522,47 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim2 in XML =#=#=#= - - -- -- -- -- -- -- -- -- -+ -+ -+ -+ -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim2 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim2 in XML - =#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#= --Colocations: -+Resources colocated with prim3: - * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) -- * Locations: -- * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) --prim3: --Colocations: -+ * Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) -+Resources prim3 is colocated with: - * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) -- * Locations: -- * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) - =#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim3 - =#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#= --Colocations: -+Resources colocated with prim3: - * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) -- * Locations: -- * Node cluster01 (score=INFINITY, id=prim2-on-cluster1) --prim3: --Colocations: -+ * Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) -+Resources prim3 is colocated with: - * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) -- * Locations: -- * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) -+ * Resources prim4 is colocated with: -+ * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - =#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim3 - =#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#= - - -- -- -- -- -- -- -- -- -- -- -- -- -- -+ -+ -+ -+ - - - -@@ -3592,61 +3571,47 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#= - - -- -- -- -- -- -- -- -- -- -- -- -- -- -+ -+ -+ -+ -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim3 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim3 in XML - =#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#= --Colocations: -+Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) -+Resources colocated with prim4: - * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) - * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) --prim4: -- * Locations: -- * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) --Colocations: -+Resources prim4 is colocated with: - * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - =#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#= - * 
Passed: crm_resource - Check locations and constraints for prim4 - =#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#= --Colocations: -+Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) -+Resources colocated with prim4: - * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) - * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) --prim4: -- * Locations: -- * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) --Colocations: -+ * Resources colocated with prim3: -+ * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) -+ * Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) -+Resources prim4 is colocated with: - * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - =#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim4 - =#=#=#= Begin test: Check locations and constraints for prim4 in XML =#=#=#= - - -- -- -- -- -- -- -- -- -- -- -- -- -+ -+ -+ -+ - - - -@@ -3655,49 +3620,43 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim4 in XML =#=#=#= - - -- -- -- -- -- -- -- -- -- -- -- -- -+ -+ -+ -+ -+ -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim4 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim4 in XML - =#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#= --Colocations: -+Resources colocated with prim5: - * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) -- * Locations: -- * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) --prim5: -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) - =#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim5 - =#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#= --Colocations: -+Resources colocated with prim5: - * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) -- * Locations: -- * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) --prim5: -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) -+ * Resources colocated with prim4: -+ * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) -+ * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) -+ * Resources colocated with prim3: -+ * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) -+ * Locations: -+ * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) - =#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim5 - =#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ - - - -@@ -3706,38 +3665,31 @@ prim5: - =#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ -+ -+ -+ -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim5 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim5 in XML - =#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#= --prim6: -- * Locations: -- * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2) -+Locations: -+ * Node cluster02 
(score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) - =#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim6 - =#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#= --prim6: -- * Locations: -- * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2) -+Locations: -+ * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) - =#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim6 - =#=#=#= Begin test: Check locations and constraints for prim6 in XML =#=#=#= - - -- -- -- -- -- -+ - - - -@@ -3746,35 +3698,26 @@ prim6: - =#=#=#= Begin test: Recursively check locations and constraints for prim6 in XML =#=#=#= - - -- -- -- -- -- -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim6 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim6 in XML - =#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#= --prim7: --Colocations: -+Resources prim7 is colocated with: - * group (score=INFINITY, id=colocation-prim7-group-INFINITY) - =#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim7 - =#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#= --prim7: --Colocations: -+Resources prim7 is colocated with: - * group (score=INFINITY, id=colocation-prim7-group-INFINITY) - =#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim7 - =#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#= - - -- -- -- -- -+ - - - -@@ -3783,34 +3726,26 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#= - - -- -- -- -- -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim7 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim7 in XML - =#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#= --prim8: --Colocations: -+Resources prim8 is colocated with: - * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) - =#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim8 - =#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#= --prim8: --Colocations: -+Resources prim8 is colocated with: - * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) - =#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim8 - =#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#= - - -- -- -- -- -+ - - - -@@ -3819,34 +3754,26 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#= - - -- -- -- -- -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim8 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim8 in XML - =#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#= --prim9: --Colocations: -+Resources prim9 is colocated with: - * clone 
(score=INFINITY, id=colocation-prim9-clone-INFINITY) - =#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim9 - =#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#= --prim9: --Colocations: -+Resources prim9 is colocated with: - * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) - =#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim9 - =#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#= - - -- -- -- -- -+ - - - -@@ -3855,41 +3782,33 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#= - - -- -- -- -- -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim9 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim9 in XML - =#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#= --prim10: --Colocations: -+Resources prim10 is colocated with: - * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) -- * Locations: -- * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) - =#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim10 - =#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#= --prim10: --Colocations: -+Resources prim10 is colocated with: - * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) -- * Locations: -- * Node cluster02 (score=INFINITY, id=prim4-on-cluster2) -+ * Locations: -+ * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) -+ * Resources prim4 is colocated with: -+ * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) - =#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim10 - =#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ - - - -@@ -3898,44 +3817,41 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim10 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim10 in XML - =#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#= --Colocations: -+Resources colocated with prim11: - * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) --prim11: --Colocations: -+Resources prim11 is colocated with: - * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) - =#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim11 - =#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#= --Colocations: -+Resources colocated with prim11: - * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) --prim11: --Colocations: -+ * Resources colocated with prim13: -+ * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) -+ * Resources colocated with prim12: -+ * prim11 (id=colocation-prim11-prim12-INFINITY - 
loop) -+Resources prim11 is colocated with: - * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) -+ * Resources prim12 is colocated with: -+ * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) -+ * Resources prim13 is colocated with: -+ * prim11 (id=colocation-prim13-prim11-INFINITY - loop) - =#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim11 - =#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ - - - -@@ -3944,44 +3860,44 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ -+ -+ -+ -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim11 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim11 in XML - =#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#= --Colocations: -+Resources colocated with prim12: - * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) --prim12: --Colocations: -+Resources prim12 is colocated with: - * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) - =#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim12 - =#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#= --Colocations: -+Resources colocated with prim12: - * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) --prim12: --Colocations: -+ * Resources colocated with prim11: -+ * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) -+ * Resources colocated with prim13: -+ * prim12 (id=colocation-prim12-prim13-INFINITY - loop) -+Resources prim12 is colocated with: - * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) -+ * Resources prim13 is colocated with: -+ * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) -+ * Resources prim11 is colocated with: -+ * prim12 (id=colocation-prim11-prim12-INFINITY - loop) - =#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim12 - =#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ - - - -@@ -3990,44 +3906,44 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ -+ -+ -+ -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim12 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim12 in XML - =#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#= --Colocations: -+Resources colocated with prim13: - * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) --prim13: --Colocations: -+Resources prim13 is colocated with: - * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) - =#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for prim13 - =#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#= --Colocations: -+Resources colocated with prim13: - * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) --prim13: --Colocations: -+ * Resources 
colocated with prim12: -+ * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) -+ * Resources colocated with prim11: -+ * prim13 (id=colocation-prim13-prim11-INFINITY - loop) -+Resources prim13 is colocated with: - * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) -+ * Resources prim11 is colocated with: -+ * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) -+ * Resources prim12 is colocated with: -+ * prim13 (id=colocation-prim12-prim13-INFINITY - loop) - =#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim13 - =#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ - - - -@@ -4036,37 +3952,31 @@ Colocations: - =#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#= - - -- -- -- -- -- -- -- -+ -+ -+ -+ -+ -+ - - - - =#=#=#= End test: Recursively check locations and constraints for prim13 in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for prim13 in XML - =#=#=#= Begin test: Check locations and constraints for group =#=#=#= --Colocations: -+Resources colocated with group: - * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) --group: - =#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for group - =#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#= --Colocations: -+Resources colocated with group: - * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) --group: - =#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for group - =#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#= - - -- -- -- -- -+ - - - -@@ -4075,34 +3985,26 @@ group: - =#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#= - - -- -- -- -- -+ - - - - =#=#=#= End test: Recursively check locations and constraints for group in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for group in XML - =#=#=#= Begin test: Check locations and constraints for clone =#=#=#= --Colocations: -+Resources colocated with clone: - * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) --clone: - =#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#= - * Passed: crm_resource - Check locations and constraints for clone - =#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#= --Colocations: -+Resources colocated with clone: - * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) --clone: - =#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for clone - =#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#= - - -- -- -- -- -+ - - - -@@ -4111,10 +4013,7 @@ clone: - =#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#= - - -- -- -- -- -+ - - - --- -1.8.3.1 - - -From 99078013039810d828ff629af6431765979db89b Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 17 Dec 2020 17:38:22 -0500 -Subject: [PATCH 17/18] Fix: xml: Clone crm_resource schema in preparation for - changes.. 
- ---- - xml/api/crm_resource-2.5.rng | 255 +++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 255 insertions(+) - create mode 100644 xml/api/crm_resource-2.5.rng - -diff --git a/xml/api/crm_resource-2.5.rng b/xml/api/crm_resource-2.5.rng -new file mode 100644 -index 0000000..1bcb969 ---- /dev/null -+++ b/xml/api/crm_resource-2.5.rng -@@ -0,0 +1,255 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ promoted -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ ocf -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ true -+ false -+ -+ -+ -+ true -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ needs -+ with -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - - -From 5aadeaf78dbbb5a3c43f96891baa5acf234f7cef Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 17 Dec 2020 18:07:46 -0500 -Subject: [PATCH 18/18] Fix: xml: Update XML schema for crm_resource changes. - -The previous colocations-list and locations-list elements and everything -under them has been removed. The elements that would go into these -lists are now in a flat constraints-list element. Those elements have -further been modified to be brought in line with what crm_resource now -outputs. ---- - xml/api/crm_resource-2.5.rng | 101 ++++++++++++++++++------------------------- - 1 file changed, 42 insertions(+), 59 deletions(-) - -diff --git a/xml/api/crm_resource-2.5.rng b/xml/api/crm_resource-2.5.rng -index 1bcb969..b49e24c 100644 ---- a/xml/api/crm_resource-2.5.rng -+++ b/xml/api/crm_resource-2.5.rng -@@ -44,26 +44,16 @@ - - - -- -- -- -- -- -- -- -- - - -- -- -- -- -- -- -- -- -- -- -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -81,15 +71,12 @@ - - - -- -- -- -- -- -- -- -- -- -+ -+ -+ -+ -+ -+ - - - -@@ -198,39 +185,26 @@ - - - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- needs -- with -- -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - -- -- -- -- -- -- -- -- -- -- -- -- -+ -+ -+ -+ -+ -+ -+ - - - -@@ -252,4 +226,13 @@ - - - -+ -+ -+ -+ Stopped -+ Started -+ Master -+ Slave -+ -+ - --- -1.8.3.1 - diff --git a/SOURCES/017-feature-set.patch b/SOURCES/017-feature-set.patch deleted file mode 100644 index cd09a42..0000000 --- a/SOURCES/017-feature-set.patch +++ /dev/null @@ -1,26 +0,0 @@ -From a66f63e1001c2c93e286978c0383b4256abfce6b Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 4 Jan 2021 16:46:52 -0500 -Subject: [PATCH] Test: cts: Distribute the new constraints.xml file. 
- ---- - cts/Makefile.am | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/cts/Makefile.am b/cts/Makefile.am -index 6abb42f..5666a9f 100644 ---- a/cts/Makefile.am -+++ b/cts/Makefile.am -@@ -60,7 +60,8 @@ cts_SCRIPTS = CTSlab.py \ - pacemaker-cts-dummyd - - clidir = $(testdir)/cli --dist_cli_DATA = cli/crm_diff_new.xml \ -+dist_cli_DATA = cli/constraints.xml \ -+ cli/crm_diff_new.xml \ - cli/crm_diff_old.xml \ - cli/crm_mon.xml \ - cli/crm_mon-partial.xml \ --- -1.8.3.1 - diff --git a/SOURCES/017-watchdog-fixes.patch b/SOURCES/017-watchdog-fixes.patch new file mode 100644 index 0000000..d3df876 --- /dev/null +++ b/SOURCES/017-watchdog-fixes.patch @@ -0,0 +1,58 @@ +From 61eb9c240004d1dbd0b5973e2fecda3686bb4c53 Mon Sep 17 00:00:00 2001 +From: Klaus Wenninger +Date: Tue, 10 Aug 2021 09:06:55 +0200 +Subject: [PATCH 1/2] Build: rpm: package fence_watchdog in base-package + +--- + rpm/pacemaker.spec.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in +index f58357a77..0c569b9ca 100644 +--- a/rpm/pacemaker.spec.in ++++ b/rpm/pacemaker.spec.in +@@ -734,6 +734,7 @@ exit 0 + %{_sbindir}/crm_attribute + %{_sbindir}/crm_master + %{_sbindir}/fence_legacy ++%{_sbindir}/fence_watchdog + + %doc %{_mandir}/man7/pacemaker-controld.* + %doc %{_mandir}/man7/pacemaker-schedulerd.* +@@ -797,7 +798,6 @@ exit 0 + %{_sbindir}/crm_simulate + %{_sbindir}/crm_report + %{_sbindir}/crm_ticket +-%{_sbindir}/fence_watchdog + %{_sbindir}/stonith_admin + # "dirname" is owned by -schemas, which is a prerequisite + %{_datadir}/pacemaker/report.collector +-- +2.27.0 + + +From 88e75d5b98df197fa731e7642434951a24a67095 Mon Sep 17 00:00:00 2001 +From: Klaus Wenninger +Date: Tue, 10 Aug 2021 09:10:23 +0200 +Subject: [PATCH 2/2] Fix: fence_watchdog: fix version output needed for + help2man + +--- + daemons/fenced/fence_watchdog.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/daemons/fenced/fence_watchdog.in b/daemons/fenced/fence_watchdog.in +index c83304f1d..700065e0e 100755 +--- a/daemons/fenced/fence_watchdog.in ++++ b/daemons/fenced/fence_watchdog.in +@@ -12,6 +12,7 @@ import sys + import atexit + import getopt + ++AGENT_VERSION = "1.0.0" + SHORT_DESC = "Dummy watchdog fence agent" + LONG_DESC = """fence_watchdog just provides + meta-data - actual fencing is done by the pacemaker internal watchdog agent.""" +-- +2.27.0 + diff --git a/SOURCES/018-controller.patch b/SOURCES/018-controller.patch new file mode 100644 index 0000000..a2094e3 --- /dev/null +++ b/SOURCES/018-controller.patch @@ -0,0 +1,122 @@ +From ee7eba6a7a05bdf0a12d60ebabb334d8ee021101 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 9 Aug 2021 14:48:57 -0500 +Subject: [PATCH] Fix: controller: ensure lost node's transient attributes are + cleared without DC + +Previously, peer_update_callback() cleared a lost node's transient attributes +if either the local node is DC, or there is no DC. + +However, that left the possibility of the DC being lost at the same time as +another node -- the local node would still have fsa_our_dc set while processing +the leave notifications, so no node would clear the attributes for the non-DC +node. + +Now, the controller has its own CPG configuration change callback, which sets a +global boolean before calling the usual one, so that peer_update_callback() can +know when the DC has been lost. 
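Condensed, the new flow looks like this (identifiers are the ones used in the hunks below; the surrounding code is elided):

    /* controld_corosync.c: new CPG configuration-change callback, run
     * before the usual membership processing */
    if (fsa_our_dc != NULL) {
        crm_node_t *peer = pcmk__search_cluster_node_cache(0, fsa_our_dc);

        if (peer != NULL) {
            for (int i = 0; i < left_list_entries; ++i) {
                if (left_list[i].nodeid == peer->id) {
                    controld_dc_left = true;  /* DC is among the leavers */
                    break;
                }
            }
        }
    }
    /* ... pcmk_cpg_membership() then delivers the change, and
     * peer_update_callback() in controld_callbacks.c clears a lost node's
     * transient attributes when
     *     AM_I_DC || controld_dc_left || (fsa_our_dc == NULL)
     * after which the callback resets controld_dc_left to false. */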
+--- + daemons/controld/controld_callbacks.c | 4 +- + daemons/controld/controld_corosync.c | 57 ++++++++++++++++++++++++++- + 2 files changed, 59 insertions(+), 2 deletions(-) + +diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c +index af24856ae..e564b3dcd 100644 +--- a/daemons/controld/controld_callbacks.c ++++ b/daemons/controld/controld_callbacks.c +@@ -99,6 +99,8 @@ node_alive(const crm_node_t *node) + + #define state_text(state) ((state)? (const char *)(state) : "in unknown state") + ++bool controld_dc_left = false; ++ + void + peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data) + { +@@ -217,7 +219,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d + cib_scope_local); + } + +- } else if (AM_I_DC || (fsa_our_dc == NULL)) { ++ } else if (AM_I_DC || controld_dc_left || (fsa_our_dc == NULL)) { + /* This only needs to be done once, so normally the DC should do + * it. However if there is no DC, every node must do it, since + * there is no other way to ensure some one node does it. +diff --git a/daemons/controld/controld_corosync.c b/daemons/controld/controld_corosync.c +index db99630fb..c5ab6580a 100644 +--- a/daemons/controld/controld_corosync.c ++++ b/daemons/controld/controld_corosync.c +@@ -87,6 +87,61 @@ crmd_cs_destroy(gpointer user_data) + } + } + ++extern bool controld_dc_left; ++ ++/*! ++ * \brief Handle a Corosync notification of a CPG configuration change ++ * ++ * \param[in] handle CPG connection ++ * \param[in] cpg_name CPG group name ++ * \param[in] member_list List of current CPG members ++ * \param[in] member_list_entries Number of entries in \p member_list ++ * \param[in] left_list List of CPG members that left ++ * \param[in] left_list_entries Number of entries in \p left_list ++ * \param[in] joined_list List of CPG members that joined ++ * \param[in] joined_list_entries Number of entries in \p joined_list ++ */ ++static void ++cpg_membership_callback(cpg_handle_t handle, const struct cpg_name *cpg_name, ++ const struct cpg_address *member_list, ++ size_t member_list_entries, ++ const struct cpg_address *left_list, ++ size_t left_list_entries, ++ const struct cpg_address *joined_list, ++ size_t joined_list_entries) ++{ ++ /* When nodes leave CPG, the DC clears their transient node attributes. ++ * ++ * However if there is no DC, or the DC is among the nodes that left, each ++ * remaining node needs to do the clearing, to ensure it gets done. ++ * Otherwise, the attributes would persist when the nodes rejoin, which ++ * could have serious consequences for unfencing, agents that use attributes ++ * for internal logic, etc. ++ * ++ * Here, we set a global boolean if the DC is among the nodes that left, for ++ * use by the peer callback. 
++ */ ++ if (fsa_our_dc != NULL) { ++ crm_node_t *peer = pcmk__search_cluster_node_cache(0, fsa_our_dc); ++ ++ if (peer != NULL) { ++ for (int i = 0; i < left_list_entries; ++i) { ++ if (left_list[i].nodeid == peer->id) { ++ controld_dc_left = true; ++ break; ++ } ++ } ++ } ++ } ++ ++ // Process the change normally, which will call the peer callback as needed ++ pcmk_cpg_membership(handle, cpg_name, member_list, member_list_entries, ++ left_list, left_list_entries, ++ joined_list, joined_list_entries); ++ ++ controld_dc_left = false; ++} ++ + extern gboolean crm_connect_corosync(crm_cluster_t * cluster); + + gboolean +@@ -95,7 +150,7 @@ crm_connect_corosync(crm_cluster_t * cluster) + if (is_corosync_cluster()) { + crm_set_status_callback(&peer_update_callback); + cluster->cpg.cpg_deliver_fn = crmd_cs_dispatch; +- cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; ++ cluster->cpg.cpg_confchg_fn = cpg_membership_callback; + cluster->destroy = crmd_cs_destroy; + + if (crm_cluster_connect(cluster)) { +-- +2.27.0 + diff --git a/SOURCES/018-rhbz1907726.patch b/SOURCES/018-rhbz1907726.patch deleted file mode 100644 index f41eea4..0000000 --- a/SOURCES/018-rhbz1907726.patch +++ /dev/null @@ -1,47 +0,0 @@ -From c3e2edb78e6d0b6ffc8acbe8fc7caef058b35d76 Mon Sep 17 00:00:00 2001 -From: Reid Wahl -Date: Tue, 22 Dec 2020 22:28:46 -0800 -Subject: [PATCH] Fix: liblrmd: Limit node name addition to proxied attrd - update commands - -remote_proxy_cb() currently adds the remote node's name as -PCMK__XA_ATTR_NODE_NAME if that attribute is not explicitly set. This is -necessary for attrd update commands. For those, lack of an explicit node -name means to use the local node. Since requests are proxied to full -nodes, the node hosting the remote resource would be incorrectly treated -as the "local node", causing the attribute to be updated for the wrong -node. - -However, for other commands, this is not the case. Lack of an explicit -node name can mean "all nodes" (as for CLEAR_FAILURE and QUERY), or a -node name may be ignored (as for REFRESH). In these cases (the -non-update commands), we don't want to add a node name automatically if -it's not explicitly set. 
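The resulting guard is small; this sketch mirrors the hunk below, with the three update commands that still get the implicit node name spelled out:

    if (pcmk__str_eq(type, T_ATTRD, pcmk__str_casei)
        && (crm_element_value(request, PCMK__XA_ATTR_NODE_NAME) == NULL)
        && pcmk__str_any_of(crm_element_value(request, PCMK__XA_TASK),
                            PCMK__ATTRD_CMD_UPDATE,
                            PCMK__ATTRD_CMD_UPDATE_BOTH,
                            PCMK__ATTRD_CMD_UPDATE_DELAY, NULL)) {
        /* Update-style commands treat a missing node name as "the local
         * node", so fill in the remote node's name here; CLEAR_FAILURE,
         * QUERY, REFRESH, etc. are left untouched. */
        crm_xml_add(request, PCMK__XA_ATTR_NODE_NAME, proxy->node_name);
    }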
- -Resolves: RHBZ#1907726 - -Signed-off-by: Reid Wahl ---- - lib/lrmd/proxy_common.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/lib/lrmd/proxy_common.c b/lib/lrmd/proxy_common.c -index b8d889e..0f1e76a 100644 ---- a/lib/lrmd/proxy_common.c -+++ b/lib/lrmd/proxy_common.c -@@ -259,7 +259,11 @@ remote_proxy_cb(lrmd_t *lrmd, const char *node_name, xmlNode *msg) - - if (pcmk__str_eq(type, T_ATTRD, pcmk__str_casei) - && crm_element_value(request, -- PCMK__XA_ATTR_NODE_NAME) == NULL) { -+ PCMK__XA_ATTR_NODE_NAME) == NULL -+ && pcmk__str_any_of(crm_element_value(request, PCMK__XA_TASK), -+ PCMK__ATTRD_CMD_UPDATE, -+ PCMK__ATTRD_CMD_UPDATE_BOTH, -+ PCMK__ATTRD_CMD_UPDATE_DELAY, NULL)) { - crm_xml_add(request, PCMK__XA_ATTR_NODE_NAME, proxy->node_name); - } - --- -1.8.3.1 - diff --git a/SOURCES/019-crm_resource.patch b/SOURCES/019-crm_resource.patch new file mode 100644 index 0000000..237dde2 --- /dev/null +++ b/SOURCES/019-crm_resource.patch @@ -0,0 +1,114 @@ +From b4e426a016a4d7c9ade39e60a83644fc537bce26 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 11 Aug 2021 12:10:32 +0200 +Subject: [PATCH 1/2] Fix: crm_resource: translate LSB rc to exit code and fix + resources_find_service_class() call + +--- + tools/crm_resource_runtime.c | 16 ++++++++++++---- + 1 file changed, 12 insertions(+), 4 deletions(-) + +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index ce037c514..e9d8aa687 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -1718,10 +1718,10 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); + } else if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, + pcmk__str_casei) && !pcmk__str_eq( +- resources_find_service_class(rsc_name), PCMK_RESOURCE_CLASS_LSB, ++ resources_find_service_class(rsc_type), PCMK_RESOURCE_CLASS_LSB, + pcmk__str_casei)) { + out->err(out, "Sorry, the %s option doesn't support %s resources", +- rsc_action, resources_find_service_class(rsc_name)); ++ rsc_action, resources_find_service_class(rsc_type)); + crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); + } + +@@ -1798,9 +1798,17 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + if (services_action_sync(op)) { + exit_code = op->rc; + ++ /* Lookup exit code based on rc for LSB resources */ ++ if (( pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei) || ++ (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei) && ++ pcmk__str_eq(resources_find_service_class(rsc_type), PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)) ) && ++ pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) { ++ exit_code = services_get_ocf_exitcode(action, exit_code); ++ } ++ + out->message(out, "resource-agent-action", resource_verbose, rsc_class, +- rsc_prov, rsc_type, rsc_name, rsc_action, override_hash, op->rc, +- op->status, op->stdout_data, op->stderr_data); ++ rsc_prov, rsc_type, rsc_name, rsc_action, override_hash, ++ exit_code, op->status, op->stdout_data, op->stderr_data); + } else { + exit_code = op->rc == 0 ? 
CRM_EX_ERROR : op->rc; + } +-- +2.27.0 + + +From 9a6beb74adfb4710fb3a4e588bef79a562c101f3 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 12 Aug 2021 18:54:30 +0200 +Subject: [PATCH 2/2] Refactor: crm_resource: simplify rsc_class logic by + getting actual class early if it's of class "service" + +--- + tools/crm_resource_runtime.c | 23 +++++++++-------------- + 1 file changed, 9 insertions(+), 14 deletions(-) + +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index e9d8aa687..13b78b6b9 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -1702,26 +1702,23 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + int timeout_ms, int resource_verbose, gboolean force, + int check_level) + { ++ const char *class = NULL; + const char *action = NULL; + GHashTable *params_copy = NULL; + crm_exit_t exit_code = CRM_EX_OK; + svc_action_t *op = NULL; + +- if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { ++ class = !pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei) ? ++ rsc_class : resources_find_service_class(rsc_type); ++ ++ if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { + out->err(out, "Sorry, the %s option doesn't support %s resources yet", +- rsc_action, rsc_class); ++ rsc_action, class); + crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); +- } else if (pcmk__strcase_any_of(rsc_class, PCMK_RESOURCE_CLASS_SYSTEMD, ++ } else if (pcmk__strcase_any_of(class, PCMK_RESOURCE_CLASS_SYSTEMD, + PCMK_RESOURCE_CLASS_UPSTART, PCMK_RESOURCE_CLASS_NAGIOS, NULL)) { + out->err(out, "Sorry, the %s option doesn't support %s resources", +- rsc_action, rsc_class); +- crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); +- } else if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, +- pcmk__str_casei) && !pcmk__str_eq( +- resources_find_service_class(rsc_type), PCMK_RESOURCE_CLASS_LSB, +- pcmk__str_casei)) { +- out->err(out, "Sorry, the %s option doesn't support %s resources", +- rsc_action, resources_find_service_class(rsc_type)); ++ rsc_action, class); + crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); + } + +@@ -1799,9 +1796,7 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, + exit_code = op->rc; + + /* Lookup exit code based on rc for LSB resources */ +- if (( pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei) || +- (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei) && +- pcmk__str_eq(resources_find_service_class(rsc_type), PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)) ) && ++ if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei) && + pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) { + exit_code = services_get_ocf_exitcode(action, exit_code); + } +-- +2.27.0 + diff --git a/SOURCES/019-rhbz1371576.patch b/SOURCES/019-rhbz1371576.patch deleted file mode 100644 index e1f02e4..0000000 --- a/SOURCES/019-rhbz1371576.patch +++ /dev/null @@ -1,1174 +0,0 @@ -From 71c53fb73ff8d33253ff99b84e666913050e16cc Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Dec 2020 16:13:30 -0600 -Subject: [PATCH 1/4] Refactor: scheduler: rename colocation constructor - -... per current guidelines. Also make void since no caller used the return -value. 
---- - include/pcmki/pcmki_sched_utils.h | 10 ++++---- - lib/pacemaker/pcmk_sched_bundle.c | 12 +++++----- - lib/pacemaker/pcmk_sched_constraints.c | 43 +++++++++++++++++++--------------- - lib/pacemaker/pcmk_sched_group.c | 4 ++-- - lib/pacemaker/pcmk_sched_native.c | 4 ++-- - 5 files changed, 39 insertions(+), 34 deletions(-) - -diff --git a/include/pcmki/pcmki_sched_utils.h b/include/pcmki/pcmki_sched_utils.h -index f2318c5..fdb3843 100644 ---- a/include/pcmki/pcmki_sched_utils.h -+++ b/include/pcmki/pcmki_sched_utils.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -28,10 +28,10 @@ pe__location_t *rsc2node_new(const char *id, pe_resource_t *rsc, int weight, - const char *discovery_mode, pe_node_t *node, - pe_working_set_t *data_set); - --extern gboolean rsc_colocation_new(const char *id, const char *node_attr, int score, -- pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, -- const char *state_lh, const char *state_rh, -- pe_working_set_t * data_set); -+void pcmk__new_colocation(const char *id, const char *node_attr, int score, -+ pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -+ const char *state_lh, const char *state_rh, -+ pe_working_set_t *data_set); - - extern gboolean rsc_ticket_new(const char *id, pe_resource_t * rsc_lh, pe_ticket_t * ticket, - const char *state_lh, const char *loss_policy, -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index e9b8a74..02bff7c 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -140,10 +140,10 @@ pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *prefer, - * host because pacemaker-remoted only supports a single - * active connection - */ -- rsc_colocation_new("child-remote-with-docker-remote", NULL, -- INFINITY, replica->remote, -- container_host->details->remote_rsc, NULL, NULL, -- data_set); -+ pcmk__new_colocation("child-remote-with-docker-remote", NULL, -+ INFINITY, replica->remote, -+ container_host->details->remote_rsc, NULL, -+ NULL, data_set); - } - - if (replica->remote) { -@@ -310,8 +310,8 @@ pcmk__bundle_internal_constraints(pe_resource_t *rsc, - new_rsc_order(replica->container, RSC_STOP, replica->ip, RSC_STOP, - pe_order_implies_first|pe_order_preserve, data_set); - -- rsc_colocation_new("ip-with-docker", NULL, INFINITY, replica->ip, -- replica->container, NULL, NULL, data_set); -+ pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip, -+ replica->container, NULL, NULL, data_set); - } - - if (replica->remote) { -diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c -index 121754d..cce3f12 100644 ---- a/lib/pacemaker/pcmk_sched_constraints.c -+++ b/lib/pacemaker/pcmk_sched_constraints.c -@@ -1339,22 +1339,23 @@ anti_colocation_order(pe_resource_t * first_rsc, int first_role, - } - } - --gboolean --rsc_colocation_new(const char *id, const char *node_attr, int score, -- pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, -- const char *state_lh, const char *state_rh, pe_working_set_t * data_set) -+void -+pcmk__new_colocation(const char *id, const char *node_attr, int score, -+ pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -+ const char *state_lh, const char *state_rh, -+ pe_working_set_t *data_set) - { - rsc_colocation_t *new_con = NULL; - - if ((rsc_lh == NULL) || (rsc_rh == NULL)) { - pcmk__config_err("Ignoring colocation '%s' because 
resource " - "does not exist", id); -- return FALSE; -+ return; - } - - new_con = calloc(1, sizeof(rsc_colocation_t)); - if (new_con == NULL) { -- return FALSE; -+ return; - } - - if (pcmk__str_eq(state_lh, RSC_ROLE_STARTED_S, pcmk__str_null_matches | pcmk__str_casei)) { -@@ -1390,8 +1391,6 @@ rsc_colocation_new(const char *id, const char *node_attr, int score, - anti_colocation_order(rsc_lh, new_con->role_lh, rsc_rh, new_con->role_rh, data_set); - anti_colocation_order(rsc_rh, new_con->role_rh, rsc_lh, new_con->role_lh, data_set); - } -- -- return TRUE; - } - - /* LHS before RHS */ -@@ -2311,8 +2310,8 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) - EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); - if (with != NULL) { - pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id); -- rsc_colocation_new(set_id, NULL, local_score, resource, with, role, role, -- data_set); -+ pcmk__new_colocation(set_id, NULL, local_score, resource, -+ with, role, role, data_set); - } - - with = resource; -@@ -2327,8 +2326,8 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) - EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); - if (last != NULL) { - pe_rsc_trace(resource, "Colocating %s with %s", last->id, resource->id); -- rsc_colocation_new(set_id, NULL, local_score, last, resource, role, role, -- data_set); -+ pcmk__new_colocation(set_id, NULL, local_score, last, -+ resource, role, role, data_set); - } - - last = resource; -@@ -2360,8 +2359,9 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) - EXPAND_CONSTRAINT_IDREF(set_id, with, ID(xml_rsc_with)); - pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id, - with->id); -- rsc_colocation_new(set_id, NULL, local_score, resource, with, role, role, -- data_set); -+ pcmk__new_colocation(set_id, NULL, local_score, -+ resource, with, role, role, -+ data_set); - } - } - } -@@ -2412,7 +2412,8 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - } - - if (rsc_1 != NULL && rsc_2 != NULL) { -- rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); -+ pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2, -+ data_set); - - } else if (rsc_1 != NULL) { - for (xml_rsc = pcmk__xe_first_child(set2); xml_rsc != NULL; -@@ -2420,7 +2421,8 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - - if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) { - EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); -- rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); -+ pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, -+ role_2, data_set); - } - } - -@@ -2430,7 +2432,8 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - - if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) { - EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); -- rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); -+ pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, -+ role_2, data_set); - } - } - -@@ -2449,7 +2452,8 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - - if (pcmk__str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) { - EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); -- rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); -+ pcmk__new_colocation(id, 
NULL, score, rsc_1, rsc_2, -+ role_1, role_2, data_set); - } - } - } -@@ -2534,7 +2538,8 @@ unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - score_i = char2score(score); - } - -- rsc_colocation_new(id, attr, score_i, rsc_lh, rsc_rh, state_lh, state_rh, data_set); -+ pcmk__new_colocation(id, attr, score_i, rsc_lh, rsc_rh, state_lh, state_rh, -+ data_set); - return TRUE; - } - -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 35f25af..79847e8 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -192,8 +192,8 @@ group_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) - } - - } else if (group_data->colocated) { -- rsc_colocation_new("group:internal_colocation", NULL, INFINITY, -- child_rsc, last_rsc, NULL, NULL, data_set); -+ pcmk__new_colocation("group:internal_colocation", NULL, INFINITY, -+ child_rsc, last_rsc, NULL, NULL, data_set); - } - - if (pcmk_is_set(top->flags, pe_rsc_promotable)) { -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index 9196f59..b18f52e 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -1697,8 +1697,8 @@ native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) - } else { - score = INFINITY; /* Force them to run on the same host */ - } -- rsc_colocation_new("resource-with-container", NULL, score, rsc, -- rsc->container, NULL, NULL, data_set); -+ pcmk__new_colocation("resource-with-container", NULL, score, rsc, -+ rsc->container, NULL, NULL, data_set); - } - } - --- -1.8.3.1 - - -From 9fced08910267513a3208772cca712e6ebec54bc Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Dec 2020 16:24:25 -0600 -Subject: [PATCH 2/4] Refactor: scheduler,tools: rename colocation type - -... per current guidelines ---- - include/crm/pengine/pe_types.h | 4 ++-- - include/pcmki/pcmki_sched_allocate.h | 22 +++++++++++----------- - include/pcmki/pcmki_sched_utils.h | 4 ++-- - include/pcmki/pcmki_scheduler.h | 5 ++--- - lib/pacemaker/pcmk_output.c | 12 ++++++------ - lib/pacemaker/pcmk_sched_bundle.c | 4 ++-- - lib/pacemaker/pcmk_sched_clone.c | 20 ++++++++++---------- - lib/pacemaker/pcmk_sched_constraints.c | 12 ++++++------ - lib/pacemaker/pcmk_sched_graph.c | 3 ++- - lib/pacemaker/pcmk_sched_group.c | 6 +++--- - lib/pacemaker/pcmk_sched_native.c | 18 ++++++++++-------- - lib/pacemaker/pcmk_sched_promotable.c | 8 ++++---- - lib/pacemaker/pcmk_sched_utilization.c | 4 ++-- - tools/crm_resource_runtime.c | 2 +- - 14 files changed, 63 insertions(+), 61 deletions(-) - -diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h -index 9fea637..59d5ce8 100644 ---- a/include/crm/pengine/pe_types.h -+++ b/include/crm/pengine/pe_types.h -@@ -340,8 +340,8 @@ struct pe_resource_s { - - //!@{ - //! 
This field should be treated as internal to Pacemaker -- GListPtr rsc_cons_lhs; // List of rsc_colocation_t* -- GListPtr rsc_cons; // List of rsc_colocation_t* -+ GListPtr rsc_cons_lhs; // List of pcmk__colocation_t* -+ GListPtr rsc_cons; // List of pcmk__colocation_t* - GListPtr rsc_location; // List of pe__location_t* - GListPtr actions; // List of pe_action_t* - GListPtr rsc_tickets; // List of rsc_ticket* -diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h -index a7f8c11..0859eaa 100644 ---- a/include/pcmki/pcmki_sched_allocate.h -+++ b/include/pcmki/pcmki_sched_allocate.h -@@ -26,9 +26,9 @@ struct resource_alloc_functions_s { - void (*internal_constraints) (pe_resource_t *, pe_working_set_t *); - - void (*rsc_colocation_lh) (pe_resource_t *, pe_resource_t *, -- rsc_colocation_t *, pe_working_set_t *); -+ pcmk__colocation_t *, pe_working_set_t *); - void (*rsc_colocation_rh) (pe_resource_t *, pe_resource_t *, -- rsc_colocation_t *, pe_working_set_t *); -+ pcmk__colocation_t *, pe_working_set_t *); - - void (*rsc_location) (pe_resource_t *, pe__location_t *); - -@@ -56,10 +56,10 @@ pe_node_t *pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *preferred, - extern void native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set); - extern void native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set); - void native_rsc_colocation_lh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - void native_rsc_colocation_rh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - extern void rsc_ticket_constraint(pe_resource_t * lh_rsc, rsc_ticket_t * rsc_ticket, - pe_working_set_t * data_set); -@@ -76,10 +76,10 @@ pe_node_t *pcmk__group_allocate(pe_resource_t *rsc, pe_node_t *preferred, - extern void group_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set); - extern void group_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set); - void group_rsc_colocation_lh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - void group_rsc_colocation_rh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - extern enum pe_action_flags group_action_flags(pe_action_t * action, pe_node_t * node); - void group_rsc_location(pe_resource_t *rsc, pe__location_t *constraint); -@@ -97,11 +97,11 @@ void pcmk__bundle_internal_constraints(pe_resource_t *rsc, - pe_working_set_t *data_set); - void pcmk__bundle_rsc_colocation_lh(pe_resource_t *lh_rsc, - pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - void pcmk__bundle_rsc_colocation_rh(pe_resource_t *lh_rsc, - pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - void pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint); - enum pe_action_flags pcmk__bundle_action_flags(pe_action_t *action, -@@ -114,10 +114,10 @@ pe_node_t *pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *preferred, - extern void clone_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set); - extern void clone_internal_constraints(pe_resource_t * rsc, pe_working_set_t * 
data_set); - void clone_rsc_colocation_lh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - void clone_rsc_colocation_rh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - void clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint); - extern enum pe_action_flags clone_action_flags(pe_action_t * action, pe_node_t * node); -@@ -133,7 +133,7 @@ void create_promotable_actions(pe_resource_t *rsc, pe_working_set_t *data_set); - void promote_demote_constraints(pe_resource_t *rsc, pe_working_set_t *data_set); - void promotable_constraints(pe_resource_t *rsc, pe_working_set_t *data_set); - void promotable_colocation_rh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set); - - /* extern resource_object_functions_t resource_variants[]; */ -diff --git a/include/pcmki/pcmki_sched_utils.h b/include/pcmki/pcmki_sched_utils.h -index fdb3843..b8d7750 100644 ---- a/include/pcmki/pcmki_sched_utils.h -+++ b/include/pcmki/pcmki_sched_utils.h -@@ -20,7 +20,7 @@ - #include - - /* Constraint helper functions */ --extern rsc_colocation_t *invert_constraint(rsc_colocation_t * constraint); -+pcmk__colocation_t *invert_constraint(pcmk__colocation_t *constraint); - - pe__location_t *copy_constraint(pe__location_t *constraint); - -@@ -71,7 +71,7 @@ enum filter_colocation_res { - - extern enum filter_colocation_res - filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, -- rsc_colocation_t * constraint, gboolean preview); -+ pcmk__colocation_t *constraint, gboolean preview); - - extern int compare_capacity(const pe_node_t * node1, const pe_node_t * node2); - extern void calculate_utilization(GHashTable * current_utilization, -diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h -index 342d57a..b24e994 100644 ---- a/include/pcmki/pcmki_scheduler.h -+++ b/include/pcmki/pcmki_scheduler.h -@@ -10,7 +10,6 @@ - #ifndef PENGINE__H - # define PENGINE__H - --typedef struct rsc_colocation_s rsc_colocation_t; - typedef struct rsc_ticket_s rsc_ticket_t; - typedef struct lrm_agent_s lrm_agent_t; - -@@ -37,7 +36,7 @@ enum pe_weights { - pe_weights_rollback = 0x10, - }; - --struct rsc_colocation_s { -+typedef struct { - const char *id; - const char *node_attribute; - pe_resource_t *rsc_lh; -@@ -47,7 +46,7 @@ struct rsc_colocation_s { - int role_rh; - - int score; --}; -+} pcmk__colocation_t; - - enum loss_ticket_policy_e { - loss_ticket_stop, -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 0d20a54..500afd1 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -47,7 +47,7 @@ pcmk__out_epilogue(pcmk__output_t *out, xmlNodePtr *xml, int retval) { - } - - static char * --colocations_header(pe_resource_t *rsc, rsc_colocation_t *cons, -+colocations_header(pe_resource_t *rsc, pcmk__colocation_t *cons, - gboolean dependents) { - char *score = NULL; - char *retval = NULL; -@@ -68,7 +68,7 @@ colocations_header(pe_resource_t *rsc, rsc_colocation_t *cons, - - static void - colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc, -- rsc_colocation_t *cons) { -+ pcmk__colocation_t *cons) { - char *score = NULL; - xmlNodePtr node = NULL; - -@@ -146,7 +146,7 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { - - 
pe__set_resource_flags(rsc, pe_rsc_allocating); - for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; - char *hdr = NULL; - - PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources %s is colocated with", rsc->id); -@@ -189,7 +189,7 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - - pe__set_resource_flags(rsc, pe_rsc_allocating); - for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; - - if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) { - colocations_xml_node(out, cons->rsc_rh, cons); -@@ -221,7 +221,7 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) { - - pe__set_resource_flags(rsc, pe_rsc_allocating); - for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; - char *hdr = NULL; - - PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources colocated with %s", rsc->id); -@@ -264,7 +264,7 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { - - pe__set_resource_flags(rsc, pe_rsc_allocating); - for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; - - if (pcmk_is_set(cons->rsc_lh->flags, pe_rsc_allocating)) { - colocations_xml_node(out, cons->rsc_lh, cons); -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index 02bff7c..ac9219c 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -425,7 +425,7 @@ compatible_replica(pe_resource_t *rsc_lh, pe_resource_t *rsc, - - void - pcmk__bundle_rsc_colocation_lh(pe_resource_t *rsc, pe_resource_t *rsc_rh, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - /* -- Never called -- -@@ -469,7 +469,7 @@ int copies_per_node(pe_resource_t * rsc) - - void - pcmk__bundle_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - GListPtr allocated_rhs = NULL; -diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c -index e419a24..9485a98 100644 ---- a/lib/pacemaker/pcmk_sched_clone.c -+++ b/lib/pacemaker/pcmk_sched_clone.c -@@ -237,7 +237,7 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) - - if(resource1->parent) { - for (gIter = resource1->parent->rsc_cons; gIter; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -@@ -252,7 +252,7 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) - } - - for (gIter = resource1->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -@@ -269,7 +269,7 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) - - if(resource2->parent) { - for (gIter = resource2->parent->rsc_cons; 
gIter; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - crm_trace("Applying %s to %s", constraint->id, resource2->id); - -@@ -281,7 +281,7 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) - } - - for (gIter = resource2->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - crm_trace("Applying %s to %s", constraint->id, resource2->id); - -@@ -499,7 +499,7 @@ append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean al - - gIter = rsc->rsc_cons; - for (; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; - - if (cons->score == 0) { - continue; -@@ -511,7 +511,7 @@ append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean al - - gIter = rsc->rsc_cons_lhs; - for (; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; - - if (cons->score == 0) { - continue; -@@ -645,7 +645,7 @@ pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer, - * order to allocate clone instances - */ - for (GListPtr gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -@@ -656,7 +656,7 @@ pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer, - } - - for (GListPtr gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -@@ -1055,7 +1055,7 @@ find_compatible_child(pe_resource_t *local_child, pe_resource_t *rsc, - - void - clone_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - /* -- Never called -- -@@ -1067,7 +1067,7 @@ clone_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - - void - clone_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - GListPtr gIter = NULL; -diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c -index cce3f12..1b44837 100644 ---- a/lib/pacemaker/pcmk_sched_constraints.c -+++ b/lib/pacemaker/pcmk_sched_constraints.c -@@ -1204,8 +1204,8 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml, - static gint - sort_cons_priority_lh(gconstpointer a, gconstpointer b) - { -- const rsc_colocation_t *rsc_constraint1 = (const rsc_colocation_t *)a; -- const rsc_colocation_t *rsc_constraint2 = (const rsc_colocation_t *)b; -+ const pcmk__colocation_t *rsc_constraint1 = (const pcmk__colocation_t *) a; -+ const pcmk__colocation_t *rsc_constraint2 = (const pcmk__colocation_t *) b; - - if (a == NULL) { - return 1; -@@ -1252,8 +1252,8 @@ sort_cons_priority_lh(gconstpointer a, gconstpointer b) - static gint - sort_cons_priority_rh(gconstpointer a, gconstpointer b) - { -- const 
rsc_colocation_t *rsc_constraint1 = (const rsc_colocation_t *)a; -- const rsc_colocation_t *rsc_constraint2 = (const rsc_colocation_t *)b; -+ const pcmk__colocation_t *rsc_constraint1 = (const pcmk__colocation_t *) a; -+ const pcmk__colocation_t *rsc_constraint2 = (const pcmk__colocation_t *) b; - - if (a == NULL) { - return 1; -@@ -1345,7 +1345,7 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - const char *state_lh, const char *state_rh, - pe_working_set_t *data_set) - { -- rsc_colocation_t *new_con = NULL; -+ pcmk__colocation_t *new_con = NULL; - - if ((rsc_lh == NULL) || (rsc_rh == NULL)) { - pcmk__config_err("Ignoring colocation '%s' because resource " -@@ -1353,7 +1353,7 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - return; - } - -- new_con = calloc(1, sizeof(rsc_colocation_t)); -+ new_con = calloc(1, sizeof(pcmk__colocation_t)); - if (new_con == NULL) { - return; - } -diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c -index 51f7063..c012d23 100644 ---- a/lib/pacemaker/pcmk_sched_graph.c -+++ b/lib/pacemaker/pcmk_sched_graph.c -@@ -510,7 +510,8 @@ update_colo_start_chain(pe_action_t *action, pe_working_set_t *data_set) - } - - for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *colocate_with = (rsc_colocation_t *)gIter->data; -+ pcmk__colocation_t *colocate_with = (pcmk__colocation_t *) gIter->data; -+ - if (colocate_with->score == INFINITY) { - mark_start_blocked(colocate_with->rsc_lh, action->rsc, data_set); - } -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 79847e8..f1ce0be 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -282,7 +282,7 @@ group_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) - - void - group_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - GListPtr gIter = NULL; -@@ -328,7 +328,7 @@ group_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - - void - group_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - GListPtr gIter = rsc_rh->children; -@@ -514,7 +514,7 @@ pcmk__group_merge_weights(pe_resource_t *rsc, const char *rhs, - factor, flags); - - for (; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index b18f52e..87d8bfb 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -434,7 +434,7 @@ pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs, - - for (; gIter != NULL; gIter = gIter->next) { - pe_resource_t *other = NULL; -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -@@ -526,7 +526,7 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes); - - for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; 
-+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - GHashTable *archive = NULL; - pe_resource_t *rsc_rh = constraint->rsc_rh; -@@ -560,7 +560,7 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes); - - for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -@@ -1712,7 +1712,7 @@ native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) - - void - native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - if (rsc_lh == NULL) { -@@ -1735,7 +1735,7 @@ native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - - enum filter_colocation_res - filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, -- rsc_colocation_t * constraint, gboolean preview) -+ pcmk__colocation_t *constraint, gboolean preview) - { - if (constraint->score == 0) { - return influence_nothing; -@@ -1819,7 +1819,8 @@ filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, - } - - static void --influence_priority(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint) -+influence_priority(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -+ pcmk__colocation_t *constraint) - { - const char *rh_value = NULL; - const char *lh_value = NULL; -@@ -1860,7 +1861,8 @@ influence_priority(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocatio - } - - static void --colocation_match(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint) -+colocation_match(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -+ pcmk__colocation_t *constraint) - { - const char *attribute = CRM_ATTR_ID; - const char *value = NULL; -@@ -1928,7 +1930,7 @@ colocation_match(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_ - - void - native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - enum filter_colocation_res filter_results; -diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c -index b976344..9a5474a 100644 ---- a/lib/pacemaker/pcmk_sched_promotable.c -+++ b/lib/pacemaker/pcmk_sched_promotable.c -@@ -319,7 +319,7 @@ promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set) - - gIter = rsc->rsc_cons; - for (; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -@@ -343,7 +343,7 @@ promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set) - - gIter = rsc->rsc_cons_lhs; - for (; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - - if (constraint->score == 0) { - continue; -@@ -738,7 +738,7 @@ pcmk__set_instance_roles(pe_resource_t *rsc, pe_working_set_t *data_set) - apply_master_location(child_rsc, rsc->rsc_location, chosen); - - for (gIter2 = child_rsc->rsc_cons; gIter2 != NULL; gIter2 = gIter2->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) 
gIter2->data; -+ pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter2->data; - - if (cons->score == 0) { - continue; -@@ -981,7 +981,7 @@ node_hash_update_one(GHashTable * hash, pe_node_t * other, const char *attr, int - - void - promotable_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, -- rsc_colocation_t *constraint, -+ pcmk__colocation_t *constraint, - pe_working_set_t *data_set) - { - GListPtr gIter = NULL; -diff --git a/lib/pacemaker/pcmk_sched_utilization.c b/lib/pacemaker/pcmk_sched_utilization.c -index b46b2fa..0df3a17 100644 ---- a/lib/pacemaker/pcmk_sched_utilization.c -+++ b/lib/pacemaker/pcmk_sched_utilization.c -@@ -276,7 +276,7 @@ find_colocated_rscs(GListPtr colocated_rscs, pe_resource_t * rsc, pe_resource_t - colocated_rscs = g_list_append(colocated_rscs, rsc); - - for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - pe_resource_t *rsc_rh = constraint->rsc_rh; - - /* Break colocation loop */ -@@ -298,7 +298,7 @@ find_colocated_rscs(GListPtr colocated_rscs, pe_resource_t * rsc, pe_resource_t - } - - for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { -- rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; -+ pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - pe_resource_t *rsc_lh = constraint->rsc_lh; - - /* Break colocation loop */ -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index f4500db..e0804fc 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -392,7 +392,7 @@ cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, - crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs); - pe__set_resource_flags(rsc, pe_rsc_allocating); - for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { -- rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; -+ pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; - pe_resource_t *peer = cons->rsc_lh; - - crm_debug("Checking %s %d", cons->id, cons->score); --- -1.8.3.1 - - -From 83401876df22539c35f09b21135eefc7432f3839 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Dec 2020 17:02:36 -0600 -Subject: [PATCH 3/4] Refactor: scheduler: make colocation constraint unpacker - static - -... for linker efficiency. Also make it (and a function it calls) return void -since the only caller ignored the value. 
---- - include/pcmki/pcmki_sched_allocate.h | 2 -- - lib/pacemaker/pcmk_sched_constraints.c | 45 +++++++++++++++------------------- - 2 files changed, 20 insertions(+), 27 deletions(-) - -diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h -index 0859eaa..d226c8c 100644 ---- a/include/pcmki/pcmki_sched_allocate.h -+++ b/include/pcmki/pcmki_sched_allocate.h -@@ -141,8 +141,6 @@ extern resource_alloc_functions_t resource_class_alloc_functions[]; - - extern gboolean unpack_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set); - --extern gboolean unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set); -- - extern gboolean unpack_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set); - - void LogNodeActions(pe_working_set_t * data_set, gboolean terminal); -diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c -index 1b44837..0029ad7 100644 ---- a/lib/pacemaker/pcmk_sched_constraints.c -+++ b/lib/pacemaker/pcmk_sched_constraints.c -@@ -49,6 +49,7 @@ static pe__location_t *generate_location_rule(pe_resource_t *rsc, - pe_working_set_t *data_set, - pe_match_data_t *match_data); - static void unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set); -+static void unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set); - - static bool - evaluate_lifetime(xmlNode *lifetime, pe_working_set_t *data_set) -@@ -2463,7 +2464,7 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - return TRUE; - } - --static gboolean -+static void - unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - { - int score_i = 0; -@@ -2488,24 +2489,24 @@ unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - if (rsc_lh == NULL) { - pcmk__config_err("Ignoring constraint '%s' because resource '%s' " - "does not exist", id, id_lh); -- return FALSE; -+ return; - - } else if (rsc_rh == NULL) { - pcmk__config_err("Ignoring constraint '%s' because resource '%s' " - "does not exist", id, id_rh); -- return FALSE; -+ return; - - } else if (instance_lh && pe_rsc_is_clone(rsc_lh) == FALSE) { - pcmk__config_err("Ignoring constraint '%s' because resource '%s' " - "is not a clone but instance '%s' was requested", - id, id_lh, instance_lh); -- return FALSE; -+ return; - - } else if (instance_rh && pe_rsc_is_clone(rsc_rh) == FALSE) { - pcmk__config_err("Ignoring constraint '%s' because resource '%s' " - "is not a clone but instance '%s' was requested", - id, id_rh, instance_rh); -- return FALSE; -+ return; - } - - if (instance_lh) { -@@ -2514,7 +2515,7 @@ unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - pcmk__config_warn("Ignoring constraint '%s' because resource '%s' " - "does not have an instance '%s'", - id, id_lh, instance_lh); -- return FALSE; -+ return; - } - } - -@@ -2524,7 +2525,7 @@ unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - pcmk__config_warn("Ignoring constraint '%s' because resource '%s' " - "does not have an instance '%s'", - "'%s'", id, id_rh, instance_rh); -- return FALSE; -+ return; - } - } - -@@ -2540,7 +2541,6 @@ unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - - pcmk__new_colocation(id, attr, score_i, rsc_lh, rsc_rh, state_lh, state_rh, - data_set); -- return TRUE; - } - - static gboolean -@@ -2660,8 +2660,8 @@ unpack_colocation_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_se - return TRUE; - } - --gboolean --unpack_rsc_colocation(xmlNode * xml_obj, 
pe_working_set_t * data_set) -+static void -+unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set) - { - int score_i = 0; - xmlNode *set = NULL; -@@ -2674,19 +2674,16 @@ unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - const char *id = crm_element_value(xml_obj, XML_ATTR_ID); - const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); - -- gboolean rc = TRUE; -- - if (score) { - score_i = char2score(score); - } - -- rc = unpack_colocation_tags(xml_obj, &expanded_xml, data_set); -+ if (!unpack_colocation_tags(xml_obj, &expanded_xml, data_set)) { -+ return; -+ } - if (expanded_xml) { - orig_xml = xml_obj; - xml_obj = expanded_xml; -- -- } else if (rc == FALSE) { -- return FALSE; - } - - for (set = pcmk__xe_first_child(xml_obj); set != NULL; -@@ -2695,11 +2692,11 @@ unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - if (pcmk__str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, pcmk__str_none)) { - any_sets = TRUE; - set = expand_idref(set, data_set->input); -- if (unpack_colocation_set(set, score_i, data_set) == FALSE) { -- return FALSE; -- -- } else if (last && colocate_rsc_sets(id, last, set, score_i, data_set) == FALSE) { -- return FALSE; -+ if (!unpack_colocation_set(set, score_i, data_set)) { -+ return; -+ } -+ if (last && !colocate_rsc_sets(id, last, set, score_i, data_set)) { -+ return; - } - last = set; - } -@@ -2710,11 +2707,9 @@ unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - xml_obj = orig_xml; - } - -- if (any_sets == FALSE) { -- return unpack_simple_colocation(xml_obj, data_set); -+ if (!any_sets) { -+ unpack_simple_colocation(xml_obj, data_set); - } -- -- return TRUE; - } - - gboolean --- -1.8.3.1 - - -From 2278e01f8d951d939c172ac71e168a11199f84f7 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 21 Dec 2020 17:07:45 -0600 -Subject: [PATCH 4/4] Refactor: scheduler: functionize checking whether - colocation applies - -This allows us to have more consistent trace messages, and will reduce -code duplication when more checks are added later. 
---- - include/pcmki/pcmki_sched_utils.h | 3 +++ - lib/pacemaker/pcmk_sched_clone.c | 12 +++++------- - lib/pacemaker/pcmk_sched_group.c | 12 ++++++------ - lib/pacemaker/pcmk_sched_native.c | 14 ++++---------- - lib/pacemaker/pcmk_sched_promotable.c | 25 ++++++++----------------- - lib/pacemaker/pcmk_sched_utils.c | 31 +++++++++++++++++++++++++++++++ - 6 files changed, 57 insertions(+), 40 deletions(-) - -diff --git a/include/pcmki/pcmki_sched_utils.h b/include/pcmki/pcmki_sched_utils.h -index b8d7750..c7ae1b8 100644 ---- a/include/pcmki/pcmki_sched_utils.h -+++ b/include/pcmki/pcmki_sched_utils.h -@@ -72,6 +72,9 @@ enum filter_colocation_res { - extern enum filter_colocation_res - filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, - pcmk__colocation_t *constraint, gboolean preview); -+bool pcmk__colocation_applies(pe_resource_t *rsc, -+ pcmk__colocation_t *colocation, -+ bool promoted_only); - - extern int compare_capacity(const pe_node_t * node1, const pe_node_t * node2); - extern void calculate_utilization(GHashTable * current_utilization, -diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c -index 9485a98..5a06151 100644 ---- a/lib/pacemaker/pcmk_sched_clone.c -+++ b/lib/pacemaker/pcmk_sched_clone.c -@@ -658,14 +658,12 @@ pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (GListPtr gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -+ if (pcmk__colocation_applies(rsc, constraint, false)) { -+ rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, -+ rsc->id, rsc->allowed_nodes, constraint->node_attribute, -+ constraint->score / (float) INFINITY, -+ pe_weights_rollback|pe_weights_positive); - } -- rsc->allowed_nodes = -- constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, -- constraint->node_attribute, -- (float)constraint->score / INFINITY, -- (pe_weights_rollback | pe_weights_positive)); - } - - pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes); -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index f1ce0be..5334f23 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -516,13 +516,13 @@ pcmk__group_merge_weights(pe_resource_t *rsc, const char *rhs, - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -+ if (pcmk__colocation_applies(rsc, constraint, false)) { -+ nodes = pcmk__native_merge_weights(constraint->rsc_lh, rsc->id, -+ nodes, -+ constraint->node_attribute, -+ constraint->score / (float) INFINITY, -+ flags); - } -- nodes = pcmk__native_merge_weights(constraint->rsc_lh, rsc->id, nodes, -- constraint->node_attribute, -- constraint->score / (float) INFINITY, -- flags); - } - - pe__clear_resource_flags(rsc, pe_rsc_merging); -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index 87d8bfb..c302db6 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -562,17 +562,11 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -+ if 
(pcmk__colocation_applies(rsc, constraint, false)) { -+ rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, -+ rsc->id, rsc->allowed_nodes, constraint->node_attribute, -+ constraint->score / (float) INFINITY, pe_weights_rollback); - } -- pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)", -- constraint->id, constraint->rsc_lh->id, -- constraint->rsc_rh->id); -- rsc->allowed_nodes = -- constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, -- constraint->node_attribute, -- (float)constraint->score / INFINITY, -- pe_weights_rollback); - } - - if (rsc->next_role == RSC_ROLE_STOPPED) { -diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c -index 9a5474a..a0eeaad 100644 ---- a/lib/pacemaker/pcmk_sched_promotable.c -+++ b/lib/pacemaker/pcmk_sched_promotable.c -@@ -345,23 +345,14 @@ promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set) - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } -- -- /* (re-)adds location preferences of resource that wish to be -- * colocated with the master instance -- */ -- if (constraint->role_rh == RSC_ROLE_MASTER) { -- pe_rsc_trace(rsc, "LHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id, -- constraint->score); -- rsc->allowed_nodes = -- constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, -- rsc->allowed_nodes, -- constraint->node_attribute, -- (float)constraint->score / INFINITY, -- (pe_weights_rollback | -- pe_weights_positive)); -+ if (pcmk__colocation_applies(rsc, constraint, true)) { -+ /* (Re-)add location preferences of resource that wishes to be -+ * colocated with the promoted instance. -+ */ -+ rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, -+ rsc->id, rsc->allowed_nodes, constraint->node_attribute, -+ constraint->score / (float) INFINITY, -+ pe_weights_rollback|pe_weights_positive); - } - } - -diff --git a/lib/pacemaker/pcmk_sched_utils.c b/lib/pacemaker/pcmk_sched_utils.c -index eaaf526..aba417a 100644 ---- a/lib/pacemaker/pcmk_sched_utils.c -+++ b/lib/pacemaker/pcmk_sched_utils.c -@@ -765,3 +765,34 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op, - free(key); - return xml_op; - } -+ -+/*! -+ * \internal -+ * \brief Check whether a colocation constraint should apply -+ * -+ * \param[in] rsc Resource of interest (for logging) -+ * \param[in] colocation Colocation constraint to check -+ * \param[in] promoted_only If true, constraint applies if right-hand is promoted -+ */ -+bool -+pcmk__colocation_applies(pe_resource_t *rsc, pcmk__colocation_t *colocation, -+ bool promoted_only) -+{ -+ CRM_CHECK((rsc != NULL) && (colocation != NULL), return false); -+ -+ if (colocation->score == 0) { -+ pe_rsc_trace(rsc, "Ignoring colocation constraint %s: 0 score", -+ colocation->id); -+ return false; -+ } -+ if (promoted_only && (colocation->role_rh != RSC_ROLE_MASTER)) { -+ pe_rsc_trace(rsc, "Ignoring colocation constraint %s: role", -+ colocation->id); -+ return false; -+ } -+ pe_rsc_trace(rsc, "Applying colocation constraint %s: %s with %s%s (%d)", -+ colocation->id, colocation->rsc_lh->id, -+ (promoted_only? 
"promoted " : ""), -+ colocation->rsc_rh->id, colocation->score); -+ return true; -+} --- -1.8.3.1 - diff --git a/SOURCES/020-fence_watchdog.patch b/SOURCES/020-fence_watchdog.patch new file mode 100644 index 0000000..76abe27 --- /dev/null +++ b/SOURCES/020-fence_watchdog.patch @@ -0,0 +1,25 @@ +From 46dd1118cae948649e000b2159e8e92623520ad9 Mon Sep 17 00:00:00 2001 +From: Klaus Wenninger +Date: Thu, 19 Aug 2021 09:28:54 +0200 +Subject: [PATCH] Fix: fence_watchdog: fix malformed xml in metadata + +--- + daemons/fenced/fence_watchdog.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/daemons/fenced/fence_watchdog.in b/daemons/fenced/fence_watchdog.in +index 700065e0e..eefa7395e 100755 +--- a/daemons/fenced/fence_watchdog.in ++++ b/daemons/fenced/fence_watchdog.in +@@ -124,7 +124,7 @@ def metadata(avail_opt, options): + for option, dummy in sorted_options(avail_opt): + if "shortdesc" in ALL_OPT[option]: + print(' ') ++ '" required="' + ALL_OPT[option]["required"] + '">') + + default = "" + default_name_arg = "-" + ALL_OPT[option]["getopt"][:-1] +-- +2.27.0 + diff --git a/SOURCES/020-rhbz1872376.patch b/SOURCES/020-rhbz1872376.patch deleted file mode 100644 index b1e46f3..0000000 --- a/SOURCES/020-rhbz1872376.patch +++ /dev/null @@ -1,1529 +0,0 @@ -From 08ce507927ff497b0e8f125050f67d59c74d674c Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 19 Nov 2020 15:52:14 -0600 -Subject: [PATCH 01/12] Refactor: libpe_rules: functionize value-source - expansion - -... for readability ---- - lib/pengine/rules.c | 61 +++++++++++++++++++++++++++++++---------------------- - 1 file changed, 36 insertions(+), 25 deletions(-) - -diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c -index e5d452f..aa5d6ab 100644 ---- a/lib/pengine/rules.c -+++ b/lib/pengine/rules.c -@@ -1027,6 +1027,36 @@ accept_attr_expr(const char *l_val, const char *r_val, const char *type, - - /*! - * \internal -+ * \brief Get correct value according to value-source -+ * -+ * \param[in] value value given in rule expression -+ * \param[in] value_source value-source given in rule expressions -+ * \param[in] match_data If not NULL, resource back-references and params -+ */ -+static const char * -+expand_value_source(const char *value, const char *value_source, -+ pe_match_data_t *match_data) -+{ -+ GHashTable *table = NULL; -+ -+ if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) { -+ table = match_data->params; -+ -+ } else if (pcmk__str_eq(value_source, "meta", pcmk__str_casei)) { -+ table = match_data->meta; -+ -+ } else { // literal -+ return value; -+ } -+ -+ if ((table == NULL) || pcmk__str_empty(value)) { -+ return NULL; -+ } -+ return (const char *) g_hash_table_lookup(table, value); -+} -+ -+/*! 
-+ * \internal - * \brief Evaluate a node attribute expression based on #uname, #id, #kind, - * or a generic node attribute - * -@@ -1040,8 +1070,6 @@ pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) - { - gboolean attr_allocated = FALSE; - const char *h_val = NULL; -- GHashTable *table = NULL; -- bool literal = true; - - const char *op = NULL; - const char *type = NULL; -@@ -1061,36 +1089,19 @@ pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) - return FALSE; - } - -- if (rule_data->match_data) { -- if (rule_data->match_data->re) { -+ if (rule_data->match_data != NULL) { -+ // Expand any regular expression submatches (%0-%9) in attribute name -+ if (rule_data->match_data->re != NULL) { - char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re); - -- if (resolved_attr) { -+ if (resolved_attr != NULL) { - attr = (const char *) resolved_attr; - attr_allocated = TRUE; - } - } - -- if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) { -- literal = false; -- table = rule_data->match_data->params; -- } else if (pcmk__str_eq(value_source, "meta", pcmk__str_casei)) { -- literal = false; -- table = rule_data->match_data->meta; -- } -- } -- -- if (!literal) { -- const char *param_name = value; -- const char *param_value = NULL; -- -- value = NULL; -- if ((table != NULL) && !pcmk__str_empty(param_name)) { -- param_value = (const char *)g_hash_table_lookup(table, param_name); -- if (param_value != NULL) { -- value = param_value; -- } -- } -+ // Get value appropriate to value-source -+ value = expand_value_source(value, value_source, rule_data->match_data); - } - - if (rule_data->node_hash != NULL) { --- -1.8.3.1 - - -From 8864e8596cc44fa275f68d58fea9b7d3d5236106 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 19 Nov 2020 11:59:05 -0600 -Subject: [PATCH 02/12] Refactor: libcrmcommon: expose "first XML attribute" - function - -... for future reuse. Also, rename to reflect that only XML element nodes may -have attributes. ---- - include/crm/common/xml_internal.h | 14 +++++++ - lib/common/crmcommon_private.h | 6 --- - lib/common/nvpair.c | 4 +- - lib/common/patchset.c | 20 +++++----- - lib/common/xml.c | 81 +++++++++++++++++---------------------- - 5 files changed, 61 insertions(+), 64 deletions(-) - -diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h -index 13157c6..b2ff529 100644 ---- a/include/crm/common/xml_internal.h -+++ b/include/crm/common/xml_internal.h -@@ -255,4 +255,18 @@ void - pcmk__xe_set_props(xmlNodePtr node, ...) - G_GNUC_NULL_TERMINATED; - -+/*! -+ * \internal -+ * \brief Get first attribute of an XML element -+ * -+ * \param[in] xe XML element to check -+ * -+ * \return First attribute of \p xe (or NULL if \p xe is NULL or has none) -+ */ -+static inline xmlAttr * -+pcmk__xe_first_attr(const xmlNode *xe) -+{ -+ return (xe == NULL)? NULL : xe->properties; -+} -+ - #endif // PCMK__XML_INTERNAL__H -diff --git a/lib/common/crmcommon_private.h b/lib/common/crmcommon_private.h -index 1654cba..c2f334d 100644 ---- a/lib/common/crmcommon_private.h -+++ b/lib/common/crmcommon_private.h -@@ -141,12 +141,6 @@ void pcmk__mark_xml_attr_dirty(xmlAttr *a); - G_GNUC_INTERNAL - bool pcmk__xa_filterable(const char *name); - --static inline xmlAttr * --pcmk__first_xml_attr(const xmlNode *xml) --{ -- return xml? 
xml->properties : NULL; --} -- - static inline const char * - pcmk__xml_attr_value(const xmlAttr *attr) - { -diff --git a/lib/common/nvpair.c b/lib/common/nvpair.c -index 0ed7a7d..9bd87af 100644 ---- a/lib/common/nvpair.c -+++ b/lib/common/nvpair.c -@@ -163,7 +163,7 @@ pcmk_xml_attrs2nvpairs(xmlNode *xml) - { - GSList *result = NULL; - -- for (xmlAttrPtr iter = pcmk__first_xml_attr(xml); iter != NULL; -+ for (xmlAttrPtr iter = pcmk__xe_first_attr(xml); iter != NULL; - iter = iter->next) { - - result = pcmk_prepend_nvpair(result, -@@ -925,7 +925,7 @@ xml2list(xmlNode *parent) - - crm_log_xml_trace(nvpair_list, "Unpacking"); - -- for (pIter = pcmk__first_xml_attr(nvpair_list); pIter != NULL; -+ for (pIter = pcmk__xe_first_attr(nvpair_list); pIter != NULL; - pIter = pIter->next) { - - const char *p_name = (const char *)pIter->name; -diff --git a/lib/common/patchset.c b/lib/common/patchset.c -index f3dab03..15cbe07 100644 ---- a/lib/common/patchset.c -+++ b/lib/common/patchset.c -@@ -112,7 +112,7 @@ add_xml_changes_to_patchset(xmlNode *xml, xmlNode *patchset) - } - - // Check each of the XML node's attributes for changes -- for (pIter = pcmk__first_xml_attr(xml); pIter != NULL; -+ for (pIter = pcmk__xe_first_attr(xml); pIter != NULL; - pIter = pIter->next) { - xmlNode *attr = NULL; - -@@ -156,7 +156,7 @@ add_xml_changes_to_patchset(xmlNode *xml, xmlNode *patchset) - change = create_xml_node(change->parent, XML_DIFF_RESULT); - result = create_xml_node(change, (const char *)xml->name); - -- for (pIter = pcmk__first_xml_attr(xml); pIter != NULL; -+ for (pIter = pcmk__xe_first_attr(xml); pIter != NULL; - pIter = pIter->next) { - p = pIter->_private; - if (!pcmk_is_set(p->flags, xpf_deleted)) { -@@ -677,7 +677,7 @@ process_v1_removals(xmlNode *target, xmlNode *patch) - return; - } - -- for (xIter = pcmk__first_xml_attr(patch); xIter != NULL; -+ for (xIter = pcmk__xe_first_attr(patch); xIter != NULL; - xIter = xIter->next) { - const char *p_name = (const char *)xIter->name; - -@@ -745,7 +745,7 @@ process_v1_additions(xmlNode *parent, xmlNode *target, xmlNode *patch) - return); - CRM_CHECK(pcmk__str_eq(ID(target), ID(patch), pcmk__str_casei), return); - -- for (xIter = pcmk__first_xml_attr(patch); xIter != NULL; -+ for (xIter = pcmk__xe_first_attr(patch); xIter != NULL; - xIter = xIter->next) { - const char *p_name = (const char *) xIter->name; - const char *p_value = crm_element_value(patch, p_name); -@@ -1204,7 +1204,7 @@ apply_v2_patchset(xmlNode *xml, xmlNode *patchset) - free_xml(match); - - } else if (strcmp(op, "modify") == 0) { -- xmlAttr *pIter = pcmk__first_xml_attr(match); -+ xmlAttr *pIter = pcmk__xe_first_attr(match); - xmlNode *attrs = NULL; - - attrs = pcmk__xml_first_child(first_named_child(change, -@@ -1220,7 +1220,7 @@ apply_v2_patchset(xmlNode *xml, xmlNode *patchset) - xml_remove_prop(match, name); - } - -- for (pIter = pcmk__first_xml_attr(attrs); pIter != NULL; -+ for (pIter = pcmk__xe_first_attr(attrs); pIter != NULL; - pIter = pIter->next) { - const char *name = (const char *) pIter->name; - const char *value = crm_element_value(attrs, name); -@@ -1553,7 +1553,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right, - } else if (full) { - xmlAttrPtr pIter = NULL; - -- for (pIter = pcmk__first_xml_attr(left); pIter != NULL; -+ for (pIter = pcmk__xe_first_attr(left); pIter != NULL; - pIter = pIter->next) { - const char *p_name = (const char *)pIter->name; - const char *p_value = pcmk__xml_attr_value(pIter); -@@ -1566,7 +1566,7 @@ subtract_xml_object(xmlNode 
*parent, xmlNode *left, xmlNode *right, - } - - // Changes to name/value pairs -- for (xIter = pcmk__first_xml_attr(left); xIter != NULL; -+ for (xIter = pcmk__xe_first_attr(left); xIter != NULL; - xIter = xIter->next) { - const char *prop_name = (const char *) xIter->name; - xmlAttrPtr right_attr = NULL; -@@ -1594,7 +1594,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right, - if (full) { - xmlAttrPtr pIter = NULL; - -- for (pIter = pcmk__first_xml_attr(left); pIter != NULL; -+ for (pIter = pcmk__xe_first_attr(left); pIter != NULL; - pIter = pIter->next) { - const char *p_name = (const char *) pIter->name; - const char *p_value = pcmk__xml_attr_value(pIter); -@@ -1624,7 +1624,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right, - - crm_trace("Changes detected to %s in <%s id=%s>", prop_name, - crm_element_name(left), id); -- for (pIter = pcmk__first_xml_attr(left); pIter != NULL; -+ for (pIter = pcmk__xe_first_attr(left); pIter != NULL; - pIter = pIter->next) { - const char *p_name = (const char *) pIter->name; - const char *p_value = pcmk__xml_attr_value(pIter); -diff --git a/lib/common/xml.c b/lib/common/xml.c -index abb120c..f2f48e9 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -337,7 +337,7 @@ accept_attr_deletions(xmlNode *xml) - xml_private_t *p = xml->_private; - - p->flags = xpf_none; -- pIter = pcmk__first_xml_attr(xml); -+ pIter = pcmk__xe_first_attr(xml); - - while (pIter != NULL) { - const xmlChar *name = pIter->name; -@@ -528,11 +528,9 @@ copy_in_properties(xmlNode * target, xmlNode * src) - crm_err("No node to copy properties into"); - - } else { -- xmlAttrPtr pIter = NULL; -- -- for (pIter = pcmk__first_xml_attr(src); pIter != NULL; pIter = pIter->next) { -- const char *p_name = (const char *)pIter->name; -- const char *p_value = pcmk__xml_attr_value(pIter); -+ for (xmlAttrPtr a = pcmk__xe_first_attr(src); a != NULL; a = a->next) { -+ const char *p_name = (const char *) a->name; -+ const char *p_value = pcmk__xml_attr_value(a); - - expand_plus_plus(target, p_name, p_value); - } -@@ -546,11 +544,10 @@ fix_plus_plus_recursive(xmlNode * target) - { - /* TODO: Remove recursion and use xpath searches for value++ */ - xmlNode *child = NULL; -- xmlAttrPtr pIter = NULL; - -- for (pIter = pcmk__first_xml_attr(target); pIter != NULL; pIter = pIter->next) { -- const char *p_name = (const char *)pIter->name; -- const char *p_value = pcmk__xml_attr_value(pIter); -+ for (xmlAttrPtr a = pcmk__xe_first_attr(target); a != NULL; a = a->next) { -+ const char *p_name = (const char *) a->name; -+ const char *p_value = pcmk__xml_attr_value(a); - - expand_plus_plus(target, p_name, p_value); - } -@@ -1429,7 +1426,6 @@ pcmk__xe_log(int log_level, const char *file, const char *function, int line, - const char *hidden = NULL; - - xmlNode *child = NULL; -- xmlAttrPtr pIter = NULL; - - if ((data == NULL) || (log_level == LOG_NEVER)) { - return; -@@ -1449,10 +1445,12 @@ pcmk__xe_log(int log_level, const char *file, const char *function, int line, - buffer_print(buffer, max, offset, "<%s", name); - - hidden = crm_element_value(data, "hidden"); -- for (pIter = pcmk__first_xml_attr(data); pIter != NULL; pIter = pIter->next) { -- xml_private_t *p = pIter->_private; -- const char *p_name = (const char *)pIter->name; -- const char *p_value = pcmk__xml_attr_value(pIter); -+ for (xmlAttrPtr a = pcmk__xe_first_attr(data); a != NULL; -+ a = a->next) { -+ -+ xml_private_t *p = a->_private; -+ const char *p_name = (const char *) a->name; -+ const char *p_value = 
pcmk__xml_attr_value(a); - char *p_copy = NULL; - - if (pcmk_is_set(p->flags, xpf_deleted)) { -@@ -1526,7 +1524,6 @@ log_xml_changes(int log_level, const char *file, const char *function, int line, - xml_private_t *p; - char *prefix_m = NULL; - xmlNode *child = NULL; -- xmlAttrPtr pIter = NULL; - - if ((data == NULL) || (log_level == LOG_NEVER)) { - return; -@@ -1566,10 +1563,10 @@ log_xml_changes(int log_level, const char *file, const char *function, int line, - pcmk__xe_log(log_level, file, function, line, flags, data, depth, - options|xml_log_option_open); - -- for (pIter = pcmk__first_xml_attr(data); pIter != NULL; pIter = pIter->next) { -- const char *aname = (const char*)pIter->name; -+ for (xmlAttrPtr a = pcmk__xe_first_attr(data); a != NULL; a = a->next) { -+ const char *aname = (const char*) a->name; - -- p = pIter->_private; -+ p = a->_private; - if (pcmk_is_set(p->flags, xpf_deleted)) { - const char *value = crm_element_value(data, aname); - flags = prefix_del; -@@ -1684,11 +1681,9 @@ log_data_element(int log_level, const char *file, const char *function, int line - static void - dump_filtered_xml(xmlNode * data, int options, char **buffer, int *offset, int *max) - { -- xmlAttrPtr xIter = NULL; -- -- for (xIter = pcmk__first_xml_attr(data); xIter != NULL; xIter = xIter->next) { -- if (!pcmk__xa_filterable((const char *) (xIter->name))) { -- dump_xml_attr(xIter, options, buffer, offset, max); -+ for (xmlAttrPtr a = pcmk__xe_first_attr(data); a != NULL; a = a->next) { -+ if (!pcmk__xa_filterable((const char *) (a->name))) { -+ dump_xml_attr(a, options, buffer, offset, max); - } - } - } -@@ -1722,10 +1717,8 @@ dump_xml_element(xmlNode * data, int options, char **buffer, int *offset, int *m - dump_filtered_xml(data, options, buffer, offset, max); - - } else { -- xmlAttrPtr xIter = NULL; -- -- for (xIter = pcmk__first_xml_attr(data); xIter != NULL; xIter = xIter->next) { -- dump_xml_attr(xIter, options, buffer, offset, max); -+ for (xmlAttrPtr a = pcmk__xe_first_attr(data); a != NULL; a = a->next) { -+ dump_xml_attr(a, options, buffer, offset, max); - } - } - -@@ -2062,7 +2055,7 @@ save_xml_to_file(xmlNode * xml, const char *desc, const char *filename) - static void - set_attrs_flag(xmlNode *xml, enum xml_private_flags flag) - { -- for (xmlAttr *attr = pcmk__first_xml_attr(xml); attr; attr = attr->next) { -+ for (xmlAttr *attr = pcmk__xe_first_attr(xml); attr; attr = attr->next) { - pcmk__set_xml_flags((xml_private_t *) (attr->_private), flag); - } - } -@@ -2152,7 +2145,7 @@ mark_attr_moved(xmlNode *new_xml, const char *element, xmlAttr *old_attr, - static void - xml_diff_old_attrs(xmlNode *old_xml, xmlNode *new_xml) - { -- xmlAttr *attr_iter = pcmk__first_xml_attr(old_xml); -+ xmlAttr *attr_iter = pcmk__xe_first_attr(old_xml); - - while (attr_iter != NULL) { - xmlAttr *old_attr = attr_iter; -@@ -2194,7 +2187,7 @@ xml_diff_old_attrs(xmlNode *old_xml, xmlNode *new_xml) - static void - mark_created_attrs(xmlNode *new_xml) - { -- xmlAttr *attr_iter = pcmk__first_xml_attr(new_xml); -+ xmlAttr *attr_iter = pcmk__xe_first_attr(new_xml); - - while (attr_iter != NULL) { - xmlAttr *new_attr = attr_iter; -@@ -2371,7 +2364,6 @@ gboolean - can_prune_leaf(xmlNode * xml_node) - { - xmlNode *cIter = NULL; -- xmlAttrPtr pIter = NULL; - gboolean can_prune = TRUE; - const char *name = crm_element_name(xml_node); - -@@ -2380,8 +2372,8 @@ can_prune_leaf(xmlNode * xml_node) - return FALSE; - } - -- for (pIter = pcmk__first_xml_attr(xml_node); pIter != NULL; pIter = pIter->next) { -- const char *p_name = 
(const char *)pIter->name; -+ for (xmlAttrPtr a = pcmk__xe_first_attr(xml_node); a != NULL; a = a->next) { -+ const char *p_name = (const char *) a->name; - - if (strcmp(p_name, XML_ATTR_ID) == 0) { - continue; -@@ -2558,15 +2550,13 @@ pcmk__xml_update(xmlNode *parent, xmlNode *target, xmlNode *update, - - } else { - /* No need for expand_plus_plus(), just raw speed */ -- xmlAttrPtr pIter = NULL; -- -- for (pIter = pcmk__first_xml_attr(update); pIter != NULL; pIter = pIter->next) { -- const char *p_name = (const char *)pIter->name; -- const char *p_value = pcmk__xml_attr_value(pIter); -+ for (xmlAttrPtr a = pcmk__xe_first_attr(update); a != NULL; -+ a = a->next) { -+ const char *p_value = pcmk__xml_attr_value(a); - - /* Remove it first so the ordering of the update is preserved */ -- xmlUnsetProp(target, (pcmkXmlStr) p_name); -- xmlSetProp(target, (pcmkXmlStr) p_name, (pcmkXmlStr) p_value); -+ xmlUnsetProp(target, a->name); -+ xmlSetProp(target, a->name, (pcmkXmlStr) p_value); - } - } - -@@ -2681,11 +2671,10 @@ replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean - can_delete = FALSE; - } - if (can_delete && delete_only) { -- xmlAttrPtr pIter = NULL; -- -- for (pIter = pcmk__first_xml_attr(update); pIter != NULL; pIter = pIter->next) { -- const char *p_name = (const char *)pIter->name; -- const char *p_value = pcmk__xml_attr_value(pIter); -+ for (xmlAttrPtr a = pcmk__xe_first_attr(update); a != NULL; -+ a = a->next) { -+ const char *p_name = (const char *) a->name; -+ const char *p_value = pcmk__xml_attr_value(a); - - right_val = crm_element_value(child, p_name); - if (!pcmk__str_eq(p_value, right_val, pcmk__str_casei)) { --- -1.8.3.1 - - -From 68e35a6f715f26a8a3b033cc91875f6f7001f106 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 19 Nov 2020 10:31:36 -0600 -Subject: [PATCH 03/12] Refactor: libcrmcommon: new internal function for - removing XML attributes - ---- - include/crm/common/xml_internal.h | 4 ++++ - lib/common/xml.c | 34 ++++++++++++++++++++++++++++++++++ - 2 files changed, 38 insertions(+) - -diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h -index b2ff529..1e80bc6 100644 ---- a/include/crm/common/xml_internal.h -+++ b/include/crm/common/xml_internal.h -@@ -136,6 +136,10 @@ const char *pcmk__xe_add_last_written(xmlNode *xe); - xmlNode *pcmk__xe_match(xmlNode *parent, const char *node_name, - const char *attr_n, const char *attr_v); - -+void pcmk__xe_remove_matching_attrs(xmlNode *element, -+ bool (*match)(xmlAttrPtr, void *), -+ void *user_data); -+ - /*! - * \internal - * \brief Get the root directory to scan XML artefacts of given kind for -diff --git a/lib/common/xml.c b/lib/common/xml.c -index f2f48e9..39c5e53 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -618,6 +618,40 @@ expand_plus_plus(xmlNode * target, const char *name, const char *value) - return; - } - -+/*! 
-+ * \internal -+ * \brief Remove an XML element's attributes that match some criteria -+ * -+ * \param[in,out] element XML element to modify -+ * \param[in] match If not NULL, only remove attributes for which -+ * this function returns true -+ * \param[in] user_data Data to pass to \p match -+ */ -+void -+pcmk__xe_remove_matching_attrs(xmlNode *element, -+ bool (*match)(xmlAttrPtr, void *), -+ void *user_data) -+{ -+ xmlAttrPtr next = NULL; -+ -+ for (xmlAttrPtr a = pcmk__xe_first_attr(element); a != NULL; a = next) { -+ next = a->next; // Grab now because attribute might get removed -+ if ((match == NULL) || match(a, user_data)) { -+ if (!pcmk__check_acl(element, NULL, xpf_acl_write)) { -+ crm_trace("ACLs prevent removal of %s attribute from %s element", -+ (const char *) a->name, (const char *) element->name); -+ -+ } else if (pcmk__tracking_xml_changes(element, false)) { -+ // Leave (marked for removal) until after diff is calculated -+ set_parent_flag(element, xpf_dirty); -+ pcmk__set_xml_flags((xml_private_t *) a->_private, xpf_deleted); -+ } else { -+ xmlRemoveProp(a); -+ } -+ } -+ } -+} -+ - xmlDoc * - getDocPtr(xmlNode * node) - { --- -1.8.3.1 - - -From a6e1804156dfd1f6c3bc1ffbf888fbd6421aa464 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 19 Nov 2020 11:10:52 -0600 -Subject: [PATCH 04/12] Refactor: libcrmcommon,libpe_status: use new attribute - removing function - -... where appropriate, for readability and efficiency ---- - lib/common/operations.c | 64 ++++++++++++++++++------------------- - lib/common/patchset.c | 29 +++++++---------- - lib/common/xml.c | 39 +++++++++++------------ - lib/pengine/pe_digest.c | 84 ++++++++++++++++++++++++++----------------------- - 4 files changed, 105 insertions(+), 111 deletions(-) - -diff --git a/lib/common/operations.c b/lib/common/operations.c -index 7e6bf5a..f3a11be 100644 ---- a/lib/common/operations.c -+++ b/lib/common/operations.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - #include - - static regex_t *notify_migrate_re = NULL; -@@ -361,6 +362,24 @@ decode_transition_key(const char *key, char **uuid, int *transition_id, int *act - return TRUE; - } - -+#define CRM_META_LEN (sizeof(CRM_META) - 1) -+ -+// Return true if a is an attribute that should be filtered -+static bool -+should_filter_for_digest(xmlAttrPtr a, void *user_data) -+{ -+ // @TODO CRM_META check should be case-sensitive -+ return (strncasecmp((const char *) a->name, CRM_META, CRM_META_LEN) == 0) -+ || pcmk__str_any_of((const char *) a->name, -+ XML_ATTR_ID, -+ XML_ATTR_CRM_VERSION, -+ XML_LRM_ATTR_OP_DIGEST, -+ XML_LRM_ATTR_TARGET, -+ XML_LRM_ATTR_TARGET_UUID, -+ "pcmk_external_ip", -+ NULL); -+} -+ - /*! 
- * \internal - * \brief Remove XML attributes not needed for operation digest -@@ -374,52 +393,31 @@ pcmk__filter_op_for_digest(xmlNode *param_set) - char *timeout = NULL; - guint interval_ms = 0; - -- const char *attr_filter[] = { -- XML_ATTR_ID, -- XML_ATTR_CRM_VERSION, -- XML_LRM_ATTR_OP_DIGEST, -- XML_LRM_ATTR_TARGET, -- XML_LRM_ATTR_TARGET_UUID, -- "pcmk_external_ip" -- }; -- -- const int meta_len = strlen(CRM_META); -- - if (param_set == NULL) { - return; - } - -- // Remove the specific attributes listed in attr_filter -- for (int lpc = 0; lpc < DIMOF(attr_filter); lpc++) { -- xml_remove_prop(param_set, attr_filter[lpc]); -- } -- -+ /* Timeout is useful for recurring operation digests, so grab it before -+ * removing meta-attributes -+ */ - key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); - if (crm_element_value_ms(param_set, key, &interval_ms) != pcmk_ok) { - interval_ms = 0; - } - free(key); -- -- key = crm_meta_name(XML_ATTR_TIMEOUT); -- timeout = crm_element_value_copy(param_set, key); -- -- // Remove all CRM_meta_* attributes -- for (xmlAttrPtr xIter = param_set->properties; xIter != NULL; ) { -- const char *prop_name = (const char *) (xIter->name); -- -- xIter = xIter->next; -- -- // @TODO Why is this case-insensitive? -- if (strncasecmp(prop_name, CRM_META, meta_len) == 0) { -- xml_remove_prop(param_set, prop_name); -- } -+ key = NULL; -+ if (interval_ms != 0) { -+ key = crm_meta_name(XML_ATTR_TIMEOUT); -+ timeout = crm_element_value_copy(param_set, key); - } - -- if ((interval_ms != 0) && (timeout != NULL)) { -- // Add the timeout back, it's useful for recurring operation digests -+ // Remove all CRM_meta_* attributes and certain other attributes -+ pcmk__xe_remove_matching_attrs(param_set, should_filter_for_digest, NULL); -+ -+ // Add timeout back for recurring operation digests -+ if (timeout != NULL) { - crm_xml_add(param_set, key, timeout); - } -- - free(timeout); - free(key); - } -diff --git a/lib/common/patchset.c b/lib/common/patchset.c -index 15cbe07..fae7046 100644 ---- a/lib/common/patchset.c -+++ b/lib/common/patchset.c -@@ -638,13 +638,19 @@ xml_log_patchset(uint8_t log_level, const char *function, xmlNode *patchset) - } - } - -+// Return true if attribute name is not "id" -+static bool -+not_id(xmlAttrPtr attr, void *user_data) -+{ -+ return strcmp((const char *) attr->name, XML_ATTR_ID) != 0; -+} -+ - // Apply the removals section of an v1 patchset to an XML node - static void - process_v1_removals(xmlNode *target, xmlNode *patch) - { - xmlNode *patch_child = NULL; - xmlNode *cIter = NULL; -- xmlAttrPtr xIter = NULL; - - char *id = NULL; - const char *name = NULL; -@@ -677,15 +683,8 @@ process_v1_removals(xmlNode *target, xmlNode *patch) - return; - } - -- for (xIter = pcmk__xe_first_attr(patch); xIter != NULL; -- xIter = xIter->next) { -- const char *p_name = (const char *)xIter->name; -- -- // Removing then restoring id would change ordering of properties -- if (!pcmk__str_eq(p_name, XML_ATTR_ID, pcmk__str_casei)) { -- xml_remove_prop(target, p_name); -- } -- } -+ // Removing then restoring id would change ordering of properties -+ pcmk__xe_remove_matching_attrs(patch, not_id, NULL); - - // Changes to child objects - cIter = pcmk__xml_first_child(target); -@@ -1204,7 +1203,6 @@ apply_v2_patchset(xmlNode *xml, xmlNode *patchset) - free_xml(match); - - } else if (strcmp(op, "modify") == 0) { -- xmlAttr *pIter = pcmk__xe_first_attr(match); - xmlNode *attrs = NULL; - - attrs = pcmk__xml_first_child(first_named_child(change, -@@ -1213,14 +1211,9 @@ 
apply_v2_patchset(xmlNode *xml, xmlNode *patchset) - rc = ENOMSG; - continue; - } -- while (pIter != NULL) { -- const char *name = (const char *)pIter->name; -- -- pIter = pIter->next; -- xml_remove_prop(match, name); -- } -+ pcmk__xe_remove_matching_attrs(match, NULL, NULL); // Remove all - -- for (pIter = pcmk__xe_first_attr(attrs); pIter != NULL; -+ for (xmlAttrPtr pIter = pcmk__xe_first_attr(attrs); pIter != NULL; - pIter = pIter->next) { - const char *name = (const char *) pIter->name; - const char *value = crm_element_value(attrs, name); -diff --git a/lib/common/xml.c b/lib/common/xml.c -index 39c5e53..869ed51 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -328,32 +328,31 @@ pcmk__xml_position(xmlNode *xml, enum xml_private_flags ignore_if_set) - return position; - } - --// Remove all attributes marked as deleted from an XML node --static void --accept_attr_deletions(xmlNode *xml) -+// This also clears attribute's flags if not marked as deleted -+static bool -+marked_as_deleted(xmlAttrPtr a, void *user_data) - { -- xmlNode *cIter = NULL; -- xmlAttr *pIter = NULL; -- xml_private_t *p = xml->_private; -+ xml_private_t *p = a->_private; - -+ if (pcmk_is_set(p->flags, xpf_deleted)) { -+ return true; -+ } - p->flags = xpf_none; -- pIter = pcmk__xe_first_attr(xml); -- -- while (pIter != NULL) { -- const xmlChar *name = pIter->name; -- -- p = pIter->_private; -- pIter = pIter->next; -+ return false; -+} - -- if(p->flags & xpf_deleted) { -- xml_remove_prop(xml, (const char *)name); -+// Remove all attributes marked as deleted from an XML node -+static void -+accept_attr_deletions(xmlNode *xml) -+{ -+ // Clear XML node's flags -+ ((xml_private_t *) xml->_private)->flags = xpf_none; - -- } else { -- p->flags = xpf_none; -- } -- } -+ // Remove this XML node's attributes that were marked as deleted -+ pcmk__xe_remove_matching_attrs(xml, marked_as_deleted, NULL); - -- for (cIter = pcmk__xml_first_child(xml); cIter != NULL; -+ // Recursively do the same for this XML node's children -+ for (xmlNodePtr cIter = pcmk__xml_first_child(xml); cIter != NULL; - cIter = pcmk__xml_next(cIter)) { - accept_attr_deletions(cIter); - } -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index dd6b753..e8cb108 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include "pe_status_private.h" - -@@ -45,40 +46,36 @@ pe__free_digests(gpointer ptr) - } - } - --/*! 
-- * \internal -- * \brief Remove named attributes from an XML element -- * -- * \param[in,out] param_set XML to be filtered -- * \param[in] param_string Space-separated list of attribute names -- * \param[in] need_present Whether to remove attributes that match, -- * or those that don't match -- */ --static void --filter_parameters(xmlNode *param_set, const char *param_string, -- bool need_present) -+// Return true if XML attribute name is substring of a given string -+static bool -+attr_in_string(xmlAttrPtr a, void *user_data) - { -- if ((param_set == NULL) || (param_string == NULL)) { -- return; -- } -- for (xmlAttrPtr xIter = param_set->properties; xIter; ) { -- const char *prop_name = (const char *) xIter->name; -- char *name = crm_strdup_printf(" %s ", prop_name); -- char *match = strstr(param_string, name); -- -- free(name); -+ bool filter = false; -+ char *name = crm_strdup_printf(" %s ", (const char *) a->name); - -- // Do now, because current entry might get removed below -- xIter = xIter->next; -+ if (strstr((const char *) user_data, name) == NULL) { -+ crm_trace("Filtering %s (not found in '%s')", -+ (const char *) a->name, (const char *) user_data); -+ filter = true; -+ } -+ free(name); -+ return filter; -+} - -- if ((need_present && (match == NULL)) -- || (!need_present && (match != NULL))) { -+// Return true if XML attribute name is not substring of a given string -+static bool -+attr_not_in_string(xmlAttrPtr a, void *user_data) -+{ -+ bool filter = false; -+ char *name = crm_strdup_printf(" %s ", (const char *) a->name); - -- crm_trace("Filtering %s (%sfound in '%s')", -- prop_name, (need_present? "not " : ""), param_string); -- xml_remove_prop(param_set, prop_name); -- } -+ if (strstr((const char *) user_data, name) != NULL) { -+ crm_trace("Filtering %s (found in '%s')", -+ (const char *) a->name, (const char *) user_data); -+ filter = true; - } -+ free(name); -+ return filter; - } - - #if ENABLE_VERSIONED_ATTRS -@@ -177,6 +174,13 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - op_version); - } - -+// Return true if XML attribute name is a Pacemaker-defined fencing parameter -+static bool -+is_fence_param(xmlAttrPtr attr, void *user_data) -+{ -+ return pcmk_stonith_param((const char *) attr->name); -+} -+ - /*! - * \internal - * \brief Add secure digest to a digest cache entry -@@ -209,8 +213,12 @@ calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, - if (overrides != NULL) { - g_hash_table_foreach(overrides, hash2field, data->params_secure); - } -+ - g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); -- filter_parameters(data->params_secure, secure_list, FALSE); -+ if (secure_list != NULL) { -+ pcmk__xe_remove_matching_attrs(data->params_secure, attr_not_in_string, -+ (void *) secure_list); -+ } - if (pcmk_is_set(pcmk_get_ra_caps(class), - pcmk_ra_cap_fence_params)) { - /* For stonith resources, Pacemaker adds special parameters, -@@ -218,15 +226,8 @@ calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, - * controller will not hash them. That means we have to filter - * them out before calculating our hash for comparison. 
- */ -- for (xmlAttrPtr iter = data->params_secure->properties; -- iter != NULL; ) { -- const char *prop_name = (const char *) iter->name; -- -- iter = iter->next; // Grab next now in case we remove current -- if (pcmk_stonith_param(prop_name)) { -- xml_remove_prop(data->params_secure, prop_name); -- } -- } -+ pcmk__xe_remove_matching_attrs(data->params_secure, is_fence_param, -+ NULL); - } - data->digest_secure_calc = calculate_operation_digest(data->params_secure, - op_version); -@@ -264,7 +265,10 @@ calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, - - // Then filter out reloadable parameters, if any - value = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); -- filter_parameters(data->params_restart, value, TRUE); -+ if (value != NULL) { -+ pcmk__xe_remove_matching_attrs(data->params_restart, attr_in_string, -+ (void *) value); -+ } - - value = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); - data->digest_restart_calc = calculate_operation_digest(data->params_restart, --- -1.8.3.1 - - -From 579875b70c2399f6cbf15d7afd210a2d4e2ed9df Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 19 Nov 2020 11:47:20 -0600 -Subject: [PATCH 05/12] Refactor: libcrmcommon,libpe_status: use "first XML - attribute" function - -... where appropriate. It checks for NULL, so that doesn't need to be -duplicated. ---- - lib/common/patchset.c | 11 +++-------- - lib/pengine/complex.c | 12 ++++-------- - 2 files changed, 7 insertions(+), 16 deletions(-) - -diff --git a/lib/common/patchset.c b/lib/common/patchset.c -index fae7046..46d136a 100644 ---- a/lib/common/patchset.c -+++ b/lib/common/patchset.c -@@ -279,15 +279,10 @@ xml_repair_v1_diff(xmlNode *last, xmlNode *next, xmlNode *local_diff, - crm_xml_add(diff_child, vfields[lpc], value); - } - -- if (next) { -- xmlAttrPtr xIter = NULL; -+ for (xmlAttrPtr a = pcmk__xe_first_attr(next); a != NULL; a = a->next) { -+ const char *p_value = crm_element_value(next, (const char *) a->name); - -- for (xIter = next->properties; xIter; xIter = xIter->next) { -- const char *p_name = (const char *) xIter->name; -- const char *p_value = crm_element_value(next, p_name); -- -- xmlSetProp(cib, (pcmkXmlStr) p_name, (pcmkXmlStr) p_value); -- } -+ xmlSetProp(cib, a->name, (pcmkXmlStr) p_value); - } - - crm_log_xml_explicit(local_diff, "Repaired-diff"); -diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c -index a087e5e..5f484ad 100644 ---- a/lib/pengine/complex.c -+++ b/lib/pengine/complex.c -@@ -159,15 +159,11 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, - rule_data.node_hash = node->details->attrs; - } - -- if (rsc->xml) { -- xmlAttrPtr xIter = NULL; -+ for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) { -+ const char *prop_name = (const char *) a->name; -+ const char *prop_value = crm_element_value(rsc->xml, prop_name); - -- for (xIter = rsc->xml->properties; xIter; xIter = xIter->next) { -- const char *prop_name = (const char *)xIter->name; -- const char *prop_value = crm_element_value(rsc->xml, prop_name); -- -- add_hash_param(meta_hash, prop_name, prop_value); -- } -+ add_hash_param(meta_hash, prop_name, prop_value); - } - - pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data, --- -1.8.3.1 - - -From a30d60e6344129e03fcab9cd8c9523bf24951a92 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 19 Nov 2020 13:37:37 -0600 -Subject: [PATCH 06/12] Low: libcrmcommon: compare CRM_meta_ properly - -Previously, when filtering XML attributes for digests, we would filter -attributes 
starting with "CRM_meta" case-insensitively. Now, compare -against "CRM_meta_" case-sensitively. - -This could potentially cause resource restarts after upgrading to a version -with this commit, for any resource that has instance attributes that start with -something like "crm_meta" -- a highly unlikely scenario. ---- - lib/common/operations.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/lib/common/operations.c b/lib/common/operations.c -index f3a11be..421aaac 100644 ---- a/lib/common/operations.c -+++ b/lib/common/operations.c -@@ -362,14 +362,14 @@ decode_transition_key(const char *key, char **uuid, int *transition_id, int *act - return TRUE; - } - --#define CRM_META_LEN (sizeof(CRM_META) - 1) -+// String length of CRM_META"_" -+#define CRM_META_LEN sizeof(CRM_META) - - // Return true if a is an attribute that should be filtered - static bool - should_filter_for_digest(xmlAttrPtr a, void *user_data) - { -- // @TODO CRM_META check should be case-sensitive -- return (strncasecmp((const char *) a->name, CRM_META, CRM_META_LEN) == 0) -+ return (strncmp((const char *) a->name, CRM_META "_", CRM_META_LEN) == 0) - || pcmk__str_any_of((const char *) a->name, - XML_ATTR_ID, - XML_ATTR_CRM_VERSION, --- -1.8.3.1 - - -From cb81616eaf53e7029a0c196ffab6203d60389386 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 23 Nov 2020 11:04:08 -0600 -Subject: [PATCH 07/12] Low: scheduler: filter non-private parameters properly - for digest - -Do the same filtering for non-private parameter digests as for all-parameter -digests. This matters when a filterable parameter is specified in overrides. ---- - lib/pengine/pe_digest.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index e8cb108..03aa3bc 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -229,6 +229,7 @@ calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, - pcmk__xe_remove_matching_attrs(data->params_secure, is_fence_param, - NULL); - } -+ pcmk__filter_op_for_digest(data->params_secure); - data->digest_secure_calc = calculate_operation_digest(data->params_secure, - op_version); - } --- -1.8.3.1 - - -From fe4aee37ceca64fd8a512e7cae57bae0745c7ace Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 23 Nov 2020 17:17:59 -0600 -Subject: [PATCH 08/12] Refactor: scheduler: pull interval from digest - overrides if appropriate - -When calculating an operation digest, we previously used an operation key -provided by the caller, along with a table of parameter overrides. However, if -CRM_meta_interval is one of the overrides, that should be used in the operation -key instead of the caller-provided key. - -Now, the caller passes in a pointer to the interval, and if it's overridden, -pe__calculate_digests() will use the overridden value and reset the caller's -interval to it. - -As of this commit, no caller passes overrides, so it has no effect, but it will -be useful for an upcoming feature. 
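As a minimal illustration of the new calling convention (an editor's sketch only, not part of the patch series; the helper name is hypothetical, and rsc, node and data_set are assumed to come from an already-unpacked working set), a future caller that does pass overrides could let a CRM_meta_interval override win over its own interval value:

#include <stdbool.h>
#include <string.h>
#include <glib.h>
#include <crm/crm.h>                 // CRM_META, crm_str_table_new()
#include <crm/msg_xml.h>             // XML_LRM_ATTR_INTERVAL
#include <crm/pengine/internal.h>    // pe__calculate_digests(), pe__free_digests()

static void
digest_with_overridden_interval(pe_resource_t *rsc, pe_node_t *node,
                                pe_working_set_t *data_set)
{
    guint interval_ms = 10000;               // caller's initial interval (10s)
    GHashTable *overrides = crm_str_table_new();
    op_digest_cache_t *digests = NULL;

    // An overridden CRM_meta_interval takes precedence over interval_ms
    g_hash_table_insert(overrides, strdup(CRM_META "_" XML_LRM_ATTR_INTERVAL),
                        strdup("20000"));

    digests = pe__calculate_digests(rsc, "monitor", &interval_ms, node,
                                    NULL /* no CIB history entry */,
                                    overrides, false, data_set);
    // interval_ms should now have been reset to 20000 (the overridden value)

    pe__free_digests(digests);
    g_hash_table_destroy(overrides);
}
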
---- - include/crm/pengine/internal.h | 2 +- - lib/pengine/pe_digest.c | 117 +++++++++++++++++++++++++---------------- - 2 files changed, 73 insertions(+), 46 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index a4f8086..1e5aee1 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -503,7 +503,7 @@ typedef struct op_digest_cache_s { - } op_digest_cache_t; - - op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task, -- const char *key, pe_node_t *node, -+ guint *interval_ms, pe_node_t *node, - xmlNode *xml_op, GHashTable *overrides, - bool calc_secure, - pe_working_set_t *data_set); -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index 03aa3bc..b608f22 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -121,19 +121,19 @@ append_all_versioned_params(pe_resource_t *rsc, pe_node_t *node, - * \internal - * \brief Add digest of all parameters to a digest cache entry - * -- * \param[out] data Digest cache entry to modify -- * \param[in] rsc Resource that action was for -- * \param[in] node Node action was performed on -- * \param[in] task Name of action performed -- * \param[in] key Action's task key -- * \param[in] xml_op XML of operation in CIB status (if available) -- * \param[in] op_version CRM feature set to use for digest calculation -- * \param[in] overrides Key/value hash table to override resource parameters -- * \param[in] data_set Cluster working set -+ * \param[out] data Digest cache entry to modify -+ * \param[in] rsc Resource that action was for -+ * \param[in] node Node action was performed on -+ * \param[in] task Name of action performed -+ * \param[in,out] interval_ms Action's interval (will be reset if in overrides) -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] op_version CRM feature set to use for digest calculation -+ * \param[in] overrides Key/value table to override resource parameters -+ * \param[in] data_set Cluster working set - */ - static void - calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, -- pe_node_t *node, const char *task, const char *key, -+ pe_node_t *node, const char *task, guint *interval_ms, - xmlNode *xml_op, const char *op_version, - GHashTable *overrides, pe_working_set_t *data_set) - { -@@ -153,7 +153,24 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - rsc->id, node->details->uname); - } - -- action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); -+ // If interval was overridden, reset it -+ if (overrides != NULL) { -+ const char *interval_s = g_hash_table_lookup(overrides, CRM_META "_" -+ XML_LRM_ATTR_INTERVAL); -+ -+ if (interval_s != NULL) { -+ long long value_ll; -+ -+ errno = 0; -+ value_ll = crm_parse_ll(interval_s, NULL); -+ if ((errno == 0) && (value_ll >= 0) && (value_ll <= G_MAXUINT)) { -+ *interval_ms = (guint) value_ll; -+ } -+ } -+ } -+ -+ action = custom_action(rsc, pcmk__op_key(rsc->id, task, *interval_ms), -+ task, node, TRUE, FALSE, data_set); - if (overrides != NULL) { - g_hash_table_foreach(overrides, hash2field, data->params_all); - } -@@ -280,21 +297,21 @@ calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op, - * \internal - * \brief Create a new digest cache entry with calculated digests - * -- * \param[in] rsc Resource that action was for -- * \param[in] task Name of action performed -- * \param[in] key Action's task key -- * \param[in] node Node action was performed on -- * 
\param[in] xml_op XML of operation in CIB status (if available) -- * \param[in] overrides Key/value hash table to override resource parameters -- * \param[in] calc_secure Whether to calculate secure digest -- * \param[in] data_set Cluster working set -+ * \param[in] rsc Resource that action was for -+ * \param[in] task Name of action performed -+ * \param[in,out] interval_ms Action's interval (will be reset if in overrides) -+ * \param[in] node Node action was performed on -+ * \param[in] xml_op XML of operation in CIB status (if available) -+ * \param[in] overrides Key/value table to override resource parameters -+ * \param[in] calc_secure Whether to calculate secure digest -+ * \param[in] data_set Cluster working set - * - * \return Pointer to new digest cache entry (or NULL on memory error) - * \note It is the caller's responsibility to free the result using - * pe__free_digests(). - */ - op_digest_cache_t * --pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, -+pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms, - pe_node_t *node, xmlNode *xml_op, GHashTable *overrides, - bool calc_secure, pe_working_set_t *data_set) - { -@@ -307,8 +324,9 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, - if (xml_op != NULL) { - op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); - } -- calculate_main_digest(data, rsc, node, task, key, xml_op, op_version, -- overrides, data_set); -+ -+ calculate_main_digest(data, rsc, node, task, interval_ms, xml_op, -+ op_version, overrides, data_set); - if (calc_secure) { - calculate_secure_digest(data, rsc, xml_op, op_version, overrides); - } -@@ -322,7 +340,7 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, - * - * \param[in] rsc Resource that action was for - * \param[in] task Name of action performed -- * \param[in] key Action's task key -+ * \param[in] interval_ms Action's interval - * \param[in] node Node action was performed on - * \param[in] xml_op XML of operation in CIB status (if available) - * \param[in] calc_secure Whether to calculate secure digest -@@ -331,29 +349,40 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, const char *key, - * \return Pointer to node's digest cache entry - */ - static op_digest_cache_t * --rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, -+rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms, - pe_node_t *node, xmlNode *xml_op, bool calc_secure, - pe_working_set_t *data_set) - { - op_digest_cache_t *data = NULL; -+ char *key = pcmk__op_key(rsc->id, task, interval_ms); - - data = g_hash_table_lookup(node->details->digest_cache, key); - if (data == NULL) { -- data = pe__calculate_digests(rsc, task, key, node, xml_op, NULL, -- calc_secure, data_set); -+ data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op, -+ NULL, calc_secure, data_set); - CRM_ASSERT(data != NULL); - g_hash_table_insert(node->details->digest_cache, strdup(key), data); - } -+ free(key); - return data; - } - -+/*! 
-+ * \internal -+ * \brief Calculate operation digests and compare against an XML history entry -+ * -+ * \param[in] rsc Resource to check -+ * \param[in] xml_op Resource history XML -+ * \param[in] node Node to use for digest calculation -+ * \param[in] data_set Cluster working set -+ * -+ * \return Pointer to node's digest cache entry, with comparison result set -+ */ - op_digest_cache_t * - rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, - pe_working_set_t * data_set) - { - op_digest_cache_t *data = NULL; -- -- char *key = NULL; - guint interval_ms = 0; - - const char *op_version; -@@ -368,17 +397,18 @@ rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, - digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); - - crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); -- key = pcmk__op_key(rsc->id, task, interval_ms); -- data = rsc_action_digest(rsc, task, key, node, xml_op, -+ data = rsc_action_digest(rsc, task, interval_ms, node, xml_op, - pcmk_is_set(data_set->flags, pe_flag_sanitized), - data_set); - - data->rc = RSC_DIGEST_MATCH; - if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) { -- pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s", -- key, node->details->uname, -- crm_str(digest_restart), data->digest_restart_calc, -- op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); -+ pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s " -+ "changed: hash was %s vs. now %s (restart:%s) %s", -+ interval_ms, task, rsc->id, node->details->uname, -+ crm_str(digest_restart), data->digest_restart_calc, -+ op_version, -+ crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); - data->rc = RSC_DIGEST_RESTART; - - } else if (digest_all == NULL) { -@@ -386,15 +416,15 @@ rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, - data->rc = RSC_DIGEST_UNKNOWN; - - } else if (strcmp(digest_all, data->digest_all_calc) != 0) { -- pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s", -- key, node->details->uname, -- crm_str(digest_all), data->digest_all_calc, -- (interval_ms > 0)? "reschedule" : "reload", -- op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); -+ pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s " -+ "changed: hash was %s vs. now %s (%s:%s) %s", -+ interval_ms, task, rsc->id, node->details->uname, -+ crm_str(digest_all), data->digest_all_calc, -+ (interval_ms > 0)? 
"reschedule" : "reload", -+ op_version, -+ crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); - data->rc = RSC_DIGEST_ALL; - } -- -- free(key); - return data; - } - -@@ -483,12 +513,9 @@ pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent, - const char *node_summary = NULL; - - // Calculate device's current parameter digests -- char *key = pcmk__op_key(rsc->id, STONITH_DIGEST_TASK, 0); -- op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, -+ op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U, - node, NULL, TRUE, data_set); - -- free(key); -- - // Check whether node has special unfencing summary node attribute - node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL); - if (node_summary == NULL) { --- -1.8.3.1 - - -From bf20b166b6e7dcf87edd398f2edfc384cb640886 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 23 Nov 2020 17:48:34 -0600 -Subject: [PATCH 09/12] Low: scheduler: don't include timeout in secure digests - -... to match what the controller does ---- - lib/pengine/pe_digest.c | 11 +++++++++++ - 1 file changed, 11 insertions(+) - -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index b608f22..f55c896 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -247,6 +247,17 @@ calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, - NULL); - } - pcmk__filter_op_for_digest(data->params_secure); -+ -+ /* CRM_meta_timeout *should* be part of a digest for recurring operations. -+ * However, currently the controller does not add timeout to secure digests, -+ * because it only includes parameters declared by the resource agent. -+ * Remove any timeout that made it this far, to match. -+ * -+ * @TODO Update the controller to add the timeout (which will require -+ * bumping the feature set and checking that here). -+ */ -+ xml_remove_prop(data->params_secure, CRM_META "_" XML_ATTR_TIMEOUT); -+ - data->digest_secure_calc = calculate_operation_digest(data->params_secure, - op_version); - } --- -1.8.3.1 - - -From a956e32a536942b0fc1f2f058e441b3faf2abdd3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 7 Jan 2021 12:08:56 -0600 -Subject: [PATCH 10/12] Low: scheduler: treat NULL and empty string the same in - literal attribute comparisons - -Previously, expand_value_source() returned NULL if the value was the empty -string ("") only when value-source was "param" or "meta". If value-source was -literal, it would return the empty string. - -This behavior shouldn't depend on value-source, so it now returns NULL when a -literal value is the empty string. - -This could change the behavior for "defined"/"not_defined" checks, and -comparisons against another NULL or empty string value (NULL compares less than -empty strings). But the consistency seems worth it. - -(Another question not addressed here is whether NULL and empty string should -compare as equal.) ---- - lib/pengine/rules.c | 9 ++++++--- - 1 file changed, 6 insertions(+), 3 deletions(-) - -diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c -index aa5d6ab..d69f449 100644 ---- a/lib/pengine/rules.c -+++ b/lib/pengine/rules.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -1039,7 +1039,10 @@ expand_value_source(const char *value, const char *value_source, - { - GHashTable *table = NULL; - -- if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) { -+ if (pcmk__str_empty(value)) { -+ return NULL; // value_source is irrelevant -+ -+ } else if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) { - table = match_data->params; - - } else if (pcmk__str_eq(value_source, "meta", pcmk__str_casei)) { -@@ -1049,7 +1052,7 @@ expand_value_source(const char *value, const char *value_source, - return value; - } - -- if ((table == NULL) || pcmk__str_empty(value)) { -+ if (table == NULL) { - return NULL; - } - return (const char *) g_hash_table_lookup(table, value); --- -1.8.3.1 - - -From 2d7cd78340b47045b5987002c9b2d221ccb01ee9 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 7 Jan 2021 13:01:29 -0600 -Subject: [PATCH 11/12] Refactor: libcrmcommon: bail early if ACLs block - attribute removal - -... for efficiency and fewer trace messages ---- - lib/common/xml.c | 9 ++++++--- - 1 file changed, 6 insertions(+), 3 deletions(-) - -diff --git a/lib/common/xml.c b/lib/common/xml.c -index 869ed51..8b71911 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -637,10 +637,13 @@ pcmk__xe_remove_matching_attrs(xmlNode *element, - next = a->next; // Grab now because attribute might get removed - if ((match == NULL) || match(a, user_data)) { - if (!pcmk__check_acl(element, NULL, xpf_acl_write)) { -- crm_trace("ACLs prevent removal of %s attribute from %s element", -+ crm_trace("ACLs prevent removal of attributes (%s and " -+ "possibly others) from %s element", - (const char *) a->name, (const char *) element->name); -+ return; // ACLs apply to element, not particular attributes -+ } - -- } else if (pcmk__tracking_xml_changes(element, false)) { -+ if (pcmk__tracking_xml_changes(element, false)) { - // Leave (marked for removal) until after diff is calculated - set_parent_flag(element, xpf_dirty); - pcmk__set_xml_flags((xml_private_t *) a->_private, xpf_deleted); --- -1.8.3.1 - - -From 459f7a58424b05b7c586906d904129a6408d6206 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 7 Jan 2021 13:30:40 -0600 -Subject: [PATCH 12/12] Refactor: libcrmcommon: drop a constant - -It was only used once, and the code is actually more readable without it ---- - lib/common/operations.c | 26 +++++++++++++------------- - 1 file changed, 13 insertions(+), 13 deletions(-) - -diff --git a/lib/common/operations.c b/lib/common/operations.c -index 421aaac..420f078 100644 ---- a/lib/common/operations.c -+++ b/lib/common/operations.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -362,22 +362,22 @@ decode_transition_key(const char *key, char **uuid, int *transition_id, int *act - return TRUE; - } - --// String length of CRM_META"_" --#define CRM_META_LEN sizeof(CRM_META) -- - // Return true if a is an attribute that should be filtered - static bool - should_filter_for_digest(xmlAttrPtr a, void *user_data) - { -- return (strncmp((const char *) a->name, CRM_META "_", CRM_META_LEN) == 0) -- || pcmk__str_any_of((const char *) a->name, -- XML_ATTR_ID, -- XML_ATTR_CRM_VERSION, -- XML_LRM_ATTR_OP_DIGEST, -- XML_LRM_ATTR_TARGET, -- XML_LRM_ATTR_TARGET_UUID, -- "pcmk_external_ip", -- NULL); -+ if (strncmp((const char *) a->name, CRM_META "_", -+ sizeof(CRM_META " ") - 1) == 0) { -+ return true; -+ } -+ return pcmk__str_any_of((const char *) a->name, -+ XML_ATTR_ID, -+ XML_ATTR_CRM_VERSION, -+ XML_LRM_ATTR_OP_DIGEST, -+ XML_LRM_ATTR_TARGET, -+ XML_LRM_ATTR_TARGET_UUID, -+ "pcmk_external_ip", -+ NULL); - } - - /*! --- -1.8.3.1 - diff --git a/SOURCES/021-rhbz1872376.patch b/SOURCES/021-rhbz1872376.patch deleted file mode 100644 index 1395975..0000000 --- a/SOURCES/021-rhbz1872376.patch +++ /dev/null @@ -1,2082 +0,0 @@ -From 4521f547457af1201442e072d426fdf89de1150e Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 9 Nov 2020 18:09:54 -0600 -Subject: [PATCH 01/13] API: libpe_status: add pe_rsc_params() and new - pe_resource_t member - -Instead of a single parameters table for a resource, this allows the -possibility of a parameter table per node for the resource, since -rule-based parameters may evaluate differently on different nodes. ---- - include/crm/pengine/complex.h | 5 +++- - include/crm/pengine/pe_types.h | 8 +++++- - lib/pengine/complex.c | 61 +++++++++++++++++++++++++++++++++++++++++- - 3 files changed, 71 insertions(+), 3 deletions(-) - -diff --git a/include/crm/pengine/complex.h b/include/crm/pengine/complex.h -index effa44f..1d010f4 100644 ---- a/include/crm/pengine/complex.h -+++ b/include/crm/pengine/complex.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2019 the Pacemaker project contributors -+ * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -19,6 +19,9 @@ extern "C" { - #include // pe_node_t, pe_resource_t, etc. - - extern resource_object_functions_t resource_class_functions[]; -+ -+GHashTable *pe_rsc_params(pe_resource_t *rsc, pe_node_t *node, -+ pe_working_set_t *data_set); - void get_meta_attributes(GHashTable * meta_hash, pe_resource_t *rsc, - pe_node_t *node, pe_working_set_t *data_set); - void get_rsc_attributes(GHashTable *meta_hash, pe_resource_t *rsc, -diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h -index 59d5ce8..5529714 100644 ---- a/include/crm/pengine/pe_types.h -+++ b/include/crm/pengine/pe_types.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -371,6 +371,12 @@ struct pe_resource_s { - pe_node_t *lock_node; // Resource is shutdown-locked to this node - time_t lock_time; // When shutdown lock started - -+ /* Resource parameters may have node-attribute-based rules, which means the -+ * values can vary by node. This table is a cache of parameter name/value -+ * tables for each node (as needed). Use pe_rsc_params() to get the table -+ * for a given node. 
-+ */ -+ GHashTable *parameter_cache; // Key = node name, value = parameters table - #if ENABLE_VERSIONED_ATTRS - xmlNode *versioned_parameters; - #endif -diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c -index 5f484ad..7037ca1 100644 ---- a/lib/pengine/complex.c -+++ b/lib/pengine/complex.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -435,6 +435,62 @@ detect_promotable(pe_resource_t *rsc) - return FALSE; - } - -+static void -+free_params_table(gpointer data) -+{ -+ g_hash_table_destroy((GHashTable *) data); -+} -+ -+/*! -+ * \brief Get a table of resource parameters -+ * -+ * \param[in] rsc Resource to query -+ * \param[in] node Node for evaluating rules (NULL for defaults) -+ * \param[in] data_set Cluster working set -+ * -+ * \return Hash table containing resource parameter names and values -+ * (or NULL if \p rsc or \p data_set is NULL) -+ * \note The returned table will be destroyed when the resource is freed, so -+ * callers should not destroy it. -+ */ -+GHashTable * -+pe_rsc_params(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) -+{ -+ GHashTable *params_on_node = NULL; -+ -+ /* A NULL node is used to request the resource's default parameters -+ * (not evaluated for node), but we always want something non-NULL -+ * as a hash table key. -+ */ -+ const char *node_name = ""; -+ -+ // Sanity check -+ if ((rsc == NULL) || (data_set == NULL)) { -+ return NULL; -+ } -+ if ((node != NULL) && (node->details->uname != NULL)) { -+ node_name = node->details->uname; -+ } -+ -+ // Find the parameter table for given node -+ if (rsc->parameter_cache == NULL) { -+ rsc->parameter_cache = g_hash_table_new_full(crm_strcase_hash, -+ crm_strcase_equal, free, -+ free_params_table); -+ } else { -+ params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name); -+ } -+ -+ // If none exists yet, create one with parameters evaluated for node -+ if (params_on_node == NULL) { -+ params_on_node = crm_str_table_new(); -+ get_rsc_attributes(params_on_node, rsc, node, data_set); -+ g_hash_table_insert(rsc->parameter_cache, strdup(node_name), -+ params_on_node); -+ } -+ return params_on_node; -+} -+ - gboolean - common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, - pe_resource_t * parent, pe_working_set_t * data_set) -@@ -869,6 +925,9 @@ common_free(pe_resource_t * rsc) - if (rsc->parameters != NULL) { - g_hash_table_destroy(rsc->parameters); - } -+ if (rsc->parameter_cache != NULL) { -+ g_hash_table_destroy(rsc->parameter_cache); -+ } - #if ENABLE_VERSIONED_ATTRS - if (rsc->versioned_parameters != NULL) { - free_xml(rsc->versioned_parameters); --- -1.8.3.1 - - -From d5075f64c5fff1f037ee8dbca2ad6268bce15681 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 11 Nov 2020 13:03:50 -0600 -Subject: [PATCH 02/13] API: libpe_status: ignore ->parameter() resource object - function's create argument - -This uses the new resource parameter function to implement ->parameter(). -That means the parameter table will always be created if not already existent. - -->parameter() is not called internally. 
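A minimal usage sketch of the new accessor (editor's illustration, not part of the patches; get_param_on_node() is a hypothetical helper, and rsc and data_set are assumed to come from an already-unpacked working set):

#include <glib.h>
#include <crm/pengine/complex.h>    // pe_rsc_params()

static const char *
get_param_on_node(pe_resource_t *rsc, pe_node_t *node,
                  pe_working_set_t *data_set, const char *name)
{
    /* Parameters evaluated for the given node (a NULL node requests the
     * defaults).  The returned table is cached in rsc->parameter_cache and
     * freed with the resource, so the caller must not destroy it.
     */
    GHashTable *params = pe_rsc_params(rsc, node, data_set);

    return (params == NULL)? NULL : g_hash_table_lookup(params, name);
}

Because the cache is keyed by node name, repeated lookups for the same node reuse the already-evaluated table.
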
---- - lib/pengine/native.c | 28 ++++------------------------ - 1 file changed, 4 insertions(+), 24 deletions(-) - -diff --git a/lib/pengine/native.c b/lib/pengine/native.c -index 193be17..95c3da9 100644 ---- a/lib/pengine/native.c -+++ b/lib/pengine/native.c -@@ -312,48 +312,28 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node, - return NULL; - } - -+// create is ignored - char * - native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name, - pe_working_set_t * data_set) - { - char *value_copy = NULL; - const char *value = NULL; -- GHashTable *hash = NULL; -- GHashTable *local_hash = NULL; -+ GHashTable *params = NULL; - - CRM_CHECK(rsc != NULL, return NULL); - CRM_CHECK(name != NULL && strlen(name) != 0, return NULL); - - pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id); -- -- if (create || g_hash_table_size(rsc->parameters) == 0) { -- if (node != NULL) { -- pe_rsc_trace(rsc, "Creating hash with node %s", node->details->uname); -- } else { -- pe_rsc_trace(rsc, "Creating default hash"); -- } -- -- local_hash = crm_str_table_new(); -- -- get_rsc_attributes(local_hash, rsc, node, data_set); -- -- hash = local_hash; -- } else { -- hash = rsc->parameters; -- } -- -- value = g_hash_table_lookup(hash, name); -+ params = pe_rsc_params(rsc, node, data_set); -+ value = g_hash_table_lookup(params, name); - if (value == NULL) { - /* try meta attributes instead */ - value = g_hash_table_lookup(rsc->meta, name); - } -- - if (value != NULL) { - value_copy = strdup(value); - } -- if (local_hash != NULL) { -- g_hash_table_destroy(local_hash); -- } - return value_copy; - } - --- -1.8.3.1 - - -From 7089c19d1a1a79dd353ade0002ba6ed3321145ca Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 9 Nov 2020 18:15:56 -0600 -Subject: [PATCH 03/13] Refactor: fencer: use new resource parameters function - ---- - daemons/fenced/pacemaker-fenced.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c -index 69c29a7..5390d66 100644 ---- a/daemons/fenced/pacemaker-fenced.c -+++ b/daemons/fenced/pacemaker-fenced.c -@@ -643,6 +643,7 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - /* Our node is allowed, so update the device information */ - int rc; - xmlNode *data; -+ GHashTable *rsc_params = NULL; - GHashTableIter gIter; - stonith_key_value_t *params = NULL; - -@@ -651,12 +652,12 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - const char *rsc_provides = NULL; - - crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight); -- get_rsc_attributes(rsc->parameters, rsc, node, data_set); -+ rsc_params = pe_rsc_params(rsc, node, data_set); - get_meta_attributes(rsc->meta, rsc, node, data_set); - - rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES); - -- g_hash_table_iter_init(&gIter, rsc->parameters); -+ g_hash_table_iter_init(&gIter, rsc_params); - while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) { - if (!name || !value) { - continue; --- -1.8.3.1 - - -From ae0a2f26891b30a7bcf09467dac461a17d071cd9 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 9 Nov 2020 18:35:08 -0600 -Subject: [PATCH 04/13] Refactor: scheduler: use new resource parameter - function when getting fence action timeout - -This means that the fence device's active node is used to evaluate the -parameters, which will be of use if we ever 
support rules in fence device -configuration. ---- - lib/pengine/utils.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index b07afbe..831f890 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -1099,12 +1099,12 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai - */ - if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard), - pcmk_ra_cap_fence_params) -- && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) -- || is_probe) -- && action->rsc->parameters) { -+ && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) -+ || is_probe)) { - -- value = g_hash_table_lookup(action->rsc->parameters, -- "pcmk_monitor_timeout"); -+ GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set); -+ -+ value = g_hash_table_lookup(params, "pcmk_monitor_timeout"); - - if (value) { - crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', " --- -1.8.3.1 - - -From 25cf7f5e0f6e3a22000fcaa4f4492fe1c086252b Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 13 Nov 2020 09:27:43 -0600 -Subject: [PATCH 05/13] Refactor: scheduler: use new resource parameter - function when replacing bundle #uname - ---- - include/crm/pengine/internal.h | 8 +++++--- - lib/pacemaker/pcmk_sched_allocate.c | 2 +- - lib/pacemaker/pcmk_sched_bundle.c | 14 ++++++++++++-- - lib/pengine/bundle.c | 12 ++++++++---- - lib/pengine/pe_digest.c | 2 +- - lib/pengine/unpack.c | 2 +- - 6 files changed, 28 insertions(+), 12 deletions(-) - -diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h -index 1e5aee1..9f4e28a 100644 ---- a/include/crm/pengine/internal.h -+++ b/include/crm/pengine/internal.h -@@ -545,9 +545,11 @@ int pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, const char - int pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options); - pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle, - const pe_node_t *node); --bool pe__bundle_needs_remote_name(pe_resource_t *rsc); --const char *pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, -- const char *field); -+bool pe__bundle_needs_remote_name(pe_resource_t *rsc, -+ pe_working_set_t *data_set); -+const char *pe__add_bundle_remote_name(pe_resource_t *rsc, -+ pe_working_set_t *data_set, -+ xmlNode *xml, const char *field); - const char *pe_node_attribute_calculated(const pe_node_t *node, - const char *name, - const pe_resource_t *rsc); -diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c -index a0fb5ab..22f284a 100644 ---- a/lib/pacemaker/pcmk_sched_allocate.c -+++ b/lib/pacemaker/pcmk_sched_allocate.c -@@ -470,7 +470,7 @@ check_actions_for(xmlNode * rsc_entry, pe_resource_t * rsc, pe_node_t * node, pe - * has changed, clear any fail count so they can be retried fresh. 
- */ - -- if (pe__bundle_needs_remote_name(rsc)) { -+ if (pe__bundle_needs_remote_name(rsc, data_set)) { - /* We haven't allocated resources to nodes yet, so if the - * REMOTE_CONTAINER_HACK is used, we may calculate the digest - * based on the literal "#uname" value rather than the properly -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index ac9219c..4f41b70 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -911,7 +911,7 @@ pcmk__bundle_expand(pe_resource_t *rsc, pe_working_set_t * data_set) - - CRM_ASSERT(replica); - if (replica->remote && replica->container -- && pe__bundle_needs_remote_name(replica->remote)) { -+ && pe__bundle_needs_remote_name(replica->remote, data_set)) { - - /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that - * run pacemaker-remoted inside, without needing a separate IP for -@@ -923,12 +923,22 @@ pcmk__bundle_expand(pe_resource_t *rsc, pe_working_set_t * data_set) - replica->remote->xml, LOG_ERR); - const char *calculated_addr = NULL; - -+ // Replace the value in replica->remote->xml (if appropriate) - calculated_addr = pe__add_bundle_remote_name(replica->remote, -+ data_set, - nvpair, "value"); - if (calculated_addr) { -+ /* Since this is for the bundle as a resource, and not any -+ * particular action, replace the value in the default -+ * parameters (not evaluated for node). action2xml() will grab -+ * it from there to replace it in node-evaluated parameters. -+ */ -+ GHashTable *params = pe_rsc_params(replica->remote, -+ NULL, data_set); -+ - crm_trace("Set address for bundle connection %s to bundle host %s", - replica->remote->id, calculated_addr); -- g_hash_table_replace(replica->remote->parameters, -+ g_hash_table_replace(params, - strdup(XML_RSC_ATTR_REMOTE_RA_ADDR), - strdup(calculated_addr)); - } else { -diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c -index 7b326e9..615a35a 100644 ---- a/lib/pengine/bundle.c -+++ b/lib/pengine/bundle.c -@@ -948,29 +948,33 @@ replica_for_remote(pe_resource_t *remote) - } - - bool --pe__bundle_needs_remote_name(pe_resource_t *rsc) -+pe__bundle_needs_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set) - { - const char *value; -+ GHashTable *params = NULL; - - if (rsc == NULL) { - return false; - } - -- value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR); -+ // Use NULL node since pcmk__bundle_expand() uses that to set value -+ params = pe_rsc_params(rsc, NULL, data_set); -+ value = g_hash_table_lookup(params, XML_RSC_ATTR_REMOTE_RA_ADDR); - - return pcmk__str_eq(value, "#uname", pcmk__str_casei) - && xml_contains_remote_node(rsc->xml); - } - - const char * --pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field) -+pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set, -+ xmlNode *xml, const char *field) - { - // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside - - pe_node_t *node = NULL; - pe__bundle_replica_t *replica = NULL; - -- if (!pe__bundle_needs_remote_name(rsc)) { -+ if (!pe__bundle_needs_remote_name(rsc, data_set)) { - return NULL; - } - -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index f55c896..f6e41e9 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -147,7 +147,7 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - /* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers - * that themselves are Pacemaker Remote 
nodes - */ -- if (pe__add_bundle_remote_name(rsc, data->params_all, -+ if (pe__add_bundle_remote_name(rsc, data_set, data->params_all, - XML_RSC_ATTR_REMOTE_RA_ADDR)) { - crm_trace("Set address for bundle connection %s (on %s)", - rsc->id, node->details->uname); -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index a15bb92..281bc88 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -3182,7 +3182,7 @@ should_clear_for_param_change(xmlNode *xml_op, const char *task, - { - if (!strcmp(task, "start") || !strcmp(task, "monitor")) { - -- if (pe__bundle_needs_remote_name(rsc)) { -+ if (pe__bundle_needs_remote_name(rsc, data_set)) { - /* We haven't allocated resources yet, so we can't reliably - * substitute addr parameters for the REMOTE_CONTAINER_HACK. - * When that's needed, defer the check until later. --- -1.8.3.1 - - -From 992b2edfe573a2bfd510090b37a8a1b355ad3c44 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 10 Nov 2020 15:32:29 -0600 -Subject: [PATCH 06/13] Refactor: scheduler: use new resource parameter - function when creating graph - ---- - lib/pacemaker/pcmk_sched_graph.c | 30 +++++++++++++++++++++++++----- - 1 file changed, 25 insertions(+), 5 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c -index c012d23..f0d1f47 100644 ---- a/lib/pacemaker/pcmk_sched_graph.c -+++ b/lib/pacemaker/pcmk_sched_graph.c -@@ -1210,11 +1210,29 @@ action2xml(pe_action_t * action, gboolean as_input, pe_working_set_t *data_set) - - g_hash_table_foreach(action->extra, hash2field, args_xml); - if (action->rsc != NULL && action->node) { -- GHashTable *p = crm_str_table_new(); -+ // Get the resource instance attributes, evaluated properly for node -+ GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set); - -- get_rsc_attributes(p, action->rsc, action->node, data_set); -- g_hash_table_foreach(p, hash2smartfield, args_xml); -- g_hash_table_destroy(p); -+ /* REMOTE_CONTAINER_HACK: If this is a remote connection resource with -+ * addr="#uname", pull the actual value from the parameters evaluated -+ * without a node (which was put there earlier in stage8() when the -+ * bundle's expand() method was called). 
-+ */ -+ const char *remote_addr = g_hash_table_lookup(params, -+ XML_RSC_ATTR_REMOTE_RA_ADDR); -+ -+ if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) { -+ GHashTable *base = pe_rsc_params(action->rsc, NULL, data_set); -+ -+ remote_addr = g_hash_table_lookup(base, -+ XML_RSC_ATTR_REMOTE_RA_ADDR); -+ if (remote_addr != NULL) { -+ g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR), -+ strdup(remote_addr)); -+ } -+ } -+ -+ g_hash_table_foreach(params, hash2smartfield, args_xml); - - #if ENABLE_VERSIONED_ATTRS - { -@@ -1230,7 +1248,9 @@ action2xml(pe_action_t * action, gboolean as_input, pe_working_set_t *data_set) - #endif - - } else if(action->rsc && action->rsc->variant <= pe_native) { -- g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml); -+ GHashTable *params = pe_rsc_params(action->rsc, NULL, data_set); -+ -+ g_hash_table_foreach(params, hash2smartfield, args_xml); - - #if ENABLE_VERSIONED_ATTRS - if (xml_has_children(action->rsc->versioned_parameters)) { --- -1.8.3.1 - - -From 94316455ceead6f466e901d38e421f6116cf22d3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 9 Nov 2020 16:04:23 -0600 -Subject: [PATCH 07/13] Low: scheduler: calculate secure digest properly for - node attribute rules - -6830621 corrected secure digest calculation in most cases, but did not work -properly when non-sensitive parameters depended on node attribute-based rules -(since it used rsc->parameters, which is not evaluated for node). - -This fixes it by using the new resource parameters function (which is the -equivalent of what calculate_main_digest() already was doing, so it is now -exposed earlier for use by both digest functions). ---- - lib/pengine/pe_digest.c | 28 +++++++++++++--------------- - 1 file changed, 13 insertions(+), 15 deletions(-) - -diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c -index f6e41e9..2066a53 100644 ---- a/lib/pengine/pe_digest.c -+++ b/lib/pengine/pe_digest.c -@@ -124,6 +124,7 @@ append_all_versioned_params(pe_resource_t *rsc, pe_node_t *node, - * \param[out] data Digest cache entry to modify - * \param[in] rsc Resource that action was for - * \param[in] node Node action was performed on -+ * \param[in] params Resource parameters evaluated for node - * \param[in] task Name of action performed - * \param[in,out] interval_ms Action's interval (will be reset if in overrides) - * \param[in] xml_op XML of operation in CIB status (if available) -@@ -133,14 +134,12 @@ append_all_versioned_params(pe_resource_t *rsc, pe_node_t *node, - */ - static void - calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, -- pe_node_t *node, const char *task, guint *interval_ms, -+ pe_node_t *node, GHashTable *params, -+ const char *task, guint *interval_ms, - xmlNode *xml_op, const char *op_version, - GHashTable *overrides, pe_working_set_t *data_set) - { - pe_action_t *action = NULL; -- GHashTable *local_rsc_params = crm_str_table_new(); -- -- get_rsc_attributes(local_rsc_params, rsc, node, data_set); - - data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); - -@@ -174,7 +173,7 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - if (overrides != NULL) { - g_hash_table_foreach(overrides, hash2field, data->params_all); - } -- g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); -+ g_hash_table_foreach(params, hash2field, data->params_all); - g_hash_table_foreach(action->extra, hash2field, data->params_all); - g_hash_table_foreach(action->meta, hash2metafield, data->params_all); - -@@ 
-184,7 +183,6 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc, - - pcmk__filter_op_for_digest(data->params_all); - -- g_hash_table_destroy(local_rsc_params); - pe_free_action(action); - - data->digest_all_calc = calculate_operation_digest(data->params_all, -@@ -204,14 +202,15 @@ is_fence_param(xmlAttrPtr attr, void *user_data) - * - * \param[out] data Digest cache entry to modify - * \param[in] rsc Resource that action was for -+ * \param[in] params Resource parameters evaluated for node - * \param[in] xml_op XML of operation in CIB status (if available) - * \param[in] op_version CRM feature set to use for digest calculation - * \param[in] overrides Key/value hash table to override resource parameters - */ - static void - calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, -- xmlNode *xml_op, const char *op_version, -- GHashTable *overrides) -+ GHashTable *params, xmlNode *xml_op, -+ const char *op_version, GHashTable *overrides) - { - const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); - const char *secure_list = NULL; -@@ -222,16 +221,12 @@ calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc, - secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); - } - -- /* The controller doesn't create a digest of *all* non-sensitive -- * parameters, only those listed in resource agent meta-data. The -- * equivalent here is rsc->parameters. -- */ - data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS); - if (overrides != NULL) { - g_hash_table_foreach(overrides, hash2field, data->params_secure); - } - -- g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure); -+ g_hash_table_foreach(params, hash2field, data->params_secure); - if (secure_list != NULL) { - pcmk__xe_remove_matching_attrs(data->params_secure, attr_not_in_string, - (void *) secure_list); -@@ -328,6 +323,7 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms, - { - op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t)); - const char *op_version = CRM_FEATURE_SET; -+ GHashTable *params = NULL; - - if (data == NULL) { - return NULL; -@@ -336,10 +332,12 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms, - op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); - } - -- calculate_main_digest(data, rsc, node, task, interval_ms, xml_op, -+ params = pe_rsc_params(rsc, node, data_set); -+ calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op, - op_version, overrides, data_set); - if (calc_secure) { -- calculate_secure_digest(data, rsc, xml_op, op_version, overrides); -+ calculate_secure_digest(data, rsc, params, xml_op, op_version, -+ overrides); - } - calculate_restart_digest(data, xml_op, op_version); - return data; --- -1.8.3.1 - - -From f546d0125b6f39fae744f43dad752089648e3f1f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 10 Nov 2020 16:01:32 -0600 -Subject: [PATCH 08/13] Fix: tools: respect rules when showing node attributes - in crm_mon - -Previously, crm_mon checked rsc->parameters for ocf:pacemaker:ping parameter -values. However that is not evaluated for node attribute rules. It also called -get_rsc_attributes() for all resources unnecessarily, since that's part of -common_unpack(). - -Now, use pe_rsc_params() to get the right values per node. 
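Roughly, the expected score can now be derived from the node-evaluated table rather than rsc->parameters. A sketch follows (editor's illustration with a hypothetical helper name; "host_list" and "multiplier" are ocf:pacemaker:ping parameter names, and the hosts-times-multiplier arithmetic is the usual ping convention rather than something defined by this patch):

#include <glib.h>
#include <crm/crm.h>                // crm_parse_int()
#include <crm/pengine/complex.h>    // pe_rsc_params()

static int
expected_ping_score(pe_resource_t *rsc, pe_node_t *node,
                    pe_working_set_t *data_set)
{
    // Evaluate the ping resource's parameters for this particular node, so
    // values set via node-attribute-based rules are taken into account
    GHashTable *params = pe_rsc_params(rsc, node, data_set);
    const char *hosts = NULL;
    const char *multiplier = NULL;
    int n_hosts = 0;

    if (params == NULL) {
        return 0;
    }
    hosts = g_hash_table_lookup(params, "host_list");
    multiplier = g_hash_table_lookup(params, "multiplier");

    if (hosts != NULL) {
        char **host_list = g_strsplit(hosts, " ", 0);

        n_hosts = g_strv_length(host_list);
        g_strfreev(host_list);
    }
    // ping conventionally publishes (number of reachable hosts) * multiplier
    return n_hosts * crm_parse_int(multiplier, "1");
}
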
---- - tools/crm_mon.h | 1 - - tools/crm_mon_print.c | 28 ++++++++++++++-------------- - tools/crm_mon_runtime.c | 15 +-------------- - 3 files changed, 15 insertions(+), 29 deletions(-) - -diff --git a/tools/crm_mon.h b/tools/crm_mon.h -index 143e8d8..f746507 100644 ---- a/tools/crm_mon.h -+++ b/tools/crm_mon.h -@@ -109,7 +109,6 @@ int print_html_status(pcmk__output_t *out, pe_working_set_t *data_set, - - GList *append_attr_list(GList *attr_list, char *name); - void blank_screen(void); --void crm_mon_get_parameters(pe_resource_t *rsc, pe_working_set_t *data_set); - unsigned int get_resource_display_options(unsigned int mon_ops); - - void crm_mon_register_messages(pcmk__output_t *out); -diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c -index cc3efb0..8ae11bf 100644 ---- a/tools/crm_mon_print.c -+++ b/tools/crm_mon_print.c -@@ -38,7 +38,8 @@ static int print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, - static int print_node_history(pcmk__output_t *out, pe_working_set_t *data_set, - pe_node_t *node, xmlNode *node_state, gboolean operations, - unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc); --static gboolean add_extra_info(pcmk__output_t *out, pe_node_t * node, GListPtr rsc_list, -+static gboolean add_extra_info(pcmk__output_t *out, pe_node_t *node, -+ GListPtr rsc_list, pe_working_set_t *data_set, - const char *attrname, int *expected_score); - static void print_node_attribute(gpointer name, gpointer user_data); - static int print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set, -@@ -330,7 +331,8 @@ print_node_history(pcmk__output_t *out, pe_working_set_t *data_set, - */ - static gboolean - add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list, -- const char *attrname, int *expected_score) -+ pe_working_set_t *data_set, const char *attrname, -+ int *expected_score) - { - GListPtr gIter = NULL; - -@@ -338,9 +340,11 @@ add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list, - pe_resource_t *rsc = (pe_resource_t *) gIter->data; - const char *type = g_hash_table_lookup(rsc->meta, "type"); - const char *name = NULL; -+ GHashTable *params = NULL; - - if (rsc->children != NULL) { -- if (add_extra_info(out, node, rsc->children, attrname, expected_score)) { -+ if (add_extra_info(out, node, rsc->children, data_set, attrname, -+ expected_score)) { - return TRUE; - } - } -@@ -349,7 +353,8 @@ add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list, - continue; - } - -- name = g_hash_table_lookup(rsc->parameters, "name"); -+ params = pe_rsc_params(rsc, node, data_set); -+ name = g_hash_table_lookup(params, "name"); - - if (name == NULL) { - name = "pingd"; -@@ -359,8 +364,8 @@ add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list, - if (pcmk__str_eq(name, attrname, pcmk__str_casei)) { - int host_list_num = 0; - /* int value = crm_parse_int(attrvalue, "0"); */ -- const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list"); -- const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier"); -+ const char *hosts = g_hash_table_lookup(params, "host_list"); -+ const char *multiplier = g_hash_table_lookup(params, "multiplier"); - - if (hosts) { - char **host_list = g_strsplit(hosts, " ", 0); -@@ -381,6 +386,7 @@ add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list, - struct mon_attr_data { - pcmk__output_t *out; - pe_node_t *node; -+ pe_working_set_t *data_set; - }; - - static void -@@ -394,7 +400,7 @@ print_node_attribute(gpointer name, gpointer 
user_data) - value = pe_node_attribute_raw(data->node, name); - - add_extra = add_extra_info(data->out, data->node, data->node->details->running_rsc, -- name, &expected_score); -+ data->data_set, name, &expected_score); - - /* Print attribute name and value */ - data->out->message(data->out, "node-attribute", name, value, add_extra, -@@ -547,19 +553,13 @@ print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set, - GListPtr gIter = NULL; - int rc = pcmk_rc_no_output; - -- /* Unpack all resource parameters (it would be more efficient to do this -- * only when needed for the first time in add_extra_info()) -- */ -- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { -- crm_mon_get_parameters(gIter->data, data_set); -- } -- - /* Display each node's attributes */ - for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { - struct mon_attr_data data; - - data.out = out; - data.node = (pe_node_t *) gIter->data; -+ data.data_set = data_set; - - if (data.node && data.node->details && data.node->details->online) { - GList *attr_list = NULL; -diff --git a/tools/crm_mon_runtime.c b/tools/crm_mon_runtime.c -index ce31559..43152ce 100644 ---- a/tools/crm_mon_runtime.c -+++ b/tools/crm_mon_runtime.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2019 the Pacemaker project contributors -+ * Copyright 2019-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -66,19 +66,6 @@ append_attr_list(GList *attr_list, char *name) - return g_list_insert_sorted(attr_list, name, compare_attribute); - } - --void --crm_mon_get_parameters(pe_resource_t *rsc, pe_working_set_t * data_set) --{ -- get_rsc_attributes(rsc->parameters, rsc, NULL, data_set); -- if(rsc->children) { -- GListPtr gIter = NULL; -- -- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { -- crm_mon_get_parameters(gIter->data, data_set); -- } -- } --} -- - /*! - * \internal - * \brief Return resource display options corresponding to command-line choices --- -1.8.3.1 - - -From 397ad868d464a0ffd14ed527a97010e141ac60b3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 11 Nov 2020 12:59:34 -0600 -Subject: [PATCH 09/13] Fix: scheduler: multiple issues with value-source in - location rules - -Delay creating rule match data for location constraints until the node is -known, and evaluate the rules using that node (using the new resource -parameter function). This fixes multiple issues: - -* Previously, match data using resource parameters was created only when - rsc-pattern was specified and a resource positively matched. This meant that - a node attribute rule expression with a value-source of "param" or "meta" - would only work in that case, and not when rsc was specified instead, - or when rsc-pattern was specified with an inverted match ("!pattern"), - or when a rule was used in a constraint with a resource set. - -* Previously, with rsc-pattern, the match data used the resource's default - parameters (not evaluated by node). This meant that a location rule that used - a node attribute expression with a value-source of "param" could not be - used with a resource parameter that itself was determined by a rule - using a node attribute expression. 
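-
-Condensed from the hunk below, the per-node evaluation in
-generate_location_rule() now looks roughly like this (scoring and error
-handling omitted; variable names as in the existing node loop):
-
-    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
-        pe_node_t *node = (pe_node_t *) iter->data;
-
-        /* Build match data per node, so a value-source of "param" sees the
-         * resource parameters as evaluated for this node and "meta" sees
-         * the meta-attributes, regardless of how the constraint referenced
-         * the resource.
-         */
-        pe_match_data_t match_data = {
-            .re = re_match_data,
-            .params = pe_rsc_params(rsc, node, data_set),
-            .meta = rsc->meta,
-        };
-
-        if (pe_test_rule(rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN,
-                         data_set->now, next_change, &match_data)) {
-            /* ... record the node in the location constraint ... */
-        }
-    }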
---- - lib/pacemaker/pcmk_sched_constraints.c | 40 ++++++++++++++++++---------------- - 1 file changed, 21 insertions(+), 19 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c -index 0029ad7..92b9740 100644 ---- a/lib/pacemaker/pcmk_sched_constraints.c -+++ b/lib/pacemaker/pcmk_sched_constraints.c -@@ -47,7 +47,7 @@ static pe__location_t *generate_location_rule(pe_resource_t *rsc, - const char *discovery, - crm_time_t *next_change, - pe_working_set_t *data_set, -- pe_match_data_t *match_data); -+ pe_re_match_data_t *match_data); - static void unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set); - static void unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set); - -@@ -714,7 +714,7 @@ tag_to_set(xmlNode * xml_obj, xmlNode ** rsc_set, const char * attr, - static void unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc_lh, - const char *role, const char *score, - pe_working_set_t *data_set, -- pe_match_data_t *match_data); -+ pe_re_match_data_t *match_data); - - static void - unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set) -@@ -769,13 +769,9 @@ unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set) - .nregs = nregs, - .pmatch = pmatch - }; -- pe_match_data_t match_data = { -- .re = &re_match_data, -- .params = r->parameters, -- .meta = r->meta, -- }; -+ - crm_debug("'%s' matched '%s' for %s", r->id, value, id); -- unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, &match_data); -+ unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, &re_match_data); - - } else if (invert && (status != 0)) { - crm_debug("'%s' is an inverted match of '%s' for %s", r->id, value, id); -@@ -796,7 +792,7 @@ unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set) - static void - unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc_lh, const char *role, - const char *score, pe_working_set_t *data_set, -- pe_match_data_t *match_data) -+ pe_re_match_data_t *re_match_data) - { - pe__location_t *location = NULL; - const char *id_lh = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); -@@ -836,7 +832,7 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc_lh, const char *role, - empty = FALSE; - crm_trace("Unpacking %s/%s", id, ID(rule_xml)); - generate_location_rule(rsc_lh, rule_xml, discovery, next_change, -- data_set, match_data); -+ data_set, re_match_data); - } - - if (empty) { -@@ -1067,7 +1063,8 @@ get_node_score(const char *rule, const char *score, gboolean raw, pe_node_t * no - static pe__location_t * - generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml, - const char *discovery, crm_time_t *next_change, -- pe_working_set_t *data_set, pe_match_data_t *match_data) -+ pe_working_set_t *data_set, -+ pe_re_match_data_t *re_match_data) - { - const char *rule_id = NULL; - const char *score = NULL; -@@ -1113,14 +1110,14 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml, - return NULL; - } - -- if (match_data && match_data->re && match_data->re->nregs > 0 && match_data->re->pmatch[0].rm_so != -1) { -- if (raw_score == FALSE) { -- char *result = pe_expand_re_matches(score, match_data->re); -+ if ((re_match_data != NULL) && (re_match_data->nregs > 0) -+ && (re_match_data->pmatch[0].rm_so != -1) && !raw_score) { - -- if (result) { -- score = (const char *) result; -- score_allocated = TRUE; -- } -+ char *result = pe_expand_re_matches(score, re_match_data); -+ -+ if (result != NULL) { -+ score = result; -+ score_allocated = TRUE; - } - } - -@@ -1148,9 
+1145,14 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml, - for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { - int score_f = 0; - pe_node_t *node = (pe_node_t *) gIter->data; -+ pe_match_data_t match_data = { -+ .re = re_match_data, -+ .params = pe_rsc_params(rsc, node, data_set), -+ .meta = rsc->meta, -+ }; - - accept = pe_test_rule(rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN, -- data_set->now, next_change, match_data); -+ data_set->now, next_change, &match_data); - - crm_trace("Rule %s %s on %s", ID(rule_xml), accept ? "passed" : "failed", - node->details->uname); --- -1.8.3.1 - - -From 40383ae2e2796b1bcbdb86bdd9cdc93c117eb69f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 12 Nov 2020 15:32:47 -0600 -Subject: [PATCH 10/13] Test: scheduler: add regression test for rules using - value-source - ---- - cts/cts-scheduler.in | 1 + - cts/scheduler/value-source.dot | 29 +++ - cts/scheduler/value-source.exp | 200 ++++++++++++++++ - cts/scheduler/value-source.scores | 47 ++++ - cts/scheduler/value-source.summary | 60 +++++ - cts/scheduler/value-source.xml | 463 +++++++++++++++++++++++++++++++++++++ - 6 files changed, 800 insertions(+) - create mode 100644 cts/scheduler/value-source.dot - create mode 100644 cts/scheduler/value-source.exp - create mode 100644 cts/scheduler/value-source.scores - create mode 100644 cts/scheduler/value-source.summary - create mode 100644 cts/scheduler/value-source.xml - -diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in -index 23e6a91..939f8c8 100644 ---- a/cts/cts-scheduler.in -+++ b/cts/cts-scheduler.in -@@ -145,6 +145,7 @@ TESTS = [ - [ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ], - [ "location-date-rules-2", "Use location constraints with effective date-based rules" ], - [ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ], -+ [ "value-source", "Use location constraints with node attribute expressions using value-source" ], - [ "rule-dbl-as-auto-number-match", - "Floating-point rule values default to number comparison: match" ], - [ "rule-dbl-as-auto-number-no-match", -diff --git a/cts/scheduler/value-source.dot b/cts/scheduler/value-source.dot -new file mode 100644 -index 0000000..dfb61e9 ---- /dev/null -+++ b/cts/scheduler/value-source.dot -@@ -0,0 +1,29 @@ -+ digraph "g" { -+"Fencing_monitor_120000 rhel7-1" [ style=bold color="green" fontcolor="black"] -+"Fencing_start_0 rhel7-1" -> "Fencing_monitor_120000 rhel7-1" [ style = bold] -+"Fencing_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"] -+"insane-rsc_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"insane-rsc_start_0 rhel7-4" -> "insane-rsc_monitor_10000 rhel7-4" [ style = bold] -+"insane-rsc_start_0 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"invert-match_monitor_10000 rhel7-1" [ style=bold color="green" fontcolor="black"] -+"invert-match_start_0 rhel7-1" -> "invert-match_monitor_10000 rhel7-1" [ style = bold] -+"invert-match_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"] -+"meta-rsc_monitor_10000 rhel7-5" [ style=bold color="green" fontcolor="black"] -+"meta-rsc_start_0 rhel7-5" -> "meta-rsc_monitor_10000 rhel7-5" [ style = bold] -+"meta-rsc_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"] -+"rsc1_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"rsc1_start_0 rhel7-4" -> "rsc1_monitor_10000 rhel7-4" [ style = bold] -+"rsc1_start_0 rhel7-4" [ style=bold color="green" 
fontcolor="black"] -+"rsc2_monitor_10000 rhel7-5" [ style=bold color="green" fontcolor="black"] -+"rsc2_start_0 rhel7-5" -> "rsc2_monitor_10000 rhel7-5" [ style = bold] -+"rsc2_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"] -+"set-rsc1_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"set-rsc1_start_0 rhel7-3" -> "set-rsc1_monitor_10000 rhel7-3" [ style = bold] -+"set-rsc1_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"set-rsc2_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"set-rsc2_start_0 rhel7-4" -> "set-rsc2_monitor_10000 rhel7-4" [ style = bold] -+"set-rsc2_start_0 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"single-rsc_monitor_10000 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"single-rsc_start_0 rhel7-2" -> "single-rsc_monitor_10000 rhel7-2" [ style = bold] -+"single-rsc_start_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/value-source.exp b/cts/scheduler/value-source.exp -new file mode 100644 -index 0000000..4bf469f ---- /dev/null -+++ b/cts/scheduler/value-source.exp -@@ -0,0 +1,200 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/value-source.scores b/cts/scheduler/value-source.scores -new file mode 100644 -index 0000000..1f781c4 ---- /dev/null -+++ b/cts/scheduler/value-source.scores -@@ -0,0 +1,47 @@ -+Allocation scores: -+Using the original execution date of: 2020-11-12 21:28:08Z -+pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 -+pcmk__native_allocate: Fencing allocation score on rhel7-2: 0 -+pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 -+pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 -+pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 -+pcmk__native_allocate: insane-rsc allocation score on rhel7-1: 0 -+pcmk__native_allocate: insane-rsc allocation score on rhel7-2: 0 -+pcmk__native_allocate: insane-rsc allocation score on rhel7-3: 0 -+pcmk__native_allocate: insane-rsc allocation score on rhel7-4: INFINITY -+pcmk__native_allocate: insane-rsc allocation score on rhel7-5: 0 -+pcmk__native_allocate: invert-match allocation score on rhel7-1: INFINITY -+pcmk__native_allocate: invert-match allocation score on rhel7-2: 0 -+pcmk__native_allocate: invert-match allocation score on rhel7-3: 0 -+pcmk__native_allocate: invert-match allocation score on rhel7-4: 0 -+pcmk__native_allocate: invert-match allocation score on rhel7-5: 0 -+pcmk__native_allocate: meta-rsc allocation score on rhel7-1: 0 -+pcmk__native_allocate: meta-rsc allocation score on rhel7-2: 0 -+pcmk__native_allocate: meta-rsc allocation score on rhel7-3: 0 -+pcmk__native_allocate: meta-rsc allocation score on rhel7-4: INFINITY -+pcmk__native_allocate: meta-rsc allocation score on rhel7-5: INFINITY -+pcmk__native_allocate: rsc1 allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc1 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc1 
allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc1 allocation score on rhel7-4: INFINITY -+pcmk__native_allocate: rsc1 allocation score on rhel7-5: INFINITY -+pcmk__native_allocate: rsc2 allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc2 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc2 allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc2 allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc2 allocation score on rhel7-5: INFINITY -+pcmk__native_allocate: set-rsc1 allocation score on rhel7-1: 0 -+pcmk__native_allocate: set-rsc1 allocation score on rhel7-2: 0 -+pcmk__native_allocate: set-rsc1 allocation score on rhel7-3: INFINITY -+pcmk__native_allocate: set-rsc1 allocation score on rhel7-4: 0 -+pcmk__native_allocate: set-rsc1 allocation score on rhel7-5: 0 -+pcmk__native_allocate: set-rsc2 allocation score on rhel7-1: 0 -+pcmk__native_allocate: set-rsc2 allocation score on rhel7-2: 0 -+pcmk__native_allocate: set-rsc2 allocation score on rhel7-3: 0 -+pcmk__native_allocate: set-rsc2 allocation score on rhel7-4: INFINITY -+pcmk__native_allocate: set-rsc2 allocation score on rhel7-5: 0 -+pcmk__native_allocate: single-rsc allocation score on rhel7-1: 0 -+pcmk__native_allocate: single-rsc allocation score on rhel7-2: INFINITY -+pcmk__native_allocate: single-rsc allocation score on rhel7-3: 0 -+pcmk__native_allocate: single-rsc allocation score on rhel7-4: 0 -+pcmk__native_allocate: single-rsc allocation score on rhel7-5: 0 -diff --git a/cts/scheduler/value-source.summary b/cts/scheduler/value-source.summary -new file mode 100644 -index 0000000..a9b0392 ---- /dev/null -+++ b/cts/scheduler/value-source.summary -@@ -0,0 +1,60 @@ -+Using the original execution date of: 2020-11-12 21:28:08Z -+ -+Current cluster status: -+Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] -+ -+ Fencing (stonith:fence_xvm): Stopped -+ rsc1 (ocf::pacemaker:Dummy): Stopped -+ rsc2 (ocf::pacemaker:Dummy): Stopped -+ invert-match (ocf::pacemaker:Dummy): Stopped -+ single-rsc (ocf::pacemaker:Dummy): Stopped -+ set-rsc1 (ocf::pacemaker:Dummy): Stopped -+ set-rsc2 (ocf::pacemaker:Dummy): Stopped -+ meta-rsc (ocf::pacemaker:Dummy): Stopped -+ insane-rsc (ocf::pacemaker:Dummy): Stopped -+ -+Transition Summary: -+ * Start Fencing ( rhel7-1 ) -+ * Start rsc1 ( rhel7-4 ) -+ * Start rsc2 ( rhel7-5 ) -+ * Start invert-match ( rhel7-1 ) -+ * Start single-rsc ( rhel7-2 ) -+ * Start set-rsc1 ( rhel7-3 ) -+ * Start set-rsc2 ( rhel7-4 ) -+ * Start meta-rsc ( rhel7-5 ) -+ * Start insane-rsc ( rhel7-4 ) -+ -+Executing cluster transition: -+ * Resource action: Fencing start on rhel7-1 -+ * Resource action: rsc1 start on rhel7-4 -+ * Resource action: rsc2 start on rhel7-5 -+ * Resource action: invert-match start on rhel7-1 -+ * Resource action: single-rsc start on rhel7-2 -+ * Resource action: set-rsc1 start on rhel7-3 -+ * Resource action: set-rsc2 start on rhel7-4 -+ * Resource action: meta-rsc start on rhel7-5 -+ * Resource action: insane-rsc start on rhel7-4 -+ * Resource action: Fencing monitor=120000 on rhel7-1 -+ * Resource action: rsc1 monitor=10000 on rhel7-4 -+ * Resource action: rsc2 monitor=10000 on rhel7-5 -+ * Resource action: invert-match monitor=10000 on rhel7-1 -+ * Resource action: single-rsc monitor=10000 on rhel7-2 -+ * Resource action: set-rsc1 monitor=10000 on rhel7-3 -+ * Resource action: set-rsc2 monitor=10000 on rhel7-4 -+ * Resource action: meta-rsc monitor=10000 on rhel7-5 -+ * Resource action: insane-rsc monitor=10000 on rhel7-4 -+Using the original execution date of: 
2020-11-12 21:28:08Z -+ -+Revised cluster status: -+Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] -+ -+ Fencing (stonith:fence_xvm): Started rhel7-1 -+ rsc1 (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc2 (ocf::pacemaker:Dummy): Started rhel7-5 -+ invert-match (ocf::pacemaker:Dummy): Started rhel7-1 -+ single-rsc (ocf::pacemaker:Dummy): Started rhel7-2 -+ set-rsc1 (ocf::pacemaker:Dummy): Started rhel7-3 -+ set-rsc2 (ocf::pacemaker:Dummy): Started rhel7-4 -+ meta-rsc (ocf::pacemaker:Dummy): Started rhel7-5 -+ insane-rsc (ocf::pacemaker:Dummy): Started rhel7-4 -+ -diff --git a/cts/scheduler/value-source.xml b/cts/scheduler/value-source.xml -new file mode 100644 -index 0000000..95e7d57 ---- /dev/null -+++ b/cts/scheduler/value-source.xml -@@ -0,0 +1,463 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - - -From f678813f9054e2eb80a04796b8d35214269cd352 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 9 Nov 2020 18:13:04 -0600 -Subject: [PATCH 11/13] API: libpe_status: deprecate pe_resource_t parameters - member - -Instead, pe_rsc_params() should be used. - -Also, define the parameters member using such a pe_rsc_params() call. ---- - include/crm/pengine/pe_types.h | 2 +- - lib/pengine/complex.c | 17 ++++++++++------- - 2 files changed, 11 insertions(+), 8 deletions(-) - -diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h -index 5529714..1416cee 100644 ---- a/include/crm/pengine/pe_types.h -+++ b/include/crm/pengine/pe_types.h -@@ -358,7 +358,7 @@ struct pe_resource_s { - enum rsc_role_e next_role; - - GHashTable *meta; -- GHashTable *parameters; -+ GHashTable *parameters; //! 
\deprecated Use pe_rsc_params() instead - GHashTable *utilization; - - GListPtr children; /* pe_resource_t* */ -diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c -index 7037ca1..60199c7 100644 ---- a/lib/pengine/complex.c -+++ b/lib/pengine/complex.c -@@ -557,8 +557,6 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, - return FALSE; - } - -- (*rsc)->parameters = crm_str_table_new(); -- - #if ENABLE_VERSIONED_ATTRS - (*rsc)->versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); - #endif -@@ -584,7 +582,7 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, - pe_rsc_trace((*rsc), "Unpacking resource..."); - - get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set); -- get_rsc_attributes((*rsc)->parameters, *rsc, NULL, data_set); -+ (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated - #if ENABLE_VERSIONED_ATTRS - pe_get_versioned_attributes((*rsc)->versioned_parameters, *rsc, NULL, data_set); - #endif -@@ -808,7 +806,15 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, - } - - if (remote_node) { -- value = g_hash_table_lookup((*rsc)->parameters, XML_REMOTE_ATTR_RECONNECT_INTERVAL); -+ GHashTable *params = pe_rsc_params(*rsc, NULL, data_set); -+ -+ /* Grabbing the value now means that any rules based on node attributes -+ * will evaluate to false, so such rules should not be used with -+ * reconnect_interval. -+ * -+ * @TODO Evaluate per node before using -+ */ -+ value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL); - if (value) { - /* reconnect delay works by setting failure_timeout and preventing the - * connection from starting until the failure is cleared. */ -@@ -922,9 +928,6 @@ common_free(pe_resource_t * rsc) - g_list_free(rsc->rsc_tickets); - g_list_free(rsc->dangling_migrations); - -- if (rsc->parameters != NULL) { -- g_hash_table_destroy(rsc->parameters); -- } - if (rsc->parameter_cache != NULL) { - g_hash_table_destroy(rsc->parameter_cache); - } --- -1.8.3.1 - - -From 267d0cc44e94d6963fe13974f83b2fc845b0c431 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 17 Nov 2020 15:45:35 -0600 -Subject: [PATCH 12/13] Test: execd: use new resource parameter function in - cts-exec-helper - ---- - daemons/execd/cts-exec-helper.c | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - -diff --git a/daemons/execd/cts-exec-helper.c b/daemons/execd/cts-exec-helper.c -index 8be8e18..423b54d 100644 ---- a/daemons/execd/cts-exec-helper.c -+++ b/daemons/execd/cts-exec-helper.c -@@ -482,13 +482,12 @@ generate_params(void) - goto param_gen_bail; - } - -- params = crm_str_table_new(); -+ params = pe_rsc_params(rsc, NULL, data_set); - meta = crm_str_table_new(); - -- get_rsc_attributes(params, rsc, NULL, data_set); - get_meta_attributes(meta, rsc, NULL, data_set); - -- if (params) { -+ if (params != NULL) { - char *key = NULL; - char *value = NULL; - -@@ -496,7 +495,6 @@ generate_params(void) - while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { - options.params = lrmd_key_value_add(options.params, key, value); - } -- g_hash_table_destroy(params); - } - - if (meta) { --- -1.8.3.1 - - -From b10c86bf785beccef147b702366607cc4ead0f79 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 17 Nov 2020 16:01:12 -0600 -Subject: [PATCH 13/13] Refactor: tools: use new resource parameter function in - crm_resource - ---- - tools/crm_resource.c | 14 +++++++++----- - tools/crm_resource_print.c | 10 ++++++++-- - tools/crm_resource_runtime.c | 32 +++++++++++--------------------- - 3 files 
changed, 28 insertions(+), 28 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index b028c40..9663e68 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1902,6 +1902,7 @@ main(int argc, char **argv) - unsigned int count = 0; - GHashTable *params = NULL; - pe_node_t *current = pe__find_active_on(rsc, &count, NULL); -+ bool free_params = true; - - if (count > 1) { - out->err(out, "%s is active on more than one node," -@@ -1909,23 +1910,26 @@ main(int argc, char **argv) - current = NULL; - } - -- params = crm_str_table_new(); -+ crm_debug("Looking up %s in %s", options.prop_name, rsc->id); - - if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { -- get_rsc_attributes(params, rsc, current, data_set); -+ params = pe_rsc_params(rsc, current, data_set); -+ free_params = false; - - } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { -- /* No need to redirect to the parent */ -+ params = crm_str_table_new(); - get_meta_attributes(params, rsc, current, data_set); - - } else { -+ params = crm_str_table_new(); - pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params, - NULL, FALSE, data_set); - } - -- crm_debug("Looking up %s in %s", options.prop_name, rsc->id); - rc = out->message(out, "attribute-list", rsc, options.prop_name, params); -- g_hash_table_destroy(params); -+ if (free_params) { -+ g_hash_table_destroy(params); -+ } - break; - } - -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 89d6172..398fef0 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -134,8 +134,11 @@ attribute_list_default(pcmk__output_t *out, va_list args) { - char *attr = va_arg(args, char *); - GHashTable *params = va_arg(args, GHashTable *); - -- const char *value = g_hash_table_lookup(params, attr); -+ const char *value = NULL; - -+ if (params != NULL) { -+ value = g_hash_table_lookup(params, attr); -+ } - if (value != NULL) { - out->begin_list(out, NULL, NULL, "Attributes"); - out->list_item(out, attr, "%s", value); -@@ -154,8 +157,11 @@ attribute_list_text(pcmk__output_t *out, va_list args) { - char *attr = va_arg(args, char *); - GHashTable *params = va_arg(args, GHashTable *); - -- const char *value = g_hash_table_lookup(params, attr); -+ const char *value = NULL; - -+ if (params != NULL) { -+ value = g_hash_table_lookup(params, attr); -+ } - if (value != NULL) { - out->info(out, "%s", value); - } else { -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index e0804fc..9ff9e96 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -916,40 +916,29 @@ cli_resource_fail(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, - } - - static GHashTable * --generate_resource_params(pe_resource_t * rsc, pe_working_set_t * data_set) -+generate_resource_params(pe_resource_t *rsc, pe_node_t *node, -+ pe_working_set_t *data_set) - { - GHashTable *params = NULL; - GHashTable *meta = NULL; - GHashTable *combined = NULL; - GHashTableIter iter; -+ char *key = NULL; -+ char *value = NULL; - -- if (!rsc) { -- crm_err("Resource does not exist in config"); -- return NULL; -- } -- -- params = crm_str_table_new(); -- meta = crm_str_table_new(); - combined = crm_str_table_new(); - -- get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set); -- get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set); -- -- if (params) { -- char *key = NULL; -- char *value = NULL; -- -+ params = 
pe_rsc_params(rsc, node, data_set); -+ if (params != NULL) { - g_hash_table_iter_init(&iter, params); - while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { - g_hash_table_insert(combined, strdup(key), strdup(value)); - } -- g_hash_table_destroy(params); - } - -- if (meta) { -- char *key = NULL; -- char *value = NULL; -- -+ meta = crm_str_table_new(); -+ get_meta_attributes(meta, rsc, node, data_set); -+ if (meta != NULL) { - g_hash_table_iter_init(&iter, meta); - while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { - char *crm_name = crm_meta_name(key); -@@ -1827,7 +1816,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, - rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); - rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE); - -- params = generate_resource_params(rsc, data_set); -+ params = generate_resource_params(rsc, NULL /* @TODO use local node */, -+ data_set); - - if (timeout_ms == 0) { - timeout_ms = pe_get_configured_timeout(rsc, action, data_set); --- -1.8.3.1 - diff --git a/SOURCES/022-rhbz1872376.patch b/SOURCES/022-rhbz1872376.patch deleted file mode 100644 index b9a405f..0000000 --- a/SOURCES/022-rhbz1872376.patch +++ /dev/null @@ -1,1452 +0,0 @@ -From ea636bc7b290325a8d11f56c4ca461d4d010643d Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 14 Sep 2020 16:29:19 -0500 -Subject: [PATCH 1/6] Refactor: tools: restructure crm_resource command-line - resource configuration - -... to allow (future) options other than --validate to use the command-line -resource configuration options. - -I had planned to use this for a project but went in different direction, so -nothing more is expected to use it for now, but I think it's still worthwhile -to help isolate different parts of code. ---- - tools/crm_resource.c | 189 ++++++++++++++++++++++++++------------------------- - 1 file changed, 98 insertions(+), 91 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 2fc9a86..1dcb0f0 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -66,39 +66,46 @@ enum rsc_command { - }; - - struct { -- enum rsc_command rsc_cmd; // The crm_resource command to perform -- const char *attr_set_type; -- int cib_options; -- gboolean clear_expired; -- int find_flags; /* Flags to use when searching for resource */ -- gboolean force; -- gchar *host_uname; -- gchar *interval_spec; -- gchar *move_lifetime; -- gchar *operation; -- GHashTable *override_params; -- gchar *prop_id; -- char *prop_name; -- gchar *prop_set; -- gchar *prop_value; -- gboolean recursive; -- gchar **remainder; -- gboolean require_cib; // Whether command requires CIB connection -- gboolean require_crmd; /* whether command requires controller connection */ -- gboolean require_dataset; /* whether command requires populated dataset instance */ -- gboolean require_resource; /* whether command requires that resource be specified */ -- int resource_verbose; -- gchar *rsc_id; -- gchar *rsc_type; -- gboolean promoted_role_only; -- int timeout_ms; -- char *agent_spec; // Standard and/or provider and/or agent -- char *v_agent; -- char *v_class; -- char *v_provider; -- gboolean validate_cmdline; /* whether we are just validating based on command line options */ -- GHashTable *validate_options; -- gchar *xml_file; -+ enum rsc_command rsc_cmd; // crm_resource command to perform -+ -+ // Infrastructure that given command needs to work -+ gboolean require_cib; // Whether command requires CIB IPC -+ int cib_options; // Options to use with CIB IPC calls -+ gboolean require_crmd; // Whether command requires controller IPC -+ gboolean require_dataset; // Whether command requires populated data set -+ gboolean require_resource; // Whether command requires resource specified -+ int find_flags; // Flags to use when searching for resource -+ -+ // Command-line option values -+ gchar *rsc_id; // Value of --resource -+ gchar *rsc_type; // Value of --resource-type -+ gboolean force; // --force was given -+ gboolean clear_expired; // --expired was given -+ gboolean recursive; // --recursive was given -+ gboolean promoted_role_only; // --master was given -+ gchar *host_uname; // Value of --node -+ gchar *interval_spec; // Value of --interval -+ gchar *move_lifetime; // Value of --lifetime -+ gchar *operation; // Value of --operation -+ const char *attr_set_type; // Instance, meta, or utilization attribute -+ gchar *prop_id; // --nvpair (attribute XML ID) -+ char *prop_name; // Attribute name -+ gchar *prop_set; // --set-name (attribute block XML ID) -+ gchar *prop_value; // --parameter-value (attribute value) -+ int timeout_ms; // Parsed from --timeout value -+ char *agent_spec; // Standard and/or provider and/or agent -+ gchar *xml_file; // Value of (deprecated) --xml-file -+ -+ // Resource configuration specified via command-line arguments -+ gboolean cmdline_config; // Resource configuration was via arguments -+ char *v_agent; // Value of --agent -+ char *v_class; // Value of --class -+ char *v_provider; // Value of --provider -+ GHashTable *cmdline_params; // Resource parameters specified -+ -+ // Positional command-line arguments -+ gchar **remainder; // Positional arguments as given -+ GHashTable *override_params; // Resource parameter values that override config - } options = { - .attr_set_type = XML_TAG_ATTR_SETS, - .cib_options = cib_sync_call, -@@ -533,28 +540,6 @@ static GOptionEntry advanced_entries[] = { - { NULL } - }; - --static GOptionEntry validate_entries[] = { -- { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb, -- "The standard the resource agent confirms 
to (for example, ocf).\n" -- INDENT "Use with --agent, --provider, --option, and --validate.", -- "CLASS" }, -- { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, -- "The agent to use (for example, IPaddr). Use with --class,\n" -- INDENT "--provider, --option, and --validate.", -- "AGENT" }, -- { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, -- "The vendor that supplies the resource agent (for example,\n" -- INDENT "heartbeat). Use with --class, --agent, --option, and --validate.", -- "PROVIDER" }, -- { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb, -- "Specify a device configuration parameter as NAME=VALUE (may be\n" -- INDENT "specified multiple times). Use with --validate and without the\n" -- INDENT "-r option.", -- "PARAM" }, -- -- { NULL } --}; -- - static GOptionEntry addl_entries[] = { - { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname, - "Node name", -@@ -582,6 +567,23 @@ static GOptionEntry addl_entries[] = { - { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec, - "Interval of operation to clear (default 0) (with -C -r -n)", - "N" }, -+ { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb, -+ "The standard the resource agent conforms to (for example, ocf).\n" -+ INDENT "Use with --agent, --provider, --option, and --validate.", -+ "CLASS" }, -+ { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, -+ "The agent to use (for example, IPaddr). Use with --class,\n" -+ INDENT "--provider, --option, and --validate.", -+ "AGENT" }, -+ { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, -+ "The vendor that supplies the resource agent (for example,\n" -+ INDENT "heartbeat). Use with --class, --agent, --option, and --validate.", -+ "PROVIDER" }, -+ { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb, -+ "Specify a device configuration parameter as NAME=VALUE (may be\n" -+ INDENT "specified multiple times). 
Use with --validate and without the\n" -+ INDENT "-r option.", -+ "PARAM" }, - { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set, - "(Advanced) XML ID of attributes element to use (with -p, -d)", - "ID" }, -@@ -608,7 +610,7 @@ static GOptionEntry addl_entries[] = { - - gboolean - agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { -- options.validate_cmdline = TRUE; -+ options.cmdline_config = TRUE; - options.require_resource = FALSE; - - if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) { -@@ -654,7 +656,7 @@ class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError ** - options.v_class = strdup(optarg); - } - -- options.validate_cmdline = TRUE; -+ options.cmdline_config = TRUE; - options.require_resource = FALSE; - return TRUE; - } -@@ -762,10 +764,10 @@ option_cb(const gchar *option_name, const gchar *optarg, gpointer data, - if (pcmk_scan_nvpair(optarg, &name, &value) != 2) { - return FALSE; - } -- if (options.validate_options == NULL) { -- options.validate_options = crm_str_table_new(); -+ if (options.cmdline_params == NULL) { -+ options.cmdline_params = crm_str_table_new(); - } -- g_hash_table_replace(options.validate_options, name, value); -+ g_hash_table_replace(options.cmdline_params, name, value); - return TRUE; - } - -@@ -1365,17 +1367,18 @@ show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code - } - - static void --validate_cmdline(crm_exit_t *exit_code) -+validate_cmdline_config(void) - { -- // -r cannot be used with any of --class, --agent, or --provider -+ // Cannot use both --resource and command-line resource configuration - if (options.rsc_id != NULL) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, - "--resource cannot be used with --class, --agent, and --provider"); - -- // If --class, --agent, or --provider are given, --validate must also be given. -+ // Not all commands support command-line resource configuration - } else if (options.rsc_cmd != cmd_execute_agent) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, -- "--class, --agent, and --provider require --validate"); -+ "--class, --agent, and --provider can only be used with " -+ "--validate"); - - // Not all of --class, --agent, and --provider need to be given. Not all - // classes support the concept of a provider. Check that what we were given -@@ -1398,15 +1401,16 @@ validate_cmdline(crm_exit_t *exit_code) - options.v_agent ? 
options.v_agent : ""); - } - -- if (error == NULL) { -- if (options.validate_options == NULL) { -- options.validate_options = crm_str_table_new(); -- } -- *exit_code = cli_resource_execute_from_params(out, "test", options.v_class, options.v_provider, options.v_agent, -- "validate-all", options.validate_options, -- options.override_params, options.timeout_ms, -- options.resource_verbose, options.force); -+ if (error != NULL) { -+ return; -+ } -+ -+ if (options.cmdline_params == NULL) { -+ options.cmdline_params = crm_str_table_new(); - } -+ options.require_resource = FALSE; -+ options.require_dataset = FALSE; -+ options.require_cib = FALSE; - } - - static GOptionContext * -@@ -1467,8 +1471,6 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { - "Show command help", command_entries); - pcmk__add_arg_group(context, "locations", "Locations:", - "Show location help", location_entries); -- pcmk__add_arg_group(context, "validate", "Validate:", -- "Show validate help", validate_entries); - pcmk__add_arg_group(context, "advanced", "Advanced:", - "Show advanced option help", advanced_entries); - pcmk__add_arg_group(context, "additional", "Additional Options:", -@@ -1512,7 +1514,6 @@ main(int argc, char **argv) - goto done; - } - -- options.resource_verbose = args->verbosity; - out->quiet = args->quiet; - - crm_log_args(argc, argv); -@@ -1628,15 +1629,15 @@ main(int argc, char **argv) - goto done; - } - -- // Sanity check validating from command line parameters. If everything checks out, -- // go ahead and run the validation. This way we don't need a CIB connection. -- if (options.validate_cmdline) { -- validate_cmdline(&exit_code); -- goto done; -- } else if (options.validate_options != NULL) { -+ if (options.cmdline_config) { -+ /* A resource configuration was given on the command line. Sanity-check -+ * the values and set error if they don't make sense. -+ */ -+ validate_cmdline_config(); -+ } else if (options.cmdline_params != NULL) { - // @COMPAT @TODO error out here when we can break backward compatibility -- g_hash_table_destroy(options.validate_options); -- options.validate_options = NULL; -+ g_hash_table_destroy(options.cmdline_params); -+ options.cmdline_params = NULL; - } - - if (error != NULL) { -@@ -1773,12 +1774,18 @@ main(int argc, char **argv) - break; - - case cmd_execute_agent: -- exit_code = cli_resource_execute(out, rsc, options.rsc_id, -- options.operation, -- options.override_params, -- options.timeout_ms, cib_conn, -- data_set, options.resource_verbose, -- options.force); -+ if (options.cmdline_config) { -+ exit_code = cli_resource_execute_from_params(out, "test", -+ options.v_class, options.v_provider, options.v_agent, -+ "validate-all", options.cmdline_params, -+ options.override_params, options.timeout_ms, -+ args->verbosity, options.force); -+ } else { -+ exit_code = cli_resource_execute(out, rsc, options.rsc_id, -+ options.operation, options.override_params, -+ options.timeout_ms, cib_conn, data_set, -+ args->verbosity, options.force); -+ } - break; - - case cmd_colocations: -@@ -2038,7 +2045,7 @@ done: - g_hash_table_destroy(options.override_params); - } - -- /* options.validate_options does not need to be destroyed here. See the -+ /* options.cmdline_params does not need to be destroyed here. See the - * comments in cli_resource_execute_from_params. 
- */ - --- -1.8.3.1 - - -From e140bd1bc35a20f027f054b4575808bd0ef547fc Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 16 Sep 2020 15:40:16 -0500 -Subject: [PATCH 2/6] Low: tools: handle required node names better in - crm_resource - -Currently, --fail is the only option that requires a node name to be specified, -but generalize the handling so future options can reuse it. - -This also makes the error handling closer to what's done for required resource -names, both in error message and exit status. ---- - tools/crm_resource.c | 87 ++++++++++++++++++++++++++++---------------- - tools/crm_resource_runtime.c | 8 +--- - 2 files changed, 57 insertions(+), 38 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 1dcb0f0..2717a62 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -74,6 +74,7 @@ struct { - gboolean require_crmd; // Whether command requires controller IPC - gboolean require_dataset; // Whether command requires populated data set - gboolean require_resource; // Whether command requires resource specified -+ gboolean require_node; // Whether command requires node specified - int find_flags; // Flags to use when searching for resource - - // Command-line option values -@@ -774,6 +775,7 @@ option_cb(const gchar *option_name, const gchar *optarg, gpointer data, - gboolean - fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { - options.require_crmd = TRUE; -+ options.require_node = TRUE; - SET_COMMAND(cmd_fail); - return TRUE; - } -@@ -1483,9 +1485,13 @@ main(int argc, char **argv) - { - xmlNode *cib_xml_copy = NULL; - pe_resource_t *rsc = NULL; -- -+ pe_node_t *node = NULL; - int rc = pcmk_rc_ok; - -+ /* -+ * Parse command line arguments -+ */ -+ - pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); - GOptionContext *context = NULL; - GOptionGroup *output_group = NULL; -@@ -1502,6 +1508,10 @@ main(int argc, char **argv) - goto done; - } - -+ /* -+ * Set verbosity -+ */ -+ - for (int i = 0; i < args->verbosity; i++) { - crm_bump_log_level(argc, argv); - } -@@ -1518,9 +1528,9 @@ main(int argc, char **argv) - - crm_log_args(argc, argv); - -- if (options.host_uname) { -- crm_trace("Option host => %s", options.host_uname); -- } -+ /* -+ * Validate option combinations -+ */ - - // If the user didn't explicitly specify a command, list resources - if (options.rsc_cmd == cmd_none) { -@@ -1634,30 +1644,42 @@ main(int argc, char **argv) - * the values and set error if they don't make sense. 
- */ - validate_cmdline_config(); -+ if (error != NULL) { -+ exit_code = CRM_EX_USAGE; -+ goto done; -+ } -+ - } else if (options.cmdline_params != NULL) { - // @COMPAT @TODO error out here when we can break backward compatibility - g_hash_table_destroy(options.cmdline_params); - options.cmdline_params = NULL; - } - -- if (error != NULL) { -+ if (options.require_resource && (options.rsc_id == NULL)) { -+ rc = ENXIO; -+ exit_code = CRM_EX_USAGE; -+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code, -+ "Must supply a resource id with -r"); -+ goto done; -+ } -+ if (options.require_node && (options.host_uname == NULL)) { -+ rc = ENXIO; - exit_code = CRM_EX_USAGE; -+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code, -+ "Must supply a node name with -N"); - goto done; - } - -+ /* -+ * Set up necessary connections -+ */ -+ - if (options.force) { - crm_debug("Forcing..."); - cib__set_call_options(options.cib_options, crm_system_name, - cib_quorum_override); - } - -- if (options.require_resource && !options.rsc_id) { -- rc = ENXIO; -- g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, -- "Must supply a resource id with -r"); -- goto done; -- } -- - if (options.find_flags && options.rsc_id) { - options.require_dataset = TRUE; - } -@@ -1700,6 +1722,11 @@ main(int argc, char **argv) - } - } - -+ // If user supplied a node name, check whether it exists -+ if ((options.host_uname != NULL) && (data_set != NULL)) { -+ node = pe_find_node(data_set->nodes, options.host_uname); -+ } -+ - // Establish a connection to the controller if needed - if (options.require_crmd) { - rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); -@@ -1718,6 +1745,10 @@ main(int argc, char **argv) - } - } - -+ /* -+ * Handle requested command -+ */ -+ - switch (options.rsc_cmd) { - case cmd_list_resources: { - GListPtr all = NULL; -@@ -1844,18 +1875,11 @@ main(int argc, char **argv) - break; - - case cmd_why: -- { -- pe_node_t *dest = NULL; -- -- if (options.host_uname) { -- dest = pe_find_node(data_set->nodes, options.host_uname); -- if (dest == NULL) { -- rc = pcmk_rc_node_unknown; -- goto done; -- } -- } -- out->message(out, "resource-reasons-list", cib_conn, data_set->resources, rsc, dest); -- rc = pcmk_rc_ok; -+ if ((options.host_uname != NULL) && (node == NULL)) { -+ rc = pcmk_rc_node_unknown; -+ } else { -+ rc = out->message(out, "resource-reasons-list", cib_conn, -+ data_set->resources, rsc, node); - } - break; - -@@ -1878,15 +1902,10 @@ main(int argc, char **argv) - case cmd_ban: - if (options.host_uname == NULL) { - rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code); -+ } else if (node == NULL) { -+ rc = pcmk_rc_node_unknown; - } else { -- pe_node_t *dest = pe_find_node(data_set->nodes, -- options.host_uname); -- -- if (dest == NULL) { -- rc = pcmk_rc_node_unknown; -- goto done; -- } -- rc = cli_resource_ban(out, options.rsc_id, dest->details->uname, -+ rc = cli_resource_ban(out, options.rsc_id, node->details->uname, - options.move_lifetime, NULL, cib_conn, - options.cib_options, - options.promoted_role_only); -@@ -2002,6 +2021,10 @@ main(int argc, char **argv) - break; - } - -+ /* -+ * Clean up and exit -+ */ -+ - done: - if (rc != pcmk_rc_ok) { - if (rc == pcmk_rc_no_quorum) { -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 3f28c7b..de5e807 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The 
version control history for this file may have further details. - * -@@ -511,11 +511,7 @@ send_lrm_rsc_op(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, bool do_fail_ - return EINVAL; - } - -- if (host_uname == NULL) { -- out->err(out, "Please specify a node name"); -- return EINVAL; -- -- } else { -+ { - pe_node_t *node = pe_find_node(data_set->nodes, host_uname); - - if (node == NULL) { --- -1.8.3.1 - - -From 31bda91470487790d6e17b6f2cbed282bafd11d0 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 15 Sep 2020 15:00:53 -0500 -Subject: [PATCH 3/6] Refactor: libpacemaker: add files for resource-related - API - ---- - include/pacemaker-internal.h | 3 ++- - include/pcmki/Makefile.am | 3 ++- - include/pcmki/pcmki_resource.h | 14 ++++++++++++++ - lib/pacemaker/Makefile.am | 3 ++- - lib/pacemaker/pcmk_resource.c | 21 +++++++++++++++++++++ - 5 files changed, 41 insertions(+), 3 deletions(-) - create mode 100644 include/pcmki/pcmki_resource.h - create mode 100644 lib/pacemaker/pcmk_resource.c - -diff --git a/include/pacemaker-internal.h b/include/pacemaker-internal.h -index 2e75d09..bf33f3e 100644 ---- a/include/pacemaker-internal.h -+++ b/include/pacemaker-internal.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2019 the Pacemaker project contributors -+ * Copyright 2019-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -14,6 +14,7 @@ - # include - # include - # include -+# include - # include - # include - # include -diff --git a/include/pcmki/Makefile.am b/include/pcmki/Makefile.am -index 7aa64c7..446c801 100644 ---- a/include/pcmki/Makefile.am -+++ b/include/pcmki/Makefile.am -@@ -1,5 +1,5 @@ - # --# Copyright 2019 the Pacemaker project contributors -+# Copyright 2019-2021 the Pacemaker project contributors - # - # The version control history for this file may have further details. - # -@@ -13,6 +13,7 @@ noinst_HEADERS = pcmki_error.h \ - pcmki_cluster_queries.h \ - pcmki_fence.h \ - pcmki_output.h \ -+ pcmki_resource.h \ - pcmki_sched_allocate.h \ - pcmki_sched_notif.h \ - pcmki_sched_utils.h \ -diff --git a/include/pcmki/pcmki_resource.h b/include/pcmki/pcmki_resource.h -new file mode 100644 -index 0000000..effa945 ---- /dev/null -+++ b/include/pcmki/pcmki_resource.h -@@ -0,0 +1,14 @@ -+/* -+ * Copyright 2021 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. -+ * -+ * This source code is licensed under the GNU Lesser General Public License -+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. -+ */ -+#ifndef PCMKI_RESOURCE__H -+#define PCMKI_RESOURCE__H -+ -+#include -+ -+#endif /* PCMK_RESOURCE__H */ -diff --git a/lib/pacemaker/Makefile.am b/lib/pacemaker/Makefile.am -index 4129ade..760c04a 100644 ---- a/lib/pacemaker/Makefile.am -+++ b/lib/pacemaker/Makefile.am -@@ -1,5 +1,5 @@ - # --# Copyright 2004-2019 the Pacemaker project contributors -+# Copyright 2004-2021 the Pacemaker project contributors - # - # The version control history for this file may have further details. 
- # -@@ -31,6 +31,7 @@ libpacemaker_la_SOURCES = - libpacemaker_la_SOURCES += pcmk_cluster_queries.c - libpacemaker_la_SOURCES += pcmk_fence.c - libpacemaker_la_SOURCES += pcmk_output.c -+libpacemaker_la_SOURCES += pcmk_resource.c - libpacemaker_la_SOURCES += pcmk_sched_allocate.c - libpacemaker_la_SOURCES += pcmk_sched_bundle.c - libpacemaker_la_SOURCES += pcmk_sched_clone.c -diff --git a/lib/pacemaker/pcmk_resource.c b/lib/pacemaker/pcmk_resource.c -new file mode 100644 -index 0000000..05614fc ---- /dev/null -+++ b/lib/pacemaker/pcmk_resource.c -@@ -0,0 +1,21 @@ -+/* -+ * Copyright 2021 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. -+ * -+ * This source code is licensed under the GNU General Public License version 2 -+ * or later (GPLv2+) WITHOUT ANY WARRANTY. -+ */ -+ -+#include -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include --- -1.8.3.1 - - -From e45fe95cc6f526ab67ab6f718aa1364861ca525b Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 15 Sep 2020 15:32:25 -0500 -Subject: [PATCH 4/6] API: libpacemaker: new API pcmk_resource_digests() - ---- - include/pacemaker.h | 22 +++++++- - include/pcmki/pcmki_resource.h | 7 +++ - lib/pacemaker/pcmk_output.c | 107 +++++++++++++++++++++++++++++++++++- - lib/pacemaker/pcmk_resource.c | 119 +++++++++++++++++++++++++++++++++++++++++ - xml/Makefile.am | 10 +++- - xml/api/digests-2.6.rng | 33 ++++++++++++ - 6 files changed, 293 insertions(+), 5 deletions(-) - create mode 100644 xml/api/digests-2.6.rng - -diff --git a/include/pacemaker.h b/include/pacemaker.h -index b2a73cd..51bf585 100644 ---- a/include/pacemaker.h -+++ b/include/pacemaker.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2019 the Pacemaker project contributors -+ * Copyright 2019-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -20,8 +20,11 @@ extern "C" { - * \ingroup pacemaker - */ - --# include -+# include - # include -+# include -+ -+# include - - /*! - * \brief Get controller status -@@ -55,6 +58,21 @@ int pcmk_designated_controller(xmlNodePtr *xml, unsigned int message_timeout_ms) - */ - int pcmk_pacemakerd_status(xmlNodePtr *xml, char *ipc_name, unsigned int message_timeout_ms); - -+/*! -+ * \brief Calculate and output resource operation digests -+ * -+ * \param[out] xml Where to store XML with result -+ * \param[in] rsc Resource to calculate digests for -+ * \param[in] node Node whose operation history should be used -+ * \param[in] overrides Hash table of configuration parameters to override -+ * \param[in] data_set Cluster working set (with status) -+ * -+ * \return Standard Pacemaker return code -+ */ -+int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc, -+ pe_node_t *node, GHashTable *overrides, -+ pe_working_set_t *data_set); -+ - #ifdef BUILD_PUBLIC_LIBPACEMAKER - - /*! 
-diff --git a/include/pcmki/pcmki_resource.h b/include/pcmki/pcmki_resource.h -index effa945..9d2afb5 100644 ---- a/include/pcmki/pcmki_resource.h -+++ b/include/pcmki/pcmki_resource.h -@@ -9,6 +9,13 @@ - #ifndef PCMKI_RESOURCE__H - #define PCMKI_RESOURCE__H - -+#include -+ - #include -+#include -+ -+int pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc, -+ pe_node_t *node, GHashTable *overrides, -+ pe_working_set_t *data_set); - - #endif /* PCMK_RESOURCE__H */ -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 500afd1..bc4b91a 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2019-2020 the Pacemaker project contributors -+ * Copyright 2019-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -539,6 +540,108 @@ crmadmin_node_xml(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - -+PCMK__OUTPUT_ARGS("digests", "pe_resource_t *", "pe_node_t *", "const char *", -+ "guint", "op_digest_cache_t *") -+static int -+digests_text(pcmk__output_t *out, va_list args) -+{ -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ pe_node_t *node = va_arg(args, pe_node_t *); -+ const char *task = va_arg(args, const char *); -+ guint interval_ms = va_arg(args, guint); -+ op_digest_cache_t *digests = va_arg(args, op_digest_cache_t *); -+ -+ char *action_desc = NULL; -+ const char *rsc_desc = "unknown resource"; -+ const char *node_desc = "unknown node"; -+ -+ if (interval_ms != 0) { -+ action_desc = crm_strdup_printf("%ums-interval %s action", interval_ms, -+ ((task == NULL)? "unknown" : task)); -+ } else if (pcmk__str_eq(task, "monitor", pcmk__str_none)) { -+ action_desc = strdup("probe action"); -+ } else { -+ action_desc = crm_strdup_printf("%s action", -+ ((task == NULL)? "unknown" : task)); -+ } -+ if ((rsc != NULL) && (rsc->id != NULL)) { -+ rsc_desc = rsc->id; -+ } -+ if ((node != NULL) && (node->details->uname != NULL)) { -+ node_desc = node->details->uname; -+ } -+ out->begin_list(out, NULL, NULL, "Digests for %s %s on %s", -+ rsc_desc, action_desc, node_desc); -+ free(action_desc); -+ -+ if (digests == NULL) { -+ out->list_item(out, NULL, "none"); -+ out->end_list(out); -+ return pcmk_rc_ok; -+ } -+ if (digests->digest_all_calc != NULL) { -+ out->list_item(out, NULL, "%s (all parameters)", -+ digests->digest_all_calc); -+ } -+ if (digests->digest_secure_calc != NULL) { -+ out->list_item(out, NULL, "%s (non-private parameters)", -+ digests->digest_secure_calc); -+ } -+ if (digests->digest_restart_calc != NULL) { -+ out->list_item(out, NULL, "%s (non-reloadable parameters)", -+ digests->digest_restart_calc); -+ } -+ out->end_list(out); -+ return pcmk_rc_ok; -+} -+ -+static void -+add_digest_xml(xmlNode *parent, const char *type, const char *digest, -+ xmlNode *digest_source) -+{ -+ if (digest != NULL) { -+ xmlNodePtr digest_xml = create_xml_node(parent, "digest"); -+ -+ crm_xml_add(digest_xml, "type", ((type == NULL)? 
"unspecified" : type)); -+ crm_xml_add(digest_xml, "hash", digest); -+ if (digest_source != NULL) { -+ add_node_copy(digest_xml, digest_source); -+ } -+ } -+} -+ -+PCMK__OUTPUT_ARGS("digests", "pe_resource_t *", "pe_node_t *", "const char *", -+ "guint", "op_digest_cache_t *") -+static int -+digests_xml(pcmk__output_t *out, va_list args) -+{ -+ pe_resource_t *rsc = va_arg(args, pe_resource_t *); -+ pe_node_t *node = va_arg(args, pe_node_t *); -+ const char *task = va_arg(args, const char *); -+ guint interval_ms = va_arg(args, guint); -+ op_digest_cache_t *digests = va_arg(args, op_digest_cache_t *); -+ -+ char *interval_s = crm_strdup_printf("%ums", interval_ms); -+ xmlNode *xml = NULL; -+ -+ xml = pcmk__output_create_xml_node(out, "digests", -+ "resource", crm_str(rsc->id), -+ "node", crm_str(node->details->uname), -+ "task", crm_str(task), -+ "interval", interval_s, -+ NULL); -+ free(interval_s); -+ if (digests != NULL) { -+ add_digest_xml(xml, "all", digests->digest_all_calc, -+ digests->params_all); -+ add_digest_xml(xml, "nonprivate", digests->digest_secure_calc, -+ digests->params_secure); -+ add_digest_xml(xml, "nonreloadable", digests->digest_restart_calc, -+ digests->params_restart); -+ } -+ return pcmk_rc_ok; -+} -+ - static pcmk__message_entry_t fmt_functions[] = { - { "rsc-is-colocated-with-list", "default", rsc_is_colocated_with_list }, - { "rsc-is-colocated-with-list", "xml", rsc_is_colocated_with_list_xml }, -@@ -557,6 +660,8 @@ static pcmk__message_entry_t fmt_functions[] = { - { "crmadmin-node-list", "default", crmadmin_node_list }, - { "crmadmin-node", "default", crmadmin_node_text }, - { "crmadmin-node", "xml", crmadmin_node_xml }, -+ { "digests", "default", digests_text }, -+ { "digests", "xml", digests_xml }, - - { NULL, NULL, NULL } - }; -diff --git a/lib/pacemaker/pcmk_resource.c b/lib/pacemaker/pcmk_resource.c -index 05614fc..197edf8 100644 ---- a/lib/pacemaker/pcmk_resource.c -+++ b/lib/pacemaker/pcmk_resource.c -@@ -9,6 +9,7 @@ - - #include - -+#include - #include - #include - -@@ -19,3 +20,121 @@ - - #include - #include -+ -+// Search path for resource operation history (takes node name and resource ID) -+#define XPATH_OP_HISTORY "//" XML_CIB_TAG_STATUS \ -+ "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \ -+ "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \ -+ "/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" -+ -+static xmlNode * -+best_op(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) -+{ -+ char *xpath = NULL; -+ xmlNode *history = NULL; -+ xmlNode *best = NULL; -+ -+ // Find node's resource history -+ xpath = crm_strdup_printf(XPATH_OP_HISTORY, node->details->uname, rsc->id); -+ history = get_xpath_object(xpath, data_set->input, LOG_NEVER); -+ free(xpath); -+ -+ // Examine each history entry -+ for (xmlNode *lrm_rsc_op = first_named_child(history, XML_LRM_TAG_RSC_OP); -+ lrm_rsc_op != NULL; lrm_rsc_op = crm_next_same_xml(lrm_rsc_op)) { -+ -+ const char *digest = crm_element_value(lrm_rsc_op, -+ XML_LRM_ATTR_RESTART_DIGEST); -+ guint interval_ms = 0; -+ -+ crm_element_value_ms(lrm_rsc_op, XML_LRM_ATTR_INTERVAL, &interval_ms); -+ -+ if (pcmk__ends_with(ID(lrm_rsc_op), "_last_failure_0") -+ || (interval_ms != 0)) { -+ -+ // Only use last failure or recurring op if nothing else available -+ if (best == NULL) { -+ best = lrm_rsc_op; -+ } -+ continue; -+ } -+ -+ best = lrm_rsc_op; -+ if (digest != NULL) { -+ // Any non-recurring action with a restart digest is sufficient -+ break; -+ } -+ } -+ return best; -+} -+ -+/*! 
-+ * \internal -+ * \brief Calculate and output resource operation digests -+ * -+ * \param[in] out Output object -+ * \param[in] rsc Resource to calculate digests for -+ * \param[in] node Node whose operation history should be used -+ * \param[in] overrides Hash table of configuration parameters to override -+ * \param[in] data_set Cluster working set (with status) -+ * -+ * \return Standard Pacemaker return code -+ */ -+int -+pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc, -+ pe_node_t *node, GHashTable *overrides, -+ pe_working_set_t *data_set) -+{ -+ const char *task = NULL; -+ xmlNode *xml_op = NULL; -+ op_digest_cache_t *digests = NULL; -+ guint interval_ms = 0; -+ int rc = pcmk_rc_ok; -+ -+ if ((out == NULL) || (rsc == NULL) || (node == NULL) || (data_set == NULL)) { -+ return EINVAL; -+ } -+ if (rsc->variant != pe_native) { -+ // Only primitives get operation digests -+ return EOPNOTSUPP; -+ } -+ -+ // Find XML of operation history to use -+ xml_op = best_op(rsc, node, data_set); -+ -+ // Generate an operation key -+ if (xml_op != NULL) { -+ task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); -+ crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); -+ } -+ if (task == NULL) { // Assume start if no history is available -+ task = RSC_START; -+ interval_ms = 0; -+ } -+ -+ // Calculate and show digests -+ digests = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op, -+ overrides, true, data_set); -+ rc = out->message(out, "digests", rsc, node, task, interval_ms, digests); -+ -+ pe__free_digests(digests); -+ return rc; -+} -+ -+int -+pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc, -+ pe_node_t *node, GHashTable *overrides, -+ pe_working_set_t *data_set) -+{ -+ pcmk__output_t *out = NULL; -+ int rc = pcmk_rc_ok; -+ -+ rc = pcmk__out_prologue(&out, xml); -+ if (rc != pcmk_rc_ok) { -+ return rc; -+ } -+ pcmk__register_lib_messages(out); -+ rc = pcmk__resource_digests(out, rsc, node, overrides, data_set); -+ pcmk__out_epilogue(out, xml, rc); -+ return rc; -+} -diff --git a/xml/Makefile.am b/xml/Makefile.am -index e7b9a51..cb6cfa0 100644 ---- a/xml/Makefile.am -+++ b/xml/Makefile.am -@@ -1,5 +1,5 @@ - # --# Copyright 2004-2019 the Pacemaker project contributors -+# Copyright 2004-2021 the Pacemaker project contributors - # - # The version control history for this file may have further details. - # -@@ -50,7 +50,13 @@ version_pairs_last = $(wordlist \ - # problems. 
- - # Names of API schemas that form the choices for pacemaker-result content --API_request_base = command-output crm_mon crm_resource crmadmin stonith_admin version -+API_request_base = command-output \ -+ crm_mon \ -+ crm_resource \ -+ crmadmin \ -+ digests \ -+ stonith_admin \ -+ version - - # Names of CIB schemas that form the choices for cib/configuration content - CIB_cfg_base = options nodes resources constraints fencing acls tags alerts -diff --git a/xml/api/digests-2.6.rng b/xml/api/digests-2.6.rng -new file mode 100644 -index 0000000..7e843d4 ---- /dev/null -+++ b/xml/api/digests-2.6.rng -@@ -0,0 +1,33 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - - -From 4e726eb67c8eed255ee83706ed13cd7ea31f9864 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 14 Sep 2020 16:29:41 -0500 -Subject: [PATCH 5/6] Feature: tools: add crm_resource --digests option - -This is not particularly useful for end users but can help during development, -and can be used by higher-level tools to bypass Pacemaker's configuration -change detection (with obvious risks). ---- - tools/crm_resource.c | 39 ++++++++++++++++++++++++++++++++++++++- - 1 file changed, 38 insertions(+), 1 deletion(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 2717a62..8c7247a 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -40,6 +40,7 @@ enum rsc_command { - cmd_cts, - cmd_delete, - cmd_delete_param, -+ cmd_digests, - cmd_execute_agent, - cmd_fail, - cmd_get_param, -@@ -158,6 +159,8 @@ gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg, - gpointer data, GError **error); - gboolean restart_cb(const gchar *option_name, const gchar *optarg, - gpointer data, GError **error); -+gboolean digests_cb(const gchar *option_name, const gchar *optarg, -+ gpointer data, GError **error); - gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); - gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); - -@@ -507,6 +510,14 @@ static GOptionEntry advanced_entries[] = { - { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb, - "(Advanced) Wait until the cluster settles into a stable state", - NULL }, -+ { "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, digests_cb, -+ "(Advanced) Show parameter hashes that Pacemaker uses to detect\n" -+ INDENT "configuration changes (only accurate if there is resource\n" -+ INDENT "history on the specified node). 
Required: --resource, --node.\n" -+ INDENT "Optional: any NAME=VALUE parameters will be used to override\n" -+ INDENT "the configuration (to see what the hash would be with those\n" -+ INDENT "changes).", -+ NULL }, - { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, - validate_or_force_cb, - "(Advanced) Bypass the cluster and demote a resource on the local\n" -@@ -893,7 +904,9 @@ validate_or_force_cb(const gchar *option_name, const gchar *optarg, - } - options.operation = g_strdup(option_name + 2); // skip "--" - options.find_flags = pe_find_renamed|pe_find_anon; -- options.override_params = crm_str_table_new(); -+ if (options.override_params == NULL) { -+ options.override_params = crm_str_table_new(); -+ } - return TRUE; - } - -@@ -907,6 +920,20 @@ restart_cb(const gchar *option_name, const gchar *optarg, gpointer data, - } - - gboolean -+digests_cb(const gchar *option_name, const gchar *optarg, gpointer data, -+ GError **error) -+{ -+ SET_COMMAND(cmd_digests); -+ options.find_flags = pe_find_renamed|pe_find_anon; -+ if (options.override_params == NULL) { -+ options.override_params = crm_str_table_new(); -+ } -+ options.require_node = TRUE; -+ options.require_dataset = TRUE; -+ return TRUE; -+} -+ -+gboolean - wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { - SET_COMMAND(cmd_wait); - options.require_resource = FALSE; -@@ -1819,6 +1846,16 @@ main(int argc, char **argv) - } - break; - -+ case cmd_digests: -+ node = pe_find_node(data_set->nodes, options.host_uname); -+ if (node == NULL) { -+ rc = pcmk_rc_node_unknown; -+ } else { -+ rc = pcmk__resource_digests(out, rsc, node, -+ options.override_params, data_set); -+ } -+ break; -+ - case cmd_colocations: - rc = out->message(out, "stacks-constraints", rsc, data_set, false); - break; --- -1.8.3.1 - - -From bb34d07013bed2e71ac9cedb4b1631ad5e2825bf Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 23 Nov 2020 12:17:31 -0600 -Subject: [PATCH 6/6] Test: cts-cli: add regression tests for crm_resource - --digests - ---- - cts/Makefile.am | 3 +- - cts/cli/crm_resource_digests.xml | 143 +++++++++++++++++++++++++++++++++++++++ - cts/cli/regression.tools.exp | 34 ++++++++++ - cts/cts-cli.in | 14 +++- - 4 files changed, 192 insertions(+), 2 deletions(-) - create mode 100644 cts/cli/crm_resource_digests.xml - -diff --git a/cts/Makefile.am b/cts/Makefile.am -index 5666a9f..de02aed 100644 ---- a/cts/Makefile.am -+++ b/cts/Makefile.am -@@ -1,5 +1,5 @@ - # --# Copyright 2001-2019 the Pacemaker project contributors -+# Copyright 2001-2021 the Pacemaker project contributors - # - # The version control history for this file may have further details. 
- # -@@ -65,6 +65,7 @@ dist_cli_DATA = cli/constraints.xml \ - cli/crm_diff_old.xml \ - cli/crm_mon.xml \ - cli/crm_mon-partial.xml \ -+ cli/crm_resource_digests.xml \ - cli/regression.acls.exp \ - cli/regression.crm_mon.exp \ - cli/regression.dates.exp \ -diff --git a/cts/cli/crm_resource_digests.xml b/cts/cli/crm_resource_digests.xml -new file mode 100644 -index 0000000..074ca3d ---- /dev/null -+++ b/cts/cli/crm_resource_digests.xml -@@ -0,0 +1,143 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index a85b7d6..510cc0a 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -4019,3 +4019,37 @@ Resources colocated with clone: - - =#=#=#= End test: Recursively check locations and constraints for clone in XML - OK (0) =#=#=#= - * Passed: crm_resource - Recursively check locations and constraints for clone in XML -+=#=#=#= Begin test: Show resource digests =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Show resource digests - OK (0) =#=#=#= -+* Passed: crm_resource - Show resource digests -+=#=#=#= Begin test: Show resource digests with overrides =#=#=#= -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+=#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#= -+* Passed: crm_resource - Show resource digests with overrides -diff --git a/cts/cts-cli.in b/cts/cts-cli.in -index dfdd3de..96f5386 100755 ---- a/cts/cts-cli.in -+++ b/cts/cts-cli.in -@@ -1,6 +1,6 @@ - #!@BASH_PATH@ - # --# Copyright 2008-2020 the Pacemaker project contributors -+# Copyright 2008-2021 the Pacemaker project contributors - # - # The version control history for this file may have further details. - # -@@ -791,6 +791,18 @@ function test_tools() { - done - - unset CIB_file -+ -+ export CIB_file="$test_home/cli/crm_resource_digests.xml" -+ -+ desc="Show resource digests" -+ cmd="crm_resource --digests -r rsc1 -N node1 --output-as=xml" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="Show resource digests with overrides" -+ cmd="$cmd CRM_meta_interval=10000 CRM_meta_timeout=20000" -+ test_assert $CRM_EX_OK 0 -+ -+ unset CIB_file - } - - INVALID_PERIODS=( --- -1.8.3.1 - diff --git a/SOURCES/023-rhbz1872376.patch b/SOURCES/023-rhbz1872376.patch deleted file mode 100644 index acecdb4..0000000 --- a/SOURCES/023-rhbz1872376.patch +++ /dev/null @@ -1,26 +0,0 @@ -From b1241e5026a4fd9a8674ad4b867c7efa3d8ef466 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 12 Jan 2021 09:03:03 -0600 -Subject: [PATCH] Feature: tools: bump feature set for crm_resource --digests - -... 
so script and tools can check for support ---- - include/crm/crm.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/include/crm/crm.h b/include/crm/crm.h -index b07152c..4bbf46a 100644 ---- a/include/crm/crm.h -+++ b/include/crm/crm.h -@@ -51,7 +51,7 @@ extern "C" { - * >=3.0.13: Fail counts include operation name and interval - * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED - */ --# define CRM_FEATURE_SET "3.6.3" -+# define CRM_FEATURE_SET "3.6.4" - - # define EOS '\0' - # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) --- -1.8.3.1 - diff --git a/SOURCES/024-rhbz1371576.patch b/SOURCES/024-rhbz1371576.patch deleted file mode 100644 index 00f0da4..0000000 --- a/SOURCES/024-rhbz1371576.patch +++ /dev/null @@ -1,4354 +0,0 @@ -From 4540ce980ae861ab1770cd3d9a4e09cf4830dd6c Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Dec 2020 17:16:06 -0600 -Subject: [PATCH 02/12] Low: xml: clone constraints schema in preparation for - changes - ---- - xml/constraints-3.6.rng | 256 ++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 256 insertions(+) - create mode 100644 xml/constraints-3.6.rng - -diff --git a/xml/constraints-3.6.rng b/xml/constraints-3.6.rng -new file mode 100644 -index 0000000..f75d7b4 ---- /dev/null -+++ b/xml/constraints-3.6.rng -@@ -0,0 +1,256 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ group -+ listed -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ stop -+ demote -+ fence -+ freeze -+ -+ -+ -+ -+ -+ -+ -+ -+ always -+ never -+ exclusive -+ -+ -+ -+ -+ -+ start -+ promote -+ demote -+ stop -+ -+ -+ -+ -+ -+ Stopped -+ Started -+ Master -+ Slave -+ -+ -+ -+ -+ -+ Optional -+ Mandatory -+ Serialize -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - - -From f836f5c61b37ca12df55cf2e2b951eeebad8d4f2 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Dec 2020 17:25:23 -0600 -Subject: [PATCH 03/12] Feature: xml: allow colocation constraints to take - "influence" option - -Semantics not yet implemented ---- - xml/constraints-3.6.rng | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/xml/constraints-3.6.rng b/xml/constraints-3.6.rng -index f75d7b4..fece6b0 100644 ---- a/xml/constraints-3.6.rng -+++ b/xml/constraints-3.6.rng -@@ -112,6 +112,9 @@ - - - -+ -+ -+ - - - --- -1.8.3.1 - - -From 0e9ec9d6c4c1885e246fa59b6857300ddda42133 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Dec 2020 17:28:51 -0600 -Subject: [PATCH 04/12] Test: cts-cli: update for schema version bump - ---- - cts/cli/regression.upgrade.exp | 7 +++++-- - cts/cli/regression.validity.exp | 22 ++++++++++++++++++---- - 2 files changed, 23 insertions(+), 6 deletions(-) - -diff --git a/cts/cli/regression.upgrade.exp b/cts/cli/regression.upgrade.exp -index 10f6309..2210ba7 100644 ---- a/cts/cli/regression.upgrade.exp -+++ b/cts/cli/regression.upgrade.exp -@@ -85,8 +85,11 @@ update_validation debug: Configuration valid for schema: pacemaker-3.4 - update_validation debug: pacemaker-3.4-style configuration is also valid for 
pacemaker-3.5 - update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) - update_validation debug: Configuration valid for schema: pacemaker-3.5 --update_validation trace: Stopping at pacemaker-3.5 --update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.5 -+update_validation debug: pacemaker-3.5-style configuration is also valid for pacemaker-3.6 -+update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) -+update_validation debug: Configuration valid for schema: pacemaker-3.6 -+update_validation trace: Stopping at pacemaker-3.6 -+update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.6 - =#=#=#= Current cib after: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#= - - -diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp -index 88ca98e..bd6f499 100644 ---- a/cts/cli/regression.validity.exp -+++ b/cts/cli/regression.validity.exp -@@ -113,7 +113,11 @@ update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) - element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order - element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order - update_validation trace: pacemaker-3.5 validation failed --Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.5 -+update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) -+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order -+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -+update_validation trace: pacemaker-3.6 validation failed -+Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.6 - =#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Invalid configuration (78) =#=#=#= - * Passed: crm_simulate - Run crm_simulate with invalid CIB (enum violation) - =#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= -@@ -212,7 +216,10 @@ update_validation trace: pacemaker-3.4 validation failed - update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) - element cib: Relax-NG validity error : Invalid attribute validate-with for element cib - update_validation trace: pacemaker-3.5 validation failed --Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.5 -+update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) -+element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -+update_validation trace: pacemaker-3.6 validation failed -+Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.6 - =#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Invalid configuration (78) =#=#=#= - * Passed: crm_simulate - Run crm_simulate with invalid CIB (unrecognized validate-with) - =#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= -@@ -306,8 +313,11 @@ update_validation debug: Configuration valid for schema: pacemaker-3.4 - 
update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5 - update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) - update_validation debug: Configuration valid for schema: pacemaker-3.5 --update_validation trace: Stopping at pacemaker-3.5 --update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.5 -+update_validation debug: pacemaker-3.5-style configuration is also valid for pacemaker-3.6 -+update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) -+update_validation debug: Configuration valid for schema: pacemaker-3.6 -+update_validation trace: Stopping at pacemaker-3.6 -+update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.6 - unpack_resources error: Resource start-up disabled since no STONITH resources have been defined - unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option - unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity -@@ -417,6 +427,8 @@ element rsc_order: Relax-NG validity error : Invalid attribute first-action for - element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order - element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order - element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order -+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order - =#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#= - - -@@ -478,6 +490,8 @@ validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attrib - validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order - validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order - validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -+validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order -+validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order - unpack_resources error: Resource start-up disabled since no STONITH resources have been defined - unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option - unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity --- -1.8.3.1 - - -From 2e7e02bae2432feebb9d731030da808e33ff49ad Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 10 Dec 2020 15:46:28 -0600 -Subject: [PATCH 05/12] Refactor: scheduler: add and parse "critical" resource - meta-attribute - -Semantics not yet implemented. ---- - include/crm/msg_xml.h | 3 ++- - include/crm/pengine/pe_types.h | 1 + - lib/pengine/complex.c | 5 +++++ - 3 files changed, 8 insertions(+), 1 deletion(-) - -diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h -index c8b528b..d5ac418 100644 ---- a/include/crm/msg_xml.h -+++ b/include/crm/msg_xml.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -217,6 +217,7 @@ extern "C" { - # define XML_RSC_ATTR_REMOTE_RA_ADDR "addr" - # define XML_RSC_ATTR_REMOTE_RA_SERVER "server" - # define XML_RSC_ATTR_REMOTE_RA_PORT "port" -+# define XML_RSC_ATTR_CRITICAL "critical" - - # define XML_REMOTE_ATTR_RECONNECT_INTERVAL "reconnect_interval" - -diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h -index 1416cee..7d90c42 100644 ---- a/include/crm/pengine/pe_types.h -+++ b/include/crm/pengine/pe_types.h -@@ -251,6 +251,7 @@ struct pe_node_s { - # define pe_rsc_stop 0x00001000ULL - # define pe_rsc_reload 0x00002000ULL - # define pe_rsc_allow_remote_remotes 0x00004000ULL -+# define pe_rsc_critical 0x00008000ULL - - # define pe_rsc_failed 0x00010000ULL - # define pe_rsc_runnable 0x00040000ULL -diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c -index 60199c7..5d7d628 100644 ---- a/lib/pengine/complex.c -+++ b/lib/pengine/complex.c -@@ -608,6 +608,11 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, - value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY); - (*rsc)->priority = crm_parse_int(value, "0"); - -+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL); -+ if ((value == NULL) || crm_is_true(value)) { -+ pe__set_resource_flags(*rsc, pe_rsc_critical); -+ } -+ - value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY); - if (crm_is_true(value)) { - pe__set_resource_flags(*rsc, pe_rsc_notify); --- -1.8.3.1 - - -From a48e04ac54a3b9cf8292f3d06891ef6bf91a4c49 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Dec 2020 16:35:35 -0600 -Subject: [PATCH 06/12] Refactor: scheduler: add and parse "influence" - colocation property - -Semantics not yet implemented. ---- - include/crm/msg_xml.h | 1 + - include/pcmki/pcmki_sched_utils.h | 4 +- - include/pcmki/pcmki_scheduler.h | 3 +- - lib/pacemaker/pcmk_sched_bundle.c | 9 ++-- - lib/pacemaker/pcmk_sched_constraints.c | 85 +++++++++++++++++++++++++++------- - lib/pacemaker/pcmk_sched_group.c | 8 +++- - lib/pacemaker/pcmk_sched_native.c | 6 ++- - 7 files changed, 89 insertions(+), 27 deletions(-) - -diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h -index d5ac418..b66ab90 100644 ---- a/include/crm/msg_xml.h -+++ b/include/crm/msg_xml.h -@@ -328,6 +328,7 @@ extern "C" { - # define XML_COLOC_ATTR_NODE_ATTR "node-attribute" - # define XML_COLOC_ATTR_SOURCE_INSTANCE "rsc-instance" - # define XML_COLOC_ATTR_TARGET_INSTANCE "with-rsc-instance" -+# define XML_COLOC_ATTR_INFLUENCE "influence" - - # define XML_LOC_ATTR_SOURCE "rsc" - # define XML_LOC_ATTR_SOURCE_PATTERN "rsc-pattern" -diff --git a/include/pcmki/pcmki_sched_utils.h b/include/pcmki/pcmki_sched_utils.h -index c7ae1b8..bed64da 100644 ---- a/include/pcmki/pcmki_sched_utils.h -+++ b/include/pcmki/pcmki_sched_utils.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -31,7 +31,7 @@ pe__location_t *rsc2node_new(const char *id, pe_resource_t *rsc, int weight, - void pcmk__new_colocation(const char *id, const char *node_attr, int score, - pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - const char *state_lh, const char *state_rh, -- pe_working_set_t *data_set); -+ bool influence, pe_working_set_t *data_set); - - extern gboolean rsc_ticket_new(const char *id, pe_resource_t * rsc_lh, pe_ticket_t * ticket, - const char *state_lh, const char *loss_policy, -diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h -index b24e994..c604a32 100644 ---- a/include/pcmki/pcmki_scheduler.h -+++ b/include/pcmki/pcmki_scheduler.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2014-2020 the Pacemaker project contributors -+ * Copyright 2014-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -46,6 +46,7 @@ typedef struct { - int role_rh; - - int score; -+ bool influence; // Whether rsc_lh should influence active rsc_rh placement - } pcmk__colocation_t; - - enum loss_ticket_policy_e { -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index 4f41b70..bc7009d 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -9,6 +9,8 @@ - - #include - -+#include -+ - #include - #include - -@@ -143,7 +145,7 @@ pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *prefer, - pcmk__new_colocation("child-remote-with-docker-remote", NULL, - INFINITY, replica->remote, - container_host->details->remote_rsc, NULL, -- NULL, data_set); -+ NULL, true, data_set); - } - - if (replica->remote) { -@@ -311,7 +313,8 @@ pcmk__bundle_internal_constraints(pe_resource_t *rsc, - pe_order_implies_first|pe_order_preserve, data_set); - - pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip, -- replica->container, NULL, NULL, data_set); -+ replica->container, NULL, NULL, true, -+ data_set); - } - - if (replica->remote) { -diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c -index 92b9740..be93f0b 100644 ---- a/lib/pacemaker/pcmk_sched_constraints.c -+++ b/lib/pacemaker/pcmk_sched_constraints.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -11,6 +11,7 @@ - - #include - #include -+#include - #include - #include - -@@ -1346,7 +1347,7 @@ void - pcmk__new_colocation(const char *id, const char *node_attr, int score, - pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - const char *state_lh, const char *state_rh, -- pe_working_set_t *data_set) -+ bool influence, pe_working_set_t *data_set) - { - pcmk__colocation_t *new_con = NULL; - -@@ -1376,6 +1377,7 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - new_con->role_lh = text2role(state_lh); - new_con->role_rh = text2role(state_rh); - new_con->node_attribute = node_attr; -+ new_con->influence = influence; - - if (node_attr == NULL) { - node_attr = CRM_ATTR_UNAME; -@@ -2279,8 +2281,38 @@ unpack_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) - return TRUE; - } - -+/*! 
-+ * \internal -+ * \brief Return the boolean influence corresponding to configuration -+ * -+ * \param[in] coloc_id Colocation XML ID (for error logging) -+ * \param[in] rsc Resource involved in constraint (for default) -+ * \param[in] influence_s String value of influence option -+ * -+ * \return true if string evaluates true, false if string evaluates false, -+ * or value of resource's critical option if string is NULL or invalid -+ */ -+static bool -+unpack_influence(const char *coloc_id, const pe_resource_t *rsc, -+ const char *influence_s) -+{ -+ if (influence_s != NULL) { -+ int influence_i = 0; -+ -+ if (crm_str_to_boolean(influence_s, &influence_i) < 0) { -+ pcmk__config_err("Constraint '%s' has invalid value for " -+ XML_COLOC_ATTR_INFLUENCE " (using default)", -+ coloc_id); -+ } else { -+ return (influence_i == TRUE); -+ } -+ } -+ return pcmk_is_set(rsc->flags, pe_rsc_critical); -+} -+ - static gboolean --unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) -+unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, -+ const char *influence_s, pe_working_set_t *data_set) - { - xmlNode *xml_rsc = NULL; - pe_resource_t *with = NULL; -@@ -2314,7 +2346,10 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) - if (with != NULL) { - pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id); - pcmk__new_colocation(set_id, NULL, local_score, resource, -- with, role, role, data_set); -+ with, role, role, -+ unpack_influence(coloc_id, resource, -+ influence_s), -+ data_set); - } - - with = resource; -@@ -2330,7 +2365,10 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) - if (last != NULL) { - pe_rsc_trace(resource, "Colocating %s with %s", last->id, resource->id); - pcmk__new_colocation(set_id, NULL, local_score, last, -- resource, role, role, data_set); -+ resource, role, role, -+ unpack_influence(coloc_id, last, -+ influence_s), -+ data_set); - } - - last = resource; -@@ -2348,8 +2386,10 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) - - if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) { - xmlNode *xml_rsc_with = NULL; -+ bool influence = true; - - EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); -+ influence = unpack_influence(coloc_id, resource, influence_s); - - for (xml_rsc_with = pcmk__xe_first_child(set); - xml_rsc_with != NULL; -@@ -2364,7 +2404,7 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) - with->id); - pcmk__new_colocation(set_id, NULL, local_score, - resource, with, role, role, -- data_set); -+ influence, data_set); - } - } - } -@@ -2376,7 +2416,7 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) - - static gboolean - colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, -- pe_working_set_t * data_set) -+ const char *influence_s, pe_working_set_t *data_set) - { - xmlNode *xml_rsc = NULL; - pe_resource_t *rsc_1 = NULL; -@@ -2416,16 +2456,19 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - - if (rsc_1 != NULL && rsc_2 != NULL) { - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2, -+ unpack_influence(id, rsc_1, influence_s), - data_set); - - } else if (rsc_1 != NULL) { -+ bool influence = unpack_influence(id, rsc_1, influence_s); -+ - for (xml_rsc = pcmk__xe_first_child(set2); xml_rsc != NULL; - xml_rsc = pcmk__xe_next(xml_rsc)) { - - if (pcmk__str_eq((const char 
*)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) { - EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, -- role_2, data_set); -+ role_2, influence, data_set); - } - } - -@@ -2436,7 +2479,9 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) { - EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, -- role_2, data_set); -+ role_2, -+ unpack_influence(id, rsc_1, influence_s), -+ data_set); - } - } - -@@ -2446,8 +2491,10 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - - if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) { - xmlNode *xml_rsc_2 = NULL; -+ bool influence = true; - - EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); -+ influence = unpack_influence(id, rsc_1, influence_s); - - for (xml_rsc_2 = pcmk__xe_first_child(set2); - xml_rsc_2 != NULL; -@@ -2456,7 +2503,8 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - if (pcmk__str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) { - EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); - pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, -- role_1, role_2, data_set); -+ role_1, role_2, influence, -+ data_set); - } - } - } -@@ -2467,13 +2515,12 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - } - - static void --unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) -+unpack_simple_colocation(xmlNode *xml_obj, const char *id, -+ const char *influence_s, pe_working_set_t *data_set) - { - int score_i = 0; - -- const char *id = crm_element_value(xml_obj, XML_ATTR_ID); - const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); -- - const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); - const char *id_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET); - const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); -@@ -2542,7 +2589,7 @@ unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) - } - - pcmk__new_colocation(id, attr, score_i, rsc_lh, rsc_rh, state_lh, state_rh, -- data_set); -+ unpack_influence(id, rsc_lh, influence_s), data_set); - } - - static gboolean -@@ -2675,6 +2722,8 @@ unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set) - - const char *id = crm_element_value(xml_obj, XML_ATTR_ID); - const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); -+ const char *influence_s = crm_element_value(xml_obj, -+ XML_COLOC_ATTR_INFLUENCE); - - if (score) { - score_i = char2score(score); -@@ -2694,10 +2743,12 @@ unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set) - if (pcmk__str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, pcmk__str_none)) { - any_sets = TRUE; - set = expand_idref(set, data_set->input); -- if (!unpack_colocation_set(set, score_i, data_set)) { -+ if (!unpack_colocation_set(set, score_i, id, influence_s, -+ data_set)) { - return; - } -- if (last && !colocate_rsc_sets(id, last, set, score_i, data_set)) { -+ if ((last != NULL) && !colocate_rsc_sets(id, last, set, score_i, -+ influence_s, data_set)) { - return; - } - last = set; -@@ -2710,7 +2761,7 @@ unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set) - } - - if (!any_sets) { -- unpack_simple_colocation(xml_obj, data_set); -+ 
unpack_simple_colocation(xml_obj, id, influence_s, data_set); - } - } - -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 5334f23..03df5e2 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -9,6 +9,8 @@ - - #include - -+#include -+ - #include - - #include -@@ -193,7 +195,9 @@ group_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) - - } else if (group_data->colocated) { - pcmk__new_colocation("group:internal_colocation", NULL, INFINITY, -- child_rsc, last_rsc, NULL, NULL, data_set); -+ child_rsc, last_rsc, NULL, NULL, -+ pcmk_is_set(child_rsc->flags, pe_rsc_critical), -+ data_set); - } - - if (pcmk_is_set(top->flags, pe_rsc_promotable)) { -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index c302db6..097a033 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -9,6 +9,8 @@ - - #include - -+#include -+ - #include - #include - #include -@@ -1692,7 +1694,7 @@ native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) - score = INFINITY; /* Force them to run on the same host */ - } - pcmk__new_colocation("resource-with-container", NULL, score, rsc, -- rsc->container, NULL, NULL, data_set); -+ rsc->container, NULL, NULL, true, data_set); - } - } - --- -1.8.3.1 - - -From 1bd66fca86c77f1dbe3d8cfc2a7da5111cec223f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 11 Jan 2021 16:11:30 -0600 -Subject: [PATCH 07/12] Revert "Refactor: scheduler: functionize checking - whether colocation applies" - -This reverts commit 2278e01f8d951d939c172ac71e168a11199f84f7. - -The original idea was that additional conditions (i.e. influence) could be -added to the new function, but influence should only affect whether the -dependent resource's location preferences are considered, not whether the -colocation constraint applies (i.e. the dependent should be stopped if it -can't run where the "with" resource is). 
---- - include/pcmki/pcmki_sched_utils.h | 3 --- - lib/pacemaker/pcmk_sched_clone.c | 12 +++++++----- - lib/pacemaker/pcmk_sched_group.c | 12 ++++++------ - lib/pacemaker/pcmk_sched_native.c | 14 ++++++++++---- - lib/pacemaker/pcmk_sched_promotable.c | 25 +++++++++++++++++-------- - lib/pacemaker/pcmk_sched_utils.c | 31 ------------------------------- - 6 files changed, 40 insertions(+), 57 deletions(-) - -diff --git a/include/pcmki/pcmki_sched_utils.h b/include/pcmki/pcmki_sched_utils.h -index bed64da..f6ac263 100644 ---- a/include/pcmki/pcmki_sched_utils.h -+++ b/include/pcmki/pcmki_sched_utils.h -@@ -72,9 +72,6 @@ enum filter_colocation_res { - extern enum filter_colocation_res - filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, - pcmk__colocation_t *constraint, gboolean preview); --bool pcmk__colocation_applies(pe_resource_t *rsc, -- pcmk__colocation_t *colocation, -- bool promoted_only); - - extern int compare_capacity(const pe_node_t * node1, const pe_node_t * node2); - extern void calculate_utilization(GHashTable * current_utilization, -diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c -index 5a06151..9485a98 100644 ---- a/lib/pacemaker/pcmk_sched_clone.c -+++ b/lib/pacemaker/pcmk_sched_clone.c -@@ -658,12 +658,14 @@ pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (GListPtr gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (pcmk__colocation_applies(rsc, constraint, false)) { -- rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, -- rsc->id, rsc->allowed_nodes, constraint->node_attribute, -- constraint->score / (float) INFINITY, -- pe_weights_rollback|pe_weights_positive); -+ if (constraint->score == 0) { -+ continue; - } -+ rsc->allowed_nodes = -+ constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, -+ constraint->node_attribute, -+ (float)constraint->score / INFINITY, -+ (pe_weights_rollback | pe_weights_positive)); - } - - pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes); -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 03df5e2..336a6f9 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -520,13 +520,13 @@ pcmk__group_merge_weights(pe_resource_t *rsc, const char *rhs, - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (pcmk__colocation_applies(rsc, constraint, false)) { -- nodes = pcmk__native_merge_weights(constraint->rsc_lh, rsc->id, -- nodes, -- constraint->node_attribute, -- constraint->score / (float) INFINITY, -- flags); -+ if (constraint->score == 0) { -+ continue; - } -+ nodes = pcmk__native_merge_weights(constraint->rsc_lh, rsc->id, nodes, -+ constraint->node_attribute, -+ constraint->score / (float) INFINITY, -+ flags); - } - - pe__clear_resource_flags(rsc, pe_rsc_merging); -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index 097a033..70f6c2f 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -564,11 +564,17 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (pcmk__colocation_applies(rsc, constraint, false)) { -- 
rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, -- rsc->id, rsc->allowed_nodes, constraint->node_attribute, -- constraint->score / (float) INFINITY, pe_weights_rollback); -+ if (constraint->score == 0) { -+ continue; - } -+ pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)", -+ constraint->id, constraint->rsc_lh->id, -+ constraint->rsc_rh->id); -+ rsc->allowed_nodes = -+ constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, -+ constraint->node_attribute, -+ (float)constraint->score / INFINITY, -+ pe_weights_rollback); - } - - if (rsc->next_role == RSC_ROLE_STOPPED) { -diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c -index a0eeaad..9a5474a 100644 ---- a/lib/pacemaker/pcmk_sched_promotable.c -+++ b/lib/pacemaker/pcmk_sched_promotable.c -@@ -345,14 +345,23 @@ promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set) - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (pcmk__colocation_applies(rsc, constraint, true)) { -- /* (Re-)add location preferences of resource that wishes to be -- * colocated with the promoted instance. -- */ -- rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, -- rsc->id, rsc->allowed_nodes, constraint->node_attribute, -- constraint->score / (float) INFINITY, -- pe_weights_rollback|pe_weights_positive); -+ if (constraint->score == 0) { -+ continue; -+ } -+ -+ /* (re-)adds location preferences of resource that wish to be -+ * colocated with the master instance -+ */ -+ if (constraint->role_rh == RSC_ROLE_MASTER) { -+ pe_rsc_trace(rsc, "LHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id, -+ constraint->score); -+ rsc->allowed_nodes = -+ constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, -+ rsc->allowed_nodes, -+ constraint->node_attribute, -+ (float)constraint->score / INFINITY, -+ (pe_weights_rollback | -+ pe_weights_positive)); - } - } - -diff --git a/lib/pacemaker/pcmk_sched_utils.c b/lib/pacemaker/pcmk_sched_utils.c -index aba417a..eaaf526 100644 ---- a/lib/pacemaker/pcmk_sched_utils.c -+++ b/lib/pacemaker/pcmk_sched_utils.c -@@ -765,34 +765,3 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op, - free(key); - return xml_op; - } -- --/*! -- * \internal -- * \brief Check whether a colocation constraint should apply -- * -- * \param[in] rsc Resource of interest (for logging) -- * \param[in] colocation Colocation constraint to check -- * \param[in] promoted_only If true, constraint applies if right-hand is promoted -- */ --bool --pcmk__colocation_applies(pe_resource_t *rsc, pcmk__colocation_t *colocation, -- bool promoted_only) --{ -- CRM_CHECK((rsc != NULL) && (colocation != NULL), return false); -- -- if (colocation->score == 0) { -- pe_rsc_trace(rsc, "Ignoring colocation constraint %s: 0 score", -- colocation->id); -- return false; -- } -- if (promoted_only && (colocation->role_rh != RSC_ROLE_MASTER)) { -- pe_rsc_trace(rsc, "Ignoring colocation constraint %s: role", -- colocation->id); -- return false; -- } -- pe_rsc_trace(rsc, "Applying colocation constraint %s: %s with %s%s (%d)", -- colocation->id, colocation->rsc_lh->id, -- (promoted_only? 
"promoted " : ""), -- colocation->rsc_rh->id, colocation->score); -- return true; --} --- -1.8.3.1 - - -From 92c83d4da20da7dcaf1063a0768563742aea6618 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 11 Jan 2021 16:38:58 -0600 -Subject: [PATCH 08/12] Refactor: scheduler: don't add constraints with score 0 - -Previously, we parsed constraints with a score of 0 into the pcmk__colocation_t -list, then ignored them wherever they were used. Now, don't add them to begin -with. ---- - lib/pacemaker/pcmk_sched_constraints.c | 18 ++++++++++++++++-- - 1 file changed, 16 insertions(+), 2 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c -index be93f0b..ab6f74e 100644 ---- a/lib/pacemaker/pcmk_sched_constraints.c -+++ b/lib/pacemaker/pcmk_sched_constraints.c -@@ -1351,6 +1351,10 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score, - { - pcmk__colocation_t *new_con = NULL; - -+ if (score == 0) { -+ crm_trace("Ignoring colocation '%s' because score is 0", id); -+ return; -+ } - if ((rsc_lh == NULL) || (rsc_rh == NULL)) { - pcmk__config_err("Ignoring colocation '%s' because resource " - "does not exist", id); -@@ -2328,6 +2332,11 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - if (score_s) { - local_score = char2score(score_s); - } -+ if (local_score == 0) { -+ crm_trace("Ignoring colocation '%s' for set '%s' because score is 0", -+ coloc_id, set_id); -+ return TRUE; -+ } - - if(ordering == NULL) { - ordering = "group"; -@@ -2336,7 +2345,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - if (sequential != NULL && crm_is_true(sequential) == FALSE) { - return TRUE; - -- } else if ((local_score >= 0) -+ } else if ((local_score > 0) - && pcmk__str_eq(ordering, "group", pcmk__str_casei)) { - for (xml_rsc = pcmk__xe_first_child(set); xml_rsc != NULL; - xml_rsc = pcmk__xe_next(xml_rsc)) { -@@ -2355,7 +2364,7 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id, - with = resource; - } - } -- } else if (local_score >= 0) { -+ } else if (local_score > 0) { - pe_resource_t *last = NULL; - for (xml_rsc = pcmk__xe_first_child(set); xml_rsc != NULL; - xml_rsc = pcmk__xe_next(xml_rsc)) { -@@ -2428,6 +2437,11 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, - const char *sequential_1 = crm_element_value(set1, "sequential"); - const char *sequential_2 = crm_element_value(set2, "sequential"); - -+ if (score == 0) { -+ crm_trace("Ignoring colocation '%s' between sets because score is 0", -+ id); -+ return TRUE; -+ } - if (sequential_1 == NULL || crm_is_true(sequential_1)) { - /* get the first one */ - for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL; --- -1.8.3.1 - - -From cd04564fea1113d7f89622cd434f0f567998217e Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 11 Jan 2021 16:43:52 -0600 -Subject: [PATCH 09/12] Refactor: scheduler: don't check colocation constraints - for 0 score - -... 
since they aren't added to begin with anymore ---- - lib/pacemaker/pcmk_sched_bundle.c | 3 --- - lib/pacemaker/pcmk_sched_clone.c | 23 +---------------------- - lib/pacemaker/pcmk_sched_group.c | 9 --------- - lib/pacemaker/pcmk_sched_native.c | 26 +------------------------- - lib/pacemaker/pcmk_sched_promotable.c | 16 +--------------- - 5 files changed, 3 insertions(+), 74 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c -index bc7009d..955a7d3 100644 ---- a/lib/pacemaker/pcmk_sched_bundle.c -+++ b/lib/pacemaker/pcmk_sched_bundle.c -@@ -483,9 +483,6 @@ pcmk__bundle_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc, - CRM_CHECK(rsc != NULL, pe_err("rsc was NULL for %s", constraint->id); return); - CRM_ASSERT(rsc_lh->variant == pe_native); - -- if (constraint->score == 0) { -- return; -- } - if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) { - pe_rsc_trace(rsc, "%s is still provisional", rsc->id); - return; -diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c -index 9485a98..3cfc06c 100644 ---- a/lib/pacemaker/pcmk_sched_clone.c -+++ b/lib/pacemaker/pcmk_sched_clone.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -239,9 +239,6 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) - for (gIter = resource1->parent->rsc_cons; gIter; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } - crm_trace("Applying %s to %s", constraint->id, resource1->id); - - hash1 = pcmk__native_merge_weights(constraint->rsc_rh, -@@ -254,9 +251,6 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) - for (gIter = resource1->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } - crm_trace("Applying %s to %s", constraint->id, resource1->id); - - hash1 = pcmk__native_merge_weights(constraint->rsc_lh, -@@ -501,9 +495,6 @@ append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean al - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; - -- if (cons->score == 0) { -- continue; -- } - if (all || cons->score < 0 || cons->score == INFINITY) { - child->rsc_cons = g_list_prepend(child->rsc_cons, cons); - } -@@ -513,9 +504,6 @@ append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean al - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; - -- if (cons->score == 0) { -- continue; -- } - if (all || cons->score < 0) { - child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons); - } -@@ -647,9 +635,6 @@ pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (GListPtr gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } - pe_rsc_trace(rsc, "%s: Allocating %s first", - rsc->id, constraint->rsc_rh->id); - constraint->rsc_rh->cmds->allocate(constraint->rsc_rh, prefer, data_set); -@@ -658,9 +643,6 @@ pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (GListPtr gIter = rsc->rsc_cons_lhs; gIter != 
NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } - rsc->allowed_nodes = - constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, - constraint->node_attribute, -@@ -1079,9 +1061,6 @@ clone_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - CRM_CHECK(rsc_rh != NULL, pe_err("rsc_rh was NULL for %s", constraint->id); return); - CRM_CHECK(rsc_lh->variant == pe_native, return); - -- if (constraint->score == 0) { -- return; -- } - pe_rsc_trace(rsc_rh, "Processing constraint %s: %s -> %s %d", - constraint->id, rsc_lh->id, rsc_rh->id, constraint->score); - -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 336a6f9..439ed91 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -300,9 +300,6 @@ group_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - pe_err("rsc_rh was NULL for %s", constraint->id); - return; - } -- if (constraint->score == 0) { -- return; -- } - - gIter = rsc_lh->children; - pe_rsc_trace(rsc_lh, "Processing constraints from %s", rsc_lh->id); -@@ -341,9 +338,6 @@ group_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - get_group_variant_data(group_data, rsc_rh); - CRM_CHECK(rsc_lh->variant == pe_native, return); - -- if (constraint->score == 0) { -- return; -- } - pe_rsc_trace(rsc_rh, "Processing RH %s of constraint %s (LH is %s)", - rsc_rh->id, constraint->id, rsc_lh->id); - -@@ -520,9 +514,6 @@ pcmk__group_merge_weights(pe_resource_t *rsc, const char *rhs, - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } - nodes = pcmk__native_merge_weights(constraint->rsc_lh, rsc->id, nodes, - constraint->node_attribute, - constraint->score / (float) INFINITY, -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index 70f6c2f..c7bf4fe 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -438,10 +438,6 @@ pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs, - pe_resource_t *other = NULL; - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } -- - if (pcmk_is_set(flags, pe_weights_forward)) { - other = constraint->rsc_rh; - } else { -@@ -533,10 +529,6 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - GHashTable *archive = NULL; - pe_resource_t *rsc_rh = constraint->rsc_rh; - -- if (constraint->score == 0) { -- continue; -- } -- - if (constraint->role_lh >= RSC_ROLE_MASTER - || (constraint->score < 0 && constraint->score > -INFINITY)) { - archive = pcmk__copy_node_table(rsc->allowed_nodes); -@@ -564,9 +556,6 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } - pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)", - constraint->id, constraint->rsc_lh->id, - constraint->rsc_rh->id); -@@ -1726,9 +1715,6 @@ native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - return; - } - -- if (constraint->score == 0) { -- return; -- } - pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id, - rsc_rh->id); - -@@ 
-1739,10 +1725,6 @@ enum filter_colocation_res - filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, - pcmk__colocation_t *constraint, gboolean preview) - { -- if (constraint->score == 0) { -- return influence_nothing; -- } -- - /* rh side must be allocated before we can process constraint */ - if (!preview && pcmk_is_set(rsc_rh->flags, pe_rsc_provisional)) { - return influence_nothing; -@@ -1829,9 +1811,6 @@ influence_priority(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - const char *attribute = CRM_ATTR_ID; - int score_multiplier = 1; - -- if (constraint->score == 0) { -- return; -- } - if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) { - return; - } -@@ -1872,9 +1851,6 @@ colocation_match(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - GHashTableIter iter; - pe_node_t *node = NULL; - -- if (constraint->score == 0) { -- return; -- } - if (constraint->node_attribute != NULL) { - attribute = constraint->node_attribute; - } -@@ -1941,7 +1917,7 @@ native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - CRM_ASSERT(rsc_rh); - filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE); - pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)", -- ((constraint->score >= 0)? "Colocating" : "Anti-colocating"), -+ ((constraint->score > 0)? "Colocating" : "Anti-colocating"), - rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results); - - switch (filter_results) { -diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c -index 9a5474a..0b4f826 100644 ---- a/lib/pacemaker/pcmk_sched_promotable.c -+++ b/lib/pacemaker/pcmk_sched_promotable.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -321,10 +321,6 @@ promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set) - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } -- - /* (re-)adds location preferences of resources that the - * master instance should/must be colocated with - */ -@@ -345,10 +341,6 @@ promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set) - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -- if (constraint->score == 0) { -- continue; -- } -- - /* (re-)adds location preferences of resource that wish to be - * colocated with the master instance - */ -@@ -740,9 +732,6 @@ pcmk__set_instance_roles(pe_resource_t *rsc, pe_working_set_t *data_set) - for (gIter2 = child_rsc->rsc_cons; gIter2 != NULL; gIter2 = gIter2->next) { - pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter2->data; - -- if (cons->score == 0) { -- continue; -- } - child_rsc->cmds->rsc_colocation_lh(child_rsc, cons->rsc_rh, cons, - data_set); - } -@@ -986,9 +975,6 @@ promotable_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, - { - GListPtr gIter = NULL; - -- if (constraint->score == 0) { -- return; -- } - if (pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) { - GListPtr rhs = NULL; - --- -1.8.3.1 - - -From 30d3c16a7fd142a84b342ad1d0bb6f8618d677c5 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 11 Jan 2021 16:53:55 -0600 -Subject: [PATCH 10/12] Refactor: scheduler: convenience function for checking - colocation influence - ---- - include/pcmki/pcmki_scheduler.h | 26 ++++++++++++++++++++++++++ - 1 file changed, 26 insertions(+) - -diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h -index c604a32..f2f3d5b 100644 ---- a/include/pcmki/pcmki_scheduler.h -+++ b/include/pcmki/pcmki_scheduler.h -@@ -107,4 +107,30 @@ extern gboolean show_scores; - extern gboolean show_utilization; - extern const char *transition_idle_timeout; - -+/*! -+ * \internal -+ * \brief Check whether colocation's left-hand preferences should be considered -+ * -+ * \param[in] colocation Colocation constraint -+ * \param[in] rsc Right-hand instance (normally this will be -+ * colocation->rsc_rh, which NULL will be treated as, -+ * but for clones or bundles with multiple instances -+ * this can be a particular instance) -+ * -+ * \return true if colocation influence should be effective, otherwise false -+ */ -+static inline bool -+pcmk__colocation_has_influence(const pcmk__colocation_t *colocation, -+ const pe_resource_t *rsc) -+{ -+ if (rsc == NULL) { -+ rsc = colocation->rsc_rh; -+ } -+ -+ /* The left hand of a colocation influences the right hand's location -+ * if the influence option is true, or the right hand is not yet active. -+ */ -+ return colocation->influence || (rsc->running_on == NULL); -+} -+ - #endif --- -1.8.3.1 - - -From 7ae21be9c9a4c2aea68f74c4c8f80f7ba8053635 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 11 Jan 2021 17:02:30 -0600 -Subject: [PATCH 11/12] Feature: scheduler: implement new critical and - influence options - -The feature set is bumped because critical is a resource meta-attribute and -thus not dependent on schema version, and we don't want it to flip back and -forth between being respected or not. - -critical just sets a resource-wide default for influence, so only influence is -actually used in scheduling. - -It's a little tricky deciding when to consider influence. 
The basic idea is -that when a colocation constraint "A with B" has no influence, A's location -preferences should not influence B's location. But the colocation still matters -for things like where A is allowed to run. Thus we only consider it when -cycling through a resource's ->rsc_cons_lhs to add the dependents' preferences. ---- - include/crm/crm.h | 4 ++-- - lib/pacemaker/pcmk_sched_clone.c | 12 ++++++++++++ - lib/pacemaker/pcmk_sched_native.c | 5 +++++ - lib/pacemaker/pcmk_sched_promotable.c | 4 ++++ - 4 files changed, 23 insertions(+), 2 deletions(-) - -diff --git a/include/crm/crm.h b/include/crm/crm.h -index 4bbf46a..3f22c4b 100644 ---- a/include/crm/crm.h -+++ b/include/crm/crm.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -51,7 +51,7 @@ extern "C" { - * >=3.0.13: Fail counts include operation name and interval - * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED - */ --# define CRM_FEATURE_SET "3.6.4" -+# define CRM_FEATURE_SET "3.7.0" - - # define EOS '\0' - # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) -diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c -index 3cfc06c..dd6ff48 100644 ---- a/lib/pacemaker/pcmk_sched_clone.c -+++ b/lib/pacemaker/pcmk_sched_clone.c -@@ -251,6 +251,9 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) - for (gIter = resource1->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -+ if (!pcmk__colocation_has_influence(constraint, resource1)) { -+ continue; -+ } - crm_trace("Applying %s to %s", constraint->id, resource1->id); - - hash1 = pcmk__native_merge_weights(constraint->rsc_lh, -@@ -277,6 +280,9 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) - for (gIter = resource2->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -+ if (!pcmk__colocation_has_influence(constraint, resource2)) { -+ continue; -+ } - crm_trace("Applying %s to %s", constraint->id, resource2->id); - - hash2 = pcmk__native_merge_weights(constraint->rsc_lh, -@@ -504,6 +510,9 @@ append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean al - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; - -+ if (!pcmk__colocation_has_influence(cons, child)) { -+ continue; -+ } - if (all || cons->score < 0) { - child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons); - } -@@ -643,6 +652,9 @@ pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (GListPtr gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -+ if (!pcmk__colocation_has_influence(constraint, NULL)) { -+ continue; -+ } - rsc->allowed_nodes = - constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, - constraint->node_attribute, -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index c7bf4fe..0e50eda 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -440,6 +440,8 @@ pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs, - - if (pcmk_is_set(flags, pe_weights_forward)) { - other = constraint->rsc_rh; -+ } 
else if (!pcmk__colocation_has_influence(constraint, NULL)) { -+ continue; - } else { - other = constraint->rsc_lh; - } -@@ -556,6 +558,9 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -+ if (!pcmk__colocation_has_influence(constraint, NULL)) { -+ continue; -+ } - pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)", - constraint->id, constraint->rsc_lh->id, - constraint->rsc_rh->id); -diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c -index 0b4f826..40d07e9 100644 ---- a/lib/pacemaker/pcmk_sched_promotable.c -+++ b/lib/pacemaker/pcmk_sched_promotable.c -@@ -341,6 +341,10 @@ promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set) - for (; gIter != NULL; gIter = gIter->next) { - pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; - -+ if (!pcmk__colocation_has_influence(constraint, NULL)) { -+ continue; -+ } -+ - /* (re-)adds location preferences of resource that wish to be - * colocated with the master instance - */ --- -1.8.3.1 - - -From 3bc2288f65a72d83f15f008d01054f9cb8663865 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 12 Jan 2021 14:24:21 -0600 -Subject: [PATCH 12/12] Test: cts-scheduler: add regression test for colocation - influence - ---- - cts/cts-scheduler.in | 3 +- - cts/scheduler/colocation-influence.dot | 92 ++ - cts/scheduler/colocation-influence.exp | 455 ++++++++++ - cts/scheduler/colocation-influence.scores | 673 ++++++++++++++ - cts/scheduler/colocation-influence.summary | 168 ++++ - cts/scheduler/colocation-influence.xml | 1298 ++++++++++++++++++++++++++++ - 6 files changed, 2688 insertions(+), 1 deletion(-) - create mode 100644 cts/scheduler/colocation-influence.dot - create mode 100644 cts/scheduler/colocation-influence.exp - create mode 100644 cts/scheduler/colocation-influence.scores - create mode 100644 cts/scheduler/colocation-influence.summary - create mode 100644 cts/scheduler/colocation-influence.xml - -diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in -index 919d3ed..027ddf9 100644 ---- a/cts/cts-scheduler.in -+++ b/cts/cts-scheduler.in -@@ -231,6 +231,7 @@ TESTS = [ - [ "complex_enforce_colo", "Always enforce B with A INFINITY. 
(make sure heat-engine stops)" ], - [ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ], - [ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ], -+ [ "colocation-influence", "Respect colocation influence" ], - ], - [ - [ "rsc-sets-seq-true", "Resource Sets - sequential=false" ], -diff --git a/cts/scheduler/colocation-influence.dot b/cts/scheduler/colocation-influence.dot -new file mode 100644 -index 0000000..9573ab3 ---- /dev/null -+++ b/cts/scheduler/colocation-influence.dot -@@ -0,0 +1,92 @@ -+ digraph "g" { -+"bundle11-1_monitor_0 rhel7-1" -> "bundle11-1_start_0 rhel7-5" [ style = dashed] -+"bundle11-1_monitor_0 rhel7-1" [ style=dashed color="red" fontcolor="black"] -+"bundle11-1_monitor_0 rhel7-2" -> "bundle11-1_start_0 rhel7-5" [ style = dashed] -+"bundle11-1_monitor_0 rhel7-2" [ style=dashed color="red" fontcolor="black"] -+"bundle11-1_monitor_0 rhel7-3" -> "bundle11-1_start_0 rhel7-5" [ style = dashed] -+"bundle11-1_monitor_0 rhel7-3" [ style=dashed color="red" fontcolor="black"] -+"bundle11-1_monitor_0 rhel7-4" -> "bundle11-1_start_0 rhel7-5" [ style = dashed] -+"bundle11-1_monitor_0 rhel7-4" [ style=dashed color="red" fontcolor="black"] -+"bundle11-1_monitor_0 rhel7-5" -> "bundle11-1_start_0 rhel7-5" [ style = dashed] -+"bundle11-1_monitor_0 rhel7-5" [ style=dashed color="red" fontcolor="black"] -+"bundle11-1_monitor_30000 rhel7-5" [ style=dashed color="red" fontcolor="black"] -+"bundle11-1_start_0 rhel7-5" -> "bundle11-1_monitor_30000 rhel7-5" [ style = dashed] -+"bundle11-1_start_0 rhel7-5" -> "bundle11a:1_monitor_15000 bundle11-1" [ style = dashed] -+"bundle11-1_start_0 rhel7-5" -> "bundle11a:1_start_0 bundle11-1" [ style = dashed] -+"bundle11-1_start_0 rhel7-5" [ style=dashed color="red" fontcolor="black"] -+"bundle11-clone_running_0" -> "bundle11_running_0" [ style = bold] -+"bundle11-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"bundle11-clone_start_0" -> "bundle11-clone_running_0" [ style = bold] -+"bundle11-clone_start_0" -> "bundle11a:1_start_0 bundle11-1" [ style = dashed] -+"bundle11-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"bundle11_running_0" [ style=bold color="green" fontcolor="orange"] -+"bundle11_start_0" -> "bundle11-clone_start_0" [ style = bold] -+"bundle11_start_0" [ style=bold color="green" fontcolor="orange"] -+"bundle11a:1_monitor_15000 bundle11-1" [ style=dashed color="red" fontcolor="black"] -+"bundle11a:1_start_0 bundle11-1" -> "bundle11-clone_running_0" [ style = dashed] -+"bundle11a:1_start_0 bundle11-1" -> "bundle11a:1_monitor_15000 bundle11-1" [ style = dashed] -+"bundle11a:1_start_0 bundle11-1" [ style=dashed color="red" fontcolor="black"] -+"group6a_stop_0" -> "group6a_stopped_0" [ style = bold] -+"group6a_stop_0" -> "rsc6a1_stop_0 rhel7-2" [ style = bold] -+"group6a_stop_0" -> "rsc6a2_stop_0 rhel7-2" [ style = bold] -+"group6a_stop_0" [ style=bold color="green" fontcolor="orange"] -+"group6a_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"group7a_stop_0" -> "group7a_stopped_0" [ style = bold] -+"group7a_stop_0" -> "rsc7a2_stop_0 rhel7-3" [ style = bold] -+"group7a_stop_0" [ style=bold color="green" fontcolor="orange"] -+"group7a_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"rsc10a_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc10a_start_0 rhel7-3" -> "rsc10a_monitor_10000 rhel7-3" [ style = bold] -+"rsc10a_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc10a_stop_0 rhel7-2" -> 
"rsc10a_start_0 rhel7-3" [ style = bold] -+"rsc10a_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc12b_stop_0 rhel7-1" [ style=bold color="green" fontcolor="black"] -+"rsc13a_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc14a-clone_demote_0" -> "rsc14a-clone_demoted_0" [ style = bold] -+"rsc14a-clone_demote_0" -> "rsc14a_demote_0 rhel7-4" [ style = bold] -+"rsc14a-clone_demote_0" [ style=bold color="green" fontcolor="orange"] -+"rsc14a-clone_demoted_0" -> "rsc14a-clone_stop_0" [ style = bold] -+"rsc14a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] -+"rsc14a-clone_stop_0" -> "rsc14a-clone_stopped_0" [ style = bold] -+"rsc14a-clone_stop_0" -> "rsc14a_stop_0 rhel7-4" [ style = bold] -+"rsc14a-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"rsc14a-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"rsc14a_demote_0 rhel7-4" -> "rsc14a-clone_demoted_0" [ style = bold] -+"rsc14a_demote_0 rhel7-4" -> "rsc14a_stop_0 rhel7-4" [ style = bold] -+"rsc14a_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"rsc14a_stop_0 rhel7-4" -> "rsc14a-clone_stopped_0" [ style = bold] -+"rsc14a_stop_0 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"rsc1a_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc1a_start_0 rhel7-3" -> "rsc1a_monitor_10000 rhel7-3" [ style = bold] -+"rsc1a_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc1a_stop_0 rhel7-2" -> "rsc1a_start_0 rhel7-3" [ style = bold] -+"rsc1a_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc1b_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc1b_start_0 rhel7-3" -> "rsc1b_monitor_10000 rhel7-3" [ style = bold] -+"rsc1b_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc1b_stop_0 rhel7-2" -> "rsc1b_start_0 rhel7-3" [ style = bold] -+"rsc1b_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc2a_stop_0 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"rsc3a_monitor_10000 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc3a_start_0 rhel7-2" -> "rsc3a_monitor_10000 rhel7-2" [ style = bold] -+"rsc3a_start_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc3b_monitor_10000 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc3b_start_0 rhel7-2" -> "rsc3b_monitor_10000 rhel7-2" [ style = bold] -+"rsc3b_start_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc4a_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc5a_stop_0 rhel7-1" [ style=bold color="green" fontcolor="black"] -+"rsc6a1_stop_0 rhel7-2" -> "group6a_stopped_0" [ style = bold] -+"rsc6a1_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc6a2_stop_0 rhel7-2" -> "group6a_stopped_0" [ style = bold] -+"rsc6a2_stop_0 rhel7-2" -> "rsc6a1_stop_0 rhel7-2" [ style = bold] -+"rsc6a2_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -+"rsc7a2_stop_0 rhel7-3" -> "group7a_stopped_0" [ style = bold] -+"rsc7a2_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -+"rsc8a-clone_stop_0" -> "rsc8a-clone_stopped_0" [ style = bold] -+"rsc8a-clone_stop_0" -> "rsc8a_stop_0 rhel7-4" [ style = bold] -+"rsc8a-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"rsc8a-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"rsc8a_stop_0 rhel7-4" -> "rsc8a-clone_stopped_0" [ style = bold] -+"rsc8a_stop_0 rhel7-4" [ style=bold color="green" fontcolor="black"] -+"rsc9c_stop_0 
rhel7-4" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/colocation-influence.exp b/cts/scheduler/colocation-influence.exp -new file mode 100644 -index 0000000..410c46f ---- /dev/null -+++ b/cts/scheduler/colocation-influence.exp -@@ -0,0 +1,455 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/colocation-influence.scores b/cts/scheduler/colocation-influence.scores -new file mode 100644 -index 0000000..1437263 ---- /dev/null -+++ b/cts/scheduler/colocation-influence.scores -@@ -0,0 +1,673 @@ -+Allocation scores: -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-2: 10 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-2: 10010 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-5: 0 -+pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-5: 0 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle10-1 allocation 
score on rhel7-3: 10 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-3: 10010 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-5: 0 -+pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-5: 0 -+pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-0: -INFINITY -+pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-0: 0 -+pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-1: -INFINITY -+pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-1: 0 -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-1: -INFINITY -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-2: -INFINITY -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-3: -INFINITY -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-4: -INFINITY -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-5: 0 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-2: 10 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-2: 21 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-2: -INFINITY -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-3: 10 -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-3: 21 -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-4: 
-INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-1: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-2: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle11 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle11 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle11 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle11 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle11 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-1: 10 -+pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-5: 0 -+pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-5: 0 -+pcmk__bundle_allocate: bundle11-clone allocation score on bundle11-0: -INFINITY -+pcmk__bundle_allocate: bundle11-clone allocation score on bundle11-1: -INFINITY -+pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-5: 0 -+pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-1: 10 -+pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-1: 10 -+pcmk__bundle_allocate: 
bundle11-ip-192.168.122.134 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-1: 0 -+pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-2: 0 -+pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-3: 0 -+pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-4: 0 -+pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-5: -INFINITY -+pcmk__bundle_allocate: bundle11a:0 allocation score on bundle11-0: 510 -+pcmk__bundle_allocate: bundle11a:1 allocation score on bundle11-1: 500 -+pcmk__bundle_allocate: httpd:0 allocation score on bundle10-0: 510 -+pcmk__bundle_allocate: httpd:0 allocation score on bundle10-0: INFINITY -+pcmk__bundle_allocate: httpd:1 allocation score on bundle10-1: 510 -+pcmk__bundle_allocate: httpd:1 allocation score on bundle10-1: INFINITY -+pcmk__clone_allocate: bundle10-clone allocation score on bundle10-0: 0 -+pcmk__clone_allocate: bundle10-clone allocation score on bundle10-1: 0 -+pcmk__clone_allocate: bundle10-clone allocation score on rhel7-1: -INFINITY -+pcmk__clone_allocate: bundle10-clone allocation score on rhel7-2: -INFINITY -+pcmk__clone_allocate: bundle10-clone allocation score on rhel7-3: -INFINITY -+pcmk__clone_allocate: bundle10-clone allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: bundle10-clone allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: bundle11-clone allocation score on bundle11-0: 0 -+pcmk__clone_allocate: bundle11-clone allocation score on bundle11-1: 0 -+pcmk__clone_allocate: bundle11-clone allocation score on rhel7-1: -INFINITY -+pcmk__clone_allocate: bundle11-clone allocation score on rhel7-2: -INFINITY -+pcmk__clone_allocate: bundle11-clone allocation score on rhel7-3: -INFINITY -+pcmk__clone_allocate: bundle11-clone allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: bundle11-clone allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: bundle11a:0 allocation score on bundle11-0: INFINITY -+pcmk__clone_allocate: bundle11a:1 allocation score on bundle11-1: INFINITY -+pcmk__clone_allocate: httpd:0 allocation score on bundle10-0: INFINITY -+pcmk__clone_allocate: httpd:1 allocation score on bundle10-1: INFINITY -+pcmk__clone_allocate: rsc13b-clone allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc13b-clone allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc13b-clone allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc13b-clone allocation score on rhel7-4: 0 -+pcmk__clone_allocate: rsc13b-clone allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc13b:0 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc13b:0 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc13b:0 allocation score on rhel7-3: 20 -+pcmk__clone_allocate: rsc13b:0 allocation score on rhel7-4: 0 -+pcmk__clone_allocate: rsc13b:0 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc13b:1 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc13b:1 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc13b:1 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc13b:1 allocation score on rhel7-4: 15 -+pcmk__clone_allocate: rsc13b:1 allocation score on rhel7-5: 
-INFINITY -+pcmk__clone_allocate: rsc13b:2 allocation score on rhel7-1: 15 -+pcmk__clone_allocate: rsc13b:2 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc13b:2 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc13b:2 allocation score on rhel7-4: 0 -+pcmk__clone_allocate: rsc13b:2 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc13b:3 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc13b:3 allocation score on rhel7-2: 15 -+pcmk__clone_allocate: rsc13b:3 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc13b:3 allocation score on rhel7-4: 0 -+pcmk__clone_allocate: rsc13b:3 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc13b:4 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc13b:4 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc13b:4 allocation score on rhel7-3: 10 -+pcmk__clone_allocate: rsc13b:4 allocation score on rhel7-4: 5 -+pcmk__clone_allocate: rsc13b:4 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc13b:5 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc13b:5 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc13b:5 allocation score on rhel7-3: 10 -+pcmk__clone_allocate: rsc13b:5 allocation score on rhel7-4: 5 -+pcmk__clone_allocate: rsc13b:5 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc13b:6 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc13b:6 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc13b:6 allocation score on rhel7-3: 10 -+pcmk__clone_allocate: rsc13b:6 allocation score on rhel7-4: 5 -+pcmk__clone_allocate: rsc13b:6 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc13b:7 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc13b:7 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc13b:7 allocation score on rhel7-3: 10 -+pcmk__clone_allocate: rsc13b:7 allocation score on rhel7-4: 5 -+pcmk__clone_allocate: rsc13b:7 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc13b:8 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc13b:8 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc13b:8 allocation score on rhel7-3: 10 -+pcmk__clone_allocate: rsc13b:8 allocation score on rhel7-4: 5 -+pcmk__clone_allocate: rsc13b:8 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a-clone allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc14a-clone allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc14a-clone allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc14a-clone allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a-clone allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:0 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc14a:0 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc14a:0 allocation score on rhel7-3: 15 -+pcmk__clone_allocate: rsc14a:0 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:0 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:1 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc14a:1 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc14a:1 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc14a:1 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:1 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:2 allocation score on rhel7-1: 15 -+pcmk__clone_allocate: rsc14a:2 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc14a:2 allocation score on rhel7-3: 0 
-+pcmk__clone_allocate: rsc14a:2 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:2 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:3 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc14a:3 allocation score on rhel7-2: 15 -+pcmk__clone_allocate: rsc14a:3 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc14a:3 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:3 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:4 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc14a:4 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc14a:4 allocation score on rhel7-3: 5 -+pcmk__clone_allocate: rsc14a:4 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:4 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:5 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc14a:5 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc14a:5 allocation score on rhel7-3: 5 -+pcmk__clone_allocate: rsc14a:5 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:5 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:6 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc14a:6 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc14a:6 allocation score on rhel7-3: 5 -+pcmk__clone_allocate: rsc14a:6 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:6 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:7 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc14a:7 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc14a:7 allocation score on rhel7-3: 5 -+pcmk__clone_allocate: rsc14a:7 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:7 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc14a:8 allocation score on rhel7-1: 5 -+pcmk__clone_allocate: rsc14a:8 allocation score on rhel7-2: 5 -+pcmk__clone_allocate: rsc14a:8 allocation score on rhel7-3: 5 -+pcmk__clone_allocate: rsc14a:8 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc14a:8 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc8a-clone allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc8a-clone allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc8a-clone allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc8a-clone allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc8a-clone allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc8a:0 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc8a:0 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc8a:0 allocation score on rhel7-3: 10 -+pcmk__clone_allocate: rsc8a:0 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc8a:0 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc8a:1 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc8a:1 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc8a:1 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc8a:1 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc8a:1 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc8a:2 allocation score on rhel7-1: 10 -+pcmk__clone_allocate: rsc8a:2 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc8a:2 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc8a:2 allocation score on rhel7-4: -INFINITY -+pcmk__clone_allocate: rsc8a:2 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc8b-clone allocation 
score on rhel7-1: 0 -+pcmk__clone_allocate: rsc8b-clone allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc8b-clone allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc8b-clone allocation score on rhel7-4: 0 -+pcmk__clone_allocate: rsc8b-clone allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc8b:0 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc8b:0 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc8b:0 allocation score on rhel7-3: 10 -+pcmk__clone_allocate: rsc8b:0 allocation score on rhel7-4: 0 -+pcmk__clone_allocate: rsc8b:0 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc8b:1 allocation score on rhel7-1: 0 -+pcmk__clone_allocate: rsc8b:1 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc8b:1 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc8b:1 allocation score on rhel7-4: 10 -+pcmk__clone_allocate: rsc8b:1 allocation score on rhel7-5: -INFINITY -+pcmk__clone_allocate: rsc8b:2 allocation score on rhel7-1: 10 -+pcmk__clone_allocate: rsc8b:2 allocation score on rhel7-2: 0 -+pcmk__clone_allocate: rsc8b:2 allocation score on rhel7-3: 0 -+pcmk__clone_allocate: rsc8b:2 allocation score on rhel7-4: 0 -+pcmk__clone_allocate: rsc8b:2 allocation score on rhel7-5: -INFINITY -+pcmk__group_allocate: group5a allocation score on rhel7-1: 0 -+pcmk__group_allocate: group5a allocation score on rhel7-2: 0 -+pcmk__group_allocate: group5a allocation score on rhel7-3: 0 -+pcmk__group_allocate: group5a allocation score on rhel7-4: 0 -+pcmk__group_allocate: group5a allocation score on rhel7-5: -INFINITY -+pcmk__group_allocate: group6a allocation score on rhel7-1: 0 -+pcmk__group_allocate: group6a allocation score on rhel7-2: 0 -+pcmk__group_allocate: group6a allocation score on rhel7-3: 0 -+pcmk__group_allocate: group6a allocation score on rhel7-4: 0 -+pcmk__group_allocate: group6a allocation score on rhel7-5: -INFINITY -+pcmk__group_allocate: group7a allocation score on rhel7-1: 0 -+pcmk__group_allocate: group7a allocation score on rhel7-2: 0 -+pcmk__group_allocate: group7a allocation score on rhel7-3: 0 -+pcmk__group_allocate: group7a allocation score on rhel7-4: 0 -+pcmk__group_allocate: group7a allocation score on rhel7-5: -INFINITY -+pcmk__group_allocate: rsc5a1 allocation score on rhel7-1: 10 -+pcmk__group_allocate: rsc5a1 allocation score on rhel7-2: 0 -+pcmk__group_allocate: rsc5a1 allocation score on rhel7-3: 0 -+pcmk__group_allocate: rsc5a1 allocation score on rhel7-4: 0 -+pcmk__group_allocate: rsc5a1 allocation score on rhel7-5: -INFINITY -+pcmk__group_allocate: rsc5a2 allocation score on rhel7-1: 10 -+pcmk__group_allocate: rsc5a2 allocation score on rhel7-2: 0 -+pcmk__group_allocate: rsc5a2 allocation score on rhel7-3: 0 -+pcmk__group_allocate: rsc5a2 allocation score on rhel7-4: 0 -+pcmk__group_allocate: rsc5a2 allocation score on rhel7-5: 0 -+pcmk__group_allocate: rsc6a1 allocation score on rhel7-1: 0 -+pcmk__group_allocate: rsc6a1 allocation score on rhel7-2: -INFINITY -+pcmk__group_allocate: rsc6a1 allocation score on rhel7-3: 0 -+pcmk__group_allocate: rsc6a1 allocation score on rhel7-4: 0 -+pcmk__group_allocate: rsc6a1 allocation score on rhel7-5: -INFINITY -+pcmk__group_allocate: rsc6a2 allocation score on rhel7-1: 0 -+pcmk__group_allocate: rsc6a2 allocation score on rhel7-2: 10 -+pcmk__group_allocate: rsc6a2 allocation score on rhel7-3: 0 -+pcmk__group_allocate: rsc6a2 allocation score on rhel7-4: 0 -+pcmk__group_allocate: rsc6a2 allocation score on rhel7-5: 0 -+pcmk__group_allocate: rsc7a1 allocation score on 
rhel7-1: 0 -+pcmk__group_allocate: rsc7a1 allocation score on rhel7-2: 0 -+pcmk__group_allocate: rsc7a1 allocation score on rhel7-3: 10 -+pcmk__group_allocate: rsc7a1 allocation score on rhel7-4: 0 -+pcmk__group_allocate: rsc7a1 allocation score on rhel7-5: -INFINITY -+pcmk__group_allocate: rsc7a2 allocation score on rhel7-1: 0 -+pcmk__group_allocate: rsc7a2 allocation score on rhel7-2: 0 -+pcmk__group_allocate: rsc7a2 allocation score on rhel7-3: -INFINITY -+pcmk__group_allocate: rsc7a2 allocation score on rhel7-4: 0 -+pcmk__group_allocate: rsc7a2 allocation score on rhel7-5: 0 -+pcmk__native_allocate: Fencing allocation score on rhel7-1: 10 -+pcmk__native_allocate: Fencing allocation score on rhel7-2: 0 -+pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 -+pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 -+pcmk__native_allocate: Fencing allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle10-0 allocation score on rhel7-1: 0 -+pcmk__native_allocate: bundle10-0 allocation score on rhel7-2: 10010 -+pcmk__native_allocate: bundle10-0 allocation score on rhel7-3: 0 -+pcmk__native_allocate: bundle10-0 allocation score on rhel7-4: 0 -+pcmk__native_allocate: bundle10-0 allocation score on rhel7-5: 0 -+pcmk__native_allocate: bundle10-1 allocation score on rhel7-1: 0 -+pcmk__native_allocate: bundle10-1 allocation score on rhel7-2: 0 -+pcmk__native_allocate: bundle10-1 allocation score on rhel7-3: 10010 -+pcmk__native_allocate: bundle10-1 allocation score on rhel7-4: 0 -+pcmk__native_allocate: bundle10-1 allocation score on rhel7-5: 0 -+pcmk__native_allocate: bundle10-docker-0 allocation score on rhel7-1: 0 -+pcmk__native_allocate: bundle10-docker-0 allocation score on rhel7-2: 21 -+pcmk__native_allocate: bundle10-docker-0 allocation score on rhel7-3: 0 -+pcmk__native_allocate: bundle10-docker-0 allocation score on rhel7-4: 0 -+pcmk__native_allocate: bundle10-docker-0 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle10-docker-1 allocation score on rhel7-1: 0 -+pcmk__native_allocate: bundle10-docker-1 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: bundle10-docker-1 allocation score on rhel7-3: 21 -+pcmk__native_allocate: bundle10-docker-1 allocation score on rhel7-4: 0 -+pcmk__native_allocate: bundle10-docker-1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10 -+pcmk__native_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10 -+pcmk__native_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle11-0 allocation score on rhel7-1: 10010 -+pcmk__native_allocate: bundle11-0 allocation score on rhel7-2: 0 -+pcmk__native_allocate: bundle11-0 allocation score on rhel7-3: 0 -+pcmk__native_allocate: bundle11-0 allocation score on rhel7-4: 0 
-+pcmk__native_allocate: bundle11-0 allocation score on rhel7-5: 0 -+pcmk__native_allocate: bundle11-1 allocation score on rhel7-1: 0 -+pcmk__native_allocate: bundle11-1 allocation score on rhel7-2: 0 -+pcmk__native_allocate: bundle11-1 allocation score on rhel7-3: 0 -+pcmk__native_allocate: bundle11-1 allocation score on rhel7-4: 0 -+pcmk__native_allocate: bundle11-1 allocation score on rhel7-5: 0 -+pcmk__native_allocate: bundle11-docker-0 allocation score on rhel7-1: 21 -+pcmk__native_allocate: bundle11-docker-0 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: bundle11-docker-0 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: bundle11-docker-0 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: bundle11-docker-0 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle11-docker-1 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: bundle11-docker-1 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: bundle11-docker-1 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: bundle11-docker-1 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: bundle11-docker-1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-1: 10 -+pcmk__native_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: bundle11a:0 allocation score on bundle11-0: INFINITY -+pcmk__native_allocate: bundle11a:1 allocation score on bundle11-1: INFINITY -+pcmk__native_allocate: httpd:0 allocation score on bundle10-0: INFINITY -+pcmk__native_allocate: httpd:1 allocation score on bundle10-1: INFINITY -+pcmk__native_allocate: rsc10a allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc10a allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc10a allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc10a allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc10a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc11a allocation score on rhel7-1: 10 -+pcmk__native_allocate: rsc11a allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc11a allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc11a allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc11a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc12a allocation score on rhel7-1: 10 -+pcmk__native_allocate: rsc12a allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc12a allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc12a allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc12a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc12b allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc12b 
allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc12b allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc12b allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc12b allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc12c allocation score on rhel7-1: 10 -+pcmk__native_allocate: rsc12c allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc12c allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc12c allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc12c allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13a allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc13a allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc13a allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13a allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc13a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:0 allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc13b:0 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc13b:0 allocation score on rhel7-3: 20 -+pcmk__native_allocate: rsc13b:0 allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc13b:0 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:1 allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc13b:1 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc13b:1 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13b:1 allocation score on rhel7-4: 15 -+pcmk__native_allocate: rsc13b:1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:2 allocation score on rhel7-1: 15 -+pcmk__native_allocate: rsc13b:2 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc13b:2 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13b:2 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc13b:2 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:3 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc13b:3 allocation score on rhel7-2: 15 -+pcmk__native_allocate: rsc13b:3 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13b:3 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc13b:3 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:4 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc13b:4 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc13b:4 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13b:4 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc13b:4 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:5 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc13b:5 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc13b:5 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13b:5 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc13b:5 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:6 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc13b:6 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc13b:6 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13b:6 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc13b:6 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:7 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc13b:7 allocation score on rhel7-2: 
-INFINITY -+pcmk__native_allocate: rsc13b:7 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13b:7 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc13b:7 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc13b:8 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc13b:8 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc13b:8 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc13b:8 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc13b:8 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:0 allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc14a:0 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc14a:0 allocation score on rhel7-3: 15 -+pcmk__native_allocate: rsc14a:0 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:0 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:1 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc14a:1 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc14a:1 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc14a:1 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:2 allocation score on rhel7-1: 15 -+pcmk__native_allocate: rsc14a:2 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc14a:2 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc14a:2 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:2 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:3 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc14a:3 allocation score on rhel7-2: 15 -+pcmk__native_allocate: rsc14a:3 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc14a:3 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:3 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:4 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc14a:4 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc14a:4 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc14a:4 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:4 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:5 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc14a:5 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc14a:5 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc14a:5 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:5 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:6 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc14a:6 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc14a:6 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc14a:6 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:6 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:7 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc14a:7 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc14a:7 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc14a:7 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:7 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14a:8 allocation score on rhel7-1: -INFINITY 
-+pcmk__native_allocate: rsc14a:8 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc14a:8 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc14a:8 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc14a:8 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc14b allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc14b allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc14b allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc14b allocation score on rhel7-4: 10 -+pcmk__native_allocate: rsc14b allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc1a allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc1a allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc1a allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc1a allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc1a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc1b allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc1b allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc1b allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc1b allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc1b allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc2a allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc2a allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc2a allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc2a allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc2a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc2b allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc2b allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc2b allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc2b allocation score on rhel7-4: 10 -+pcmk__native_allocate: rsc2b allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc3a allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc3a allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc3a allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc3a allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc3a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc3b allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc3b allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc3b allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc3b allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc3b allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc4a allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc4a allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc4a allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc4a allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc4a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc4b allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc4b allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc4b allocation score on rhel7-3: 10 -+pcmk__native_allocate: rsc4b allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc4b allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc5a allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc5a allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc5a allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc5a allocation score on rhel7-4: 
-INFINITY -+pcmk__native_allocate: rsc5a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc5a1 allocation score on rhel7-1: 20 -+pcmk__native_allocate: rsc5a1 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc5a1 allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc5a1 allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc5a1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc5a2 allocation score on rhel7-1: 10 -+pcmk__native_allocate: rsc5a2 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc5a2 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc5a2 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc5a2 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc6a allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc6a allocation score on rhel7-2: 10 -+pcmk__native_allocate: rsc6a allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc6a allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc6a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc6a1 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc6a1 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc6a1 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc6a1 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc6a1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc6a2 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc6a2 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc6a2 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc6a2 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc6a2 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc7a1 allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc7a1 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc7a1 allocation score on rhel7-3: 10 -+pcmk__native_allocate: rsc7a1 allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc7a1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc7a2 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc7a2 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc7a2 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc7a2 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc7a2 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc8a:0 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc8a:0 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc8a:0 allocation score on rhel7-3: 10 -+pcmk__native_allocate: rsc8a:0 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc8a:0 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc8a:1 allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc8a:1 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc8a:1 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc8a:1 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc8a:1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc8a:2 allocation score on rhel7-1: 10 -+pcmk__native_allocate: rsc8a:2 allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc8a:2 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc8a:2 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc8a:2 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: 
rsc8b:0 allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc8b:0 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc8b:0 allocation score on rhel7-3: 10 -+pcmk__native_allocate: rsc8b:0 allocation score on rhel7-4: 0 -+pcmk__native_allocate: rsc8b:0 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc8b:1 allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc8b:1 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc8b:1 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc8b:1 allocation score on rhel7-4: 10 -+pcmk__native_allocate: rsc8b:1 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc8b:2 allocation score on rhel7-1: 10 -+pcmk__native_allocate: rsc8b:2 allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc8b:2 allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc8b:2 allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc8b:2 allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc9a allocation score on rhel7-1: 0 -+pcmk__native_allocate: rsc9a allocation score on rhel7-2: 0 -+pcmk__native_allocate: rsc9a allocation score on rhel7-3: 0 -+pcmk__native_allocate: rsc9a allocation score on rhel7-4: 10 -+pcmk__native_allocate: rsc9a allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc9b allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc9b allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc9b allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc9b allocation score on rhel7-4: 10 -+pcmk__native_allocate: rsc9b allocation score on rhel7-5: -INFINITY -+pcmk__native_allocate: rsc9c allocation score on rhel7-1: -INFINITY -+pcmk__native_allocate: rsc9c allocation score on rhel7-2: -INFINITY -+pcmk__native_allocate: rsc9c allocation score on rhel7-3: -INFINITY -+pcmk__native_allocate: rsc9c allocation score on rhel7-4: -INFINITY -+pcmk__native_allocate: rsc9c allocation score on rhel7-5: -INFINITY -+rsc13b:0 promotion score on rhel7-3: 10 -+rsc13b:1 promotion score on rhel7-4: 5 -+rsc13b:2 promotion score on rhel7-1: 5 -+rsc13b:3 promotion score on rhel7-2: 5 -+rsc13b:4 promotion score on none: 0 -+rsc13b:5 promotion score on none: 0 -+rsc13b:6 promotion score on none: 0 -+rsc13b:7 promotion score on none: 0 -+rsc13b:8 promotion score on none: 0 -+rsc14a:0 promotion score on rhel7-3: -INFINITY -+rsc14a:1 promotion score on none: 0 -+rsc14a:2 promotion score on rhel7-1: -INFINITY -+rsc14a:3 promotion score on rhel7-2: -INFINITY -+rsc14a:4 promotion score on none: 0 -+rsc14a:5 promotion score on none: 0 -+rsc14a:6 promotion score on none: 0 -+rsc14a:7 promotion score on none: 0 -+rsc14a:8 promotion score on none: 0 -diff --git a/cts/scheduler/colocation-influence.summary b/cts/scheduler/colocation-influence.summary -new file mode 100644 -index 0000000..626e87a ---- /dev/null -+++ b/cts/scheduler/colocation-influence.summary -@@ -0,0 +1,168 @@ -+ -+Current cluster status: -+Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] -+GuestOnline: [ bundle10-0:bundle10-docker-0 bundle10-1:bundle10-docker-1 bundle11-0:bundle11-docker-0 ] -+ -+ Fencing (stonith:fence_xvm): Started rhel7-1 -+ rsc1a (ocf::pacemaker:Dummy): Started rhel7-2 -+ rsc1b (ocf::pacemaker:Dummy): Started rhel7-2 -+ rsc2a (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc2b (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc3a (ocf::pacemaker:Dummy): Stopped -+ rsc3b (ocf::pacemaker:Dummy): Stopped -+ rsc4a (ocf::pacemaker:Dummy): Started rhel7-3 -+ rsc4b 
(ocf::pacemaker:Dummy): Started rhel7-3 -+ rsc5a (ocf::pacemaker:Dummy): Started rhel7-1 -+ Resource Group: group5a -+ rsc5a1 (ocf::pacemaker:Dummy): Started rhel7-1 -+ rsc5a2 (ocf::pacemaker:Dummy): Started rhel7-1 -+ Resource Group: group6a -+ rsc6a1 (ocf::pacemaker:Dummy): Started rhel7-2 -+ rsc6a2 (ocf::pacemaker:Dummy): Started rhel7-2 -+ rsc6a (ocf::pacemaker:Dummy): Started rhel7-2 -+ Resource Group: group7a -+ rsc7a1 (ocf::pacemaker:Dummy): Started rhel7-3 -+ rsc7a2 (ocf::pacemaker:Dummy): Started rhel7-3 -+ Clone Set: rsc8a-clone [rsc8a] -+ Started: [ rhel7-1 rhel7-3 rhel7-4 ] -+ Clone Set: rsc8b-clone [rsc8b] -+ Started: [ rhel7-1 rhel7-3 rhel7-4 ] -+ rsc9a (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc9b (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc9c (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc10a (ocf::pacemaker:Dummy): Started rhel7-2 -+ rsc11a (ocf::pacemaker:Dummy): Started rhel7-1 -+ rsc12a (ocf::pacemaker:Dummy): Started rhel7-1 -+ rsc12b (ocf::pacemaker:Dummy): Started rhel7-1 -+ rsc12c (ocf::pacemaker:Dummy): Started rhel7-1 -+ Container bundle set: bundle10 [pcmktest:http] -+ bundle10-0 (192.168.122.131) (ocf::heartbeat:apache): Started rhel7-2 -+ bundle10-1 (192.168.122.132) (ocf::heartbeat:apache): Started rhel7-3 -+ Container bundle set: bundle11 [pcmktest:http] -+ bundle11-0 (192.168.122.134) (ocf::pacemaker:Dummy): Started rhel7-1 -+ bundle11-1 (192.168.122.135) (ocf::pacemaker:Dummy): Stopped -+ rsc13a (ocf::pacemaker:Dummy): Started rhel7-3 -+ Clone Set: rsc13b-clone [rsc13b] (promotable) -+ Masters: [ rhel7-3 ] -+ Slaves: [ rhel7-1 rhel7-2 rhel7-4 ] -+ Stopped: [ rhel7-5 ] -+ rsc14b (ocf::pacemaker:Dummy): Started rhel7-4 -+ Clone Set: rsc14a-clone [rsc14a] (promotable) -+ Masters: [ rhel7-4 ] -+ Slaves: [ rhel7-1 rhel7-2 rhel7-3 ] -+ Stopped: [ rhel7-5 ] -+ -+Transition Summary: -+ * Move rsc1a ( rhel7-2 -> rhel7-3 ) -+ * Move rsc1b ( rhel7-2 -> rhel7-3 ) -+ * Stop rsc2a ( rhel7-4 ) due to node availability -+ * Start rsc3a ( rhel7-2 ) -+ * Start rsc3b ( rhel7-2 ) -+ * Stop rsc4a ( rhel7-3 ) due to node availability -+ * Stop rsc5a ( rhel7-1 ) due to node availability -+ * Stop rsc6a1 ( rhel7-2 ) due to node availability -+ * Stop rsc6a2 ( rhel7-2 ) due to node availability -+ * Stop rsc7a2 ( rhel7-3 ) due to node availability -+ * Stop rsc8a:1 ( rhel7-4 ) due to node availability -+ * Stop rsc9c ( rhel7-4 ) due to node availability -+ * Move rsc10a ( rhel7-2 -> rhel7-3 ) -+ * Stop rsc12b ( rhel7-1 ) due to node availability -+ * Start bundle11-1 ( rhel7-5 ) due to unrunnable bundle11-docker-1 start (blocked) -+ * Start bundle11a:1 ( bundle11-1 ) due to unrunnable bundle11-docker-1 start (blocked) -+ * Stop rsc13a ( rhel7-3 ) due to node availability -+ * Stop rsc14a:1 ( Master rhel7-4 ) due to node availability -+ -+Executing cluster transition: -+ * Resource action: rsc1a stop on rhel7-2 -+ * Resource action: rsc1b stop on rhel7-2 -+ * Resource action: rsc2a stop on rhel7-4 -+ * Resource action: rsc3a start on rhel7-2 -+ * Resource action: rsc3b start on rhel7-2 -+ * Resource action: rsc4a stop on rhel7-3 -+ * Resource action: rsc5a stop on rhel7-1 -+ * Pseudo action: group6a_stop_0 -+ * Resource action: rsc6a2 stop on rhel7-2 -+ * Pseudo action: group7a_stop_0 -+ * Resource action: rsc7a2 stop on rhel7-3 -+ * Pseudo action: rsc8a-clone_stop_0 -+ * Resource action: rsc9c stop on rhel7-4 -+ * Resource action: rsc10a stop on rhel7-2 -+ * Resource action: rsc12b stop on rhel7-1 -+ * Resource action: rsc13a stop on rhel7-3 -+ * Pseudo action: 
rsc14a-clone_demote_0 -+ * Pseudo action: bundle11_start_0 -+ * Resource action: rsc1a start on rhel7-3 -+ * Resource action: rsc1b start on rhel7-3 -+ * Resource action: rsc3a monitor=10000 on rhel7-2 -+ * Resource action: rsc3b monitor=10000 on rhel7-2 -+ * Resource action: rsc6a1 stop on rhel7-2 -+ * Pseudo action: group7a_stopped_0 -+ * Resource action: rsc8a stop on rhel7-4 -+ * Pseudo action: rsc8a-clone_stopped_0 -+ * Resource action: rsc10a start on rhel7-3 -+ * Pseudo action: bundle11-clone_start_0 -+ * Resource action: rsc14a demote on rhel7-4 -+ * Pseudo action: rsc14a-clone_demoted_0 -+ * Pseudo action: rsc14a-clone_stop_0 -+ * Resource action: rsc1a monitor=10000 on rhel7-3 -+ * Resource action: rsc1b monitor=10000 on rhel7-3 -+ * Pseudo action: group6a_stopped_0 -+ * Resource action: rsc10a monitor=10000 on rhel7-3 -+ * Pseudo action: bundle11-clone_running_0 -+ * Resource action: rsc14a stop on rhel7-4 -+ * Pseudo action: rsc14a-clone_stopped_0 -+ * Pseudo action: bundle11_running_0 -+ -+Revised cluster status: -+Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] -+GuestOnline: [ bundle10-0:bundle10-docker-0 bundle10-1:bundle10-docker-1 bundle11-0:bundle11-docker-0 ] -+ -+ Fencing (stonith:fence_xvm): Started rhel7-1 -+ rsc1a (ocf::pacemaker:Dummy): Started rhel7-3 -+ rsc1b (ocf::pacemaker:Dummy): Started rhel7-3 -+ rsc2a (ocf::pacemaker:Dummy): Stopped -+ rsc2b (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc3a (ocf::pacemaker:Dummy): Started rhel7-2 -+ rsc3b (ocf::pacemaker:Dummy): Started rhel7-2 -+ rsc4a (ocf::pacemaker:Dummy): Stopped -+ rsc4b (ocf::pacemaker:Dummy): Started rhel7-3 -+ rsc5a (ocf::pacemaker:Dummy): Stopped -+ Resource Group: group5a -+ rsc5a1 (ocf::pacemaker:Dummy): Started rhel7-1 -+ rsc5a2 (ocf::pacemaker:Dummy): Started rhel7-1 -+ Resource Group: group6a -+ rsc6a1 (ocf::pacemaker:Dummy): Stopped -+ rsc6a2 (ocf::pacemaker:Dummy): Stopped -+ rsc6a (ocf::pacemaker:Dummy): Started rhel7-2 -+ Resource Group: group7a -+ rsc7a1 (ocf::pacemaker:Dummy): Started rhel7-3 -+ rsc7a2 (ocf::pacemaker:Dummy): Stopped -+ Clone Set: rsc8a-clone [rsc8a] -+ Started: [ rhel7-1 rhel7-3 ] -+ Stopped: [ rhel7-2 rhel7-4 rhel7-5 ] -+ Clone Set: rsc8b-clone [rsc8b] -+ Started: [ rhel7-1 rhel7-3 rhel7-4 ] -+ rsc9a (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc9b (ocf::pacemaker:Dummy): Started rhel7-4 -+ rsc9c (ocf::pacemaker:Dummy): Stopped -+ rsc10a (ocf::pacemaker:Dummy): Started rhel7-3 -+ rsc11a (ocf::pacemaker:Dummy): Started rhel7-1 -+ rsc12a (ocf::pacemaker:Dummy): Started rhel7-1 -+ rsc12b (ocf::pacemaker:Dummy): Stopped -+ rsc12c (ocf::pacemaker:Dummy): Started rhel7-1 -+ Container bundle set: bundle10 [pcmktest:http] -+ bundle10-0 (192.168.122.131) (ocf::heartbeat:apache): Started rhel7-2 -+ bundle10-1 (192.168.122.132) (ocf::heartbeat:apache): Started rhel7-3 -+ Container bundle set: bundle11 [pcmktest:http] -+ bundle11-0 (192.168.122.134) (ocf::pacemaker:Dummy): Started rhel7-1 -+ bundle11-1 (192.168.122.135) (ocf::pacemaker:Dummy): Stopped -+ rsc13a (ocf::pacemaker:Dummy): Stopped -+ Clone Set: rsc13b-clone [rsc13b] (promotable) -+ Masters: [ rhel7-3 ] -+ Slaves: [ rhel7-1 rhel7-2 rhel7-4 ] -+ Stopped: [ rhel7-5 ] -+ rsc14b (ocf::pacemaker:Dummy): Started rhel7-4 -+ Clone Set: rsc14a-clone [rsc14a] (promotable) -+ Slaves: [ rhel7-1 rhel7-2 rhel7-3 ] -+ Stopped: [ rhel7-4 rhel7-5 ] -+ -diff --git a/cts/scheduler/colocation-influence.xml b/cts/scheduler/colocation-influence.xml -new file mode 100644 -index 0000000..5962cd2 ---- /dev/null -+++ 
b/cts/scheduler/colocation-influence.xml -@@ -0,0 +1,1298 @@ -+ [1298 added lines: the colocation-influence.xml scheduler test input (CIB); its XML markup was stripped during extraction and is not recoverable from this copy] --- -1.8.3.1 -
diff --git a/SOURCES/025-feature-set.patch deleted file mode 100644 index 8ac172d..0000000 --- a/SOURCES/025-feature-set.patch +++ /dev/null @@ -1,505 +0,0 @@ -From cf0eebd913855f9ceda2864cbcd9cbd647fca055 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 13 Jan 2021 12:10:07 -0600 -Subject: [PATCH] Build: xml: rename crm_resource API file - -5aadeaf added an API schema 2.5 for crm_resource, but API schema 2.5 was -released with 2.0.5, so rename it to 2.6 to indicate it is a later change. ---- - xml/api/crm_resource-2.5.rng | 238 ------------------------------------------- - xml/api/crm_resource-2.6.rng | 238 +++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 238 insertions(+), 238 deletions(-) - delete mode 100644 xml/api/crm_resource-2.5.rng - create mode 100644 xml/api/crm_resource-2.6.rng - -diff --git a/xml/api/crm_resource-2.5.rng b/xml/api/crm_resource-2.5.rng -deleted file mode 100644 -index b49e24c..0000000 ---- a/xml/api/crm_resource-2.5.rng -+++ /dev/null -@@ -1,238 +0,0 @@ -- [238 removed lines: the crm_resource-2.5.rng RELAX NG schema; the markup was stripped during extraction, leaving only literal values such as "promoted", "ocf", "true"/"false", "Stopped", "Started", "Master", "Slave"] -
-diff --git a/xml/api/crm_resource-2.6.rng b/xml/api/crm_resource-2.6.rng -new file mode 100644 -index 0000000..b49e24c ---- /dev/null -+++ b/xml/api/crm_resource-2.6.rng -@@ -0,0 +1,238 @@ -+ [238 added lines: the same schema recreated as crm_resource-2.6.rng; markup likewise stripped during extraction] --- -1.8.3.1 -
diff --git a/SOURCES/026-tests.patch deleted file mode 100644 index f619c54..0000000 --- a/SOURCES/026-tests.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 610e54caf9a695d3d108c87c735a630f2ea5657f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 2 Dec 2020 15:09:35 -0600 -Subject: [PATCH] Test: cts-fencing: update expected output - -b16b24ed changed the order of some XML attributes
---- - cts/cts-fencing.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/cts/cts-fencing.in b/cts/cts-fencing.in -index c901c6c..224b5d4 100644 ---- a/cts/cts-fencing.in -+++ b/cts/cts-fencing.in -@@ -1102,7 +1102,7 @@ class Tests(object): - - test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5 -V") - -- test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -H node3", 'status="success" .* action="off" target="node3"') -+ test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -H node3", 'action="off" target="node3" .* status="success"') - - # simple test of dynamic list query - for test_type in test_types: --- -1.8.3.1 - diff --git a/SOURCES/027-crm_mon.patch b/SOURCES/027-crm_mon.patch deleted file mode 100644 index ae3501b..0000000 --- a/SOURCES/027-crm_mon.patch +++ /dev/null @@ -1,962 +0,0 @@ -From a32b6e14ba51fefbda2d4a699cf1c48dd3a1bb5a Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 7 Jan 2021 11:37:51 -0500 -Subject: [PATCH 01/10] Fix: tools: Don't pass stonith history to - print_simple_status. - -It's not being used. ---- - tools/crm_mon.c | 7 +++---- - 1 file changed, 3 insertions(+), 4 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 4555516..729f6a1 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -1451,14 +1451,13 @@ main(int argc, char **argv) - * \brief Print one-line status suitable for use with monitoring software - * - * \param[in] data_set Working set of CIB state -- * \param[in] history List of stonith actions - * - * \note This function's output (and the return code when the program exits) - * should conform to https://www.monitoring-plugins.org/doc/guidelines.html - */ - static void - print_simple_status(pcmk__output_t *out, pe_working_set_t * data_set, -- stonith_history_t *history, unsigned int mon_ops) -+ unsigned int mon_ops) - { - GListPtr gIter = NULL; - int nodes_online = 0; -@@ -2012,7 +2011,7 @@ mon_refresh_display(gpointer user_data) - break; - - case mon_output_monitor: -- print_simple_status(out, mon_data_set, stonith_history, options.mon_ops); -+ print_simple_status(out, mon_data_set, options.mon_ops); - if (pcmk_is_set(options.mon_ops, mon_op_has_warnings)) { - clean_up(MON_STATUS_WARN); - return FALSE; --- -1.8.3.1 - - -From 8b9c47089c70295bc0529671ba5991c6d831e14b Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 7 Jan 2021 13:17:04 -0500 -Subject: [PATCH 02/10] Refactor: tools: Don't pass output_format to - mon_refresh_display. - -output_format is a global variable. ---- - tools/crm_mon.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 729f6a1..b801560 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -827,7 +827,7 @@ cib_connect(gboolean full) - - rc = cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); - if (rc == pcmk_ok) { -- mon_refresh_display(&output_format); -+ mon_refresh_display(NULL); - } - - if (rc == pcmk_ok && full) { --- -1.8.3.1 - - -From a1b14ad96f12746167da8588dc086b20e6f6d1d6 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 7 Jan 2021 13:20:14 -0500 -Subject: [PATCH 03/10] Refactor: tools: Remove unnecessary checks for cib != - NULL. - -cib is guaranteed to not be NULL at these points, so there's no need to -do an additional check. 
This code was leftover from a previous -reorganization that changed when the cib variable gets initialized. ---- - tools/crm_mon.c | 41 +++++++++++++++++++---------------------- - 1 file changed, 19 insertions(+), 22 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index b801560..1eedd38 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1346,7 +1346,7 @@ main(int argc, char **argv) - - /* Extra sanity checks when in CGI mode */ - if (output_format == mon_output_cgi) { -- if (cib && cib->variant == cib_file) { -+ if (cib->variant == cib_file) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "CGI mode used with CIB file"); - return clean_up(CRM_EX_USAGE); - } else if (options.external_agent != NULL) { -@@ -1370,33 +1370,30 @@ main(int argc, char **argv) - - crm_info("Starting %s", crm_system_name); - -- if (cib) { -- -- do { -- if (!pcmk_is_set(options.mon_ops, mon_op_one_shot)) { -- print_as(output_format ,"Waiting until cluster is available on this node ...\n"); -- } -- rc = cib_connect(!pcmk_is_set(options.mon_ops, mon_op_one_shot)); -+ do { -+ if (!pcmk_is_set(options.mon_ops, mon_op_one_shot)) { -+ print_as(output_format ,"Waiting until cluster is available on this node ...\n"); -+ } -+ rc = cib_connect(!pcmk_is_set(options.mon_ops, mon_op_one_shot)); - -- if (pcmk_is_set(options.mon_ops, mon_op_one_shot)) { -- break; -+ if (pcmk_is_set(options.mon_ops, mon_op_one_shot)) { -+ break; - -- } else if (rc != pcmk_ok) { -- sleep(options.reconnect_msec / 1000); -+ } else if (rc != pcmk_ok) { -+ sleep(options.reconnect_msec / 1000); - #if CURSES_ENABLED -- if (output_format == mon_output_console) { -- clear(); -- refresh(); -- } -+ if (output_format == mon_output_console) { -+ clear(); -+ refresh(); -+ } - #endif -- } else { -- if (output_format == mon_output_html && out->dest != stdout) { -- printf("Writing html to %s ...\n", args->output_dest); -- } -+ } else { -+ if (output_format == mon_output_html && out->dest != stdout) { -+ printf("Writing html to %s ...\n", args->output_dest); - } -+ } - -- } while (rc == -ENOTCONN); -- } -+ } while (rc == -ENOTCONN); - - if (rc != pcmk_ok) { - if (output_format == mon_output_monitor) { --- -1.8.3.1 - - -From fe5284a12765e775905bdfe58711c5733a063132 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 7 Jan 2021 14:15:40 -0500 -Subject: [PATCH 04/10] Fix: tools: mon_refresh_display should return an int. - -While GSourceFunc is defined as returning a boolean, our public API -mainloop function expect the dispatch function to return an int. So -change mon_refresh_display to do so. 
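
[As a side illustration of the GSourceFunc convention this commit message refers to, here is a minimal standalone sketch assuming only glib; the function names are hypothetical and not taken from the patch. It shows an int-returning refresh routine wrapped so that g_timeout_add() receives a proper gboolean-returning GSourceFunc.]

#include <glib.h>

/* Hypothetical int-returning dispatch routine (illustrative only):
 * nonzero means "keep refreshing", zero means "stop". */
static int
refresh_display(gpointer user_data)
{
    /* ... redraw status here ... */
    return 1;
}

/* GSourceFunc wrapper: glib requires gboolean, where TRUE keeps the
 * timeout source installed and FALSE removes it. */
static gboolean
refresh_display_source(gpointer user_data)
{
    return (refresh_display(user_data) != 0) ? G_SOURCE_CONTINUE
                                             : G_SOURCE_REMOVE;
}

int
main(void)
{
    GMainLoop *loop = g_main_loop_new(NULL, FALSE);

    g_timeout_add(5000 /* ms */, refresh_display_source, NULL);
    g_main_loop_run(loop);
    return 0;
}
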
---- - tools/crm_mon.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 1eedd38..8657a89 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -125,7 +125,7 @@ struct { - static void clean_up_connections(void); - static crm_exit_t clean_up(crm_exit_t exit_code); - static void crm_diff_update(const char *event, xmlNode * msg); --static gboolean mon_refresh_display(gpointer user_data); -+static int mon_refresh_display(gpointer user_data); - static int cib_connect(gboolean full); - static void mon_st_callback_event(stonith_t * st, stonith_event_t * e); - static void mon_st_callback_display(stonith_t * st, stonith_event_t * e); -@@ -1925,7 +1925,7 @@ crm_diff_update(const char *event, xmlNode * msg) - kick_refresh(cib_updated); - } - --static gboolean -+static int - mon_refresh_display(gpointer user_data) - { - xmlNode *cib_copy = copy_xml(current_cib); -@@ -1940,7 +1940,7 @@ mon_refresh_display(gpointer user_data) - } - out->err(out, "Upgrade failed: %s", pcmk_strerror(-pcmk_err_schema_validation)); - clean_up(CRM_EX_CONFIG); -- return FALSE; -+ return 0; - } - - /* get the stonith-history if there is evidence we need it -@@ -1966,7 +1966,7 @@ mon_refresh_display(gpointer user_data) - } - free_xml(cib_copy); - out->err(out, "Reading stonith-history failed"); -- return FALSE; -+ return 0; - } - - if (mon_data_set == NULL) { -@@ -1995,7 +1995,7 @@ mon_refresh_display(gpointer user_data) - options.only_node, options.only_rsc) != 0) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_CANTCREAT, "Critical: Unable to output html file"); - clean_up(CRM_EX_CANTCREAT); -- return FALSE; -+ return 0; - } - break; - -@@ -2044,7 +2044,7 @@ mon_refresh_display(gpointer user_data) - stonith_history_free(stonith_history); - stonith_history = NULL; - pe_reset_working_set(mon_data_set); -- return TRUE; -+ return 1; - } - - static void --- -1.8.3.1 - - -From 7f88a5a428ed73fb5161096ece2517abe1119f06 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 7 Jan 2021 16:59:02 -0500 -Subject: [PATCH 05/10] Refactor: tools: Change a conditional in cib_connect. - -This allows unindenting everything that occurs inside that conditional, -which I think makes it a little bit easier to understand what is going -on. ---- - tools/crm_mon.c | 86 +++++++++++++++++++++++++++++---------------------------- - 1 file changed, 44 insertions(+), 42 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 8657a89..b8ba56b 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -804,55 +804,57 @@ cib_connect(gboolean full) - } - } - -- if (cib->state != cib_connected_query && cib->state != cib_connected_command) { -- crm_trace("Connecting to the CIB"); -- -- /* Hack: the CIB signon will print the prompt for a password if needed, -- * but to stderr. If we're in curses, show it on the screen instead. -- * -- * @TODO Add a password prompt (maybe including input) function to -- * pcmk__output_t and use it in libcib. 
-- */ -- if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) { -- need_pass = FALSE; -- print_as(output_format, "Password:"); -- } -+ if (cib->state == cib_connected_query || cib->state == cib_connected_command) { -+ return rc; -+ } - -- rc = cib->cmds->signon(cib, crm_system_name, cib_query); -- if (rc != pcmk_ok) { -- out->err(out, "Could not connect to the CIB: %s", -- pcmk_strerror(rc)); -- return rc; -- } -+ crm_trace("Connecting to the CIB"); - -- rc = cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); -- if (rc == pcmk_ok) { -- mon_refresh_display(NULL); -- } -+ /* Hack: the CIB signon will print the prompt for a password if needed, -+ * but to stderr. If we're in curses, show it on the screen instead. -+ * -+ * @TODO Add a password prompt (maybe including input) function to -+ * pcmk__output_t and use it in libcib. -+ */ -+ if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) { -+ need_pass = FALSE; -+ print_as(output_format, "Password:"); -+ } - -- if (rc == pcmk_ok && full) { -- if (rc == pcmk_ok) { -- rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy_regular); -- if (rc == -EPROTONOSUPPORT) { -- print_as -- (output_format, "Notification setup not supported, won't be able to reconnect after failure"); -- if (output_format == mon_output_console) { -- sleep(2); -- } -- rc = pcmk_ok; -- } -+ rc = cib->cmds->signon(cib, crm_system_name, cib_query); -+ if (rc != pcmk_ok) { -+ out->err(out, "Could not connect to the CIB: %s", -+ pcmk_strerror(rc)); -+ return rc; -+ } - -- } -+ rc = cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); -+ if (rc == pcmk_ok) { -+ mon_refresh_display(NULL); -+ } - -- if (rc == pcmk_ok) { -- cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); -- rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); -+ if (rc == pcmk_ok && full) { -+ if (rc == pcmk_ok) { -+ rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy_regular); -+ if (rc == -EPROTONOSUPPORT) { -+ print_as -+ (output_format, "Notification setup not supported, won't be able to reconnect after failure"); -+ if (output_format == mon_output_console) { -+ sleep(2); -+ } -+ rc = pcmk_ok; - } - -- if (rc != pcmk_ok) { -- out->err(out, "Notification setup failed, could not monitor CIB actions"); -- clean_up_connections(); -- } -+ } -+ -+ if (rc == pcmk_ok) { -+ cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); -+ rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); -+ } -+ -+ if (rc != pcmk_ok) { -+ out->err(out, "Notification setup failed, could not monitor CIB actions"); -+ clean_up_connections(); - } - } - return rc; --- -1.8.3.1 - - -From 178ba17e4ee62bef28f8e71cad2c002f823661b5 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 7 Jan 2021 17:37:36 -0500 -Subject: [PATCH 06/10] Refactor: tools: Remove an unnecessary conditional in - cib_connect. 
- ---- - tools/crm_mon.c | 17 +++++++---------- - 1 file changed, 7 insertions(+), 10 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index b8ba56b..36249e8 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -834,17 +834,14 @@ cib_connect(gboolean full) - } - - if (rc == pcmk_ok && full) { -- if (rc == pcmk_ok) { -- rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy_regular); -- if (rc == -EPROTONOSUPPORT) { -- print_as -- (output_format, "Notification setup not supported, won't be able to reconnect after failure"); -- if (output_format == mon_output_console) { -- sleep(2); -- } -- rc = pcmk_ok; -+ rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy_regular); -+ if (rc == -EPROTONOSUPPORT) { -+ print_as -+ (output_format, "Notification setup not supported, won't be able to reconnect after failure"); -+ if (output_format == mon_output_console) { -+ sleep(2); - } -- -+ rc = pcmk_ok; - } - - if (rc == pcmk_ok) { --- -1.8.3.1 - - -From 33bac5886417afc5c7bbf56f4d31e0e36f8ae947 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 8 Jan 2021 10:00:50 -0500 -Subject: [PATCH 07/10] Refactor: tools: Simplify another conditional in - crm_mon. - ---- - tools/crm_mon.c | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 36249e8..8b47bbc 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1386,10 +1386,8 @@ main(int argc, char **argv) - refresh(); - } - #endif -- } else { -- if (output_format == mon_output_html && out->dest != stdout) { -- printf("Writing html to %s ...\n", args->output_dest); -- } -+ } else if (output_format == mon_output_html && out->dest != stdout) { -+ printf("Writing html to %s ...\n", args->output_dest); - } - - } while (rc == -ENOTCONN); --- -1.8.3.1 - - -From 40bc8b3147e7ebef4318211fa69973a8b5d32e79 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 8 Jan 2021 12:12:37 -0500 -Subject: [PATCH 08/10] Refactor: libcrmcommon,tools,daemons: Put common xpath - code in one place. - ---- - configure.ac | 1 + - daemons/controld/controld_te_callbacks.c | 26 +++----------- - include/crm/common/xml_internal.h | 14 +++++++- - lib/common/tests/Makefile.am | 2 +- - lib/common/tests/xpath/Makefile.am | 29 +++++++++++++++ - lib/common/tests/xpath/pcmk__xpath_node_id_test.c | 43 +++++++++++++++++++++++ - lib/common/xpath.c | 34 +++++++++++++++++- - tools/crm_mon.c | 25 ++----------- - 8 files changed, 127 insertions(+), 47 deletions(-) - create mode 100644 lib/common/tests/xpath/Makefile.am - create mode 100644 lib/common/tests/xpath/pcmk__xpath_node_id_test.c - -diff --git a/configure.ac b/configure.ac -index 5959116..ce0f1fe 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -1920,6 +1920,7 @@ AC_CONFIG_FILES(Makefile \ - lib/common/tests/operations/Makefile \ - lib/common/tests/strings/Makefile \ - lib/common/tests/utils/Makefile \ -+ lib/common/tests/xpath/Makefile \ - lib/cluster/Makefile \ - lib/cib/Makefile \ - lib/gnu/Makefile \ -diff --git a/daemons/controld/controld_te_callbacks.c b/daemons/controld/controld_te_callbacks.c -index 66fc645..4e3e4e6 100644 ---- a/daemons/controld/controld_te_callbacks.c -+++ b/daemons/controld/controld_te_callbacks.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -276,24 +276,6 @@ process_resource_updates(const char *node, xmlNode *xml, xmlNode *change, - } - } - --#define NODE_PATT "/lrm[@id=" --static char *get_node_from_xpath(const char *xpath) --{ -- char *nodeid = NULL; -- char *tmp = strstr(xpath, NODE_PATT); -- -- if(tmp) { -- tmp += strlen(NODE_PATT); -- tmp += 1; -- -- nodeid = strdup(tmp); -- tmp = strstr(nodeid, "\'"); -- CRM_ASSERT(tmp); -- tmp[0] = 0; -- } -- return nodeid; --} -- - static char *extract_node_uuid(const char *xpath) - { - char *mutable_path = strdup(xpath); -@@ -522,19 +504,19 @@ te_update_diff_v2(xmlNode *diff) - process_resource_updates(ID(match), match, change, op, xpath); - - } else if (strcmp(name, XML_LRM_TAG_RESOURCES) == 0) { -- char *local_node = get_node_from_xpath(xpath); -+ char *local_node = pcmk__xpath_node_id(xpath, "lrm"); - - process_resource_updates(local_node, match, change, op, xpath); - free(local_node); - - } else if (strcmp(name, XML_LRM_TAG_RESOURCE) == 0) { -- char *local_node = get_node_from_xpath(xpath); -+ char *local_node = pcmk__xpath_node_id(xpath, "lrm"); - - process_lrm_resource_diff(match, local_node); - free(local_node); - - } else if (strcmp(name, XML_LRM_TAG_RSC_OP) == 0) { -- char *local_node = get_node_from_xpath(xpath); -+ char *local_node = pcmk__xpath_node_id(xpath, "lrm"); - - process_graph_event(match, local_node); - free(local_node); -diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h -index 1e80bc6..d8694ee 100644 ---- a/include/crm/common/xml_internal.h -+++ b/include/crm/common/xml_internal.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2017-2020 the Pacemaker project contributors -+ * Copyright 2017-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -273,4 +273,16 @@ pcmk__xe_first_attr(const xmlNode *xe) - return (xe == NULL)? NULL : xe->properties; - } - -+/*! -+ * \internal -+ * \brief Extract the ID attribute from an XML element -+ * -+ * \param[in] xpath String to search -+ * \param[in] node Node to get the ID for -+ * -+ * \return ID attribute of \p node in xpath string \p xpath -+ */ -+char * -+pcmk__xpath_node_id(const char *xpath, const char *node); -+ - #endif // PCMK__XML_INTERNAL__H -diff --git a/lib/common/tests/Makefile.am b/lib/common/tests/Makefile.am -index 2c33cc5..4c6e8b4 100644 ---- a/lib/common/tests/Makefile.am -+++ b/lib/common/tests/Makefile.am -@@ -1 +1 @@ --SUBDIRS = agents cmdline flags operations strings utils -+SUBDIRS = agents cmdline flags operations strings utils xpath -diff --git a/lib/common/tests/xpath/Makefile.am b/lib/common/tests/xpath/Makefile.am -new file mode 100644 -index 0000000..7a53683 ---- /dev/null -+++ b/lib/common/tests/xpath/Makefile.am -@@ -0,0 +1,29 @@ -+# -+# Copyright 2021 the Pacemaker project contributors -+# -+# The version control history for this file may have further details. -+# -+# This source code is licensed under the GNU General Public License version 2 -+# or later (GPLv2+) WITHOUT ANY WARRANTY. -+# -+AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include -+LDADD = $(top_builddir)/lib/common/libcrmcommon.la -+ -+include $(top_srcdir)/mk/glib-tap.mk -+ -+# Add each test program here. Each test should be written as a little standalone -+# program using the glib unit testing functions. See the documentation for more -+# information. -+# -+# https://developer.gnome.org/glib/unstable/glib-Testing.html -+# -+# Add "_test" to the end of all test program names to simplify .gitignore. 
-+test_programs = pcmk__xpath_node_id_test -+ -+# If any extra data needs to be added to the source distribution, add it to the -+# following list. -+dist_test_data = -+ -+# If any extra data needs to be used by tests but should not be added to the -+# source distribution, add it to the following list. -+test_data = -diff --git a/lib/common/tests/xpath/pcmk__xpath_node_id_test.c b/lib/common/tests/xpath/pcmk__xpath_node_id_test.c -new file mode 100644 -index 0000000..f6b5c10 ---- /dev/null -+++ b/lib/common/tests/xpath/pcmk__xpath_node_id_test.c -@@ -0,0 +1,43 @@ -+/* -+ * Copyright 2021 the Pacemaker project contributors -+ * -+ * The version control history for this file may have further details. -+ * -+ * This source code is licensed under the GNU Lesser General Public License -+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. -+ */ -+ -+#include -+#include -+ -+static void -+empty_input(void) { -+ g_assert_null(pcmk__xpath_node_id(NULL, "lrm")); -+ g_assert_null(pcmk__xpath_node_id("", "lrm")); -+ g_assert_null(pcmk__xpath_node_id("/blah/blah", NULL)); -+ g_assert_null(pcmk__xpath_node_id("/blah/blah", "")); -+ g_assert_null(pcmk__xpath_node_id(NULL, NULL)); -+} -+ -+static void -+not_present(void) { -+ g_assert_null(pcmk__xpath_node_id("/some/xpath/string[@id='xyz']", "lrm")); -+ g_assert_null(pcmk__xpath_node_id("/some/xpath/containing[@id='lrm']", "lrm")); -+} -+ -+static void -+present(void) { -+ g_assert_cmpint(strcmp(pcmk__xpath_node_id("/some/xpath/containing/lrm[@id='xyz']", "lrm"), "xyz"), ==, 0); -+ g_assert_cmpint(strcmp(pcmk__xpath_node_id("/some/other/lrm[@id='xyz']/xpath", "lrm"), "xyz"), ==, 0); -+} -+ -+int -+main(int argc, char **argv) -+{ -+ g_test_init(&argc, &argv, NULL); -+ -+ g_test_add_func("/common/xpath/node_id/empty_input", empty_input); -+ g_test_add_func("/common/xpath/node_id/not_present", not_present); -+ g_test_add_func("/common/xpath/node_id/present", present); -+ return g_test_run(); -+} -diff --git a/lib/common/xpath.c b/lib/common/xpath.c -index 6fa4941..7851a7c 100644 ---- a/lib/common/xpath.c -+++ b/lib/common/xpath.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include "crmcommon_private.h" - - /* -@@ -297,3 +298,34 @@ xml_get_path(xmlNode *xml) - } - return NULL; - } -+ -+char * -+pcmk__xpath_node_id(const char *xpath, const char *node) -+{ -+ char *retval = NULL; -+ char *patt = NULL; -+ char *start = NULL; -+ char *end = NULL; -+ -+ if (node == NULL || xpath == NULL) { -+ return retval; -+ } -+ -+ patt = crm_strdup_printf("/%s[@id=", node); -+ start = strstr(xpath, patt); -+ -+ if (!start) { -+ free(patt); -+ return retval; -+ } -+ -+ start += strlen(patt); -+ start++; -+ -+ end = strstr(start, "\'"); -+ CRM_ASSERT(end); -+ retval = strndup(start, end-start); -+ -+ free(patt); -+ return retval; -+} -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 8b47bbc..ff1b86b 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1719,25 +1719,6 @@ mon_trigger_refresh(gpointer user_data) - return FALSE; - } - --#define NODE_PATT "/lrm[@id=" --static char * --get_node_from_xpath(const char *xpath) --{ -- char *nodeid = NULL; -- char *tmp = strstr(xpath, NODE_PATT); -- -- if(tmp) { -- tmp += strlen(NODE_PATT); -- tmp += 1; -- -- nodeid = strdup(tmp); -- tmp = strstr(nodeid, "\'"); -- CRM_ASSERT(tmp); -- tmp[0] = 0; -- } -- return nodeid; --} -- - static void - crm_diff_update_v2(const char *event, xmlNode * msg) - { -@@ -1822,19 +1803,19 @@ crm_diff_update_v2(const char *event, xmlNode * msg) - handle_rsc_op(match, node); - - } else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) { -- char *local_node = get_node_from_xpath(xpath); -+ char *local_node = pcmk__xpath_node_id(xpath, "lrm"); - - handle_rsc_op(match, local_node); - free(local_node); - - } else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) { -- char *local_node = get_node_from_xpath(xpath); -+ char *local_node = pcmk__xpath_node_id(xpath, "lrm"); - - handle_rsc_op(match, local_node); - free(local_node); - - } else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) { -- char *local_node = get_node_from_xpath(xpath); -+ char *local_node = pcmk__xpath_node_id(xpath, "lrm"); - - handle_rsc_op(match, local_node); - free(local_node); --- -1.8.3.1 - - -From b0126373d8b2a739ec5b985a7e1f530e850618d3 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 11 Jan 2021 10:20:11 -0500 -Subject: [PATCH 09/10] Refactor: libpacemaker: Move reduce_stonith_history - into the library. - -And also rename it to pcmk__reduce_fence_history. I don't see anywhere -else that could use this function at the moment, but it seems too -generic to keep in crm_mon. ---- - include/pcmki/pcmki_fence.h | 16 +++++++++++++- - lib/pacemaker/pcmk_fence.c | 45 ++++++++++++++++++++++++++++++++++++++- - tools/crm_mon.c | 52 +-------------------------------------------- - 3 files changed, 60 insertions(+), 53 deletions(-) - -diff --git a/include/pcmki/pcmki_fence.h b/include/pcmki/pcmki_fence.h -index 241d030..d4cef68 100644 ---- a/include/pcmki/pcmki_fence.h -+++ b/include/pcmki/pcmki_fence.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2019-2020 the Pacemaker project contributors -+ * Copyright 2019-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -219,4 +219,18 @@ int pcmk__fence_validate(pcmk__output_t *out, stonith_t *st, const char *agent, - const char *id, stonith_key_value_t *params, - unsigned int timeout); - -+/** -+ * \brief Reduce the STONITH history -+ * -+ * STONITH history is reduced as follows: -+ * - The last successful action of every action-type and target is kept -+ * - For failed actions, who failed is kept -+ * - All actions in progress are kept -+ * -+ * \param[in] history List of STONITH actions -+ * -+ * \return The reduced history -+ */ -+stonith_history_t * -+pcmk__reduce_fence_history(stonith_history_t *history); - #endif -diff --git a/lib/pacemaker/pcmk_fence.c b/lib/pacemaker/pcmk_fence.c -index d591379..34540cc 100644 ---- a/lib/pacemaker/pcmk_fence.c -+++ b/lib/pacemaker/pcmk_fence.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2009-2020 the Pacemaker project contributors -+ * Copyright 2009-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -520,3 +520,46 @@ pcmk_fence_validate(xmlNodePtr *xml, stonith_t *st, const char *agent, - return rc; - } - #endif -+ -+stonith_history_t * -+pcmk__reduce_fence_history(stonith_history_t *history) -+{ -+ stonith_history_t *new, *hp, *np; -+ -+ if (!history) { -+ return history; -+ } -+ -+ new = history; -+ hp = new->next; -+ new->next = NULL; -+ -+ while (hp) { -+ stonith_history_t *hp_next = hp->next; -+ -+ hp->next = NULL; -+ -+ for (np = new; ; np = np->next) { -+ if ((hp->state == st_done) || (hp->state == st_failed)) { -+ /* action not in progress */ -+ if (pcmk__str_eq(hp->target, np->target, pcmk__str_casei) && -+ pcmk__str_eq(hp->action, np->action, pcmk__str_casei) && -+ (hp->state == np->state) && -+ ((hp->state == st_done) || -+ pcmk__str_eq(hp->delegate, np->delegate, pcmk__str_casei))) { -+ /* purge older hp */ -+ stonith_history_free(hp); -+ break; -+ } -+ } -+ -+ if (!np->next) { -+ np->next = hp; -+ break; -+ } -+ } -+ hp = hp_next; -+ } -+ -+ return new; -+} -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index ff1b86b..2179f53 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1520,56 +1520,6 @@ print_simple_status(pcmk__output_t *out, pe_working_set_t * data_set, - /* coverity[leaked_storage] False positive */ - } - --/*! 
-- * \internal -- * \brief Reduce the stonith-history -- * for successful actions we keep the last of every action-type & target -- * for failed actions we record as well who had failed -- * for actions in progress we keep full track -- * -- * \param[in] history List of stonith actions -- * -- */ --static stonith_history_t * --reduce_stonith_history(stonith_history_t *history) --{ -- stonith_history_t *new = history, *hp, *np; -- -- if (new) { -- hp = new->next; -- new->next = NULL; -- -- while (hp) { -- stonith_history_t *hp_next = hp->next; -- -- hp->next = NULL; -- -- for (np = new; ; np = np->next) { -- if ((hp->state == st_done) || (hp->state == st_failed)) { -- /* action not in progress */ -- if (pcmk__str_eq(hp->target, np->target, pcmk__str_casei) && -- pcmk__str_eq(hp->action, np->action, pcmk__str_casei) && -- (hp->state == np->state) && -- ((hp->state == st_done) || -- pcmk__str_eq(hp->delegate, np->delegate, pcmk__str_casei))) { -- /* purge older hp */ -- stonith_history_free(hp); -- break; -- } -- } -- -- if (!np->next) { -- np->next = hp; -- break; -- } -- } -- hp = hp_next; -- } -- } -- -- return new; --} -- - static int - send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, - int status, const char *desc) -@@ -1935,7 +1885,7 @@ mon_refresh_display(gpointer user_data) - if (!pcmk_is_set(options.mon_ops, mon_op_fence_full_history) - && (output_format != mon_output_xml)) { - -- stonith_history = reduce_stonith_history(stonith_history); -+ stonith_history = pcmk__reduce_fence_history(stonith_history); - } - break; /* all other cases are errors */ - } --- -1.8.3.1 - - -From af3f1368bc76eb498c2c96b3eda9324b579c9380 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 12 Jan 2021 15:46:55 -0500 -Subject: [PATCH 10/10] Low: tools: Adjust fencing shown indicator in crm_mon. - -If any of the various fencing flags are set, but not all of them, no '*' -will be shown next to the fencing line in the interactive change screen. -This makes it seem like fencing should not be shown, and hitting 'm' -should toggle the fencing display on. However, that's not the case and -hitting 'm' will actually toggle fencing off. Hitting it again will -toggle it on and the '*' will appear. - -This is confusing, so just display the '*' if any fencing flag is set. 
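
[The hunk below replaces a pcmk_is_set() test of the combined mon_show_fencing_all group, which is true only when every fencing-display bit is set, with pcmk_any_flags_set(), which is true when at least one bit is set. The difference is just the usual all-bits versus any-bit mask test; the following standalone sketch uses plain bit operations and hypothetical flag names, not Pacemaker's API.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits (hypothetical names). */
enum {
    show_fence_failed  = (1 << 0),
    show_fence_pending = (1 << 1),
    show_fence_worked  = (1 << 2),
};

/* True only if every bit in 'wanted' is set in 'flags'. */
static bool all_set(uint32_t flags, uint32_t wanted) {
    return (flags & wanted) == wanted;
}

/* True if at least one bit in 'wanted' is set in 'flags'. */
static bool any_set(uint32_t flags, uint32_t wanted) {
    return (flags & wanted) != 0;
}

int main(void) {
    /* Only one of the three display flags is enabled. */
    uint32_t show  = show_fence_failed;
    uint32_t group = show_fence_failed | show_fence_pending | show_fence_worked;

    printf("all set: %d\n", all_set(show, group)); /* 0 -> '*' would be hidden */
    printf("any set: %d\n", any_set(show, group)); /* 1 -> '*' is shown */
    return 0;
}
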
---- - tools/crm_mon.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 2179f53..8ec97bb 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -984,7 +984,10 @@ detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_dat - print_option_help(out, 'R', pcmk_is_set(options.mon_ops, mon_op_print_clone_detail)); - print_option_help(out, 'b', pcmk_is_set(options.mon_ops, mon_op_print_brief)); - print_option_help(out, 'j', pcmk_is_set(options.mon_ops, mon_op_print_pending)); -- print_option_help(out, 'm', pcmk_is_set(show, mon_show_fencing_all)); -+ print_option_help(out, 'm', pcmk_any_flags_set(show, -+ mon_show_fence_failed -+ |mon_show_fence_pending -+ |mon_show_fence_worked)); - out->info(out, "%s", "\nToggle fields via field letter, type any other key to return"); - } - --- -1.8.3.1 - diff --git a/SOURCES/028-crm_mon.patch b/SOURCES/028-crm_mon.patch deleted file mode 100644 index 7d295f4..0000000 --- a/SOURCES/028-crm_mon.patch +++ /dev/null @@ -1,1305 +0,0 @@ -From bc91cc5d8b4257627d09103cf676cd83656bda8c Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 12 Jan 2021 10:45:53 -0500 -Subject: [PATCH 01/11] Refactor: tools: Split up connection teardown in - crm_mon. - -We don't necessarily want to tear down the fencing and CIB connections -at the same time always. This can then immediately be used in -mon_refresh_display and do_mon_cib_connection_destroy. ---- - tools/crm_mon.c | 57 +++++++++++++++++++++++++++++++-------------------------- - 1 file changed, 31 insertions(+), 26 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 8ec97bb..fc20e4c 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -122,7 +122,8 @@ struct { - .mon_ops = mon_op_default - }; - --static void clean_up_connections(void); -+static void clean_up_cib_connection(void); -+static void clean_up_fencing_connection(void); - static crm_exit_t clean_up(crm_exit_t exit_code); - static void crm_diff_update(const char *event, xmlNode * msg); - static int mon_refresh_display(gpointer user_data); -@@ -712,12 +713,7 @@ do_mon_cib_connection_destroy(gpointer user_data, bool is_error) - /* the client API won't properly reconnect notifications - * if they are still in the table - so remove them - */ -- st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT); -- st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE); -- st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY); -- if (st->state != stonith_disconnected) { -- st->cmds->disconnect(st); -- } -+ clean_up_fencing_connection(); - } - if (cib) { - cib->cmds->signoff(cib); -@@ -851,7 +847,8 @@ cib_connect(gboolean full) - - if (rc != pcmk_ok) { - out->err(out, "Notification setup failed, could not monitor CIB actions"); -- clean_up_connections(); -+ clean_up_cib_connection(); -+ clean_up_fencing_connection(); - } - } - return rc; -@@ -1866,9 +1863,7 @@ mon_refresh_display(gpointer user_data) - last_refresh = time(NULL); - - if (cli_config_update(&cib_copy, NULL, FALSE) == FALSE) { -- if (cib) { -- cib->cmds->signoff(cib); -- } -+ clean_up_cib_connection(); - out->err(out, "Upgrade failed: %s", pcmk_strerror(-pcmk_err_schema_validation)); - clean_up(CRM_EX_CONFIG); - return 0; -@@ -2040,24 +2035,33 @@ mon_st_callback_display(stonith_t * st, stonith_event_t * e) - } - - static void --clean_up_connections(void) -+clean_up_cib_connection(void) - { -- if (cib != NULL) { -- cib->cmds->signoff(cib); -- cib_delete(cib); -- cib = NULL; -+ if (cib == NULL) { 
-+ return; - } - -- if (st != NULL) { -- if (st->state != stonith_disconnected) { -- st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT); -- st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE); -- st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY); -- st->cmds->disconnect(st); -- } -- stonith_api_delete(st); -- st = NULL; -+ cib->cmds->signoff(cib); -+ cib_delete(cib); -+ cib = NULL; -+} -+ -+static void -+clean_up_fencing_connection(void) -+{ -+ if (st == NULL) { -+ return; - } -+ -+ if (st->state != stonith_disconnected) { -+ st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT); -+ st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE); -+ st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY); -+ st->cmds->disconnect(st); -+ } -+ -+ stonith_api_delete(st); -+ st = NULL; - } - - /* -@@ -2074,7 +2078,8 @@ clean_up(crm_exit_t exit_code) - /* Quitting crm_mon is much more complicated than it ought to be. */ - - /* (1) Close connections, free things, etc. */ -- clean_up_connections(); -+ clean_up_cib_connection(); -+ clean_up_fencing_connection(); - free(options.pid_file); - free(options.neg_location_prefix); - g_slist_free_full(options.includes_excludes, free); --- -1.8.3.1 - - -From 28d646ce67c6a933eaa76aca51f9973a65d0ee3c Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 7 Jan 2021 17:18:13 -0500 -Subject: [PATCH 02/11] Refactor: tools: Split up connection establishment in - crm_mon. - -We don't necessarily always want to connect to the CIB and fencing in -the same action. Note that bringing up the fencing connection needs to -happen first, because mon_refresh_display is called from cib_connect and -it will want a fencing connection. ---- - tools/crm_mon.c | 66 +++++++++++++++++++++++++++++++++------------------------ - 1 file changed, 38 insertions(+), 28 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index fc20e4c..301a222 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -128,6 +128,7 @@ static crm_exit_t clean_up(crm_exit_t exit_code); - static void crm_diff_update(const char *event, xmlNode * msg); - static int mon_refresh_display(gpointer user_data); - static int cib_connect(gboolean full); -+static int fencing_connect(void); - static void mon_st_callback_event(stonith_t * st, stonith_event_t * e); - static void mon_st_callback_display(stonith_t * st, stonith_event_t * e); - static void kick_refresh(gboolean data_updated); -@@ -668,8 +669,6 @@ static GOptionEntry deprecated_entries[] = { - static gboolean - mon_timer_popped(gpointer data) - { -- int rc = pcmk_ok; -- - #if CURSES_ENABLED - if (output_format == mon_output_console) { - clear(); -@@ -683,9 +682,7 @@ mon_timer_popped(gpointer data) - } - - print_as(output_format, "Reconnecting...\n"); -- rc = cib_connect(TRUE); -- -- if (rc != pcmk_ok) { -+ if (fencing_connect() == pcmk_ok && cib_connect(TRUE) == pcmk_ok) { - timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL); - } - return FALSE; -@@ -767,39 +764,48 @@ mon_winresize(int nsig) - #endif - - static int --cib_connect(gboolean full) -+fencing_connect(void) - { - int rc = pcmk_ok; -- static gboolean need_pass = TRUE; -- -- CRM_CHECK(cib != NULL, return -EINVAL); -- -- if (getenv("CIB_passwd") != NULL) { -- need_pass = FALSE; -- } - - if (pcmk_is_set(options.mon_ops, mon_op_fence_connect) && (st == NULL)) { - st = stonith_api_new(); - } - -- if (pcmk_is_set(options.mon_ops, mon_op_fence_connect) -- && (st != NULL) && (st->state == stonith_disconnected)) { -+ if 
(!pcmk_is_set(options.mon_ops, mon_op_fence_connect) || -+ st == NULL || st->state != stonith_disconnected) { -+ return rc; -+ } - -- rc = st->cmds->connect(st, crm_system_name, NULL); -- if (rc == pcmk_ok) { -- crm_trace("Setting up stonith callbacks"); -- if (pcmk_is_set(options.mon_ops, mon_op_watch_fencing)) { -- st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, -- mon_st_callback_event); -- st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback_event); -- } else { -- st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, -- mon_st_callback_display); -- st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display); -- } -+ rc = st->cmds->connect(st, crm_system_name, NULL); -+ if (rc == pcmk_ok) { -+ crm_trace("Setting up stonith callbacks"); -+ if (pcmk_is_set(options.mon_ops, mon_op_watch_fencing)) { -+ st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, -+ mon_st_callback_event); -+ st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback_event); -+ } else { -+ st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, -+ mon_st_callback_display); -+ st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display); - } - } - -+ return rc; -+} -+ -+static int -+cib_connect(gboolean full) -+{ -+ int rc = pcmk_ok; -+ static gboolean need_pass = TRUE; -+ -+ CRM_CHECK(cib != NULL, return -EINVAL); -+ -+ if (getenv("CIB_passwd") != NULL) { -+ need_pass = FALSE; -+ } -+ - if (cib->state == cib_connected_query || cib->state == cib_connected_command) { - return rc; - } -@@ -1373,7 +1379,11 @@ main(int argc, char **argv) - if (!pcmk_is_set(options.mon_ops, mon_op_one_shot)) { - print_as(output_format ,"Waiting until cluster is available on this node ...\n"); - } -- rc = cib_connect(!pcmk_is_set(options.mon_ops, mon_op_one_shot)); -+ -+ rc = fencing_connect(); -+ if (rc == pcmk_ok) { -+ rc = cib_connect(!pcmk_is_set(options.mon_ops, mon_op_one_shot)); -+ } - - if (pcmk_is_set(options.mon_ops, mon_op_one_shot)) { - break; --- -1.8.3.1 - - -From e12508ffba06b1c5652e7f49a449aae6d89ec420 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 12 Jan 2021 17:01:53 -0500 -Subject: [PATCH 03/11] Refactor: tools: Split one shot mode out into its own - function. - -Also, the connection error handling function can get split out on its -own as well to allow it to be reused in both the one shot and loop -cases. 
---- - tools/crm_mon.c | 69 +++++++++++++++++++++++++++++++++++---------------------- - 1 file changed, 43 insertions(+), 26 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 301a222..b33598b 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1162,6 +1162,41 @@ reconcile_output_format(pcmk__common_args_t *args) { - } - } - -+static void -+handle_connection_failures(int rc) -+{ -+ if (rc == pcmk_ok) { -+ return; -+ } -+ -+ if (output_format == mon_output_monitor) { -+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "CLUSTER CRIT: Connection to cluster failed: %s", -+ pcmk_strerror(rc)); -+ rc = MON_STATUS_CRIT; -+ } else if (rc == -ENOTCONN) { -+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: cluster is not available on this node"); -+ rc = crm_errno2exit(rc); -+ } else { -+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Connection to cluster failed: %s", pcmk_strerror(rc)); -+ rc = crm_errno2exit(rc); -+ } -+ -+ clean_up(rc); -+} -+ -+static void -+one_shot() -+{ -+ int rc = fencing_connect(); -+ -+ if (rc == pcmk_rc_ok) { -+ rc = cib_connect(FALSE); -+ handle_connection_failures(rc); -+ } -+ -+ clean_up(CRM_EX_OK); -+} -+ - int - main(int argc, char **argv) - { -@@ -1375,20 +1410,19 @@ main(int argc, char **argv) - - crm_info("Starting %s", crm_system_name); - -+ if (pcmk_is_set(options.mon_ops, mon_op_one_shot)) { -+ one_shot(); -+ } -+ - do { -- if (!pcmk_is_set(options.mon_ops, mon_op_one_shot)) { -- print_as(output_format ,"Waiting until cluster is available on this node ...\n"); -- } -+ print_as(output_format ,"Waiting until cluster is available on this node ...\n"); - - rc = fencing_connect(); - if (rc == pcmk_ok) { -- rc = cib_connect(!pcmk_is_set(options.mon_ops, mon_op_one_shot)); -+ rc = cib_connect(TRUE); - } - -- if (pcmk_is_set(options.mon_ops, mon_op_one_shot)) { -- break; -- -- } else if (rc != pcmk_ok) { -+ if (rc != pcmk_ok) { - sleep(options.reconnect_msec / 1000); - #if CURSES_ENABLED - if (output_format == mon_output_console) { -@@ -1402,24 +1436,7 @@ main(int argc, char **argv) - - } while (rc == -ENOTCONN); - -- if (rc != pcmk_ok) { -- if (output_format == mon_output_monitor) { -- g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "CLUSTER CRIT: Connection to cluster failed: %s", -- pcmk_strerror(rc)); -- return clean_up(MON_STATUS_CRIT); -- } else { -- if (rc == -ENOTCONN) { -- g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: cluster is not available on this node"); -- } else { -- g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Connection to cluster failed: %s", pcmk_strerror(rc)); -- } -- } -- return clean_up(crm_errno2exit(rc)); -- } -- -- if (pcmk_is_set(options.mon_ops, mon_op_one_shot)) { -- return clean_up(CRM_EX_OK); -- } -+ handle_connection_failures(rc); - - mainloop = g_main_loop_new(NULL, FALSE); - --- -1.8.3.1 - - -From 0eb307a19d57d4a59a4b51a64a3b62dcd0b7cc9a Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 13 Jan 2021 12:47:41 -0500 -Subject: [PATCH 04/11] Refactor: tools: Don't call mon_refresh_display from - cib_connect. 
- ---- - tools/crm_mon.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index b33598b..b0daf76 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -683,6 +683,7 @@ mon_timer_popped(gpointer data) - - print_as(output_format, "Reconnecting...\n"); - if (fencing_connect() == pcmk_ok && cib_connect(TRUE) == pcmk_ok) { -+ mon_refresh_display(NULL); - timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL); - } - return FALSE; -@@ -831,9 +832,6 @@ cib_connect(gboolean full) - } - - rc = cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); -- if (rc == pcmk_ok) { -- mon_refresh_display(NULL); -- } - - if (rc == pcmk_ok && full) { - rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy_regular); -@@ -1192,6 +1190,7 @@ one_shot() - if (rc == pcmk_rc_ok) { - rc = cib_connect(FALSE); - handle_connection_failures(rc); -+ mon_refresh_display(NULL); - } - - clean_up(CRM_EX_OK); -@@ -1437,6 +1436,7 @@ main(int argc, char **argv) - } while (rc == -ENOTCONN); - - handle_connection_failures(rc); -+ mon_refresh_display(NULL); - - mainloop = g_main_loop_new(NULL, FALSE); - --- -1.8.3.1 - - -From 46696d3135e699c58918e41c93c357d951146d5c Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Wed, 13 Jan 2021 13:52:49 -0500 -Subject: [PATCH 05/11] Fix: tools: Report if getting fencing history failed in - crm_mon. - -This just takes history_rc into account in the text and html formatters. -It was already used by the XML formatter. If we can't get fencing -history, add a message to the output indicating that happened. ---- - tools/crm_mon.c | 13 +++++---- - tools/crm_mon.h | 12 ++++----- - tools/crm_mon_print.c | 74 ++++++++++++++++++++++++++++++++++++++------------- - 3 files changed, 70 insertions(+), 29 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index b0daf76..1a68555 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1943,7 +1943,8 @@ mon_refresh_display(gpointer user_data) - switch (output_format) { - case mon_output_html: - case mon_output_cgi: -- if (print_html_status(out, mon_data_set, stonith_history, options.mon_ops, -+ if (print_html_status(out, mon_data_set, crm_errno2exit(history_rc), -+ stonith_history, options.mon_ops, - show, options.neg_location_prefix, - options.only_node, options.only_rsc) != 0) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_CANTCREAT, "Critical: Unable to output html file"); -@@ -1974,15 +1975,17 @@ mon_refresh_display(gpointer user_data) - */ - #if CURSES_ENABLED - blank_screen(); -- print_status(out, mon_data_set, stonith_history, options.mon_ops, show, -- options.neg_location_prefix, options.only_node, options.only_rsc); -+ print_status(out, mon_data_set, crm_errno2exit(history_rc), stonith_history, -+ options.mon_ops, show, options.neg_location_prefix, -+ options.only_node, options.only_rsc); - refresh(); - break; - #endif - - case mon_output_plain: -- print_status(out, mon_data_set, stonith_history, options.mon_ops, show, -- options.neg_location_prefix, options.only_node, options.only_rsc); -+ print_status(out, mon_data_set, crm_errno2exit(history_rc), stonith_history, -+ options.mon_ops, show, options.neg_location_prefix, -+ options.only_node, options.only_rsc); - break; - - case mon_output_unset: -diff --git a/tools/crm_mon.h b/tools/crm_mon.h -index f746507..73c926d 100644 ---- a/tools/crm_mon.h -+++ b/tools/crm_mon.h -@@ -95,17 +95,17 @@ typedef enum mon_output_format_e { - #define mon_op_default (mon_op_print_pending | 
mon_op_fence_history | mon_op_fence_connect) - - void print_status(pcmk__output_t *out, pe_working_set_t *data_set, -- stonith_history_t *stonith_history, unsigned int mon_ops, -- unsigned int show, char *prefix, char *only_node, -- char *only_rsc); -+ crm_exit_t history_rc, stonith_history_t *stonith_history, -+ unsigned int mon_ops, unsigned int show, char *prefix, -+ char *only_node, char *only_rsc); - void print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set, - crm_exit_t history_rc, stonith_history_t *stonith_history, - unsigned int mon_ops, unsigned int show, char *prefix, - char *only_node, char *only_rsc); - int print_html_status(pcmk__output_t *out, pe_working_set_t *data_set, -- stonith_history_t *stonith_history, unsigned int mon_ops, -- unsigned int show, char *prefix, char *only_node, -- char *only_rsc); -+ crm_exit_t history_rc, stonith_history_t *stonith_history, -+ unsigned int mon_ops, unsigned int show, char *prefix, -+ char *only_node, char *only_rsc); - - GList *append_attr_list(GList *attr_list, char *name); - void blank_screen(void); -diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c -index 8ae11bf..73406bd 100644 ---- a/tools/crm_mon_print.c -+++ b/tools/crm_mon_print.c -@@ -656,6 +656,7 @@ print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set, - * - * \param[in] out The output functions structure. - * \param[in] data_set Cluster state to display. -+ * \param[in] history_rc Result of getting stonith history - * \param[in] stonith_history List of stonith actions. - * \param[in] mon_ops Bitmask of mon_op_*. - * \param[in] show Bitmask of mon_show_*. -@@ -663,14 +664,16 @@ print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set, - */ - void - print_status(pcmk__output_t *out, pe_working_set_t *data_set, -- stonith_history_t *stonith_history, unsigned int mon_ops, -- unsigned int show, char *prefix, char *only_node, char *only_rsc) -+ crm_exit_t history_rc, stonith_history_t *stonith_history, -+ unsigned int mon_ops, unsigned int show, char *prefix, -+ char *only_node, char *only_rsc) - { - GListPtr unames = NULL; - GListPtr resources = NULL; - - unsigned int print_opts = get_resource_display_options(mon_ops); - int rc = pcmk_rc_no_output; -+ bool already_printed_failure = false; - - CHECK_RC(rc, out->message(out, "cluster-summary", data_set, - pcmk_is_set(mon_ops, mon_op_print_clone_detail), -@@ -731,13 +734,23 @@ print_status(pcmk__output_t *out, pe_working_set_t *data_set, - if (pcmk_is_set(show, mon_show_fence_failed) - && pcmk_is_set(mon_ops, mon_op_fence_history)) { - -- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, -- GINT_TO_POINTER(st_failed)); -+ if (history_rc == 0) { -+ stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, -+ GINT_TO_POINTER(st_failed)); -+ -+ if (hp) { -+ CHECK_RC(rc, out->message(out, "failed-fencing-list", stonith_history, unames, -+ pcmk_is_set(mon_ops, mon_op_fence_full_history), -+ rc == pcmk_rc_ok)); -+ } -+ } else { -+ PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); -+ out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); -+ out->list_item(out, NULL, "Failed to get fencing history: %s", -+ crm_exit_str(history_rc)); -+ out->end_list(out); - -- if (hp) { -- CHECK_RC(rc, out->message(out, "failed-fencing-list", stonith_history, unames, -- pcmk_is_set(mon_ops, mon_op_fence_full_history), -- rc == pcmk_rc_ok)); -+ already_printed_failure = true; - } - } - -@@ -754,7 +767,15 @@ 
print_status(pcmk__output_t *out, pe_working_set_t *data_set, - - /* Print stonith history */ - if (pcmk_is_set(mon_ops, mon_op_fence_history)) { -- if (pcmk_is_set(show, mon_show_fence_worked)) { -+ if (history_rc != 0) { -+ if (!already_printed_failure) { -+ PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); -+ out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); -+ out->list_item(out, NULL, "Failed to get fencing history: %s", -+ crm_exit_str(history_rc)); -+ out->end_list(out); -+ } -+ } else if (pcmk_is_set(show, mon_show_fence_worked)) { - stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, - GINT_TO_POINTER(st_failed)); - -@@ -783,6 +804,7 @@ print_status(pcmk__output_t *out, pe_working_set_t *data_set, - * - * \param[in] out The output functions structure. - * \param[in] data_set Cluster state to display. -+ * \param[in] history_rc Result of getting stonith history - * \param[in] stonith_history List of stonith actions. - * \param[in] mon_ops Bitmask of mon_op_*. - * \param[in] show Bitmask of mon_show_*. -@@ -878,6 +900,7 @@ print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set, - * - * \param[in] out The output functions structure. - * \param[in] data_set Cluster state to display. -+ * \param[in] history_rc Result of getting stonith history - * \param[in] stonith_history List of stonith actions. - * \param[in] mon_ops Bitmask of mon_op_*. - * \param[in] show Bitmask of mon_show_*. -@@ -885,14 +908,15 @@ print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set, - */ - int - print_html_status(pcmk__output_t *out, pe_working_set_t *data_set, -- stonith_history_t *stonith_history, unsigned int mon_ops, -- unsigned int show, char *prefix, char *only_node, -- char *only_rsc) -+ crm_exit_t history_rc, stonith_history_t *stonith_history, -+ unsigned int mon_ops, unsigned int show, char *prefix, -+ char *only_node, char *only_rsc) - { - GListPtr unames = NULL; - GListPtr resources = NULL; - - unsigned int print_opts = get_resource_display_options(mon_ops); -+ bool already_printed_failure = false; - - out->message(out, "cluster-summary", data_set, - pcmk_is_set(mon_ops, mon_op_print_clone_detail), -@@ -950,18 +974,32 @@ print_html_status(pcmk__output_t *out, pe_working_set_t *data_set, - if (pcmk_is_set(show, mon_show_fence_failed) - && pcmk_is_set(mon_ops, mon_op_fence_history)) { - -- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, -- GINT_TO_POINTER(st_failed)); -+ if (history_rc == 0) { -+ stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, -+ GINT_TO_POINTER(st_failed)); - -- if (hp) { -- out->message(out, "failed-fencing-list", stonith_history, unames, -- pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE); -+ if (hp) { -+ out->message(out, "failed-fencing-list", stonith_history, unames, -+ pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE); -+ } -+ } else { -+ out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); -+ out->list_item(out, NULL, "Failed to get fencing history: %s", -+ crm_exit_str(history_rc)); -+ out->end_list(out); - } - } - - /* Print stonith history */ - if (pcmk_is_set(mon_ops, mon_op_fence_history)) { -- if (pcmk_is_set(show, mon_show_fence_worked)) { -+ if (history_rc != 0) { -+ if (!already_printed_failure) { -+ out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); -+ out->list_item(out, NULL, "Failed to get fencing history: %s", -+ crm_exit_str(history_rc)); -+ out->end_list(out); -+ 
} -+ } else if (pcmk_is_set(show, mon_show_fence_worked)) { - stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, - GINT_TO_POINTER(st_failed)); - --- -1.8.3.1 - - -From 2e391be6fdbbbccd6aef49b3f109e5c342eb5dcc Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 11 Jan 2021 12:54:40 -0500 -Subject: [PATCH 06/11] Fix: tools: A lack of stonith history is not fatal in - crm_mon. - -Instead, print out all the rest of the typical output. This should also -include an error message in the fencing section, if that section was -requested. - -See: rhbz#1880426 ---- - tools/crm_mon.c | 40 ++++++++++++++++------------------------ - 1 file changed, 16 insertions(+), 24 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 1a68555..17b8ee9 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -682,7 +682,8 @@ mon_timer_popped(gpointer data) - } - - print_as(output_format, "Reconnecting...\n"); -- if (fencing_connect() == pcmk_ok && cib_connect(TRUE) == pcmk_ok) { -+ fencing_connect(); -+ if (cib_connect(TRUE) == pcmk_ok) { - mon_refresh_display(NULL); - timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL); - } -@@ -726,12 +727,6 @@ mon_cib_connection_destroy_regular(gpointer user_data) - do_mon_cib_connection_destroy(user_data, false); - } - --static void --mon_cib_connection_destroy_error(gpointer user_data) --{ -- do_mon_cib_connection_destroy(user_data, true); --} -- - /* - * Mainloop signal handler. - */ -@@ -790,6 +785,8 @@ fencing_connect(void) - mon_st_callback_display); - st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display); - } -+ } else { -+ st = NULL; - } - - return rc; -@@ -1185,12 +1182,15 @@ handle_connection_failures(int rc) - static void - one_shot() - { -- int rc = fencing_connect(); -+ int rc; -+ -+ fencing_connect(); - -+ rc = cib_connect(FALSE); - if (rc == pcmk_rc_ok) { -- rc = cib_connect(FALSE); -- handle_connection_failures(rc); - mon_refresh_display(NULL); -+ } else { -+ handle_connection_failures(rc); - } - - clean_up(CRM_EX_OK); -@@ -1416,10 +1416,8 @@ main(int argc, char **argv) - do { - print_as(output_format ,"Waiting until cluster is available on this node ...\n"); - -- rc = fencing_connect(); -- if (rc == pcmk_ok) { -- rc = cib_connect(TRUE); -- } -+ fencing_connect(); -+ rc = cib_connect(TRUE); - - if (rc != pcmk_ok) { - sleep(options.reconnect_msec / 1000); -@@ -1896,16 +1894,12 @@ mon_refresh_display(gpointer user_data) - return 0; - } - -- /* get the stonith-history if there is evidence we need it -- */ -+ /* get the stonith-history if there is evidence we need it */ - while (pcmk_is_set(options.mon_ops, mon_op_fence_history)) { - if (st != NULL) { - history_rc = st->cmds->history(st, st_opt_sync_call, NULL, &stonith_history, 120); - -- if (history_rc != 0) { -- out->err(out, "Critical: Unable to get stonith-history"); -- mon_cib_connection_destroy_error(NULL); -- } else { -+ if (history_rc == 0) { - stonith_history = stonith__sort_history(stonith_history); - if (!pcmk_is_set(options.mon_ops, mon_op_fence_full_history) - && (output_format != mon_output_xml)) { -@@ -1915,11 +1909,9 @@ mon_refresh_display(gpointer user_data) - break; /* all other cases are errors */ - } - } else { -- out->err(out, "Critical: No stonith-API"); -+ history_rc = ENOTCONN; -+ break; - } -- free_xml(cib_copy); -- out->err(out, "Reading stonith-history failed"); -- return 0; - } - - if (mon_data_set == NULL) { --- -1.8.3.1 - - -From 8abcb2bf0c5c90004a687e27aa86fd6ad1b62eb3 
Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 14 Jan 2021 14:31:25 -0500 -Subject: [PATCH 07/11] Refactor: Split the fencing history code into its own - function. - ---- - tools/crm_mon.c | 46 ++++++++++++++++++++++++++++------------------ - 1 file changed, 28 insertions(+), 18 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 17b8ee9..1baba5f 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1879,6 +1879,33 @@ crm_diff_update(const char *event, xmlNode * msg) - } - - static int -+get_fencing_history(stonith_history_t **stonith_history) -+{ -+ int rc = 0; -+ -+ while (pcmk_is_set(options.mon_ops, mon_op_fence_history)) { -+ if (st != NULL) { -+ rc = st->cmds->history(st, st_opt_sync_call, NULL, stonith_history, 120); -+ -+ if (rc == 0) { -+ *stonith_history = stonith__sort_history(*stonith_history); -+ if (!pcmk_is_set(options.mon_ops, mon_op_fence_full_history) -+ && (output_format != mon_output_xml)) { -+ -+ *stonith_history = pcmk__reduce_fence_history(*stonith_history); -+ } -+ break; /* all other cases are errors */ -+ } -+ } else { -+ rc = ENOTCONN; -+ break; -+ } -+ } -+ -+ return rc; -+} -+ -+static int - mon_refresh_display(gpointer user_data) - { - xmlNode *cib_copy = copy_xml(current_cib); -@@ -1895,24 +1922,7 @@ mon_refresh_display(gpointer user_data) - } - - /* get the stonith-history if there is evidence we need it */ -- while (pcmk_is_set(options.mon_ops, mon_op_fence_history)) { -- if (st != NULL) { -- history_rc = st->cmds->history(st, st_opt_sync_call, NULL, &stonith_history, 120); -- -- if (history_rc == 0) { -- stonith_history = stonith__sort_history(stonith_history); -- if (!pcmk_is_set(options.mon_ops, mon_op_fence_full_history) -- && (output_format != mon_output_xml)) { -- -- stonith_history = pcmk__reduce_fence_history(stonith_history); -- } -- break; /* all other cases are errors */ -- } -- } else { -- history_rc = ENOTCONN; -- break; -- } -- } -+ history_rc = get_fencing_history(&stonith_history); - - if (mon_data_set == NULL) { - mon_data_set = pe_new_working_set(); --- -1.8.3.1 - - -From fa75e884e3c3822e1010ad1d67958e4f1cc5400b Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 14 Jan 2021 14:49:09 -0500 -Subject: [PATCH 08/11] Refactor: tools: Get rid of - mon_cib_connection_destroy_regular. - -With the _error version removed in a previous commit, there's no need -for this wrapper to exist anymore. We can just call -mon_cib_connection_destroy directly. ---- - tools/crm_mon.c | 22 ++++++---------------- - 1 file changed, 6 insertions(+), 16 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 1baba5f..a0764a5 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -691,13 +691,9 @@ mon_timer_popped(gpointer data) - } - - static void --do_mon_cib_connection_destroy(gpointer user_data, bool is_error) -+mon_cib_connection_destroy(gpointer user_data) - { -- if (is_error) { -- out->err(out, "Connection to the cluster-daemons terminated"); -- } else { -- out->info(out, "Connection to the cluster-daemons terminated"); -- } -+ out->info(out, "Connection to the cluster-daemons terminated"); - - if (refresh_timer != NULL) { - /* we'll trigger a refresh after reconnect */ -@@ -721,12 +717,6 @@ do_mon_cib_connection_destroy(gpointer user_data, bool is_error) - return; - } - --static void --mon_cib_connection_destroy_regular(gpointer user_data) --{ -- do_mon_cib_connection_destroy(user_data, false); --} -- - /* - * Mainloop signal handler. 
- */ -@@ -831,7 +821,7 @@ cib_connect(gboolean full) - rc = cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); - - if (rc == pcmk_ok && full) { -- rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy_regular); -+ rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy); - if (rc == -EPROTONOSUPPORT) { - print_as - (output_format, "Notification setup not supported, won't be able to reconnect after failure"); -@@ -890,7 +880,7 @@ detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_dat - options.mon_ops |= mon_op_fence_history; - options.mon_ops |= mon_op_fence_connect; - if (st == NULL) { -- mon_cib_connection_destroy_regular(NULL); -+ mon_cib_connection_destroy(NULL); - } - } - -@@ -2010,7 +2000,7 @@ mon_st_callback_event(stonith_t * st, stonith_event_t * e) - { - if (st->state == stonith_disconnected) { - /* disconnect cib as well and have everything reconnect */ -- mon_cib_connection_destroy_regular(NULL); -+ mon_cib_connection_destroy(NULL); - } else if (options.external_agent) { - char *desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)", - e->operation, e->origin, e->target, pcmk_strerror(e->result), -@@ -2059,7 +2049,7 @@ mon_st_callback_display(stonith_t * st, stonith_event_t * e) - { - if (st->state == stonith_disconnected) { - /* disconnect cib as well and have everything reconnect */ -- mon_cib_connection_destroy_regular(NULL); -+ mon_cib_connection_destroy(NULL); - } else { - print_dot(output_format); - kick_refresh(TRUE); --- -1.8.3.1 - - -From 009f3aa0caa6d138d4da418297f12c4a1210cf6b Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 14 Jan 2021 16:25:37 -0500 -Subject: [PATCH 09/11] Refactor: Add comments to connection functions in - crm_mon.c. - -There are an awful lot of these functions, and trying to make sense of -them can be confusing when there's no comments explaining when they -happen. Hopefully this helps a little. ---- - tools/crm_mon.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++-------- - 1 file changed, 48 insertions(+), 8 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index a0764a5..54a7958 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -666,6 +666,10 @@ static GOptionEntry deprecated_entries[] = { - }; - /* *INDENT-ON* */ - -+/* Reconnect to the CIB and fencing agent after reconnect_msec has passed. This sounds -+ * like it would be more broadly useful, but only ever happens after a disconnect via -+ * mon_cib_connection_destroy. -+ */ - static gboolean - mon_timer_popped(gpointer data) - { -@@ -684,12 +688,17 @@ mon_timer_popped(gpointer data) - print_as(output_format, "Reconnecting...\n"); - fencing_connect(); - if (cib_connect(TRUE) == pcmk_ok) { -+ /* Redraw the screen and reinstall ourselves to get called after another reconnect_msec. */ - mon_refresh_display(NULL); - timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL); - } - return FALSE; - } - -+/* Called from various places when we are disconnected from the CIB or from the -+ * fencing agent. If the CIB connection is still valid, this function will also -+ * attempt to sign off and reconnect. -+ */ - static void - mon_cib_connection_destroy(gpointer user_data) - { -@@ -717,9 +726,7 @@ mon_cib_connection_destroy(gpointer user_data) - return; - } - --/* -- * Mainloop signal handler. 
-- */ -+/* Signal handler installed into the mainloop for normal program shutdown */ - static void - mon_shutdown(int nsig) - { -@@ -729,6 +736,10 @@ mon_shutdown(int nsig) - #if CURSES_ENABLED - static sighandler_t ncurses_winch_handler; - -+/* Signal handler installed the regular way (not into the main loop) for when -+ * the screen is resized. Commonly, this happens when running in an xterm and -+ * the user changes its size. -+ */ - static void - mon_winresize(int nsig) - { -@@ -743,6 +754,9 @@ mon_winresize(int nsig) - (*ncurses_winch_handler) (SIGWINCH); - getmaxyx(stdscr, lines, cols); - resizeterm(lines, cols); -+ /* Alert the mainloop code we'd like the refresh_trigger to run next -+ * time the mainloop gets around to checking. -+ */ - mainloop_set_trigger(refresh_trigger); - } - not_done--; -@@ -863,6 +877,12 @@ get_option_desc(char c) - #define print_option_help(output_format, option, condition) \ - out->info(out, "%c %c: \t%s", ((condition)? '*': ' '), option, get_option_desc(option)); - -+/* This function is called from the main loop when there is something to be read -+ * on stdin, like an interactive user's keystroke. All it does is read the keystroke, -+ * set flags (or show the page showing which keystrokes are valid), and redraw the -+ * screen. It does not do anything with connections to the CIB or fencing agent -+ * agent what would happen in mon_refresh_display. -+ */ - static gboolean - detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_data) - { -@@ -951,6 +971,7 @@ detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_dat - config_mode = TRUE; - break; - default: -+ /* All other keys just redraw the screen. */ - goto refresh; - } - -@@ -1441,6 +1462,10 @@ main(int argc, char **argv) - g_io_add_watch(io_channel, G_IO_IN, detect_user_input, NULL); - } - #endif -+ -+ /* When refresh_trigger->trigger is set to TRUE, call mon_refresh_display. In -+ * this file, that is anywhere mainloop_set_trigger is called. -+ */ - refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL); - - g_main_loop_run(mainloop); -@@ -1677,6 +1702,10 @@ handle_rsc_op(xmlNode * xml, const char *node_id) - free(task); - } - -+/* This function is just a wrapper around mainloop_set_trigger so that it can be -+ * called from a mainloop directly. It's simply another way of ensuring the screen -+ * gets redrawn. -+ */ - static gboolean - mon_trigger_refresh(gpointer user_data) - { -@@ -1995,6 +2024,9 @@ mon_refresh_display(gpointer user_data) - return 1; - } - -+/* This function is called for fencing events (see fencing_connect for which ones) when -+ * --watch-fencing is used on the command line. -+ */ - static void - mon_st_callback_event(stonith_t * st, stonith_event_t * e) - { -@@ -2010,6 +2042,16 @@ mon_st_callback_event(stonith_t * st, stonith_event_t * e) - } - } - -+/* Cause the screen to be redrawn (via mainloop_set_trigger) when various conditions are met: -+ * -+ * - If the last update occurred more than reconnect_msec ago (defaults to 5s, but can be -+ * changed via the -i command line option), or -+ * - After every 10 CIB updates, or -+ * - If it's been 2s since the last update -+ * -+ * This function sounds like it would be more broadly useful, but it is only called when a -+ * fencing event is received or a CIB diff occurrs. 
-+ */ - static void - kick_refresh(gboolean data_updated) - { -@@ -2024,11 +2066,6 @@ kick_refresh(gboolean data_updated) - refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL); - } - -- /* Refresh -- * - immediately if the last update was more than 5s ago -- * - every 10 cib-updates -- * - at most 2s after the last update -- */ - if ((now - last_refresh) > (options.reconnect_msec / 1000)) { - mainloop_set_trigger(refresh_trigger); - mainloop_timer_stop(refresh_timer); -@@ -2044,6 +2081,9 @@ kick_refresh(gboolean data_updated) - } - } - -+/* This function is called for fencing events (see fencing_connect for which ones) when -+ * --watch-fencing is NOT used on the command line. -+ */ - static void - mon_st_callback_display(stonith_t * st, stonith_event_t * e) - { --- -1.8.3.1 - - -From aa328f0788ef0057874aeeeae7261dfb450b9b9e Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 14 Jan 2021 16:44:45 -0500 -Subject: [PATCH 10/11] Refactor: tools: Rename some connection-related symbols - in crm_mon. - ---- - tools/crm_mon.c | 28 ++++++++++++++-------------- - 1 file changed, 14 insertions(+), 14 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 54a7958..89d7ae2 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -66,7 +66,7 @@ static mon_output_format_t output_format = mon_output_unset; - /* other globals */ - static GIOChannel *io_channel = NULL; - static GMainLoop *mainloop = NULL; --static guint timer_id = 0; -+static guint reconnect_timer = 0; - static mainloop_timer_t *refresh_timer = NULL; - static pe_working_set_t *mon_data_set = NULL; - -@@ -131,7 +131,7 @@ static int cib_connect(gboolean full); - static int fencing_connect(void); - static void mon_st_callback_event(stonith_t * st, stonith_event_t * e); - static void mon_st_callback_display(stonith_t * st, stonith_event_t * e); --static void kick_refresh(gboolean data_updated); -+static void refresh_after_event(gboolean data_updated); - - static unsigned int - all_includes(mon_output_format_t fmt) { -@@ -671,7 +671,7 @@ static GOptionEntry deprecated_entries[] = { - * mon_cib_connection_destroy. - */ - static gboolean --mon_timer_popped(gpointer data) -+reconnect_after_timeout(gpointer data) - { - #if CURSES_ENABLED - if (output_format == mon_output_console) { -@@ -680,9 +680,9 @@ mon_timer_popped(gpointer data) - } - #endif - -- if (timer_id > 0) { -- g_source_remove(timer_id); -- timer_id = 0; -+ if (reconnect_timer > 0) { -+ g_source_remove(reconnect_timer); -+ reconnect_timer = 0; - } - - print_as(output_format, "Reconnecting...\n"); -@@ -690,7 +690,7 @@ mon_timer_popped(gpointer data) - if (cib_connect(TRUE) == pcmk_ok) { - /* Redraw the screen and reinstall ourselves to get called after another reconnect_msec. 
*/ - mon_refresh_display(NULL); -- timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL); -+ reconnect_timer = g_timeout_add(options.reconnect_msec, reconnect_after_timeout, NULL); - } - return FALSE; - } -@@ -708,10 +708,10 @@ mon_cib_connection_destroy(gpointer user_data) - /* we'll trigger a refresh after reconnect */ - mainloop_timer_stop(refresh_timer); - } -- if (timer_id) { -+ if (reconnect_timer) { - /* we'll trigger a new reconnect-timeout at the end */ -- g_source_remove(timer_id); -- timer_id = 0; -+ g_source_remove(reconnect_timer); -+ reconnect_timer = 0; - } - if (st) { - /* the client API won't properly reconnect notifications -@@ -721,7 +721,7 @@ mon_cib_connection_destroy(gpointer user_data) - } - if (cib) { - cib->cmds->signoff(cib); -- timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL); -+ reconnect_timer = g_timeout_add(options.reconnect_msec, reconnect_after_timeout, NULL); - } - return; - } -@@ -1894,7 +1894,7 @@ crm_diff_update(const char *event, xmlNode * msg) - } - - stale = FALSE; -- kick_refresh(cib_updated); -+ refresh_after_event(cib_updated); - } - - static int -@@ -2053,7 +2053,7 @@ mon_st_callback_event(stonith_t * st, stonith_event_t * e) - * fencing event is received or a CIB diff occurrs. - */ - static void --kick_refresh(gboolean data_updated) -+refresh_after_event(gboolean data_updated) - { - static int updates = 0; - time_t now = time(NULL); -@@ -2092,7 +2092,7 @@ mon_st_callback_display(stonith_t * st, stonith_event_t * e) - mon_cib_connection_destroy(NULL); - } else { - print_dot(output_format); -- kick_refresh(TRUE); -+ refresh_after_event(TRUE); - } - } - --- -1.8.3.1 - - -From 8c51b4980f349e8773681f7ed2882ca639e0e63a Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Mon, 18 Jan 2021 14:03:39 -0500 -Subject: [PATCH 11/11] Fix: tools: Attempt to reestablish connections in - crm_mon. - -If the fencing or CIB connections go away between screen refreshes, -attempt to re-establish those connections. The functions that do this -should be safe to be called repeatedly. - -See: rhbz#1880426, rhbz#1466875 ---- - tools/crm_mon.c | 17 ++++++++++++++--- - 1 file changed, 14 insertions(+), 3 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 89d7ae2..083b7ae 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -126,6 +126,7 @@ static void clean_up_cib_connection(void); - static void clean_up_fencing_connection(void); - static crm_exit_t clean_up(crm_exit_t exit_code); - static void crm_diff_update(const char *event, xmlNode * msg); -+static void handle_connection_failures(int rc); - static int mon_refresh_display(gpointer user_data); - static int cib_connect(gboolean full); - static int fencing_connect(void); -@@ -690,9 +691,11 @@ reconnect_after_timeout(gpointer data) - if (cib_connect(TRUE) == pcmk_ok) { - /* Redraw the screen and reinstall ourselves to get called after another reconnect_msec. 
*/ - mon_refresh_display(NULL); -- reconnect_timer = g_timeout_add(options.reconnect_msec, reconnect_after_timeout, NULL); -+ return FALSE; - } -- return FALSE; -+ -+ reconnect_timer = g_timeout_add(options.reconnect_msec, reconnect_after_timeout, NULL); -+ return TRUE; - } - - /* Called from various places when we are disconnected from the CIB or from the -@@ -887,6 +890,7 @@ static gboolean - detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_data) - { - int c; -+ int rc; - gboolean config_mode = FALSE; - - while (1) { -@@ -1001,7 +1005,14 @@ detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_dat - } - - refresh: -- mon_refresh_display(NULL); -+ fencing_connect(); -+ rc = cib_connect(FALSE); -+ if (rc == pcmk_rc_ok) { -+ mon_refresh_display(NULL); -+ } else { -+ handle_connection_failures(rc); -+ } -+ - return TRUE; - } - #endif --- -1.8.3.1 - diff --git a/SOURCES/029-crm_mon.patch b/SOURCES/029-crm_mon.patch deleted file mode 100644 index 135898a..0000000 --- a/SOURCES/029-crm_mon.patch +++ /dev/null @@ -1,202 +0,0 @@ -From bc60f9c84bd6f0fa4d73db8d140030dfcdbf4f5e Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 19 Jan 2021 15:58:36 -0500 -Subject: [PATCH 1/2] Fix: tools: Describe interactive crm_mon use in help and - man page. - ---- - tools/crm_mon.8.inc | 3 +++ - tools/crm_mon.c | 4 ++++ - tools/fix-manpages | 2 +- - 3 files changed, 8 insertions(+), 1 deletion(-) - -diff --git a/tools/crm_mon.8.inc b/tools/crm_mon.8.inc -index e4cd7e3..6b46d7b 100644 ---- a/tools/crm_mon.8.inc -+++ b/tools/crm_mon.8.inc -@@ -12,3 +12,6 @@ crm_mon mode [options] - - /command line arguments./ - .SH TIME SPECIFICATION -+ -+/or --exclude=list./ -+.SH INTERACTIVE USE -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 083b7ae..aafc80f 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1062,6 +1062,10 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { - "times on the command line, and each can give a comma-separated list of sections.\n" - "The options are applied to the default set, from left to right as seen on the\n" - "command line. For a list of valid sections, pass --include=list or --exclude=list.\n\n" -+ "Interactive Use:\n\n" -+ "When run interactively, crm_mon can be told to hide and display various sections\n" -+ "of output. To see a help screen explaining the options, hit '?'. Any key stroke\n" -+ "aside from those listed will cause the screen to refresh.\n\n" - "Examples:\n\n" - "Display the cluster status on the console with updates as they occur:\n\n" - "\tcrm_mon\n\n" -diff --git a/tools/fix-manpages b/tools/fix-manpages -index 714ecce..f1f6f0d 100644 ---- a/tools/fix-manpages -+++ b/tools/fix-manpages -@@ -26,7 +26,7 @@ - # This leaves the --help-all output looking good and removes redundant - # stuff from the man page. Feel free to add additional headers here. - # Not all tools will have all headers. --/.SH NOTES\|.SH OPERATION SPECIFICATION\|.SH OUTPUT CONTROL\|.SH TIME SPECIFICATION/{ n -+/.SH NOTES\|.SH INTERACTIVE USE\|.SH OPERATION SPECIFICATION\|.SH OUTPUT CONTROL\|.SH TIME SPECIFICATION/{ n - N - N - d --- -1.8.3.1 - - -From ed4e4370dc97bc220878db89d69c71426b9458a3 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 19 Jan 2021 17:02:45 -0500 -Subject: [PATCH 2/2] Fix: tools: The 'm' key in crm_mon is a cycle, not a - toggle. - -Each time it's pressed, a different amount of fencing information should -be shown, cycling back to nothing after level 3. 
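
As a minimal illustration of the cycle behaviour described above (a standalone sketch, not the crm_mon code; only the 0..3 wrap-around mirrors what the patch does), each press advances the fence-history detail level and wraps back to 0 after 3:

    /* Standalone sketch of cycling (rather than toggling) a detail level. */
    #include <stdio.h>

    int
    main(void)
    {
        int level = 0;

        /* Simulate five presses of 'm': the level goes 1, 2, 3, 0, 1. */
        for (int press = 1; press <= 5; press++) {
            level++;
            if (level > 3) {
                level = 0;      /* wrap back to "no fencing sections" */
            }
            printf("press %d -> fence detail level %d\n", press, level);
        }
        return 0;
    }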
---- - tools/crm_mon.c | 76 +++++++++++++++++++++++++++++++++++++++++---------------- - 1 file changed, 55 insertions(+), 21 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index aafc80f..0981634 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -83,6 +83,8 @@ static gchar **processed_args = NULL; - static time_t last_refresh = 0; - crm_trigger_t *refresh_trigger = NULL; - -+int interactive_fence_level = 0; -+ - static pcmk__supported_format_t formats[] = { - #if CURSES_ENABLED - CRM_MON_SUPPORTED_FORMAT_CURSES, -@@ -382,9 +384,9 @@ as_xml_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError * - - static gboolean - fence_history_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { -- int rc = crm_atoi(optarg, "2"); -+ interactive_fence_level = crm_atoi(optarg, "2"); - -- switch (rc) { -+ switch (interactive_fence_level) { - case 3: - options.mon_ops |= mon_op_fence_full_history | mon_op_fence_history | mon_op_fence_connect; - return include_exclude_cb("--include", "fencing", data, err); -@@ -862,6 +864,38 @@ cib_connect(gboolean full) - return rc; - } - -+/* This is used to set up the fencing options after the interactive UI has been stared. -+ * fence_history_cb can't be used because it builds up a list of includes/excludes that -+ * then have to be processed with apply_include_exclude and that could affect other -+ * things. -+ */ -+static void -+set_fencing_options(int level) -+{ -+ switch (level) { -+ case 3: -+ options.mon_ops |= mon_op_fence_full_history | mon_op_fence_history | mon_op_fence_connect; -+ show |= mon_show_fencing_all; -+ break; -+ -+ case 2: -+ options.mon_ops |= mon_op_fence_history | mon_op_fence_connect; -+ show |= mon_show_fencing_all; -+ break; -+ -+ case 1: -+ options.mon_ops |= mon_op_fence_history | mon_op_fence_connect; -+ show |= mon_show_fence_failed | mon_show_fence_pending; -+ break; -+ -+ default: -+ level = 0; -+ options.mon_ops &= ~(mon_op_fence_history | mon_op_fence_connect); -+ show &= ~mon_show_fencing_all; -+ break; -+ } -+} -+ - #if CURSES_ENABLED - static const char * - get_option_desc(char c) -@@ -900,23 +934,12 @@ detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_dat - - switch (c) { - case 'm': -- if (!pcmk_is_set(show, mon_show_fencing_all)) { -- options.mon_ops |= mon_op_fence_history; -- options.mon_ops |= mon_op_fence_connect; -- if (st == NULL) { -- mon_cib_connection_destroy(NULL); -- } -- } -- -- if (pcmk_any_flags_set(show, -- mon_show_fence_failed -- |mon_show_fence_pending -- |mon_show_fence_worked)) { -- show &= ~mon_show_fencing_all; -- } else { -- show |= mon_show_fencing_all; -+ interactive_fence_level++; -+ if (interactive_fence_level > 3) { -+ interactive_fence_level = 0; - } - -+ set_fencing_options(interactive_fence_level); - break; - case 'c': - show ^= mon_show_tickets; -@@ -997,10 +1020,7 @@ detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_dat - print_option_help(out, 'R', pcmk_is_set(options.mon_ops, mon_op_print_clone_detail)); - print_option_help(out, 'b', pcmk_is_set(options.mon_ops, mon_op_print_brief)); - print_option_help(out, 'j', pcmk_is_set(options.mon_ops, mon_op_print_pending)); -- print_option_help(out, 'm', pcmk_any_flags_set(show, -- mon_show_fence_failed -- |mon_show_fence_pending -- |mon_show_fence_worked)); -+ out->info(out, "%d m: \t%s", interactive_fence_level, get_option_desc('m')); - out->info(out, "%s", "\nToggle fields via field letter, type any other key to return"); - } - 
-@@ -1400,6 +1420,19 @@ main(int argc, char **argv) - return clean_up(CRM_EX_USAGE); - } - -+ /* Sync up the initial value of interactive_fence_level with whatever was set with -+ * --include/--exclude= options. -+ */ -+ if (pcmk_is_set(show, mon_show_fencing_all)) { -+ interactive_fence_level = 3; -+ } else if (pcmk_is_set(show, mon_show_fence_worked)) { -+ interactive_fence_level = 2; -+ } else if (pcmk_any_flags_set(show, mon_show_fence_failed | mon_show_fence_pending)) { -+ interactive_fence_level = 1; -+ } else { -+ interactive_fence_level = 0; -+ } -+ - crm_mon_register_messages(out); - pe__register_messages(out); - stonith__register_messages(out); -@@ -1460,6 +1493,7 @@ main(int argc, char **argv) - } while (rc == -ENOTCONN); - - handle_connection_failures(rc); -+ set_fencing_options(interactive_fence_level); - mon_refresh_display(NULL); - - mainloop = g_main_loop_new(NULL, FALSE); --- -1.8.3.1 - diff --git a/SOURCES/030-crmadmin.patch b/SOURCES/030-crmadmin.patch deleted file mode 100644 index 8c39e44..0000000 --- a/SOURCES/030-crmadmin.patch +++ /dev/null @@ -1,1505 +0,0 @@ -From 4fc4140d0c3daa3a8de32adaebf33462c9b3d581 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 26 Nov 2020 13:34:21 +0100 -Subject: [PATCH 1/7] Refactor: move crm_admin_list() to pcmk__list_nodes() and - only print node name for -N -q - ---- - lib/pacemaker/pcmk_cluster_queries.c | 32 ++++++++++++++++++++++++---- - lib/pacemaker/pcmk_output.c | 41 +++--------------------------------- - 2 files changed, 31 insertions(+), 42 deletions(-) - -diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c -index c705b7f..1d1e775 100644 ---- a/lib/pacemaker/pcmk_cluster_queries.c -+++ b/lib/pacemaker/pcmk_cluster_queries.c -@@ -395,7 +395,7 @@ int - pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT) - { - cib_t *the_cib = cib_new(); -- xmlNode *output = NULL; -+ xmlNode *xml_node = NULL; - int rc; - - if (the_cib == NULL) { -@@ -406,11 +406,35 @@ pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT) - return pcmk_legacy2rc(rc); - } - -- rc = the_cib->cmds->query(the_cib, NULL, &output, -+ rc = the_cib->cmds->query(the_cib, NULL, &xml_node, - cib_scope_local | cib_sync_call); - if (rc == pcmk_ok) { -- out->message(out, "crmadmin-node-list", output, BASH_EXPORT); -- free_xml(output); -+ int found = 0; -+ xmlNode *node = NULL; -+ xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -+ -+ out->begin_list(out, NULL, NULL, "nodes"); -+ -+ for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -+ node = crm_next_same_xml(node)) { -+ const char *node_type = BASH_EXPORT ? 
NULL : -+ crm_element_value(node, XML_ATTR_TYPE); -+ out->message(out, "crmadmin-node", node_type, -+ crm_str(crm_element_value(node, XML_ATTR_UNAME)), -+ crm_str(crm_element_value(node, XML_ATTR_ID)), -+ BASH_EXPORT); -+ -+ found++; -+ } -+ // @TODO List Pacemaker Remote nodes that don't have a entry -+ -+ out->end_list(out); -+ -+ if (found == 0) { -+ out->info(out, "No nodes configured"); -+ } -+ -+ free_xml(xml_node); - } - the_cib->cmds->signoff(the_cib); - return pcmk_legacy2rc(rc); -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index bc4b91a..8f5e301 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -469,42 +469,6 @@ dc_xml(pcmk__output_t *out, va_list args) - return pcmk_rc_ok; - } - -- --PCMK__OUTPUT_ARGS("crmadmin-node-list", "xmlNodePtr", "gboolean") --static int --crmadmin_node_list(pcmk__output_t *out, va_list args) --{ -- xmlNodePtr xml_node = va_arg(args, xmlNodePtr); -- gboolean BASH_EXPORT = va_arg(args, gboolean); -- -- int found = 0; -- xmlNode *node = NULL; -- xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -- -- out->begin_list(out, NULL, NULL, "nodes"); -- -- for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -- node = crm_next_same_xml(node)) { -- const char *node_type = BASH_EXPORT ? NULL : -- crm_element_value(node, XML_ATTR_TYPE); -- out->message(out, "crmadmin-node", node_type, -- crm_str(crm_element_value(node, XML_ATTR_UNAME)), -- crm_str(crm_element_value(node, XML_ATTR_ID)), -- BASH_EXPORT); -- -- found++; -- } -- // @TODO List Pacemaker Remote nodes that don't have a entry -- -- out->end_list(out); -- -- if (found == 0) { -- out->info(out, "No nodes configured"); -- } -- -- return pcmk_rc_ok; --} -- - PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "gboolean") - static int - crmadmin_node_text(pcmk__output_t *out, va_list args) -@@ -514,7 +478,9 @@ crmadmin_node_text(pcmk__output_t *out, va_list args) - const char *id = va_arg(args, const char *); - gboolean BASH_EXPORT = va_arg(args, gboolean); - -- if (BASH_EXPORT) { -+ if (out->is_quiet(out)) { -+ out->info(out, "%s", crm_str(name)); -+ } else if (BASH_EXPORT) { - out->info(out, "export %s=%s", crm_str(name), crm_str(id)); - } else { - out->info(out, "%s node: %s (%s)", type ? 
type : "member", -@@ -657,7 +623,6 @@ static pcmk__message_entry_t fmt_functions[] = { - { "pacemakerd-health", "xml", pacemakerd_health_xml }, - { "dc", "default", dc_text }, - { "dc", "xml", dc_xml }, -- { "crmadmin-node-list", "default", crmadmin_node_list }, - { "crmadmin-node", "default", crmadmin_node_text }, - { "crmadmin-node", "xml", crmadmin_node_xml }, - { "digests", "default", digests_text }, --- -1.8.3.1 - - -From ecc6d582fd18f774ab6c081c4d72a7913a0f419e Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Fri, 4 Dec 2020 09:30:04 +0100 -Subject: [PATCH 2/7] Feature/API: crmadmin/pcmk_list_nodes(): list - remote/guest nodes and add parameter/variable to choose which node type(s) to - list - ---- - include/crm/common/xml_internal.h | 18 +++++++ - include/pacemaker.h | 6 +-- - include/pcmki/pcmki_cluster_queries.h | 2 +- - lib/cluster/membership.c | 17 +------ - lib/pacemaker/pcmk_cluster_queries.c | 89 ++++++++++++++++++++++++++++------- - tools/crmadmin.c | 9 +++- - 6 files changed, 103 insertions(+), 38 deletions(-) - -diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h -index d8694ee..969a57d 100644 ---- a/include/crm/common/xml_internal.h -+++ b/include/crm/common/xml_internal.h -@@ -123,6 +123,24 @@ do { - } \ - } while (0) - -+/* XML search strings for guest, remote and pacemaker_remote nodes */ -+ -+/* search string to find CIB resources entries for guest nodes */ -+#define XPATH_GUEST_NODE_CONFIG \ -+ "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \ -+ "//" XML_TAG_META_SETS "//" XML_CIB_TAG_NVPAIR \ -+ "[@name='" XML_RSC_ATTR_REMOTE_NODE "']" -+ -+/* search string to find CIB resources entries for remote nodes */ -+#define XPATH_REMOTE_NODE_CONFIG \ -+ "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \ -+ "[@type='remote'][@provider='pacemaker']" -+ -+/* search string to find CIB node status entries for pacemaker_remote nodes */ -+#define XPATH_REMOTE_NODE_STATUS \ -+ "//" XML_TAG_CIB "//" XML_CIB_TAG_STATUS "//" XML_CIB_TAG_STATE \ -+ "[@" XML_NODE_IS_REMOTE "='true']" -+ - enum pcmk__xml_artefact_ns { - pcmk__xml_artefact_ns_legacy_rng = 1, - pcmk__xml_artefact_ns_legacy_xslt, -diff --git a/include/pacemaker.h b/include/pacemaker.h -index 51bf585..42d096f 100644 ---- a/include/pacemaker.h -+++ b/include/pacemaker.h -@@ -73,8 +73,6 @@ int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc, - pe_node_t *node, GHashTable *overrides, - pe_working_set_t *data_set); - --#ifdef BUILD_PUBLIC_LIBPACEMAKER -- - /*! - * \brief Get nodes list - * -@@ -82,7 +80,9 @@ int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc, - * - * \return Standard Pacemaker return code - */ --int pcmk_list_nodes(xmlNodePtr *xml); -+int pcmk_list_nodes(xmlNodePtr *xml, char *node_types); -+ -+#ifdef BUILD_PUBLIC_LIBPACEMAKER - - /*! - * \brief Perform a STONITH action. 
-diff --git a/include/pcmki/pcmki_cluster_queries.h b/include/pcmki/pcmki_cluster_queries.h -index eb3b51c..955eea3 100644 ---- a/include/pcmki/pcmki_cluster_queries.h -+++ b/include/pcmki/pcmki_cluster_queries.h -@@ -8,7 +8,7 @@ - int pcmk__controller_status(pcmk__output_t *out, char *dest_node, guint message_timeout_ms); - int pcmk__designated_controller(pcmk__output_t *out, guint message_timeout_ms); - int pcmk__pacemakerd_status(pcmk__output_t *out, char *ipc_name, guint message_timeout_ms); --int pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT); -+int pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT); - - // remove when parameters removed from tools/crmadmin.c - int pcmk__shutdown_controller(pcmk__output_t *out, char *dest_node); -diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c -index 8bf5764..5841f16 100644 ---- a/lib/cluster/membership.c -+++ b/lib/cluster/membership.c -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -235,22 +236,6 @@ is_dirty(gpointer key, gpointer value, gpointer user_data) - return pcmk_is_set(((crm_node_t*)value)->flags, crm_node_dirty); - } - --/* search string to find CIB resources entries for guest nodes */ --#define XPATH_GUEST_NODE_CONFIG \ -- "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \ -- "//" XML_TAG_META_SETS "//" XML_CIB_TAG_NVPAIR \ -- "[@name='" XML_RSC_ATTR_REMOTE_NODE "']" -- --/* search string to find CIB resources entries for remote nodes */ --#define XPATH_REMOTE_NODE_CONFIG \ -- "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \ -- "[@type='remote'][@provider='pacemaker']" -- --/* search string to find CIB node status entries for pacemaker_remote nodes */ --#define XPATH_REMOTE_NODE_STATUS \ -- "//" XML_TAG_CIB "//" XML_CIB_TAG_STATUS "//" XML_CIB_TAG_STATE \ -- "[@" XML_NODE_IS_REMOTE "='true']" -- - /*! 
- * \brief Repopulate the remote peer cache based on CIB XML - * -diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c -index 1d1e775..fc5cfc4 100644 ---- a/lib/pacemaker/pcmk_cluster_queries.c -+++ b/lib/pacemaker/pcmk_cluster_queries.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -390,9 +391,33 @@ pcmk_pacemakerd_status(xmlNodePtr *xml, char *ipc_name, unsigned int message_tim - return rc; - } - -+/* user data for looping through remote node xpath searches */ -+struct node_data { -+ pcmk__output_t *out; -+ int found; -+ const char *field; /* XML attribute to check for node name */ -+ const char *type; -+ gboolean BASH_EXPORT; -+}; -+ -+static void -+remote_node_print_helper(xmlNode *result, void *user_data) -+{ -+ struct node_data *data = user_data; -+ pcmk__output_t *out = data->out; -+ const char *remote = crm_element_value(result, data->field); -+ -+ // node name and node id are the same for remote/guest nodes -+ out->message(out, "crmadmin-node", data->type, -+ remote, -+ remote, -+ data->BASH_EXPORT); -+ data->found++; -+} -+ - // \return Standard Pacemaker return code - int --pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT) -+pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT) - { - cib_t *the_cib = cib_new(); - xmlNode *xml_node = NULL; -@@ -409,28 +434,60 @@ pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT) - rc = the_cib->cmds->query(the_cib, NULL, &xml_node, - cib_scope_local | cib_sync_call); - if (rc == pcmk_ok) { -- int found = 0; - xmlNode *node = NULL; - xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); -+ struct node_data data = { -+ .out = out, -+ .found = 0, -+ .BASH_EXPORT = BASH_EXPORT -+ }; - - out->begin_list(out, NULL, NULL, "nodes"); - -- for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -- node = crm_next_same_xml(node)) { -- const char *node_type = BASH_EXPORT ? 
NULL : -- crm_element_value(node, XML_ATTR_TYPE); -- out->message(out, "crmadmin-node", node_type, -- crm_str(crm_element_value(node, XML_ATTR_UNAME)), -- crm_str(crm_element_value(node, XML_ATTR_ID)), -- BASH_EXPORT); -+ if (!pcmk__str_empty(node_types) && strstr(node_types, "all")) { -+ node_types = NULL; -+ } -+ -+ if (pcmk__str_empty(node_types) || strstr(node_types, "member")) { -+ for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -+ node = crm_next_same_xml(node)) { -+ const char *node_type = crm_element_value(node, XML_ATTR_TYPE); -+ //if (node_type == NULL || !strcmp(node_type, "member")) { -+ if (node_type == NULL) { -+ out->message(out, "crmadmin-node", node_type, -+ crm_str(crm_element_value(node, XML_ATTR_UNAME)), -+ crm_str(crm_element_value(node, XML_ATTR_ID)), -+ BASH_EXPORT); -+ data.found++; -+ } -+ -+ } -+ } -+ -+ if (pcmk__str_empty(node_types) || strstr(node_types, "pacemaker_remote")) { -+ data.field = "id"; -+ data.type = "pacemaker_remote"; -+ crm_foreach_xpath_result(xml_node, XPATH_REMOTE_NODE_STATUS, -+ remote_node_print_helper, &data); -+ } -+ -+ if (pcmk__str_empty(node_types) || strstr(node_types, "guest")) { -+ data.field = "value"; -+ data.type = "guest"; -+ crm_foreach_xpath_result(xml_node, XPATH_GUEST_NODE_CONFIG, -+ remote_node_print_helper, &data); -+ } - -- found++; -+ if (pcmk__str_empty(node_types) || !pcmk__strcmp(node_types, ",|^remote", pcmk__str_regex)) { -+ data.field = "id"; -+ data.type = "remote"; -+ crm_foreach_xpath_result(xml_node, XPATH_REMOTE_NODE_CONFIG, -+ remote_node_print_helper, &data); - } -- // @TODO List Pacemaker Remote nodes that don't have a entry - - out->end_list(out); - -- if (found == 0) { -+ if (data.found == 0) { - out->info(out, "No nodes configured"); - } - -@@ -440,9 +497,8 @@ pcmk__list_nodes(pcmk__output_t *out, gboolean BASH_EXPORT) - return pcmk_legacy2rc(rc); - } - --#ifdef BUILD_PUBLIC_LIBPACEMAKER - int --pcmk_list_nodes(xmlNodePtr *xml) -+pcmk_list_nodes(xmlNodePtr *xml, char *node_types) - { - pcmk__output_t *out = NULL; - int rc = pcmk_rc_ok; -@@ -454,11 +510,10 @@ pcmk_list_nodes(xmlNodePtr *xml) - - pcmk__register_lib_messages(out); - -- rc = pcmk__list_nodes(out, FALSE); -+ rc = pcmk__list_nodes(out, node_types, FALSE); - pcmk__out_epilogue(out, xml, rc); - return rc; - } --#endif - - // remove when parameters removed from tools/crmadmin.c - int -diff --git a/tools/crmadmin.c b/tools/crmadmin.c -index 2d9d663..3f31c69 100644 ---- a/tools/crmadmin.c -+++ b/tools/crmadmin.c -@@ -38,10 +38,12 @@ struct { - gint timeout; - char *dest_node; - char *ipc_name; -+ char *node_types; - gboolean BASH_EXPORT; - } options = { - .dest_node = NULL, - .ipc_name = NULL, -+ .node_types = NULL, - .BASH_EXPORT = FALSE - }; - -@@ -93,6 +95,11 @@ static GOptionEntry additional_options[] = { - "\n operation failed", - NULL - }, -+ { "node-types", 'T', 0, G_OPTION_ARG_STRING, &options.node_types, -+ "Node types to list (available options: all, member, pacemaker_remote," -+ "\n guest, remote) (valid with -N/--nodes)", -+ NULL -+ }, - { "bash-export", 'B', 0, G_OPTION_ARG_NONE, &options.BASH_EXPORT, - "Display nodes as shell commands of the form 'export uname=uuid'" - "\n (valid with -N/--nodes)", -@@ -264,7 +271,7 @@ main(int argc, char **argv) - rc = pcmk__pacemakerd_status(out, options.ipc_name, options.timeout); - break; - case cmd_list_nodes: -- rc = pcmk__list_nodes(out, options.BASH_EXPORT); -+ rc = pcmk__list_nodes(out, options.node_types, options.BASH_EXPORT); - break; - case cmd_whois_dc: - rc = 
pcmk__designated_controller(out, options.timeout); --- -1.8.3.1 - - -From f25d4e7aa9de55bb296087e6cbaf1654d01d6b0d Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Thu, 10 Dec 2020 13:58:31 +0100 -Subject: [PATCH 3/7] Refactor: use PCMK__XP_ prefix instead of XPATH_ for XML - constants - ---- - include/crm/common/xml_internal.h | 6 +++--- - lib/cluster/membership.c | 12 ++++++------ - lib/pacemaker/pcmk_cluster_queries.c | 6 +++--- - 3 files changed, 12 insertions(+), 12 deletions(-) - -diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h -index 969a57d..4501bee 100644 ---- a/include/crm/common/xml_internal.h -+++ b/include/crm/common/xml_internal.h -@@ -126,18 +126,18 @@ do { - /* XML search strings for guest, remote and pacemaker_remote nodes */ - - /* search string to find CIB resources entries for guest nodes */ --#define XPATH_GUEST_NODE_CONFIG \ -+#define PCMK__XP_GUEST_NODE_CONFIG \ - "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \ - "//" XML_TAG_META_SETS "//" XML_CIB_TAG_NVPAIR \ - "[@name='" XML_RSC_ATTR_REMOTE_NODE "']" - - /* search string to find CIB resources entries for remote nodes */ --#define XPATH_REMOTE_NODE_CONFIG \ -+#define PCMK__XP_REMOTE_NODE_CONFIG \ - "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \ - "[@type='remote'][@provider='pacemaker']" - - /* search string to find CIB node status entries for pacemaker_remote nodes */ --#define XPATH_REMOTE_NODE_STATUS \ -+#define PCMK__XP_REMOTE_NODE_STATUS \ - "//" XML_TAG_CIB "//" XML_CIB_TAG_STATUS "//" XML_CIB_TAG_STATE \ - "[@" XML_NODE_IS_REMOTE "='true']" - -diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c -index 5841f16..d70091e 100644 ---- a/lib/cluster/membership.c -+++ b/lib/cluster/membership.c -@@ -257,7 +257,7 @@ crm_remote_peer_cache_refresh(xmlNode *cib) - /* Look for guest nodes and remote nodes in the status section */ - data.field = "id"; - data.has_state = TRUE; -- crm_foreach_xpath_result(cib, XPATH_REMOTE_NODE_STATUS, -+ crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_STATUS, - remote_cache_refresh_helper, &data); - - /* Look for guest nodes and remote nodes in the configuration section, -@@ -268,11 +268,11 @@ crm_remote_peer_cache_refresh(xmlNode *cib) - */ - data.field = "value"; - data.has_state = FALSE; -- crm_foreach_xpath_result(cib, XPATH_GUEST_NODE_CONFIG, -+ crm_foreach_xpath_result(cib, PCMK__XP_GUEST_NODE_CONFIG, - remote_cache_refresh_helper, &data); - data.field = "id"; - data.has_state = FALSE; -- crm_foreach_xpath_result(cib, XPATH_REMOTE_NODE_CONFIG, -+ crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_CONFIG, - remote_cache_refresh_helper, &data); - - /* Remove all old cache entries that weren't seen in the CIB */ -@@ -1232,7 +1232,7 @@ known_node_cache_refresh_helper(xmlNode *xml_node, void *user_data) - - } - --#define XPATH_MEMBER_NODE_CONFIG \ -+#define PCMK__XP_MEMBER_NODE_CONFIG \ - "//" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_NODES \ - "/" XML_CIB_TAG_NODE "[not(@type) or @type='member']" - -@@ -1243,7 +1243,7 @@ crm_known_peer_cache_refresh(xmlNode *cib) - - g_hash_table_foreach(crm_known_peer_cache, mark_dirty, NULL); - -- crm_foreach_xpath_result(cib, XPATH_MEMBER_NODE_CONFIG, -+ crm_foreach_xpath_result(cib, PCMK__XP_MEMBER_NODE_CONFIG, - known_peer_cache_refresh_helper, NULL); - - /* Remove all old cache entries that weren't seen in the CIB */ -diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c -index 
fc5cfc4..e512f32 100644 ---- a/lib/pacemaker/pcmk_cluster_queries.c -+++ b/lib/pacemaker/pcmk_cluster_queries.c -@@ -467,21 +467,21 @@ pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT) - if (pcmk__str_empty(node_types) || strstr(node_types, "pacemaker_remote")) { - data.field = "id"; - data.type = "pacemaker_remote"; -- crm_foreach_xpath_result(xml_node, XPATH_REMOTE_NODE_STATUS, -+ crm_foreach_xpath_result(xml_node, PCMK__XP_REMOTE_NODE_STATUS, - remote_node_print_helper, &data); - } - - if (pcmk__str_empty(node_types) || strstr(node_types, "guest")) { - data.field = "value"; - data.type = "guest"; -- crm_foreach_xpath_result(xml_node, XPATH_GUEST_NODE_CONFIG, -+ crm_foreach_xpath_result(xml_node, PCMK__XP_GUEST_NODE_CONFIG, - remote_node_print_helper, &data); - } - - if (pcmk__str_empty(node_types) || !pcmk__strcmp(node_types, ",|^remote", pcmk__str_regex)) { - data.field = "id"; - data.type = "remote"; -- crm_foreach_xpath_result(xml_node, XPATH_REMOTE_NODE_CONFIG, -+ crm_foreach_xpath_result(xml_node, PCMK__XP_REMOTE_NODE_CONFIG, - remote_node_print_helper, &data); - } - --- -1.8.3.1 - - -From 225d5fabedb1319245a2a3e661df2252e60aca1a Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Fri, 22 Jan 2021 16:34:39 +0100 -Subject: [PATCH 4/7] Fix: crmadmin: use cluster instead of member term and - remove pacemaker_remote node type - ---- - include/pacemaker.h | 1 + - lib/cluster/membership.c | 2 +- - lib/pacemaker/pcmk_cluster_queries.c | 10 +--------- - lib/pacemaker/pcmk_output.c | 4 ++-- - tools/crmadmin.c | 4 ++-- - 5 files changed, 7 insertions(+), 14 deletions(-) - -diff --git a/include/pacemaker.h b/include/pacemaker.h -index 42d096f..a6a9d13 100644 ---- a/include/pacemaker.h -+++ b/include/pacemaker.h -@@ -77,6 +77,7 @@ int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc, - * \brief Get nodes list - * - * \param[in,out] xml The destination for the result, as an XML tree. 
-+ * \param[in] node_types Node type(s) to return (default: all) - * - * \return Standard Pacemaker return code - */ -diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c -index e512f32..9f19915 100644 ---- a/lib/pacemaker/pcmk_cluster_queries.c -+++ b/lib/pacemaker/pcmk_cluster_queries.c -@@ -448,11 +448,10 @@ pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT) - node_types = NULL; - } - -- if (pcmk__str_empty(node_types) || strstr(node_types, "member")) { -+ if (pcmk__str_empty(node_types) || strstr(node_types, "cluster")) { - for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; - node = crm_next_same_xml(node)) { - const char *node_type = crm_element_value(node, XML_ATTR_TYPE); -- //if (node_type == NULL || !strcmp(node_type, "member")) { - if (node_type == NULL) { - out->message(out, "crmadmin-node", node_type, - crm_str(crm_element_value(node, XML_ATTR_UNAME)), -@@ -464,13 +463,6 @@ pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT) - } - } - -- if (pcmk__str_empty(node_types) || strstr(node_types, "pacemaker_remote")) { -- data.field = "id"; -- data.type = "pacemaker_remote"; -- crm_foreach_xpath_result(xml_node, PCMK__XP_REMOTE_NODE_STATUS, -- remote_node_print_helper, &data); -- } -- - if (pcmk__str_empty(node_types) || strstr(node_types, "guest")) { - data.field = "value"; - data.type = "guest"; -diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c -index 8f5e301..655b723 100644 ---- a/lib/pacemaker/pcmk_output.c -+++ b/lib/pacemaker/pcmk_output.c -@@ -483,7 +483,7 @@ crmadmin_node_text(pcmk__output_t *out, va_list args) - } else if (BASH_EXPORT) { - out->info(out, "export %s=%s", crm_str(name), crm_str(id)); - } else { -- out->info(out, "%s node: %s (%s)", type ? type : "member", -+ out->info(out, "%s node: %s (%s)", type ? type : "cluster", - crm_str(name), crm_str(id)); - } - -@@ -499,7 +499,7 @@ crmadmin_node_xml(pcmk__output_t *out, va_list args) - const char *id = va_arg(args, const char *); - - pcmk__output_create_xml_node(out, "node", -- "type", type ? type : "member", -+ "type", type ? 
type : "cluster", - "name", crm_str(name), - "id", crm_str(id), - NULL); -diff --git a/tools/crmadmin.c b/tools/crmadmin.c -index 3f31c69..d699786 100644 ---- a/tools/crmadmin.c -+++ b/tools/crmadmin.c -@@ -96,8 +96,8 @@ static GOptionEntry additional_options[] = { - NULL - }, - { "node-types", 'T', 0, G_OPTION_ARG_STRING, &options.node_types, -- "Node types to list (available options: all, member, pacemaker_remote," -- "\n guest, remote) (valid with -N/--nodes)", -+ "Node types to list (available options: all, cluster, guest, remote)" -+ "\n (valid with -N/--nodes)", - NULL - }, - { "bash-export", 'B', 0, G_OPTION_ARG_NONE, &options.BASH_EXPORT, --- -1.8.3.1 - - -From ceacef32c70d1760a4dd89c00134373d42457ea2 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 25 Jan 2021 16:52:00 +0100 -Subject: [PATCH 5/7] Refactor: crmadmin/pcmk__list_nodes(): use - crm_foreach_xpath_result() for all types of nodes - ---- - include/crm/common/xml_internal.h | 5 +++++ - lib/cluster/membership.c | 4 ---- - lib/pacemaker/pcmk_cluster_queries.c | 25 ++++++++----------------- - 3 files changed, 13 insertions(+), 21 deletions(-) - -diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h -index 4501bee..2193b50 100644 ---- a/include/crm/common/xml_internal.h -+++ b/include/crm/common/xml_internal.h -@@ -125,6 +125,11 @@ do { - - /* XML search strings for guest, remote and pacemaker_remote nodes */ - -+/* search string to find CIB resources entries for cluster nodes */ -+#define PCMK__XP_MEMBER_NODE_CONFIG \ -+ "//" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_NODES \ -+ "/" XML_CIB_TAG_NODE "[not(@type) or @type='member']" -+ - /* search string to find CIB resources entries for guest nodes */ - #define PCMK__XP_GUEST_NODE_CONFIG \ - "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \ -diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c -index 3417836..5d042e2 100644 ---- a/lib/cluster/membership.c -+++ b/lib/cluster/membership.c -@@ -1232,10 +1232,6 @@ known_node_cache_refresh_helper(xmlNode *xml_node, void *user_data) - - } - --#define PCMK__XP_MEMBER_NODE_CONFIG \ -- "//" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_NODES \ -- "/" XML_CIB_TAG_NODE "[not(@type) or @type='member']" -- - static void - crm_known_peer_cache_refresh(xmlNode *cib) - { -diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c -index 9f19915..6e6acda 100644 ---- a/lib/pacemaker/pcmk_cluster_queries.c -+++ b/lib/pacemaker/pcmk_cluster_queries.c -@@ -405,12 +405,13 @@ remote_node_print_helper(xmlNode *result, void *user_data) - { - struct node_data *data = user_data; - pcmk__output_t *out = data->out; -- const char *remote = crm_element_value(result, data->field); -+ const char *name = crm_element_value(result, XML_ATTR_UNAME); -+ const char *id = crm_element_value(result, data->field); - - // node name and node id are the same for remote/guest nodes - out->message(out, "crmadmin-node", data->type, -- remote, -- remote, -+ name ? 
name : id, -+ id, - data->BASH_EXPORT); - data->found++; - } -@@ -434,8 +435,6 @@ pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT) - rc = the_cib->cmds->query(the_cib, NULL, &xml_node, - cib_scope_local | cib_sync_call); - if (rc == pcmk_ok) { -- xmlNode *node = NULL; -- xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); - struct node_data data = { - .out = out, - .found = 0, -@@ -449,18 +448,10 @@ pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT) - } - - if (pcmk__str_empty(node_types) || strstr(node_types, "cluster")) { -- for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; -- node = crm_next_same_xml(node)) { -- const char *node_type = crm_element_value(node, XML_ATTR_TYPE); -- if (node_type == NULL) { -- out->message(out, "crmadmin-node", node_type, -- crm_str(crm_element_value(node, XML_ATTR_UNAME)), -- crm_str(crm_element_value(node, XML_ATTR_ID)), -- BASH_EXPORT); -- data.found++; -- } -- -- } -+ data.field = "id"; -+ data.type = "cluster"; -+ crm_foreach_xpath_result(xml_node, PCMK__XP_MEMBER_NODE_CONFIG, -+ remote_node_print_helper, &data); - } - - if (pcmk__str_empty(node_types) || strstr(node_types, "guest")) { --- -1.8.3.1 - - -From a56ce0cf463798ac8d7ff945dda3be019cb9297d Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 26 Jan 2021 11:03:10 +0100 -Subject: [PATCH 6/7] Refactor: crmadmin: move new -T functionality to -N - parameter - ---- - include/crm/crm.h | 2 +- - tools/crmadmin.c | 30 ++++++++++++------------------ - 2 files changed, 13 insertions(+), 19 deletions(-) - -diff --git a/include/crm/crm.h b/include/crm/crm.h -index 3f22c4b..09643c2 100644 ---- a/include/crm/crm.h -+++ b/include/crm/crm.h -@@ -51,7 +51,7 @@ extern "C" { - * >=3.0.13: Fail counts include operation name and interval - * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED - */ --# define CRM_FEATURE_SET "3.7.0" -+# define CRM_FEATURE_SET "3.7.1" - - # define EOS '\0' - # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) -diff --git a/tools/crmadmin.c b/tools/crmadmin.c -index d699786..9c280aa 100644 ---- a/tools/crmadmin.c -+++ b/tools/crmadmin.c -@@ -36,14 +36,12 @@ static enum { - struct { - gboolean health; - gint timeout; -- char *dest_node; -+ char *optarg; - char *ipc_name; -- char *node_types; - gboolean BASH_EXPORT; - } options = { -- .dest_node = NULL, -+ .optarg = NULL, - .ipc_name = NULL, -- .node_types = NULL, - .BASH_EXPORT = FALSE - }; - -@@ -69,9 +67,10 @@ static GOptionEntry command_options[] = { - "\n node to examine the logs.", - NULL - }, -- { "nodes", 'N', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, -- "Display the uname of all member nodes", -- NULL -+ { "nodes", 'N', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, command_cb, -+ "Display the uname of all member nodes [optionally filtered by type (comma-separated)]" -+ "\n Types: all (default), cluster, guest, remote", -+ "TYPE" - }, - { "election", 'E', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, - "(Advanced) Start an election for the cluster co-ordinator", -@@ -95,11 +94,6 @@ static GOptionEntry additional_options[] = { - "\n operation failed", - NULL - }, -- { "node-types", 'T', 0, G_OPTION_ARG_STRING, &options.node_types, -- "Node types to list (available options: all, cluster, guest, remote)" -- "\n (valid with -N/--nodes)", -- NULL -- }, - { "bash-export", 'B', 0, G_OPTION_ARG_NONE, &options.BASH_EXPORT, - "Display nodes as shell commands of the form 
'export uname=uuid'" - "\n (valid with -N/--nodes)", -@@ -142,10 +136,10 @@ command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError - } - - if (optarg) { -- if (options.dest_node != NULL) { -- free(options.dest_node); -+ if (options.optarg != NULL) { -+ free(options.optarg); - } -- options.dest_node = strdup(optarg); -+ options.optarg = strdup(optarg); - } - - return TRUE; -@@ -265,19 +259,19 @@ main(int argc, char **argv) - - switch (command) { - case cmd_health: -- rc = pcmk__controller_status(out, options.dest_node, options.timeout); -+ rc = pcmk__controller_status(out, options.optarg, options.timeout); - break; - case cmd_pacemakerd_health: - rc = pcmk__pacemakerd_status(out, options.ipc_name, options.timeout); - break; - case cmd_list_nodes: -- rc = pcmk__list_nodes(out, options.node_types, options.BASH_EXPORT); -+ rc = pcmk__list_nodes(out, options.optarg, options.BASH_EXPORT); - break; - case cmd_whois_dc: - rc = pcmk__designated_controller(out, options.timeout); - break; - case cmd_shutdown: -- rc = pcmk__shutdown_controller(out, options.dest_node); -+ rc = pcmk__shutdown_controller(out, options.optarg); - break; - case cmd_elect_dc: - rc = pcmk__start_election(out); --- -1.8.3.1 - - -From dae5688edaf1b3008b70296421004ebdfbdf4b7b Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Tue, 26 Jan 2021 15:02:39 +0100 -Subject: [PATCH 7/7] Test: cts-cli: add crmadmin -N/--nodes tests - ---- - cts/Makefile.am | 1 + - cts/cli/crmadmin-cluster-remote-guest-nodes.xml | 483 ++++++++++++++++++++++++ - cts/cli/regression.tools.exp | 24 ++ - cts/cts-cli.in | 42 ++- - 4 files changed, 543 insertions(+), 7 deletions(-) - create mode 100644 cts/cli/crmadmin-cluster-remote-guest-nodes.xml - -diff --git a/cts/Makefile.am b/cts/Makefile.am -index de02aed..2a73774 100644 ---- a/cts/Makefile.am -+++ b/cts/Makefile.am -@@ -61,6 +61,7 @@ cts_SCRIPTS = CTSlab.py \ - - clidir = $(testdir)/cli - dist_cli_DATA = cli/constraints.xml \ -+ cli/crmadmin-cluster-remote-guest-nodes.xml \ - cli/crm_diff_new.xml \ - cli/crm_diff_old.xml \ - cli/crm_mon.xml \ -diff --git a/cts/cli/crmadmin-cluster-remote-guest-nodes.xml b/cts/cli/crmadmin-cluster-remote-guest-nodes.xml -new file mode 100644 -index 0000000..8db656f ---- /dev/null -+++ b/cts/cli/crmadmin-cluster-remote-guest-nodes.xml -@@ -0,0 +1,483 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 510cc0a..7324053 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -4053,3 +4053,27 @@ Resources colocated with clone: - - =#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#= - * Passed: crm_resource - Show resource digests with overrides -+=#=#=#= Begin test: List all nodes =#=#=#= -+11 -+=#=#=#= End test: List all nodes - OK (0) =#=#=#= -+* Passed: crmadmin - List all nodes -+=#=#=#= Begin test: List cluster nodes =#=#=#= -+6 -+=#=#=#= End test: List cluster nodes - OK (0) =#=#=#= -+* Passed: crmadmin - List cluster nodes -+=#=#=#= Begin test: List guest nodes =#=#=#= -+2 -+=#=#=#= End test: List guest nodes - OK (0) =#=#=#= -+* Passed: crmadmin - List guest nodes -+=#=#=#= Begin test: List remote nodes =#=#=#= -+3 -+=#=#=#= End test: List remote nodes - OK (0) =#=#=#= -+* Passed: crmadmin - List remote nodes -+=#=#=#= Begin test: List cluster,remote nodes =#=#=#= -+9 -+=#=#=#= End test: List cluster,remote nodes - OK (0) =#=#=#= -+* Passed: crmadmin - List cluster,remote nodes -+=#=#=#= Begin test: List guest,remote nodes =#=#=#= -+5 -+=#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#= -+* Passed: crmadmin - List guest,remote nodes -diff --git a/cts/cts-cli.in b/cts/cts-cli.in -index 96f5386..8e2dbe5 100755 ---- a/cts/cts-cli.in -+++ b/cts/cts-cli.in -@@ -501,15 +501,15 @@ function test_tools() { - desc="Default standby value" - cmd="crm_standby -N node1 -G" - test_assert $CRM_EX_OK -- -+ - desc="Set standby status" - cmd="crm_standby -N node1 -v true" - test_assert $CRM_EX_OK -- -+ - desc="Query standby value" - cmd="crm_standby -N node1 -G" - test_assert $CRM_EX_OK -- -+ - desc="Delete standby value" - cmd="crm_standby -N node1 -D" - test_assert $CRM_EX_OK -@@ -657,7 +657,7 @@ function test_tools() { - desc="Drop the status section" - cmd="cibadmin -R -o status --xml-text ''" - test_assert $CRM_EX_OK 0 -- -+ - desc="Create a clone" - cmd="cibadmin -C -o resources --xml-text ''" - test_assert $CRM_EX_OK 0 -@@ -697,7 +697,7 @@ function test_tools() { - desc="Update existing resource meta attribute" - cmd="crm_resource -r test-clone --meta -p is-managed -v true" - test_assert $CRM_EX_OK -- -+ - desc="Create a resource meta attribute in the parent" - cmd="crm_resource -r test-clone --meta -p is-managed -v true --force" - test_assert $CRM_EX_OK -@@ -803,6 +803,34 @@ function test_tools() { - test_assert $CRM_EX_OK 0 - - unset CIB_file -+ -+ export CIB_file="$test_home/cli/crmadmin-cluster-remote-guest-nodes.xml" -+ -+ desc="List all nodes" -+ cmd="crmadmin -N | wc -l | grep 11" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="List cluster nodes" -+ cmd="crmadmin -N cluster | wc -l | grep 6" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="List guest nodes" -+ cmd="crmadmin -N guest | wc -l | grep 2" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="List remote nodes" -+ cmd="crmadmin -N remote | wc -l | grep 3" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="List cluster,remote nodes" -+ cmd="crmadmin -N cluster,remote | wc -l | grep 9" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="List guest,remote nodes" -+ cmd="crmadmin -N guest,remote | wc -l | grep 5" -+ test_assert $CRM_EX_OK 0 -+ -+ unset CIB_file - } - - INVALID_PERIODS=( -@@ -822,7 +850,7 @@ 
INVALID_PERIODS=( - "P1Z/2019-02-20 00:00:00Z" # Invalid duration unit - "P1YM/2019-02-20 00:00:00Z" # No number for duration unit - ) -- -+ - function test_dates() { - # Ensure invalid period specifications are rejected - for spec in '' "${INVALID_PERIODS[@]}"; do -@@ -1665,7 +1693,7 @@ for t in $tests; do - done - - rm -rf "${shadow_dir}" -- -+ - failed=0 - - if [ $verbose -eq 1 ]; then --- -1.8.3.1 - diff --git a/SOURCES/031-cibsecret.patch b/SOURCES/031-cibsecret.patch deleted file mode 100644 index 4985bb4..0000000 --- a/SOURCES/031-cibsecret.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 240b9ec01e6a6e6554f0ae13d759c01339835a40 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Wed, 27 Jan 2021 10:10:03 +0100 -Subject: [PATCH] Feature: cibsecret: use crmadmin -N (which also lists guest - and remote nodes) to get nodes to sync to - ---- - tools/cibsecret.in | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/tools/cibsecret.in b/tools/cibsecret.in -index 8923a70..6326bf0 100644 ---- a/tools/cibsecret.in -+++ b/tools/cibsecret.in -@@ -182,9 +182,9 @@ get_live_peers() { - [ $? -eq 0 ] || fatal $CRM_EX_UNAVAILABLE "couldn't get local node name" - - # Get a list of all other cluster nodes -- GLP_ALL_PEERS="$(crm_node -l)" -+ GLP_ALL_PEERS="$(crmadmin -N -q)" - [ $? -eq 0 ] || fatal $CRM_EX_UNAVAILABLE "couldn't determine cluster nodes" -- GLP_ALL_PEERS="$(echo "$GLP_ALL_PEERS" | awk '{print $2}' | grep -v "^${GLP_LOCAL_NODE}$")" -+ GLP_ALL_PEERS="$(echo "$GLP_ALL_PEERS" | grep -v "^${GLP_LOCAL_NODE}$")" - - # Make a list of those that respond to pings - if [ "$(id -u)" = "0" ] && which fping >/dev/null 2>&1; then --- -1.8.3.1 - diff --git a/SOURCES/032-rhbz1898457.patch b/SOURCES/032-rhbz1898457.patch deleted file mode 100644 index d36e9b5..0000000 --- a/SOURCES/032-rhbz1898457.patch +++ /dev/null @@ -1,6801 +0,0 @@ -From 04a259fbd6fe24f909b82a2b0790c39841618c3c Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 20 Jan 2021 17:08:43 -0600 -Subject: [PATCH 01/16] Refactor: scheduler: use convenience functions when - unpacking node history - -... to simplify a bit and improve readability ---- - lib/pengine/unpack.c | 14 +++++--------- - 1 file changed, 5 insertions(+), 9 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 281bc88..f0f3425 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -1019,19 +1019,15 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - bool changed = false; - xmlNode *lrm_rsc = NULL; - -- for (xmlNode *state = pcmk__xe_first_child(status); state != NULL; -- state = pcmk__xe_next(state)) { -+ // Loop through all node_state entries in CIB status -+ for (xmlNode *state = first_named_child(status, XML_CIB_TAG_STATE); -+ state != NULL; state = crm_next_same_xml(state)) { - -- const char *id = NULL; -+ const char *id = ID(state); - const char *uname = NULL; - pe_node_t *this_node = NULL; - bool process = FALSE; - -- if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) { -- continue; -- } -- -- id = crm_element_value(state, XML_ATTR_ID); - uname = crm_element_value(state, XML_ATTR_UNAME); - this_node = pe_find_node_any(data_set->nodes, id, uname); - --- -1.8.3.1 - - -From 4e1a0fa5ffbdad7fe0ed2e40550fc2773073a2dd Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 20 Jan 2021 17:12:19 -0600 -Subject: [PATCH 02/16] Refactor: scheduler: return standard code when - unpacking node history - -... for consistency and readability. Also, document the function. ---- - lib/pengine/unpack.c | 31 ++++++++++++++++++++++++------- - 1 file changed, 24 insertions(+), 7 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index f0f3425..2471bf0 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1013,10 +1013,27 @@ unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_ - } - } - --static bool -+/*! -+ * \internal -+ * \brief Unpack nodes' resource history as much as possible -+ * -+ * Unpack as many nodes' resource history as possible in one pass through the -+ * status. We need to process Pacemaker Remote nodes' connections/containers -+ * before unpacking their history; the connection/container history will be -+ * in another node's history, so it might take multiple passes to unpack -+ * everything. -+ * -+ * \param[in] status CIB XML status section -+ * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen -+ * \param[in] data_set Cluster working set -+ * -+ * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done, -+ * or EAGAIN if more unpacking remains to be done) -+ */ -+static int - unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - { -- bool changed = false; -+ int rc = pcmk_rc_ok; - xmlNode *lrm_rsc = NULL; - - // Loop through all node_state entries in CIB status -@@ -1091,15 +1108,16 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - fence?"un":"", - (pe__is_guest_or_remote_node(this_node)? 
" remote" : ""), - this_node->details->uname); -- changed = TRUE; - this_node->details->unpacked = TRUE; - - lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE); - lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); - unpack_lrm_resources(this_node, lrm_rsc, data_set); -+ -+ rc = EAGAIN; // Other node histories might depend on this one - } - } -- return changed; -+ return rc; - } - - /* remove nodes that are down, stopping */ -@@ -1195,9 +1213,8 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set) - } - } - -- -- while(unpack_node_loop(status, FALSE, data_set)) { -- crm_trace("Start another loop"); -+ while (unpack_node_loop(status, FALSE, data_set) == EAGAIN) { -+ crm_trace("Another pass through node resource histories is needed"); - } - - // Now catch any nodes we didn't see --- -1.8.3.1 - - -From 14a94866978e4a36bb329deb6b7a004c97eab912 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 20 Jan 2021 17:15:20 -0600 -Subject: [PATCH 03/16] Low: scheduler: warn if node state has no ID or uname - -This should be possible only if someone manually mis-edits the CIB, but that -case does merit a warning, and we should bail early if it happens (previously, -the right thing would eventually be done anyway, but log messages using a NULL -could theoretically cause a crash). ---- - lib/pengine/unpack.c | 26 +++++++++++++++++++------- - 1 file changed, 19 insertions(+), 7 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 2471bf0..0b4e0cd 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1041,11 +1041,15 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - state != NULL; state = crm_next_same_xml(state)) { - - const char *id = ID(state); -- const char *uname = NULL; -+ const char *uname = crm_element_value(state, XML_ATTR_UNAME); - pe_node_t *this_node = NULL; - bool process = FALSE; - -- uname = crm_element_value(state, XML_ATTR_UNAME); -+ if ((id == NULL) || (uname == NULL)) { -+ // Warning already logged in first pass through status section -+ continue; -+ } -+ - this_node = pe_find_node_any(data_set->nodes, id, uname); - - if (this_node == NULL) { -@@ -1150,19 +1154,27 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set) - const char *resource_discovery_enabled = NULL; - - id = crm_element_value(state, XML_ATTR_ID); -- uname = crm_element_value(state, XML_ATTR_UNAME); -- this_node = pe_find_node_any(data_set->nodes, id, uname); -+ if (id == NULL) { -+ crm_warn("Ignoring malformed " XML_CIB_TAG_STATE -+ " entry without " XML_ATTR_ID); -+ continue; -+ } - -+ uname = crm_element_value(state, XML_ATTR_UNAME); - if (uname == NULL) { -- /* error */ -+ crm_warn("Ignoring malformed " XML_CIB_TAG_STATE -+ " entry without " XML_ATTR_UNAME); - continue; -+ } - -- } else if (this_node == NULL) { -+ this_node = pe_find_node_any(data_set->nodes, id, uname); -+ if (this_node == NULL) { - pcmk__config_warn("Ignoring recorded node status for '%s' " - "because no longer in configuration", uname); - continue; -+ } - -- } else if (pe__is_guest_or_remote_node(this_node)) { -+ if (pe__is_guest_or_remote_node(this_node)) { - /* online state for remote nodes is determined by the - * rsc state after all the unpacking is done. 
we do however - * need to mark whether or not the node has been fenced as this plays --- -1.8.3.1 - - -From 2b77c873ad3f3f04e164695eafe945cec7d16f75 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 20 Jan 2021 17:22:48 -0600 -Subject: [PATCH 04/16] Log: scheduler: improve messages when unpacking node - histories - ---- - lib/pengine/unpack.c | 26 ++++++++++++++------------ - 1 file changed, 14 insertions(+), 12 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 0b4e0cd..641b601 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1047,21 +1047,27 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - - if ((id == NULL) || (uname == NULL)) { - // Warning already logged in first pass through status section -+ crm_trace("Not unpacking resource history from malformed " -+ XML_CIB_TAG_STATE " without id and/or uname"); - continue; - } - - this_node = pe_find_node_any(data_set->nodes, id, uname); -- - if (this_node == NULL) { -- crm_info("Node %s is unknown", id); -+ // Warning already logged in first pass through status section -+ crm_trace("Not unpacking resource history for node %s because " -+ "no longer in configuration", id); - continue; -+ } - -- } else if (this_node->details->unpacked) { -- crm_trace("Node %s was already processed", id); -+ if (this_node->details->unpacked) { -+ crm_trace("Not unpacking resource history for node %s because " -+ "already unpacked", id); - continue; -+ } - -- } else if (!pe__is_guest_or_remote_node(this_node) -- && pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { -+ if (!pe__is_guest_or_remote_node(this_node) -+ && pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { - // A redundant test, but preserves the order for regression tests - process = TRUE; - -@@ -1082,13 +1088,11 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - * known to be up before we process resources running in it. - */ - check = TRUE; -- crm_trace("Checking node %s/%s/%s status %d/%d/%d", id, rsc->id, rsc->container->id, fence, rsc->role, RSC_ROLE_STARTED); - - } else if (!pe__is_guest_node(this_node) - && ((rsc->role == RSC_ROLE_STARTED) - || pcmk_is_set(data_set->flags, pe_flag_shutdown_lock))) { - check = TRUE; -- crm_trace("Checking node %s/%s status %d/%d/%d", id, rsc->id, fence, rsc->role, RSC_ROLE_STARTED); - } - - if (check) { -@@ -1108,10 +1112,8 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - } - - if(process) { -- crm_trace("Processing lrm resource entries on %shealthy%s node: %s", -- fence?"un":"", -- (pe__is_guest_or_remote_node(this_node)? " remote" : ""), -- this_node->details->uname); -+ crm_trace("Unpacking resource history for %snode %s", -+ (fence? 
"unseen " : ""), id); - this_node->details->unpacked = TRUE; - - lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE); --- -1.8.3.1 - - -From 9e1747453bf3d0315f189814fc025eaeab35b937 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 20 Jan 2021 17:26:05 -0600 -Subject: [PATCH 05/16] Refactor: scheduler: avoid a level of indentation when - unpacking node histories - ---- - lib/pengine/unpack.c | 20 +++++++++++--------- - 1 file changed, 11 insertions(+), 9 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 641b601..7e68d64 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1111,17 +1111,19 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - process = TRUE; - } - -- if(process) { -- crm_trace("Unpacking resource history for %snode %s", -- (fence? "unseen " : ""), id); -- this_node->details->unpacked = TRUE; -+ if (!process) { -+ continue; -+ } - -- lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE); -- lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); -- unpack_lrm_resources(this_node, lrm_rsc, data_set); -+ crm_trace("Unpacking resource history for %snode %s", -+ (fence? "unseen " : ""), id); - -- rc = EAGAIN; // Other node histories might depend on this one -- } -+ this_node->details->unpacked = TRUE; -+ lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE); -+ lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); -+ unpack_lrm_resources(this_node, lrm_rsc, data_set); -+ -+ rc = EAGAIN; // Other node histories might depend on this one - } - return rc; - } --- -1.8.3.1 - - -From 7d5dadb32dbd52d287bbc94c2d55d75e018a3146 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 20 Jan 2021 17:26:41 -0600 -Subject: [PATCH 06/16] Refactor: scheduler: simplify unpacking node histories - -By separating the processing of remote and guest nodes, we can eliminate some -variables, improve trace messages, and make the code a little easier to follow. 
---- - lib/pengine/unpack.c | 82 +++++++++++++++++++++++++++------------------------- - 1 file changed, 42 insertions(+), 40 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 7e68d64..9b968a9 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1031,7 +1031,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_ - * or EAGAIN if more unpacking remains to be done) - */ - static int --unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) -+unpack_node_loop(xmlNode *status, bool fence, pe_working_set_t *data_set) - { - int rc = pcmk_rc_ok; - xmlNode *lrm_rsc = NULL; -@@ -1043,7 +1043,6 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - const char *id = ID(state); - const char *uname = crm_element_value(state, XML_ATTR_UNAME); - pe_node_t *this_node = NULL; -- bool process = FALSE; - - if ((id == NULL) || (uname == NULL)) { - // Warning already logged in first pass through status section -@@ -1066,53 +1065,56 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) - continue; - } - -- if (!pe__is_guest_or_remote_node(this_node) -- && pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { -- // A redundant test, but preserves the order for regression tests -- process = TRUE; -+ if (fence) { -+ // We're processing all remaining nodes - -- } else if (pe__is_guest_or_remote_node(this_node)) { -- bool check = FALSE; -+ } else if (pe__is_guest_node(this_node)) { -+ /* We can unpack a guest node's history only after we've unpacked -+ * other resource history to the point that we know that the node's -+ * connection and containing resource are both up. -+ */ - pe_resource_t *rsc = this_node->details->remote_rsc; - -- if(fence) { -- check = TRUE; -- -- } else if(rsc == NULL) { -- /* Not ready yet */ -- -- } else if (pe__is_guest_node(this_node) -- && rsc->role == RSC_ROLE_STARTED -- && rsc->container->role == RSC_ROLE_STARTED) { -- /* Both the connection and its containing resource need to be -- * known to be up before we process resources running in it. -- */ -- check = TRUE; -- -- } else if (!pe__is_guest_node(this_node) -- && ((rsc->role == RSC_ROLE_STARTED) -- || pcmk_is_set(data_set->flags, pe_flag_shutdown_lock))) { -- check = TRUE; -- } -- -- if (check) { -- determine_remote_online_status(data_set, this_node); -- unpack_handle_remote_attrs(this_node, state, data_set); -- process = TRUE; -+ if ((rsc == NULL) || (rsc->role != RSC_ROLE_STARTED) -+ || (rsc->container->role != RSC_ROLE_STARTED)) { -+ crm_trace("Not unpacking resource history for guest node %s " -+ "because container and connection are not known to " -+ "be up", id); -+ continue; - } - -- } else if (this_node->details->online) { -- process = TRUE; -+ } else if (pe__is_remote_node(this_node)) { -+ /* We can unpack a remote node's history only after we've unpacked -+ * other resource history to the point that we know that the node's -+ * connection is up, with the exception of when shutdown locks are -+ * in use. 
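For illustration only, here is a minimal sketch of the gating logic this patch separates out for Pacemaker Remote (guest and remote) node histories, ignoring the final "fence all remaining nodes" pass. It is not the literal patch code: it assumes Pacemaker's internal scheduler headers that declare pe_node_t, pe_resource_t, pe__is_guest_node() and friends, and the helper name history_ready() is hypothetical.

    #include <stdbool.h>
    #include <crm/pengine/status.h>   /* pe_node_t, pe_resource_t (assumed header) */

    /* Decide whether a Pacemaker Remote node's resource history can be
     * unpacked yet: a guest node needs both its connection and its container
     * started; a remote node needs the connection started, unless shutdown
     * locks are in use.
     */
    static bool
    history_ready(pe_node_t *node, bool shutdown_lock)
    {
        pe_resource_t *conn = node->details->remote_rsc;

        if (conn == NULL) {
            return false;              /* connection not unpacked yet */
        }
        if (pe__is_guest_node(node)) {
            return (conn->role == RSC_ROLE_STARTED)
                   && (conn->container->role == RSC_ROLE_STARTED);
        }
        return shutdown_lock || (conn->role == RSC_ROLE_STARTED);
    }

Until a check like this succeeds for a given node, that node's history is skipped and picked up on a later pass (the EAGAIN loop introduced earlier in this series).
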
-+ */ -+ pe_resource_t *rsc = this_node->details->remote_rsc; - -- } else if (fence) { -- process = TRUE; -+ if ((rsc == NULL) -+ || (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock) -+ && (rsc->role != RSC_ROLE_STARTED))) { -+ crm_trace("Not unpacking resource history for remote node %s " -+ "because connection is not known to be up", id); -+ continue; -+ } - -- } else if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) { -- process = TRUE; -+ /* If fencing and shutdown locks are disabled and we're not processing -+ * unseen nodes, then we don't want to unpack offline nodes until online -+ * nodes have been unpacked. This allows us to number active clone -+ * instances first. -+ */ -+ } else if (!pcmk_any_flags_set(data_set->flags, pe_flag_stonith_enabled -+ |pe_flag_shutdown_lock) -+ && !this_node->details->online) { -+ crm_trace("Not unpacking resource history for offline " -+ "cluster node %s", id); -+ continue; - } - -- if (!process) { -- continue; -+ if (pe__is_guest_or_remote_node(this_node)) { -+ determine_remote_online_status(data_set, this_node); -+ unpack_handle_remote_attrs(this_node, state, data_set); - } - - crm_trace("Unpacking resource history for %snode %s", --- -1.8.3.1 - - -From 0772d5e22df71557ca217e148bac5c9df00ddb3e Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 21 Jan 2021 12:17:50 -0600 -Subject: [PATCH 07/16] Refactor: scheduler: functionize unpacking node state - better - -unpack_status() was large and a bit difficult to follow, so separate unpacking -the node state and unpacking a cluster node's transient attributes into their -own functions. - -Aside from formatting and log message tweaks, the code remains the same. ---- - lib/pengine/unpack.c | 188 ++++++++++++++++++++++++++++++--------------------- - 1 file changed, 112 insertions(+), 76 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 9b968a9..2e565e3 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1015,6 +1015,117 @@ unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_ - - /*! - * \internal -+ * \brief Unpack a cluster node's transient attributes -+ * -+ * \param[in] state CIB node state XML -+ * \param[in] node Cluster node whose attributes are being unpacked -+ * \param[in] data_set Cluster working set -+ */ -+static void -+unpack_transient_attributes(xmlNode *state, pe_node_t *node, -+ pe_working_set_t *data_set) -+{ -+ const char *discovery = NULL; -+ xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); -+ -+ add_node_attrs(attrs, node, TRUE, data_set); -+ -+ if (crm_is_true(pe_node_attribute_raw(node, "standby"))) { -+ crm_info("Node %s is in standby-mode", node->details->uname); -+ node->details->standby = TRUE; -+ } -+ -+ if (crm_is_true(pe_node_attribute_raw(node, "maintenance"))) { -+ crm_info("Node %s is in maintenance-mode", node->details->uname); -+ node->details->maintenance = TRUE; -+ } -+ -+ discovery = pe_node_attribute_raw(node, XML_NODE_ATTR_RSC_DISCOVERY); -+ if ((discovery != NULL) && !crm_is_true(discovery)) { -+ crm_warn("Ignoring %s attribute for node %s because disabling " -+ "resource discovery is not allowed for cluster nodes", -+ XML_NODE_ATTR_RSC_DISCOVERY, node->details->uname); -+ } -+} -+ -+/*! -+ * \internal -+ * \brief Unpack a node state entry (first pass) -+ * -+ * Unpack one node state entry from status. This unpacks information from the -+ * node_state element itself and node attributes inside it, but not the -+ * resource history inside it. 
Multiple passes through the status are needed to -+ * fully unpack everything. -+ * -+ * \param[in] state CIB node state XML -+ * \param[in] data_set Cluster working set -+ */ -+static void -+unpack_node_state(xmlNode *state, pe_working_set_t *data_set) -+{ -+ const char *id = NULL; -+ const char *uname = NULL; -+ pe_node_t *this_node = NULL; -+ -+ id = crm_element_value(state, XML_ATTR_ID); -+ if (id == NULL) { -+ crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without " -+ XML_ATTR_ID); -+ return; -+ } -+ -+ uname = crm_element_value(state, XML_ATTR_UNAME); -+ if (uname == NULL) { -+ crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without " -+ XML_ATTR_UNAME); -+ return; -+ } -+ -+ this_node = pe_find_node_any(data_set->nodes, id, uname); -+ if (this_node == NULL) { -+ pcmk__config_warn("Ignoring recorded node state for '%s' because " -+ "it is no longer in the configuration", uname); -+ return; -+ } -+ -+ if (pe__is_guest_or_remote_node(this_node)) { -+ /* We can't determine the online status of Pacemaker Remote nodes until -+ * after all resource history has been unpacked. In this first pass, we -+ * do need to mark whether the node has been fenced, as this plays a -+ * role during unpacking cluster node resource state. -+ */ -+ const char *is_fenced = crm_element_value(state, XML_NODE_IS_FENCED); -+ -+ this_node->details->remote_was_fenced = crm_atoi(is_fenced, "0"); -+ return; -+ } -+ -+ unpack_transient_attributes(state, this_node, data_set); -+ -+ /* Provisionally mark this cluster node as clean. We have at least seen it -+ * in the current cluster's lifetime. -+ */ -+ this_node->details->unclean = FALSE; -+ this_node->details->unseen = FALSE; -+ -+ crm_trace("Determining online status of cluster node %s (id %s)", -+ this_node->details->uname, id); -+ determine_online_status(state, this_node, data_set); -+ -+ if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum) -+ && this_node->details->online -+ && (data_set->no_quorum_policy == no_quorum_suicide)) { -+ /* Everything else should flow from this automatically -+ * (at least until the scheduler becomes able to migrate off -+ * healthy resources) -+ */ -+ pe_fence_node(data_set, this_node, "cluster does not have quorum", -+ FALSE); -+ } -+} -+ -+/*! 
-+ * \internal - * \brief Unpack nodes' resource history as much as possible - * - * Unpack as many nodes' resource history as possible in one pass through the -@@ -1136,11 +1247,7 @@ unpack_node_loop(xmlNode *status, bool fence, pe_working_set_t *data_set) - gboolean - unpack_status(xmlNode * status, pe_working_set_t * data_set) - { -- const char *id = NULL; -- const char *uname = NULL; -- - xmlNode *state = NULL; -- pe_node_t *this_node = NULL; - - crm_trace("Beginning unpack"); - -@@ -1156,78 +1263,7 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set) - unpack_tickets_state((xmlNode *) state, data_set); - - } else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) { -- xmlNode *attrs = NULL; -- const char *resource_discovery_enabled = NULL; -- -- id = crm_element_value(state, XML_ATTR_ID); -- if (id == NULL) { -- crm_warn("Ignoring malformed " XML_CIB_TAG_STATE -- " entry without " XML_ATTR_ID); -- continue; -- } -- -- uname = crm_element_value(state, XML_ATTR_UNAME); -- if (uname == NULL) { -- crm_warn("Ignoring malformed " XML_CIB_TAG_STATE -- " entry without " XML_ATTR_UNAME); -- continue; -- } -- -- this_node = pe_find_node_any(data_set->nodes, id, uname); -- if (this_node == NULL) { -- pcmk__config_warn("Ignoring recorded node status for '%s' " -- "because no longer in configuration", uname); -- continue; -- } -- -- if (pe__is_guest_or_remote_node(this_node)) { -- /* online state for remote nodes is determined by the -- * rsc state after all the unpacking is done. we do however -- * need to mark whether or not the node has been fenced as this plays -- * a role during unpacking cluster node resource state */ -- this_node->details->remote_was_fenced = -- crm_atoi(crm_element_value(state, XML_NODE_IS_FENCED), "0"); -- continue; -- } -- -- crm_trace("Processing node id=%s, uname=%s", id, uname); -- -- /* Mark the node as provisionally clean -- * - at least we have seen it in the current cluster's lifetime -- */ -- this_node->details->unclean = FALSE; -- this_node->details->unseen = FALSE; -- attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); -- add_node_attrs(attrs, this_node, TRUE, data_set); -- -- if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { -- crm_info("Node %s is in standby-mode", this_node->details->uname); -- this_node->details->standby = TRUE; -- } -- -- if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance"))) { -- crm_info("Node %s is in maintenance-mode", this_node->details->uname); -- this_node->details->maintenance = TRUE; -- } -- -- resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY); -- if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) { -- crm_warn("ignoring %s attribute on node %s, disabling resource discovery is not allowed on cluster nodes", -- XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname); -- } -- -- crm_trace("determining node state"); -- determine_online_status(state, this_node, data_set); -- -- if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum) -- && this_node->details->online -- && (data_set->no_quorum_policy == no_quorum_suicide)) { -- /* Everything else should flow from this automatically -- * (at least until the scheduler becomes able to migrate off -- * healthy resources) -- */ -- pe_fence_node(data_set, this_node, "cluster does not have quorum", FALSE); -- } -+ unpack_node_state(state, data_set); - } - } - --- -1.8.3.1 - - -From f36ac7e59b430ed21c2ceca6f58117c3566b35a6 Mon Sep 17 00:00:00 2001 
-From: Ken Gaillot -Date: Thu, 21 Jan 2021 12:20:38 -0600 -Subject: [PATCH 08/16] Refactor: scheduler: rename node history unpacking - function - -unpack_node_loop() was a confusing name since it doesn't unpack a node or -even most of its node state, just its resource history. ---- - lib/pengine/unpack.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 2e565e3..28951bd 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1142,7 +1142,7 @@ unpack_node_state(xmlNode *state, pe_working_set_t *data_set) - * or EAGAIN if more unpacking remains to be done) - */ - static int --unpack_node_loop(xmlNode *status, bool fence, pe_working_set_t *data_set) -+unpack_node_history(xmlNode *status, bool fence, pe_working_set_t *data_set) - { - int rc = pcmk_rc_ok; - xmlNode *lrm_rsc = NULL; -@@ -1267,14 +1267,14 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set) - } - } - -- while (unpack_node_loop(status, FALSE, data_set) == EAGAIN) { -+ while (unpack_node_history(status, FALSE, data_set) == EAGAIN) { - crm_trace("Another pass through node resource histories is needed"); - } - - // Now catch any nodes we didn't see -- unpack_node_loop(status, -- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled), -- data_set); -+ unpack_node_history(status, -+ pcmk_is_set(data_set->flags, pe_flag_stonith_enabled), -+ data_set); - - /* Now that we know where resources are, we can schedule stops of containers - * with failed bundle connections -@@ -3448,7 +3448,7 @@ check_operation_expiry(pe_resource_t *rsc, pe_node_t *node, int rc, - * - * We could limit this to remote_node->details->unclean, but at - * this point, that's always true (it won't be reliable until -- * after unpack_node_loop() is done). -+ * after unpack_node_history() is done). - */ - crm_info("Clearing %s failure will wait until any scheduled " - "fencing of %s completes", task, rsc->id); --- -1.8.3.1 - - -From 50cda757beb809de555b6f0efbb19c711b99e72a Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 21 Jan 2021 12:52:22 -0600 -Subject: [PATCH 09/16] Refactor: scheduler: reorganize lrm_resources unpacker - -Drill down from node_state to lrm_resources within the function rather than -before it, so it's more self-contained, and unpack_node_history() is cleaner. -Rename it accordingly. - -Also comment it better, and use convenience functions to simplify a bit. 
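As a rough sketch of the drill-down this patch moves into the unpacker, the walk goes node_state -> lrm -> lrm_resources -> lrm_resource. The function name walk_lrm_resources() is hypothetical; the helpers and XML tag constants are the ones already used in these patches, and the chained find_xml_node() calls mirror the pre-patch code removed in the hunk below, which tolerates a NULL parent.

    /* Walk from a node_state element down to each lrm_resource entry:
     *   <node_state> -> <lrm> -> <lrm_resources> -> <lrm_resource> ...
     */
    static void
    walk_lrm_resources(xmlNode *node_state)
    {
        xmlNode *xml = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);

        xml = find_xml_node(xml, XML_LRM_TAG_RESOURCES, FALSE);
        if (xml == NULL) {
            return;  /* this node has no recorded resource history */
        }
        for (xmlNode *entry = first_named_child(xml, XML_LRM_TAG_RESOURCE);
             entry != NULL; entry = crm_next_same_xml(entry)) {
            crm_trace("Found history entry %s", crm_str(ID(entry)));
        }
    }
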
---- - lib/pengine/unpack.c | 59 +++++++++++++++++++++++++++++++--------------------- - 1 file changed, 35 insertions(+), 24 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 28951bd..637be8d 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -60,8 +60,8 @@ static void add_node_attrs(xmlNode *attrs, pe_node_t *node, bool overwrite, - static void determine_online_status(xmlNode *node_state, pe_node_t *this_node, - pe_working_set_t *data_set); - --static void unpack_lrm_resources(pe_node_t *node, xmlNode *lrm_state, -- pe_working_set_t *data_set); -+static void unpack_node_lrm(pe_node_t *node, xmlNode *xml, -+ pe_working_set_t *data_set); - - - // Bitmask for warnings we only want to print once -@@ -1145,7 +1145,6 @@ static int - unpack_node_history(xmlNode *status, bool fence, pe_working_set_t *data_set) - { - int rc = pcmk_rc_ok; -- xmlNode *lrm_rsc = NULL; - - // Loop through all node_state entries in CIB status - for (xmlNode *state = first_named_child(status, XML_CIB_TAG_STATE); -@@ -1232,9 +1231,7 @@ unpack_node_history(xmlNode *status, bool fence, pe_working_set_t *data_set) - (fence? "unseen " : ""), id); - - this_node->details->unpacked = TRUE; -- lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE); -- lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); -- unpack_lrm_resources(this_node, lrm_rsc, data_set); -+ unpack_node_lrm(this_node, state, data_set); - - rc = EAGAIN; // Other node histories might depend on this one - } -@@ -2458,32 +2455,46 @@ handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * dat - } - } - -+/*! -+ * \internal -+ * \brief Unpack one node's lrm status section -+ * -+ * \param[in] node Node whose status is being unpacked -+ * \param[in] xml CIB node state XML -+ * \param[in] data_set Cluster working set -+ */ - static void --unpack_lrm_resources(pe_node_t *node, xmlNode *lrm_rsc_list, -- pe_working_set_t *data_set) -+unpack_node_lrm(pe_node_t *node, xmlNode *xml, pe_working_set_t *data_set) - { -- xmlNode *rsc_entry = NULL; -- gboolean found_orphaned_container_filler = FALSE; -+ bool found_orphaned_container_filler = false; - -- for (rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL; -- rsc_entry = pcmk__xe_next(rsc_entry)) { -+ // Drill down to lrm_resources section -+ xml = find_xml_node(xml, XML_CIB_TAG_LRM, FALSE); -+ if (xml == NULL) { -+ return; -+ } -+ xml = find_xml_node(xml, XML_LRM_TAG_RESOURCES, FALSE); -+ if (xml == NULL) { -+ return; -+ } - -- if (pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) { -- pe_resource_t *rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set); -- if (!rsc) { -- continue; -- } -- if (pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) { -- found_orphaned_container_filler = TRUE; -- } -+ // Unpack each lrm_resource entry -+ for (xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE); -+ rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) { -+ -+ pe_resource_t *rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set); -+ -+ if ((rsc != NULL) -+ && pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) { -+ found_orphaned_container_filler = true; - } - } - -- /* now that all the resource state has been unpacked for this node -- * we have to go back and map any orphaned container fillers to their -- * container resource */ -+ /* Now that all resource state has been unpacked for this node, map any -+ * orphaned container fillers to their container resource. 
-+ */ - if (found_orphaned_container_filler) { -- handle_orphaned_container_fillers(lrm_rsc_list, data_set); -+ handle_orphaned_container_fillers(xml, data_set); - } - } - --- -1.8.3.1 - - -From 32b07e573cafc6fa63f29a3619914afc7e40944f Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 21 Jan 2021 13:00:01 -0600 -Subject: [PATCH 10/16] Refactor: scheduler: rename lrm_resource unpacking - function - -... to reflect the XML element directly, for readability. Similarly -rename one of its arguments. ---- - lib/pengine/unpack.c | 27 +++++++++++++++++++-------- - 1 file changed, 19 insertions(+), 8 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 637be8d..4c70cdc 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -2306,8 +2306,19 @@ unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node, - } - } - -+/*! -+ * \internal -+ * \brief Unpack one lrm_resource entry from a node's CIB status -+ * -+ * \param[in] node Node whose status is being unpacked -+ * \param[in] rsc_entry lrm_resource XML being unpacked -+ * \param[in] data_set Cluster working set -+ * -+ * \return Resource corresponding to the entry, or NULL if no operation history -+ */ - static pe_resource_t * --unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set) -+unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource, -+ pe_working_set_t *data_set) - { - GListPtr gIter = NULL; - int stop_index = -1; -@@ -2315,7 +2326,7 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d - enum rsc_role_e req_role = RSC_ROLE_UNKNOWN; - - const char *task = NULL; -- const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); -+ const char *rsc_id = crm_element_value(lrm_resource, XML_ATTR_ID); - - pe_resource_t *rsc = NULL; - GListPtr op_list = NULL; -@@ -2329,13 +2340,13 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d - enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; - - crm_trace("[%s] Processing %s on %s", -- crm_element_name(rsc_entry), rsc_id, node->details->uname); -+ crm_element_name(lrm_resource), rsc_id, node->details->uname); - - /* extract operations */ - op_list = NULL; - sorted_op_list = NULL; - -- for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL; -+ for (rsc_op = pcmk__xe_first_child(lrm_resource); rsc_op != NULL; - rsc_op = pcmk__xe_next(rsc_op)) { - - if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, -@@ -2352,20 +2363,20 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d - } - - /* find the resource */ -- rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry); -+ rsc = unpack_find_resource(data_set, node, rsc_id, lrm_resource); - if (rsc == NULL) { - if (op_list == NULL) { - // If there are no operations, there is nothing to do - return NULL; - } else { -- rsc = process_orphan_resource(rsc_entry, node, data_set); -+ rsc = process_orphan_resource(lrm_resource, node, data_set); - } - } - CRM_ASSERT(rsc != NULL); - - // Check whether the resource is "shutdown-locked" to this node - if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) { -- unpack_shutdown_lock(rsc_entry, rsc, node, data_set); -+ unpack_shutdown_lock(lrm_resource, rsc, node, data_set); - } - - /* process operations */ -@@ -2482,7 +2493,7 @@ unpack_node_lrm(pe_node_t *node, xmlNode *xml, pe_working_set_t *data_set) - for (xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE); - rsc_entry != NULL; rsc_entry = 
crm_next_same_xml(rsc_entry)) { - -- pe_resource_t *rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set); -+ pe_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, data_set); - - if ((rsc != NULL) - && pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) { --- -1.8.3.1 - - -From ed08885366fc3a43ac83d5c3fdab12fbf0b29ded Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 21 Jan 2021 13:04:25 -0600 -Subject: [PATCH 11/16] Refactor: scheduler: use convenience functions when - unpacking lrm_resource - -... to simplify a bit and improve readability ---- - lib/pengine/unpack.c | 16 +++++----------- - 1 file changed, 5 insertions(+), 11 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 4c70cdc..82b7562 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -2326,7 +2326,7 @@ unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource, - enum rsc_role_e req_role = RSC_ROLE_UNKNOWN; - - const char *task = NULL; -- const char *rsc_id = crm_element_value(lrm_resource, XML_ATTR_ID); -+ const char *rsc_id = ID(lrm_resource); - - pe_resource_t *rsc = NULL; - GListPtr op_list = NULL; -@@ -2342,17 +2342,11 @@ unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource, - crm_trace("[%s] Processing %s on %s", - crm_element_name(lrm_resource), rsc_id, node->details->uname); - -- /* extract operations */ -- op_list = NULL; -- sorted_op_list = NULL; -- -- for (rsc_op = pcmk__xe_first_child(lrm_resource); rsc_op != NULL; -- rsc_op = pcmk__xe_next(rsc_op)) { -+ // Build a list of individual lrm_rsc_op entries, so we can sort them -+ for (rsc_op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP); -+ rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) { - -- if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, -- pcmk__str_none)) { -- op_list = g_list_prepend(op_list, rsc_op); -- } -+ op_list = g_list_prepend(op_list, rsc_op); - } - - if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) { --- -1.8.3.1 - - -From e727f29e1fe194938e8ecc698d8ef556031fb5a3 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 21 Jan 2021 13:09:58 -0600 -Subject: [PATCH 12/16] Low: scheduler: warn if lrm_resource has no ID - -This should be possible only if someone manually mis-edits the CIB, but that -case does merit a warning, and we should bail early if it happens (previously, -the right thing would eventually be done anyway, but log messages using a NULL -could theoretically cause a crash). 
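Concretely, the defensive pattern is just an early return before the NULL id can reach any "%s" log format (elsewhere these patches wrap possibly-NULL strings in crm_str() for the same reason). A condensed sketch of the guard added by the hunk below:

    const char *rsc_id = ID(lrm_resource);   /* NULL only if the CIB was hand-edited */

    if (rsc_id == NULL) {
        crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE " entry without id");
        return NULL;                         /* nothing sane to unpack */
    }
    crm_trace("Unpacking " XML_LRM_TAG_RESOURCE " for %s on %s",
              rsc_id, node->details->uname);
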
---- - lib/pengine/unpack.c | 9 +++++++-- - 1 file changed, 7 insertions(+), 2 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 82b7562..19cde7a 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -2339,8 +2339,13 @@ unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource, - enum action_fail_response on_fail = action_fail_ignore; - enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; - -- crm_trace("[%s] Processing %s on %s", -- crm_element_name(lrm_resource), rsc_id, node->details->uname); -+ if (rsc_id == NULL) { -+ crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE -+ " entry without id"); -+ return NULL; -+ } -+ crm_trace("Unpacking " XML_LRM_TAG_RESOURCE " for %s on %s", -+ rsc_id, node->details->uname); - - // Build a list of individual lrm_rsc_op entries, so we can sort them - for (rsc_op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP); --- -1.8.3.1 - - -From 981dcf0f8442cf95ba6e6df3cac07d78e7510cae Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 22 Jan 2021 15:04:49 -0600 -Subject: [PATCH 13/16] Refactor: scheduler: new convenience function for - changing resource's next role - -... for logging consistency ---- - include/crm/pengine/complex.h | 5 ++++- - lib/pengine/complex.c | 19 +++++++++++++++++++ - 2 files changed, 23 insertions(+), 1 deletion(-) - -diff --git a/include/crm/pengine/complex.h b/include/crm/pengine/complex.h -index 1d010f4..d5e1a39 100644 ---- a/include/crm/pengine/complex.h -+++ b/include/crm/pengine/complex.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -35,6 +35,9 @@ void pe_get_versioned_attributes(xmlNode *meta_hash, pe_resource_t *rsc, - gboolean is_parent(pe_resource_t *child, pe_resource_t *rsc); - pe_resource_t *uber_parent(pe_resource_t *rsc); - -+void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, -+ const char *why); -+ - #ifdef __cplusplus - } - #endif -diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c -index 5d7d628..3b5f722 100644 ---- a/lib/pengine/complex.c -+++ b/lib/pengine/complex.c -@@ -1109,3 +1109,22 @@ pe__count_common(pe_resource_t *rsc) - } - } - } -+ -+/*! -+ * \internal -+ * \brief Update a resource's next role -+ * -+ * \param[in,out] rsc Resource to be updated -+ * \param[in] role Resource's new next role -+ * \param[in] why Human-friendly reason why role is changing (for logs) -+ */ -+void -+pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why) -+{ -+ CRM_ASSERT((rsc != NULL) && (why != NULL)); -+ if (rsc->next_role != role) { -+ pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)", -+ rsc->id, role2text(rsc->next_role), role2text(role), why); -+ rsc->next_role = role; -+ } -+} --- -1.8.3.1 - - -From 2ae780b8746ffd8e7575fe4d30fea3971df87c66 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 22 Jan 2021 15:25:59 -0600 -Subject: [PATCH 14/16] Log: scheduler: use new function to set a resource's - next role - -This does downgrade one log from debug to trace, but that seems reasonable. -Otherwise it adds a trace message whenever the next role is changed. 
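A representative before/after taken from the hunks below shows the pattern: each direct assignment to rsc->next_role becomes a call to the pe__set_next_role() helper introduced in the previous patch, which traces the old role, the new role, and a short reason, and only logs when the role actually changes.

    /* before */
    rsc->next_role = RSC_ROLE_STOPPED;

    /* after */
    pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop");
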
---- - lib/pacemaker/pcmk_sched_group.c | 3 ++- - lib/pacemaker/pcmk_sched_native.c | 8 +++++--- - lib/pacemaker/pcmk_sched_promotable.c | 11 +++-------- - lib/pacemaker/pcmk_sched_utils.c | 6 +++--- - lib/pengine/unpack.c | 23 +++++++++++------------ - lib/pengine/utils.c | 5 +++-- - 6 files changed, 27 insertions(+), 29 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c -index 439ed91..c9026e4 100644 ---- a/lib/pacemaker/pcmk_sched_group.c -+++ b/lib/pacemaker/pcmk_sched_group.c -@@ -68,7 +68,8 @@ pcmk__group_allocate(pe_resource_t *rsc, pe_node_t *prefer, - } - } - -- rsc->next_role = group_data->first_child->next_role; -+ pe__set_next_role(rsc, group_data->first_child->next_role, -+ "first group member"); - pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional); - - if (group_data->colocated) { -diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c -index 0e50eda..6548b20 100644 ---- a/lib/pacemaker/pcmk_sched_native.c -+++ b/lib/pacemaker/pcmk_sched_native.c -@@ -581,7 +581,7 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - && data_set->no_quorum_policy == no_quorum_freeze) { - crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze", - rsc->id, role2text(rsc->role), role2text(rsc->next_role)); -- rsc->next_role = rsc->role; -+ pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze"); - } - - pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes); -@@ -594,7 +594,7 @@ pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, - const char *reason = NULL; - pe_node_t *assign_to = NULL; - -- rsc->next_role = rsc->role; -+ pe__set_next_role(rsc, rsc->role, "unmanaged"); - assign_to = pe__current_node(rsc); - if (assign_to == NULL) { - reason = "inactive"; -@@ -1226,7 +1226,9 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) - chosen = rsc->allocated_to; - next_role = rsc->next_role; - if (next_role == RSC_ROLE_UNKNOWN) { -- rsc->next_role = (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED; -+ pe__set_next_role(rsc, -+ (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED, -+ "allocation"); - } - pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s", - rsc->id, role2text(rsc->role), role2text(rsc->next_role), -diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c -index 40d07e9..f3bde0c 100644 ---- a/lib/pacemaker/pcmk_sched_promotable.c -+++ b/lib/pacemaker/pcmk_sched_promotable.c -@@ -622,13 +622,8 @@ set_role_slave(pe_resource_t * rsc, gboolean current) - GListPtr allocated = NULL; - - rsc->fns->location(rsc, &allocated, FALSE); -- -- if (allocated) { -- rsc->next_role = RSC_ROLE_SLAVE; -- -- } else { -- rsc->next_role = RSC_ROLE_STOPPED; -- } -+ pe__set_next_role(rsc, (allocated? 
RSC_ROLE_SLAVE : RSC_ROLE_STOPPED), -+ "unpromoted instance"); - g_list_free(allocated); - } - -@@ -645,7 +640,7 @@ set_role_master(pe_resource_t * rsc) - GListPtr gIter = rsc->children; - - if (rsc->next_role == RSC_ROLE_UNKNOWN) { -- rsc->next_role = RSC_ROLE_MASTER; -+ pe__set_next_role(rsc, RSC_ROLE_MASTER, "promoted instance"); - } - - for (; gIter != NULL; gIter = gIter->next) { -diff --git a/lib/pacemaker/pcmk_sched_utils.c b/lib/pacemaker/pcmk_sched_utils.c -index eaaf526..177f43e 100644 ---- a/lib/pacemaker/pcmk_sched_utils.c -+++ b/lib/pacemaker/pcmk_sched_utils.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -287,7 +287,7 @@ native_assign_node(pe_resource_t * rsc, GListPtr nodes, pe_node_t * chosen, gboo - crm_debug("All nodes for resource %s are unavailable" - ", unclean or shutting down (%s: %d, %d)", - rsc->id, chosen->details->uname, can_run_resources(chosen), chosen->weight); -- rsc->next_role = RSC_ROLE_STOPPED; -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); - chosen = NULL; - } - } -@@ -304,7 +304,7 @@ native_assign_node(pe_resource_t * rsc, GListPtr nodes, pe_node_t * chosen, gboo - char *rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); - - crm_debug("Could not allocate a node for %s", rsc->id); -- rsc->next_role = RSC_ROLE_STOPPED; -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate"); - - for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { - pe_action_t *op = (pe_action_t *) gIter->data; -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 19cde7a..ce51429 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -980,7 +980,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_ - crm_info("Node %s is shutting down", this_node->details->uname); - this_node->details->shutdown = TRUE; - if (rsc) { -- rsc->next_role = RSC_ROLE_STOPPED; -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote shutdown"); - } - } - -@@ -2060,7 +2060,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, - break; - - case action_fail_stop: -- rsc->next_role = RSC_ROLE_STOPPED; -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop"); - break; - - case action_fail_recover: -@@ -2114,7 +2114,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, - /* if reconnect delay is in use, prevent the connection from exiting the - * "STOPPED" role until the failure is cleared by the delay timeout. 
*/ - if (rsc->remote_reconnect_ms) { -- rsc->next_role = RSC_ROLE_STOPPED; -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset"); - } - break; - } -@@ -2405,10 +2405,7 @@ unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource, - - if (get_target_role(rsc, &req_role)) { - if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) { -- pe_rsc_debug(rsc, "%s: Overwriting calculated next role %s" -- " with requested next role %s", -- rsc->id, role2text(rsc->next_role), role2text(req_role)); -- rsc->next_role = req_role; -+ pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE); - - } else if (req_role > rsc->next_role) { - pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s" -@@ -3052,7 +3049,8 @@ unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * x - } else if (!strcmp(task, CRMD_ACTION_DEMOTE)) { - if (action->on_fail == action_fail_block) { - rsc->role = RSC_ROLE_MASTER; -- rsc->next_role = RSC_ROLE_STOPPED; -+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, -+ "demote with on-fail=block"); - - } else if(rc == PCMK_OCF_NOT_RUNNING) { - rsc->role = RSC_ROLE_STOPPED; -@@ -3083,7 +3081,7 @@ unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * x - fail2text(action->on_fail), role2text(action->fail_role)); - - if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) { -- rsc->next_role = action->fail_role; -+ pe__set_next_role(rsc, action->fail_role, "failure"); - } - - if (action->fail_role == RSC_ROLE_STOPPED) { -@@ -3200,7 +3198,7 @@ determine_op_status( - - /* clear any previous failure actions */ - *on_fail = action_fail_ignore; -- rsc->next_role = RSC_ROLE_UNKNOWN; -+ pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, "not running"); - } - break; - -@@ -3595,7 +3593,7 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c - case action_fail_recover: - case action_fail_restart_container: - *on_fail = action_fail_ignore; -- rsc->next_role = RSC_ROLE_UNKNOWN; -+ pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, "clear past failures"); - break; - case action_fail_reset_remote: - if (rsc->remote_reconnect_ms == 0) { -@@ -3606,7 +3604,8 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c - * to reconnect.) - */ - *on_fail = action_fail_ignore; -- rsc->next_role = RSC_ROLE_UNKNOWN; -+ pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, -+ "clear past failures and reset remote"); - } - break; - } -diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c -index 831f890..dbfe048 100644 ---- a/lib/pengine/utils.c -+++ b/lib/pengine/utils.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -450,7 +450,8 @@ effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) - case RSC_ROLE_MASTER: - case RSC_ROLE_SLAVE: - if (rsc->next_role > RSC_ROLE_SLAVE) { -- rsc->next_role = RSC_ROLE_SLAVE; -+ pe__set_next_role(rsc, RSC_ROLE_SLAVE, -+ "no-quorum-policy=demote"); - } - policy = no_quorum_ignore; - break; --- -1.8.3.1 - - -From 588a7c6bcdef8d051a43004a9744641e76d7d3cd Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 22 Jan 2021 16:45:18 -0600 -Subject: [PATCH 15/16] Fix: scheduler: process remote shutdowns correctly - -When unpacking node histories, the scheduler can make multiple passes through -the node_state entries, because the state of remote node connections (on other -nodes) must be known before the history of the remote node itself can be -unpacked. - -When unpacking a remote or guest node's history, the scheduler also unpacks its -transient attributes. If the shutdown attribute has been set, the scheduler -marks the node as shutting down. - -Previously, at that time, it would also set the remote connection's next role -to stopped. However, if it so happened that remote connection history on -another node was processed later in the node history unpacking, and a probe had -found the connection not running, this would reset the next role to unknown. -The connection stop would not be scheduled, and the shutdown would hang until -it timed out. - -Now, set the remote connection to stopped for shutdowns after all node -histories have been unpacked. ---- - lib/pengine/unpack.c | 22 +++++++++++++--------- - 1 file changed, 13 insertions(+), 9 deletions(-) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index ce51429..2d91abc 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -979,9 +979,6 @@ unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_ - if (pe__shutdown_requested(this_node)) { - crm_info("Node %s is shutting down", this_node->details->uname); - this_node->details->shutdown = TRUE; -- if (rsc) { -- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote shutdown"); -- } - } - - if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { -@@ -1289,17 +1286,24 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set) - data_set->stop_needed = NULL; - } - -+ /* Now that we know status of all Pacemaker Remote connections and nodes, -+ * we can stop connections for node shutdowns, and check the online status -+ * of remote/guest nodes that didn't have any node history to unpack. 
-+ */ - for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { - pe_node_t *this_node = gIter->data; - -- if (this_node == NULL) { -- continue; -- } else if (!pe__is_guest_or_remote_node(this_node)) { -- continue; -- } else if(this_node->details->unpacked) { -+ if (!pe__is_guest_or_remote_node(this_node)) { - continue; - } -- determine_remote_online_status(data_set, this_node); -+ if (this_node->details->shutdown -+ && (this_node->details->remote_rsc != NULL)) { -+ pe__set_next_role(this_node->details->remote_rsc, RSC_ROLE_STOPPED, -+ "remote shutdown"); -+ } -+ if (!this_node->details->unpacked) { -+ determine_remote_online_status(data_set, this_node); -+ } - } - - return TRUE; --- -1.8.3.1 - - -From 119d0aa3f8c9e0c9aeba8398de185a559aa40f5e Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 22 Jan 2021 16:59:37 -0600 -Subject: [PATCH 16/16] Test: scheduler: add regression test for remote - connection shutdown - -In particular, a remote node is shutting down, and its node history comes -before a cluster node's history in the status section, where that cluster node -has a probe that found the remote node's connection not running in its history. - -The test ensures that the connection is scheduled to be stopped, even though -that later history will set the next role to unknown. ---- - cts/scheduler/remote-connection-shutdown.dot | 57 + - cts/scheduler/remote-connection-shutdown.exp | 402 ++++ - cts/scheduler/remote-connection-shutdown.scores | 2560 ++++++++++++++++++++++ - cts/scheduler/remote-connection-shutdown.summary | 186 ++ - cts/scheduler/remote-connection-shutdown.xml | 2109 ++++++++++++++++++ - 5 files changed, 5314 insertions(+) - create mode 100644 cts/scheduler/remote-connection-shutdown.dot - create mode 100644 cts/scheduler/remote-connection-shutdown.exp - create mode 100644 cts/scheduler/remote-connection-shutdown.scores - create mode 100644 cts/scheduler/remote-connection-shutdown.summary - create mode 100644 cts/scheduler/remote-connection-shutdown.xml - -diff --git a/cts/scheduler/remote-connection-shutdown.dot b/cts/scheduler/remote-connection-shutdown.dot -new file mode 100644 -index 0000000..74eb9e3 ---- /dev/null -+++ b/cts/scheduler/remote-connection-shutdown.dot -@@ -0,0 +1,57 @@ -+ digraph "g" { -+"compute-0_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] -+"compute-unfence-trigger-clone_stop_0" -> "compute-unfence-trigger-clone_stopped_0" [ style = bold] -+"compute-unfence-trigger-clone_stop_0" -> "compute-unfence-trigger_stop_0 compute-0" [ style = bold] -+"compute-unfence-trigger-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"compute-unfence-trigger-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"compute-unfence-trigger_stop_0 compute-0" -> "compute-0_stop_0 controller-0" [ style = bold] -+"compute-unfence-trigger_stop_0 compute-0" -> "compute-unfence-trigger-clone_stopped_0" [ style = bold] -+"compute-unfence-trigger_stop_0 compute-0" [ style=bold color="green" fontcolor="black"] -+"nova-evacuate_monitor_10000 database-1" [ style=bold color="green" fontcolor="black"] -+"nova-evacuate_start_0 database-1" -> "nova-evacuate_monitor_10000 database-1" [ style = bold] -+"nova-evacuate_start_0 database-1" [ style=bold color="green" fontcolor="black"] -+"nova-evacuate_stop_0 database-0" -> "nova-evacuate_start_0 database-1" [ style = bold] -+"nova-evacuate_stop_0 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 controller-0" [ 
style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 controller-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 controller-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 database-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 database-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 messaging-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 messaging-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_monitor_60000 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_compute-fence-nova_start_0 database-0" -> "stonith-fence_compute-fence-nova_monitor_60000 database-0" [ style = bold] -+"stonith-fence_compute-fence-nova_start_0 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254001f5f3c_monitor_60000 messaging-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254001f5f3c_start_0 messaging-0" -> "stonith-fence_ipmilan-5254001f5f3c_monitor_60000 messaging-0" [ style = bold] -+"stonith-fence_ipmilan-5254001f5f3c_start_0 messaging-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254001f5f3c_stop_0 database-2" -> "stonith-fence_ipmilan-5254001f5f3c_start_0 messaging-0" [ style = bold] -+"stonith-fence_ipmilan-5254001f5f3c_stop_0 database-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-52540033df9c_monitor_60000 database-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-52540033df9c_start_0 database-2" -> "stonith-fence_ipmilan-52540033df9c_monitor_60000 database-2" [ style = bold] -+"stonith-fence_ipmilan-52540033df9c_start_0 database-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-52540033df9c_stop_0 database-1" -> "stonith-fence_ipmilan-52540033df9c_start_0 database-2" [ style = bold] -+"stonith-fence_ipmilan-52540033df9c_stop_0 database-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254003f88b4_monitor_60000 messaging-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254003f88b4_start_0 messaging-1" -> "stonith-fence_ipmilan-5254003f88b4_monitor_60000 messaging-1" [ style = bold] -+"stonith-fence_ipmilan-5254003f88b4_start_0 messaging-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254003f88b4_stop_0 messaging-0" -> "stonith-fence_ipmilan-5254003f88b4_start_0 messaging-1" [ style = bold] -+"stonith-fence_ipmilan-5254003f88b4_stop_0 messaging-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254007b7920_monitor_60000 messaging-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254007b7920_start_0 messaging-2" -> "stonith-fence_ipmilan-5254007b7920_monitor_60000 messaging-2" [ style = bold] -+"stonith-fence_ipmilan-5254007b7920_start_0 messaging-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254007b7920_stop_0 messaging-1" -> "stonith-fence_ipmilan-5254007b7920_start_0 messaging-2" [ style = 
bold] -+"stonith-fence_ipmilan-5254007b7920_stop_0 messaging-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254009cb549_monitor_60000 database-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254009cb549_start_0 database-1" -> "stonith-fence_ipmilan-5254009cb549_monitor_60000 database-1" [ style = bold] -+"stonith-fence_ipmilan-5254009cb549_start_0 database-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-5254009cb549_stop_0 database-0" -> "stonith-fence_ipmilan-5254009cb549_start_0 database-1" [ style = bold] -+"stonith-fence_ipmilan-5254009cb549_stop_0 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400ffc780_monitor_60000 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400ffc780_start_0 database-0" -> "stonith-fence_ipmilan-525400ffc780_monitor_60000 database-0" [ style = bold] -+"stonith-fence_ipmilan-525400ffc780_start_0 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400ffc780_stop_0 messaging-2" -> "stonith-fence_ipmilan-525400ffc780_start_0 database-0" [ style = bold] -+"stonith-fence_ipmilan-525400ffc780_stop_0 messaging-2" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/remote-connection-shutdown.exp b/cts/scheduler/remote-connection-shutdown.exp -new file mode 100644 -index 0000000..f3c3424 ---- /dev/null -+++ b/cts/scheduler/remote-connection-shutdown.exp -@@ -0,0 +1,402 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/remote-connection-shutdown.scores b/cts/scheduler/remote-connection-shutdown.scores -new file mode 100644 -index 0000000..003b067 ---- /dev/null -+++ b/cts/scheduler/remote-connection-shutdown.scores -@@ -0,0 +1,2560 @@ -+Allocation scores: -+Only 'private' parameters to nova-evacuate_monitor_10000 on database-0 changed: 0:0;259:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to nova-evacuate_start_0 on database-0 changed: 0:0;258:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-0 changed -+Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-1 changed -+Only 'private' parameters to stonith-fence_ipmilan-5254001f5f3c_monitor_60000 on database-2 
changed: 0:0;265:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254001f5f3c_start_0 on database-2 changed: 0:0;263:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-52540033df9c_monitor_60000 on database-1 changed: 0:0;263:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-52540033df9c_start_0 on database-1 changed: 0:0;261:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254003f88b4_monitor_60000 on messaging-0 changed: 0:0;269:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254003f88b4_start_0 on messaging-0 changed: 0:0;267:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400642894_monitor_60000 on messaging-2 changed: 0:0;274:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400642894_start_0 on messaging-2 changed: 0:0;272:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254007b7920_monitor_60000 on messaging-1 changed: 0:0;273:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254007b7920_start_0 on messaging-1 changed: 0:0;271:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254009cb549_monitor_60000 on database-0 changed: 0:0;323:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254009cb549_start_0 on database-0 changed: 0:0;322:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400bb150b_monitor_60000 on messaging-0 changed: 0:0;325:70:0:40f880e4-b328-4380-9703-47856390a1e0 -+Only 'private' parameters to stonith-fence_ipmilan-525400bb150b_start_0 on messaging-0 changed: 0:0;324:70:0:40f880e4-b328-4380-9703-47856390a1e0 -+Only 'private' parameters to stonith-fence_ipmilan-525400d5382b_monitor_60000 on database-2 changed: 0:0;320:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400d5382b_start_0 on database-2 changed: 0:0;319:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400dc0f81_monitor_60000 on database-1 changed: 0:0;331:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400dc0f81_start_0 on database-1 changed: 0:0;330:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400e10267_monitor_60000 on messaging-1 changed: 0:0;326:1318:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400e10267_start_0 on messaging-1 changed: 0:0;320:1317:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400ffc780_monitor_60000 on messaging-2 changed: 0:0;323:50:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400ffc780_start_0 on messaging-2 changed: 0:0;321:49:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Using the original execution date of: 2020-11-17 07:03:16Z -+galera:0 promotion score on galera-bundle-0: 100 -+galera:1 promotion score on galera-bundle-1: 100 -+galera:2 promotion score on galera-bundle-2: 100 -+ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 10 -+ovndb_servers:1 promotion 
score on ovn-dbs-bundle-1: 5 -+ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 -+pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: galera-bundle-master 
allocation score on compute-0: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on compute-1: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0 -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: 
galera-bundle-podman-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501 -+pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501 -+pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501 -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY 
-+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: 
-INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY 
-+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on 
compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: 
-INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on 
compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on 
database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -+pcmk__bundle_allocate: openstack-cinder-volume allocation score 
on controller-2: 0 -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 
allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 10000 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 10000 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on 
compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 10000 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on 
ovn-dbs-bundle-0: 10 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: 5 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: 5 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: 
ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 501 -+pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY -+pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501 -+pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY -+pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501 -+pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY 
-+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone 
allocation score on controller-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY 
-+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 -+pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501 -+pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 -+pcmk__bundle_allocate: redis-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-2 
allocation score on database-0: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on compute-0: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on compute-1: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on 
compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501 -+pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501 -+pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501 -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on compute-0: 1 -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on compute-1: 0 
-+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on compute-1: 1 -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 
allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on database-2: 
-INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on 
redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on messaging-1: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on controller-1: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-0: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: 
compute-unfence-trigger:19 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: 
compute-unfence-trigger:20 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on 
ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on database-0: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: 
compute-unfence-trigger:4 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on messaging-0: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 
allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-0: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on compute-0: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on compute-1: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on database-0: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on database-1: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on database-2: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-0: 0 -+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-1: 0 -+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-2: 0 -+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -+pcmk__clone_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -+pcmk__clone_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on compute-0: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on compute-1: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-0: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-1: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-2: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: 0 -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: 0 -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: 0 -+pcmk__clone_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY -+pcmk__clone_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY -+pcmk__clone_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on compute-0: -INFINITY -+pcmk__clone_allocate: 
rabbitmq-bundle-clone allocation score on compute-1: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-0: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-1: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-2: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: 0 -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: 0 -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: 0 -+pcmk__clone_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -+pcmk__clone_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -+pcmk__clone_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on compute-0: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on compute-1: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on database-0: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on database-1: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on database-2: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-0: 0 -+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-1: 0 -+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-2: 0 -+pcmk__clone_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -+pcmk__clone_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -+pcmk__clone_allocate: redis:2 allocation score on redis-bundle-2: INFINITY -+pcmk__native_allocate: compute-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation 
score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-1 allocation score on controller-0: 0 -+pcmk__native_allocate: compute-1 allocation score on controller-1: 0 -+pcmk__native_allocate: compute-1 allocation score on controller-2: 0 -+pcmk__native_allocate: compute-1 allocation score on database-0: 0 -+pcmk__native_allocate: compute-1 allocation score on database-1: 0 -+pcmk__native_allocate: compute-1 allocation score on database-2: 0 -+pcmk__native_allocate: compute-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: compute-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: compute-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 
allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on compute-1: 1 -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 
allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-1: 
-INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation 
score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-0: -INFINITY 
-+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation 
score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-1: -INFINITY 
-+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: 
compute-unfence-trigger:19 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-2: -INFINITY 
-+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: 
compute-unfence-trigger:22 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-0: -INFINITY 
-+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-0: 
-INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on 
rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on 
galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-0 allocation score on 
compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-0 allocation score on controller-0: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on controller-1: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on controller-2: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on database-0: 10000 -+pcmk__native_allocate: galera-bundle-0 allocation score on database-1: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-1 allocation score on controller-0: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on controller-1: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on controller-2: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on database-0: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on database-1: 10000 -+pcmk__native_allocate: galera-bundle-1 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-2 allocation score on controller-0: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on controller-1: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on controller-2: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on database-0: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on database-1: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on database-2: 10000 -+pcmk__native_allocate: galera-bundle-2 allocation score on messaging-0: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on messaging-1: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on messaging-2: 0 -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-0: 0 -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-1: 0 -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on compute-1: 
-INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-1: 0 -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -+pcmk__native_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -+pcmk__native_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 
allocation score on database-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-0: INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-1: 0 -+pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-2: 0 -+pcmk__native_allocate: ip-10.0.0.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-1: 0 -+pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-2: INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-0: 0 
-+pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-1: INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-2: 0 -+pcmk__native_allocate: ip-172.17.1.151 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on controller-2: INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.57 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-0: INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-1: 0 -+pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-2: 0 -+pcmk__native_allocate: ip-172.17.3.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-1: INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-2: 0 -+pcmk__native_allocate: ip-172.17.4.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on compute-1: -INFINITY 
-+pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-1: 0 -+pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-2: INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: nova-evacuate allocation score on compute-0: -INFINITY -+pcmk__native_allocate: nova-evacuate allocation score on compute-1: -INFINITY -+pcmk__native_allocate: nova-evacuate allocation score on controller-0: 0 -+pcmk__native_allocate: nova-evacuate allocation score on controller-1: 0 -+pcmk__native_allocate: nova-evacuate allocation score on controller-2: 0 -+pcmk__native_allocate: nova-evacuate allocation score on database-0: 0 -+pcmk__native_allocate: nova-evacuate allocation score on database-1: 0 -+pcmk__native_allocate: nova-evacuate allocation score on database-2: 0 -+pcmk__native_allocate: nova-evacuate allocation score on messaging-0: 0 -+pcmk__native_allocate: nova-evacuate allocation score on messaging-1: 0 -+pcmk__native_allocate: nova-evacuate allocation score on messaging-2: 0 -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 10000 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: 
ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 10000 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 10000 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 
allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY -+pcmk__native_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY -+pcmk__native_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 10000 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 10000 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 
-+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 10000 -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 
INFINITY -+pcmk__native_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -+pcmk__native_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -+pcmk__native_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-0 allocation score on controller-0: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on controller-2: 10000 -+pcmk__native_allocate: redis-bundle-0 allocation score on database-0: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on database-1: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on database-2: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-1 allocation score on controller-0: 10000 -+pcmk__native_allocate: redis-bundle-1 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on controller-2: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on database-0: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on database-1: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on database-2: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-2 allocation score on controller-0: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on controller-1: 10000 -+pcmk__native_allocate: redis-bundle-2 allocation score on controller-2: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on database-0: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on database-1: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on database-2: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on messaging-0: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on messaging-1: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on messaging-2: 0 -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on 
messaging-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -+pcmk__native_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -+pcmk__native_allocate: redis:2 allocation score on redis-bundle-2: INFINITY -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on compute-1: -INFINITY 
-+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on database-0: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on messaging-2: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on controller-2: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on 
database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on controller-1: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on messaging-1: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on database-2: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation 
score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on controller-0: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on messaging-0: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: 
stonith-fence_ipmilan-525400ffc780 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on database-1: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on messaging-2: 0 -+redis:0 promotion score on redis-bundle-0: 1 -+redis:1 promotion score on redis-bundle-1: 1 -+redis:2 promotion score on redis-bundle-2: 1 -diff --git a/cts/scheduler/remote-connection-shutdown.summary b/cts/scheduler/remote-connection-shutdown.summary -new file mode 100644 -index 0000000..8756c33 ---- /dev/null -+++ b/cts/scheduler/remote-connection-shutdown.summary -@@ -0,0 +1,186 @@ -+Using the original execution date of: 2020-11-17 07:03:16Z -+ -+Current cluster status: -+Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] -+RemoteOnline: [ compute-0 compute-1 ] -+GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 galera-bundle-1:galera-bundle-podman-1 galera-bundle-2:galera-bundle-podman-2 ovn-dbs-bundle-0:ovn-dbs-bundle-podman-0 ovn-dbs-bundle-1:ovn-dbs-bundle-podman-1 ovn-dbs-bundle-2:ovn-dbs-bundle-podman-2 rabbitmq-bundle-0:rabbitmq-bundle-podman-0 rabbitmq-bundle-1:rabbitmq-bundle-podman-1 rabbitmq-bundle-2:rabbitmq-bundle-podman-2 redis-bundle-0:redis-bundle-podman-0 redis-bundle-1:redis-bundle-podman-1 redis-bundle-2:redis-bundle-podman-2 ] -+ -+ compute-0 (ocf::pacemaker:remote): Started controller-0 -+ compute-1 (ocf::pacemaker:remote): Started controller-1 -+ Container bundle set: galera-bundle [cluster.common.tag/mariadb:pcmklatest] -+ galera-bundle-0 (ocf::heartbeat:galera): Master database-0 -+ galera-bundle-1 (ocf::heartbeat:galera): Master database-1 -+ galera-bundle-2 (ocf::heartbeat:galera): Master database-2 -+ Container bundle set: rabbitmq-bundle [cluster.common.tag/rabbitmq:pcmklatest] -+ rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started messaging-0 -+ rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started messaging-1 -+ rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started messaging-2 -+ Container bundle set: redis-bundle [cluster.common.tag/redis:pcmklatest] -+ redis-bundle-0 (ocf::heartbeat:redis): Master controller-2 -+ redis-bundle-1 (ocf::heartbeat:redis): Slave controller-0 -+ redis-bundle-2 (ocf::heartbeat:redis): Slave controller-1 -+ ip-192.168.24.150 (ocf::heartbeat:IPaddr2): Started controller-2 -+ ip-10.0.0.150 (ocf::heartbeat:IPaddr2): Started controller-0 -+ ip-172.17.1.151 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-172.17.1.150 (ocf::heartbeat:IPaddr2): Started controller-2 -+ ip-172.17.3.150 (ocf::heartbeat:IPaddr2): Started controller-0 -+ ip-172.17.4.150 (ocf::heartbeat:IPaddr2): Started controller-1 -+ Container bundle set: haproxy-bundle [cluster.common.tag/haproxy:pcmklatest] -+ haproxy-bundle-podman-0 (ocf::heartbeat:podman): Started controller-2 -+ haproxy-bundle-podman-1 (ocf::heartbeat:podman): Started controller-0 
-+ haproxy-bundle-podman-2 (ocf::heartbeat:podman): Started controller-1 -+ Container bundle set: ovn-dbs-bundle [cluster.common.tag/ovn-northd:pcmklatest] -+ ovn-dbs-bundle-0 (ocf::ovn:ovndb-servers): Master controller-2 -+ ovn-dbs-bundle-1 (ocf::ovn:ovndb-servers): Slave controller-0 -+ ovn-dbs-bundle-2 (ocf::ovn:ovndb-servers): Slave controller-1 -+ ip-172.17.1.57 (ocf::heartbeat:IPaddr2): Started controller-2 -+ stonith-fence_compute-fence-nova (stonith:fence_compute): Stopped -+ Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger] -+ Started: [ compute-0 compute-1 ] -+ Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] -+ nova-evacuate (ocf::openstack:NovaEvacuate): Started database-0 -+ stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-1 -+ stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started database-2 -+ stonith-fence_ipmilan-5254003f88b4 (stonith:fence_ipmilan): Started messaging-0 -+ stonith-fence_ipmilan-5254007b7920 (stonith:fence_ipmilan): Started messaging-1 -+ stonith-fence_ipmilan-525400642894 (stonith:fence_ipmilan): Started messaging-2 -+ stonith-fence_ipmilan-525400d5382b (stonith:fence_ipmilan): Started database-2 -+ stonith-fence_ipmilan-525400bb150b (stonith:fence_ipmilan): Started messaging-0 -+ stonith-fence_ipmilan-525400ffc780 (stonith:fence_ipmilan): Started messaging-2 -+ stonith-fence_ipmilan-5254009cb549 (stonith:fence_ipmilan): Started database-0 -+ stonith-fence_ipmilan-525400e10267 (stonith:fence_ipmilan): Started messaging-1 -+ stonith-fence_ipmilan-525400dc0f81 (stonith:fence_ipmilan): Started database-1 -+ Container bundle: openstack-cinder-volume [cluster.common.tag/cinder-volume:pcmklatest] -+ openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-0 -+ -+Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-0 changed -+Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-1 changed -+Only 'private' parameters to stonith-fence_ipmilan-5254009cb549_start_0 on database-0 changed: 0:0;322:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254009cb549_monitor_60000 on database-0 changed: 0:0;323:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to nova-evacuate_start_0 on database-0 changed: 0:0;258:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to nova-evacuate_monitor_10000 on database-0 changed: 0:0;259:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400dc0f81_start_0 on database-1 changed: 0:0;330:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400dc0f81_monitor_60000 on database-1 changed: 0:0;331:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-52540033df9c_start_0 on database-1 changed: 0:0;261:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-52540033df9c_monitor_60000 on database-1 changed: 0:0;263:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400d5382b_start_0 on database-2 changed: 0:0;319:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400d5382b_monitor_60000 on database-2 changed: 0:0;320:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' 
parameters to stonith-fence_ipmilan-5254001f5f3c_start_0 on database-2 changed: 0:0;263:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254001f5f3c_monitor_60000 on database-2 changed: 0:0;265:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400e10267_start_0 on messaging-1 changed: 0:0;320:1317:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400e10267_monitor_60000 on messaging-1 changed: 0:0;326:1318:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254007b7920_start_0 on messaging-1 changed: 0:0;271:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254007b7920_monitor_60000 on messaging-1 changed: 0:0;273:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400bb150b_start_0 on messaging-0 changed: 0:0;324:70:0:40f880e4-b328-4380-9703-47856390a1e0 -+Only 'private' parameters to stonith-fence_ipmilan-525400bb150b_monitor_60000 on messaging-0 changed: 0:0;325:70:0:40f880e4-b328-4380-9703-47856390a1e0 -+Only 'private' parameters to stonith-fence_ipmilan-5254003f88b4_start_0 on messaging-0 changed: 0:0;267:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-5254003f88b4_monitor_60000 on messaging-0 changed: 0:0;269:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400642894_start_0 on messaging-2 changed: 0:0;272:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400642894_monitor_60000 on messaging-2 changed: 0:0;274:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400ffc780_start_0 on messaging-2 changed: 0:0;321:49:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Only 'private' parameters to stonith-fence_ipmilan-525400ffc780_monitor_60000 on messaging-2 changed: 0:0;323:50:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 -+Transition Summary: -+ * Stop compute-0 ( controller-0 ) due to node availability -+ * Start stonith-fence_compute-fence-nova ( database-0 ) -+ * Stop compute-unfence-trigger:0 ( compute-0 ) due to node availability -+ * Move nova-evacuate ( database-0 -> database-1 ) -+ * Move stonith-fence_ipmilan-52540033df9c ( database-1 -> database-2 ) -+ * Move stonith-fence_ipmilan-5254001f5f3c ( database-2 -> messaging-0 ) -+ * Move stonith-fence_ipmilan-5254003f88b4 ( messaging-0 -> messaging-1 ) -+ * Move stonith-fence_ipmilan-5254007b7920 ( messaging-1 -> messaging-2 ) -+ * Move stonith-fence_ipmilan-525400ffc780 ( messaging-2 -> database-0 ) -+ * Move stonith-fence_ipmilan-5254009cb549 ( database-0 -> database-1 ) -+ -+Executing cluster transition: -+ * Resource action: stonith-fence_compute-fence-nova start on database-0 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-2 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-1 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-2 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-1 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-0 -+ * Cluster action: clear_failcount for 
stonith-fence_compute-fence-nova on database-2 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-1 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-0 -+ * Pseudo action: compute-unfence-trigger-clone_stop_0 -+ * Resource action: nova-evacuate stop on database-0 -+ * Resource action: stonith-fence_ipmilan-52540033df9c stop on database-1 -+ * Resource action: stonith-fence_ipmilan-5254001f5f3c stop on database-2 -+ * Resource action: stonith-fence_ipmilan-5254003f88b4 stop on messaging-0 -+ * Resource action: stonith-fence_ipmilan-5254007b7920 stop on messaging-1 -+ * Resource action: stonith-fence_ipmilan-525400ffc780 stop on messaging-2 -+ * Resource action: stonith-fence_ipmilan-5254009cb549 stop on database-0 -+ * Resource action: stonith-fence_compute-fence-nova monitor=60000 on database-0 -+ * Resource action: compute-unfence-trigger stop on compute-0 -+ * Pseudo action: compute-unfence-trigger-clone_stopped_0 -+ * Resource action: nova-evacuate start on database-1 -+ * Resource action: stonith-fence_ipmilan-52540033df9c start on database-2 -+ * Resource action: stonith-fence_ipmilan-5254001f5f3c start on messaging-0 -+ * Resource action: stonith-fence_ipmilan-5254003f88b4 start on messaging-1 -+ * Resource action: stonith-fence_ipmilan-5254007b7920 start on messaging-2 -+ * Resource action: stonith-fence_ipmilan-525400ffc780 start on database-0 -+ * Resource action: stonith-fence_ipmilan-5254009cb549 start on database-1 -+ * Resource action: compute-0 stop on controller-0 -+ * Resource action: nova-evacuate monitor=10000 on database-1 -+ * Resource action: stonith-fence_ipmilan-52540033df9c monitor=60000 on database-2 -+ * Resource action: stonith-fence_ipmilan-5254001f5f3c monitor=60000 on messaging-0 -+ * Resource action: stonith-fence_ipmilan-5254003f88b4 monitor=60000 on messaging-1 -+ * Resource action: stonith-fence_ipmilan-5254007b7920 monitor=60000 on messaging-2 -+ * Resource action: stonith-fence_ipmilan-525400ffc780 monitor=60000 on database-0 -+ * Resource action: stonith-fence_ipmilan-5254009cb549 monitor=60000 on database-1 -+Using the original execution date of: 2020-11-17 07:03:16Z -+ -+Revised cluster status: -+Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] -+RemoteOnline: [ compute-1 ] -+RemoteOFFLINE: [ compute-0 ] -+GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 galera-bundle-1:galera-bundle-podman-1 galera-bundle-2:galera-bundle-podman-2 ovn-dbs-bundle-0:ovn-dbs-bundle-podman-0 ovn-dbs-bundle-1:ovn-dbs-bundle-podman-1 ovn-dbs-bundle-2:ovn-dbs-bundle-podman-2 rabbitmq-bundle-0:rabbitmq-bundle-podman-0 rabbitmq-bundle-1:rabbitmq-bundle-podman-1 rabbitmq-bundle-2:rabbitmq-bundle-podman-2 redis-bundle-0:redis-bundle-podman-0 redis-bundle-1:redis-bundle-podman-1 redis-bundle-2:redis-bundle-podman-2 ] -+ -+ compute-0 (ocf::pacemaker:remote): Stopped -+ compute-1 (ocf::pacemaker:remote): Started controller-1 -+ Container bundle set: galera-bundle [cluster.common.tag/mariadb:pcmklatest] -+ galera-bundle-0 (ocf::heartbeat:galera): Master database-0 -+ galera-bundle-1 (ocf::heartbeat:galera): Master database-1 -+ galera-bundle-2 (ocf::heartbeat:galera): Master database-2 -+ Container bundle set: rabbitmq-bundle [cluster.common.tag/rabbitmq:pcmklatest] -+ rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started messaging-0 -+ rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started messaging-1 -+ rabbitmq-bundle-2 
(ocf::heartbeat:rabbitmq-cluster): Started messaging-2 -+ Container bundle set: redis-bundle [cluster.common.tag/redis:pcmklatest] -+ redis-bundle-0 (ocf::heartbeat:redis): Master controller-2 -+ redis-bundle-1 (ocf::heartbeat:redis): Slave controller-0 -+ redis-bundle-2 (ocf::heartbeat:redis): Slave controller-1 -+ ip-192.168.24.150 (ocf::heartbeat:IPaddr2): Started controller-2 -+ ip-10.0.0.150 (ocf::heartbeat:IPaddr2): Started controller-0 -+ ip-172.17.1.151 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-172.17.1.150 (ocf::heartbeat:IPaddr2): Started controller-2 -+ ip-172.17.3.150 (ocf::heartbeat:IPaddr2): Started controller-0 -+ ip-172.17.4.150 (ocf::heartbeat:IPaddr2): Started controller-1 -+ Container bundle set: haproxy-bundle [cluster.common.tag/haproxy:pcmklatest] -+ haproxy-bundle-podman-0 (ocf::heartbeat:podman): Started controller-2 -+ haproxy-bundle-podman-1 (ocf::heartbeat:podman): Started controller-0 -+ haproxy-bundle-podman-2 (ocf::heartbeat:podman): Started controller-1 -+ Container bundle set: ovn-dbs-bundle [cluster.common.tag/ovn-northd:pcmklatest] -+ ovn-dbs-bundle-0 (ocf::ovn:ovndb-servers): Master controller-2 -+ ovn-dbs-bundle-1 (ocf::ovn:ovndb-servers): Slave controller-0 -+ ovn-dbs-bundle-2 (ocf::ovn:ovndb-servers): Slave controller-1 -+ ip-172.17.1.57 (ocf::heartbeat:IPaddr2): Started controller-2 -+ stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-0 -+ Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger] -+ Started: [ compute-1 ] -+ Stopped: [ compute-0 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] -+ nova-evacuate (ocf::openstack:NovaEvacuate): Started database-1 -+ stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-2 -+ stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started messaging-0 -+ stonith-fence_ipmilan-5254003f88b4 (stonith:fence_ipmilan): Started messaging-1 -+ stonith-fence_ipmilan-5254007b7920 (stonith:fence_ipmilan): Started messaging-2 -+ stonith-fence_ipmilan-525400642894 (stonith:fence_ipmilan): Started messaging-2 -+ stonith-fence_ipmilan-525400d5382b (stonith:fence_ipmilan): Started database-2 -+ stonith-fence_ipmilan-525400bb150b (stonith:fence_ipmilan): Started messaging-0 -+ stonith-fence_ipmilan-525400ffc780 (stonith:fence_ipmilan): Started database-0 -+ stonith-fence_ipmilan-5254009cb549 (stonith:fence_ipmilan): Started database-1 -+ stonith-fence_ipmilan-525400e10267 (stonith:fence_ipmilan): Started messaging-1 -+ stonith-fence_ipmilan-525400dc0f81 (stonith:fence_ipmilan): Started database-1 -+ Container bundle: openstack-cinder-volume [cluster.common.tag/cinder-volume:pcmklatest] -+ openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-0 -+ -diff --git a/cts/scheduler/remote-connection-shutdown.xml b/cts/scheduler/remote-connection-shutdown.xml -new file mode 100644 -index 0000000..0e4f995 ---- /dev/null -+++ b/cts/scheduler/remote-connection-shutdown.xml -@@ -0,0 +1,2109 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - diff --git a/SOURCES/033-cibsecret.patch b/SOURCES/033-cibsecret.patch deleted file mode 100644 index 541f4f4..0000000 --- a/SOURCES/033-cibsecret.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 494eebe33d56b24e1f3a13ebe6c0ec651c99a2af Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 3 Feb 2021 09:47:39 -0600 -Subject: [PATCH] Fix: tools: get cibsecret stash working again - -Broke with dfe636c4 ---- - tools/cibsecret.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/cibsecret.in b/tools/cibsecret.in -index 6326bf0..ce57a18 100644 ---- a/tools/cibsecret.in -+++ b/tools/cibsecret.in -@@ -393,7 +393,7 @@ cibsecret_stash() { - fatal $CRM_EX_NOSUCH "nothing to stash for resource $rsc parameter $param" - is_secret "$CIBSTASH_CURRENT" && - fatal $CRM_EX_EXISTS "resource $rsc parameter $param already set as secret, nothing to stash" -- cibsecret_set "$CIBSTASH_CURRENT" -+ cibsecret_set "$CIBSTASH_CURRENT" 4 - } - - cibsecret_unstash() { --- -1.8.3.1 - diff --git a/SOURCES/034-crm_mon.patch b/SOURCES/034-crm_mon.patch deleted file mode 100644 index d38af90..0000000 --- a/SOURCES/034-crm_mon.patch +++ /dev/null @@ -1,709 +0,0 @@ -From 68139dc8ff5efbfd81d3b5e868462e7eaefa2c74 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger 
-Date: Mon, 25 Jan 2021 15:35:33 +0100 -Subject: [PATCH 1/7] Fix: crm_mon: add explicit void to one_shot prototype for - compat - ---- - tools/crm_mon.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 0981634..1eca1b7 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1226,7 +1226,7 @@ handle_connection_failures(int rc) - } - - static void --one_shot() -+one_shot(void) - { - int rc; - --- -1.8.3.1 - - -From 8c7a01f8880efff8457e8421c381082b250d4512 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Mon, 25 Jan 2021 16:26:30 +0100 -Subject: [PATCH 2/7] Refactor: crm_mon: cib_connect & - handle_connection_failures -> new rc - ---- - tools/crm_mon.c | 62 ++++++++++++++++++++++++++++++++------------------------- - 1 file changed, 35 insertions(+), 27 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 1eca1b7..3fbac5f 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -690,7 +690,7 @@ reconnect_after_timeout(gpointer data) - - print_as(output_format, "Reconnecting...\n"); - fencing_connect(); -- if (cib_connect(TRUE) == pcmk_ok) { -+ if (cib_connect(TRUE) == pcmk_rc_ok) { - /* Redraw the screen and reinstall ourselves to get called after another reconnect_msec. */ - mon_refresh_display(NULL); - return FALSE; -@@ -804,16 +804,17 @@ fencing_connect(void) - static int - cib_connect(gboolean full) - { -- int rc = pcmk_ok; -+ int rc = pcmk_rc_ok; - static gboolean need_pass = TRUE; - -- CRM_CHECK(cib != NULL, return -EINVAL); -+ CRM_CHECK(cib != NULL, return EINVAL); - - if (getenv("CIB_passwd") != NULL) { - need_pass = FALSE; - } - -- if (cib->state == cib_connected_query || cib->state == cib_connected_command) { -+ if (cib->state == cib_connected_query || -+ cib->state == cib_connected_command) { - return rc; - } - -@@ -825,37 +826,44 @@ cib_connect(gboolean full) - * @TODO Add a password prompt (maybe including input) function to - * pcmk__output_t and use it in libcib. 
- */ -- if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) { -+ if ((output_format == mon_output_console) && -+ need_pass && -+ (cib->variant == cib_remote)) { - need_pass = FALSE; - print_as(output_format, "Password:"); - } - -- rc = cib->cmds->signon(cib, crm_system_name, cib_query); -- if (rc != pcmk_ok) { -+ rc = pcmk_legacy2rc(cib->cmds->signon(cib, crm_system_name, cib_query)); -+ if (rc != pcmk_rc_ok) { - out->err(out, "Could not connect to the CIB: %s", -- pcmk_strerror(rc)); -+ pcmk_rc_str(rc)); - return rc; - } - -- rc = cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); -+ rc = pcmk_legacy2rc(cib->cmds->query(cib, NULL, ¤t_cib, -+ cib_scope_local | cib_sync_call)); - -- if (rc == pcmk_ok && full) { -- rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy); -- if (rc == -EPROTONOSUPPORT) { -- print_as -- (output_format, "Notification setup not supported, won't be able to reconnect after failure"); -+ if (rc == pcmk_rc_ok && full) { -+ rc = pcmk_legacy2rc(cib->cmds->set_connection_dnotify(cib, -+ mon_cib_connection_destroy)); -+ if (rc == EPROTONOSUPPORT) { -+ print_as(output_format, -+ "Notification setup not supported, won't be " -+ "able to reconnect after failure"); - if (output_format == mon_output_console) { - sleep(2); - } -- rc = pcmk_ok; -+ rc = pcmk_rc_ok; - } - -- if (rc == pcmk_ok) { -- cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); -- rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); -+ if (rc == pcmk_rc_ok) { -+ cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, -+ crm_diff_update); -+ rc = pcmk_legacy2rc(cib->cmds->add_notify_callback(cib, -+ T_CIB_DIFF_NOTIFY, crm_diff_update)); - } - -- if (rc != pcmk_ok) { -+ if (rc != pcmk_rc_ok) { - out->err(out, "Notification setup failed, could not monitor CIB actions"); - clean_up_cib_connection(); - clean_up_fencing_connection(); -@@ -1206,20 +1214,20 @@ reconcile_output_format(pcmk__common_args_t *args) { - static void - handle_connection_failures(int rc) - { -- if (rc == pcmk_ok) { -+ if (rc == pcmk_rc_ok) { - return; - } - - if (output_format == mon_output_monitor) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "CLUSTER CRIT: Connection to cluster failed: %s", -- pcmk_strerror(rc)); -+ pcmk_rc_str(rc)); - rc = MON_STATUS_CRIT; -- } else if (rc == -ENOTCONN) { -+ } else if (rc == ENOTCONN) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: cluster is not available on this node"); -- rc = crm_errno2exit(rc); -+ rc = pcmk_rc2exitc(rc); - } else { -- g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Connection to cluster failed: %s", pcmk_strerror(rc)); -- rc = crm_errno2exit(rc); -+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Connection to cluster failed: %s", pcmk_rc_str(rc)); -+ rc = pcmk_rc2exitc(rc); - } - - clean_up(rc); -@@ -1478,7 +1486,7 @@ main(int argc, char **argv) - fencing_connect(); - rc = cib_connect(TRUE); - -- if (rc != pcmk_ok) { -+ if (rc != pcmk_rc_ok) { - sleep(options.reconnect_msec / 1000); - #if CURSES_ENABLED - if (output_format == mon_output_console) { -@@ -1490,7 +1498,7 @@ main(int argc, char **argv) - printf("Writing html to %s ...\n", args->output_dest); - } - -- } while (rc == -ENOTCONN); -+ } while (rc == ENOTCONN); - - handle_connection_failures(rc); - set_fencing_options(interactive_fence_level); --- -1.8.3.1 - - -From 9b8fb7b608280f65a3b76d66a99b575a4da70944 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Mon, 25 Jan 
2021 18:26:04 +0100 -Subject: [PATCH 3/7] Fix: tools: Report pacemakerd in state waiting for sbd - -Waiting for pacemakerd to report that all subdaemons are started -before trying to connect to cib and fencer should remove the -potential race introduced by making fencer connection failure -non fatal when cib is faster to come up. ---- - tools/crm_mon.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++++++++---- - tools/crm_mon.h | 1 + - 2 files changed, 148 insertions(+), 11 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 3fbac5f..61f070d 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -132,6 +132,7 @@ static void handle_connection_failures(int rc); - static int mon_refresh_display(gpointer user_data); - static int cib_connect(gboolean full); - static int fencing_connect(void); -+static int pacemakerd_status(void); - static void mon_st_callback_event(stonith_t * st, stonith_event_t * e); - static void mon_st_callback_display(stonith_t * st, stonith_event_t * e); - static void refresh_after_event(gboolean data_updated); -@@ -689,11 +690,13 @@ reconnect_after_timeout(gpointer data) - } - - print_as(output_format, "Reconnecting...\n"); -- fencing_connect(); -- if (cib_connect(TRUE) == pcmk_rc_ok) { -- /* Redraw the screen and reinstall ourselves to get called after another reconnect_msec. */ -- mon_refresh_display(NULL); -- return FALSE; -+ if (pacemakerd_status() == pcmk_rc_ok) { -+ fencing_connect(); -+ if (cib_connect(TRUE) == pcmk_rc_ok) { -+ /* Redraw the screen and reinstall ourselves to get called after another reconnect_msec. */ -+ mon_refresh_display(NULL); -+ return FALSE; -+ } - } - - reconnect_timer = g_timeout_add(options.reconnect_msec, reconnect_after_timeout, NULL); -@@ -840,6 +843,13 @@ cib_connect(gboolean full) - return rc; - } - -+#if CURSES_ENABLED -+ /* just show this if refresh is gonna remove all traces */ -+ if (output_format == mon_output_console) { -+ print_as(output_format ,"Waiting for CIB ...\n"); -+ } -+#endif -+ - rc = pcmk_legacy2rc(cib->cmds->query(cib, NULL, ¤t_cib, - cib_scope_local | cib_sync_call)); - -@@ -904,6 +914,121 @@ set_fencing_options(int level) - } - } - -+/* Before trying to connect to fencer or cib check for state of -+ pacemakerd - just no sense in trying till pacemakerd has -+ taken care of starting all the sub-processes -+ -+ Only noteworthy thing to show here is when pacemakerd is -+ waiting for startup-trigger from SBD. 
-+ */ -+static void -+pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, -+ enum pcmk_ipc_event event_type, crm_exit_t status, -+ void *event_data, void *user_data) -+{ -+ pcmk_pacemakerd_api_reply_t *reply = event_data; -+ enum pcmk_pacemakerd_state *state = -+ (enum pcmk_pacemakerd_state *) user_data; -+ -+ /* we are just interested in the latest reply */ -+ *state = pcmk_pacemakerd_state_invalid; -+ -+ switch (event_type) { -+ case pcmk_ipc_event_reply: -+ break; -+ -+ default: -+ return; -+ } -+ -+ if (status != CRM_EX_OK) { -+ out->err(out, "Bad reply from pacemakerd: %s", -+ crm_exit_str(status)); -+ return; -+ } -+ -+ if (reply->reply_type != pcmk_pacemakerd_reply_ping) { -+ out->err(out, "Unknown reply type %d from pacemakerd", -+ reply->reply_type); -+ } else { -+ if ((reply->data.ping.last_good != (time_t) 0) && -+ (reply->data.ping.status == pcmk_rc_ok)) { -+ *state = reply->data.ping.state; -+ } -+ } -+} -+ -+static int -+pacemakerd_status(void) -+{ -+ int rc = pcmk_rc_ok; -+ pcmk_ipc_api_t *pacemakerd_api = NULL; -+ enum pcmk_pacemakerd_state state = pcmk_pacemakerd_state_invalid; -+ -+ if (!pcmk_is_set(options.mon_ops, mon_op_cib_native)) { -+ /* we don't need fully functional pacemakerd otherwise */ -+ return rc; -+ } -+ if (cib != NULL && -+ (cib->state == cib_connected_query || -+ cib->state == cib_connected_command)) { -+ /* As long as we have a cib-connection let's go with -+ * that to fetch further cluster-status and avoid -+ * unnecessary pings to pacemakerd. -+ * If cluster is going down and fencer is down already -+ * this will lead to a silently failing fencer reconnect. -+ * On cluster startup we shouldn't see this situation -+ * as first we do is wait for pacemakerd to report all -+ * daemons running. -+ */ -+ return rc; -+ } -+ rc = pcmk_new_ipc_api(&pacemakerd_api, pcmk_ipc_pacemakerd); -+ if (pacemakerd_api == NULL) { -+ out->err(out, "Could not connect to pacemakerd: %s", -+ pcmk_rc_str(rc)); -+ /* this is unrecoverable so return with rc we have */ -+ return rc; -+ } -+ pcmk_register_ipc_callback(pacemakerd_api, pacemakerd_event_cb, (void *) &state); -+ rc = pcmk_connect_ipc(pacemakerd_api, pcmk_ipc_dispatch_poll); -+ if (rc == pcmk_rc_ok) { -+ rc = pcmk_pacemakerd_api_ping(pacemakerd_api, crm_system_name); -+ if (rc == pcmk_rc_ok) { -+ rc = pcmk_poll_ipc(pacemakerd_api, options.reconnect_msec/2); -+ if (rc == pcmk_rc_ok) { -+ pcmk_dispatch_ipc(pacemakerd_api); -+ rc = ENOTCONN; -+ switch (state) { -+ case pcmk_pacemakerd_state_running: -+ rc = pcmk_rc_ok; -+ break; -+ case pcmk_pacemakerd_state_starting_daemons: -+ print_as(output_format ,"Pacemaker daemons starting ...\n"); -+ break; -+ case pcmk_pacemakerd_state_wait_for_ping: -+ print_as(output_format ,"Waiting for startup-trigger from SBD ...\n"); -+ break; -+ case pcmk_pacemakerd_state_shutting_down: -+ print_as(output_format ,"Pacemaker daemons shutting down ...\n"); -+ break; -+ case pcmk_pacemakerd_state_shutdown_complete: -+ /* assuming pacemakerd doesn't dispatch any pings after entering -+ * that state unless it is waiting for SBD -+ */ -+ print_as(output_format ,"Pacemaker daemons shut down - reporting to SBD ...\n"); -+ break; -+ default: -+ break; -+ } -+ } -+ } -+ } -+ pcmk_free_ipc_api(pacemakerd_api); -+ /* returning with ENOTCONN triggers a retry */ -+ return (rc == pcmk_rc_ok)?rc:ENOTCONN; -+} -+ - #if CURSES_ENABLED - static const char * - get_option_desc(char c) -@@ -1033,8 +1158,11 @@ detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_dat - } - - refresh: -- 
fencing_connect(); -- rc = cib_connect(FALSE); -+ rc = pacemakerd_status(); -+ if (rc == pcmk_rc_ok) { -+ fencing_connect(); -+ rc = cib_connect(FALSE); -+ } - if (rc == pcmk_rc_ok) { - mon_refresh_display(NULL); - } else { -@@ -1238,9 +1366,13 @@ one_shot(void) - { - int rc; - -- fencing_connect(); -+ rc = pacemakerd_status(); -+ -+ if (rc == pcmk_rc_ok) { -+ fencing_connect(); -+ rc = cib_connect(FALSE); -+ } - -- rc = cib_connect(FALSE); - if (rc == pcmk_rc_ok) { - mon_refresh_display(NULL); - } else { -@@ -1316,6 +1448,7 @@ main(int argc, char **argv) - - case cib_native: - /* cib & fencing - everything available */ -+ options.mon_ops |= mon_op_cib_native; - break; - - case cib_file: -@@ -1483,8 +1616,11 @@ main(int argc, char **argv) - do { - print_as(output_format ,"Waiting until cluster is available on this node ...\n"); - -- fencing_connect(); -- rc = cib_connect(TRUE); -+ rc = pacemakerd_status(); -+ if (rc == pcmk_rc_ok) { -+ fencing_connect(); -+ rc = cib_connect(TRUE); -+ } - - if (rc != pcmk_rc_ok) { - sleep(options.reconnect_msec / 1000); -diff --git a/tools/crm_mon.h b/tools/crm_mon.h -index 73c926d..b556913 100644 ---- a/tools/crm_mon.h -+++ b/tools/crm_mon.h -@@ -91,6 +91,7 @@ typedef enum mon_output_format_e { - #define mon_op_print_brief (0x0200U) - #define mon_op_print_pending (0x0400U) - #define mon_op_print_clone_detail (0x0800U) -+#define mon_op_cib_native (0x1000U) - - #define mon_op_default (mon_op_print_pending | mon_op_fence_history | mon_op_fence_connect) - --- -1.8.3.1 - - -From 046516dbe66fb2c52b90f36215cf60c5ad3c269b Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Thu, 28 Jan 2021 16:38:22 +0100 -Subject: [PATCH 4/7] Refactor: crm_mon: do refreshes rather via - refresh_after_event - ---- - tools/crm_mon.c | 35 ++++++++++++++--------------------- - 1 file changed, 14 insertions(+), 21 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 61f070d..195e7b5 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -135,7 +135,7 @@ static int fencing_connect(void); - static int pacemakerd_status(void); - static void mon_st_callback_event(stonith_t * st, stonith_event_t * e); - static void mon_st_callback_display(stonith_t * st, stonith_event_t * e); --static void refresh_after_event(gboolean data_updated); -+static void refresh_after_event(gboolean data_updated, gboolean enforce); - - static unsigned int - all_includes(mon_output_format_t fmt) { -@@ -694,13 +694,13 @@ reconnect_after_timeout(gpointer data) - fencing_connect(); - if (cib_connect(TRUE) == pcmk_rc_ok) { - /* Redraw the screen and reinstall ourselves to get called after another reconnect_msec. 
*/ -- mon_refresh_display(NULL); -+ refresh_after_event(FALSE, TRUE); - return FALSE; - } - } - - reconnect_timer = g_timeout_add(options.reconnect_msec, reconnect_after_timeout, NULL); -- return TRUE; -+ return FALSE; - } - - /* Called from various places when we are disconnected from the CIB or from the -@@ -1057,7 +1057,6 @@ static gboolean - detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_data) - { - int c; -- int rc; - gboolean config_mode = FALSE; - - while (1) { -@@ -1158,16 +1157,7 @@ detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_dat - } - - refresh: -- rc = pacemakerd_status(); -- if (rc == pcmk_rc_ok) { -- fencing_connect(); -- rc = cib_connect(FALSE); -- } -- if (rc == pcmk_rc_ok) { -- mon_refresh_display(NULL); -- } else { -- handle_connection_failures(rc); -- } -+ refresh_after_event(FALSE, TRUE); - - return TRUE; - } -@@ -2087,7 +2077,7 @@ crm_diff_update(const char *event, xmlNode * msg) - } - - stale = FALSE; -- refresh_after_event(cib_updated); -+ refresh_after_event(cib_updated, FALSE); - } - - static int -@@ -2246,7 +2236,7 @@ mon_st_callback_event(stonith_t * st, stonith_event_t * e) - * fencing event is received or a CIB diff occurrs. - */ - static void --refresh_after_event(gboolean data_updated) -+refresh_after_event(gboolean data_updated, gboolean enforce) - { - static int updates = 0; - time_t now = time(NULL); -@@ -2259,12 +2249,15 @@ refresh_after_event(gboolean data_updated) - refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL); - } - -- if ((now - last_refresh) > (options.reconnect_msec / 1000)) { -- mainloop_set_trigger(refresh_trigger); -+ if (reconnect_timer > 0) { -+ /* we will receive a refresh request after successful reconnect */ - mainloop_timer_stop(refresh_timer); -- updates = 0; -+ return; -+ } - -- } else if(updates >= 10) { -+ if (enforce || -+ now - last_refresh > options.reconnect_msec / 1000 || -+ updates >= 10) { - mainloop_set_trigger(refresh_trigger); - mainloop_timer_stop(refresh_timer); - updates = 0; -@@ -2285,7 +2278,7 @@ mon_st_callback_display(stonith_t * st, stonith_event_t * e) - mon_cib_connection_destroy(NULL); - } else { - print_dot(output_format); -- refresh_after_event(TRUE); -+ refresh_after_event(TRUE, FALSE); - } - } - --- -1.8.3.1 - - -From a63af2713f96719fc1d5ef594eb033d0f251187f Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Thu, 28 Jan 2021 16:52:57 +0100 -Subject: [PATCH 5/7] Fix: crm_mon: retry fencer connection as not fatal - initially - -and cleanup fencer api to not leak memory on multiple reconnects ---- - tools/crm_mon.c | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 195e7b5..a768ca9 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -798,7 +798,7 @@ fencing_connect(void) - st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display); - } - } else { -- st = NULL; -+ clean_up_fencing_connection(); - } - - return rc; -@@ -2255,6 +2255,12 @@ refresh_after_event(gboolean data_updated, gboolean enforce) - return; - } - -+ /* as we're not handling initial failure of fencer-connection as -+ * fatal give it a retry here -+ * not getting here if cib-reconnection is already on the way -+ */ -+ fencing_connect(); -+ - if (enforce || - now - last_refresh > options.reconnect_msec / 1000 || - updates >= 10) { --- -1.8.3.1 - - -From b6f4b5dfc0b5fec8cdc029409fc61252de019415 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: 
Thu, 28 Jan 2021 18:08:43 +0100 -Subject: [PATCH 6/7] Refactor: crm_mon: have reconnect-timer removed - implicitly - ---- - tools/crm_mon.c | 12 ++++-------- - 1 file changed, 4 insertions(+), 8 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index a768ca9..4f73379 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -684,23 +684,19 @@ reconnect_after_timeout(gpointer data) - } - #endif - -- if (reconnect_timer > 0) { -- g_source_remove(reconnect_timer); -- reconnect_timer = 0; -- } -- - print_as(output_format, "Reconnecting...\n"); - if (pacemakerd_status() == pcmk_rc_ok) { - fencing_connect(); - if (cib_connect(TRUE) == pcmk_rc_ok) { -- /* Redraw the screen and reinstall ourselves to get called after another reconnect_msec. */ -+ /* trigger redrawing the screen (needs reconnect_timer == 0) */ -+ reconnect_timer = 0; - refresh_after_event(FALSE, TRUE); -- return FALSE; -+ return G_SOURCE_REMOVE; - } - } - - reconnect_timer = g_timeout_add(options.reconnect_msec, reconnect_after_timeout, NULL); -- return FALSE; -+ return G_SOURCE_REMOVE; - } - - /* Called from various places when we are disconnected from the CIB or from the --- -1.8.3.1 - - -From 586e69ec38d5273b348c42a61b9bc7bbcc2b93b3 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Thu, 28 Jan 2021 21:08:16 +0100 -Subject: [PATCH 7/7] Fix: crm_mon: suppress pacemakerd-status for non-text - output - ---- - tools/crm_mon.c | 53 ++++++++++++++++++++++++++++++++--------------------- - 1 file changed, 32 insertions(+), 21 deletions(-) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 4f73379..d4d4ac3 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -995,27 +995,38 @@ pacemakerd_status(void) - if (rc == pcmk_rc_ok) { - pcmk_dispatch_ipc(pacemakerd_api); - rc = ENOTCONN; -- switch (state) { -- case pcmk_pacemakerd_state_running: -- rc = pcmk_rc_ok; -- break; -- case pcmk_pacemakerd_state_starting_daemons: -- print_as(output_format ,"Pacemaker daemons starting ...\n"); -- break; -- case pcmk_pacemakerd_state_wait_for_ping: -- print_as(output_format ,"Waiting for startup-trigger from SBD ...\n"); -- break; -- case pcmk_pacemakerd_state_shutting_down: -- print_as(output_format ,"Pacemaker daemons shutting down ...\n"); -- break; -- case pcmk_pacemakerd_state_shutdown_complete: -- /* assuming pacemakerd doesn't dispatch any pings after entering -- * that state unless it is waiting for SBD -- */ -- print_as(output_format ,"Pacemaker daemons shut down - reporting to SBD ...\n"); -- break; -- default: -- break; -+ if ((output_format == mon_output_console) || -+ (output_format == mon_output_plain)) { -+ switch (state) { -+ case pcmk_pacemakerd_state_running: -+ rc = pcmk_rc_ok; -+ break; -+ case pcmk_pacemakerd_state_starting_daemons: -+ print_as(output_format ,"Pacemaker daemons starting ...\n"); -+ break; -+ case pcmk_pacemakerd_state_wait_for_ping: -+ print_as(output_format ,"Waiting for startup-trigger from SBD ...\n"); -+ break; -+ case pcmk_pacemakerd_state_shutting_down: -+ print_as(output_format ,"Pacemaker daemons shutting down ...\n"); -+ break; -+ case pcmk_pacemakerd_state_shutdown_complete: -+ /* assuming pacemakerd doesn't dispatch any pings after entering -+ * that state unless it is waiting for SBD -+ */ -+ print_as(output_format ,"Pacemaker daemons shut down - reporting to SBD ...\n"); -+ break; -+ default: -+ break; -+ } -+ } else { -+ switch (state) { -+ case pcmk_pacemakerd_state_running: -+ rc = pcmk_rc_ok; -+ break; -+ default: -+ break; -+ } - } - } - } --- -1.8.3.1 - diff --git 
a/SOURCES/035-crm_mon.patch b/SOURCES/035-crm_mon.patch deleted file mode 100644 index 9e6c828..0000000 --- a/SOURCES/035-crm_mon.patch +++ /dev/null @@ -1,267 +0,0 @@ -From f4e3d77c94906a062641c7bf34243049de521a87 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Wed, 3 Feb 2021 13:25:22 +0100 -Subject: [PATCH] Fix: crm_mon: detect when run on remote-node - ---- - daemons/execd/remoted_proxy.c | 17 +++++++ - daemons/pacemakerd/pacemakerd.c | 6 +-- - include/crm/common/ipc_internal.h | 2 + - lib/common/ipc_server.c | 26 ++++++++++ - tools/crm_mon.c | 99 ++++++++++++++++++++++++--------------- - 5 files changed, 106 insertions(+), 44 deletions(-) - -diff --git a/daemons/execd/remoted_proxy.c b/daemons/execd/remoted_proxy.c -index 9329fa6..0fe39bf 100644 ---- a/daemons/execd/remoted_proxy.c -+++ b/daemons/execd/remoted_proxy.c -@@ -29,6 +29,7 @@ static qb_ipcs_service_t *cib_shm = NULL; - static qb_ipcs_service_t *attrd_ipcs = NULL; - static qb_ipcs_service_t *crmd_ipcs = NULL; - static qb_ipcs_service_t *stonith_ipcs = NULL; -+static qb_ipcs_service_t *pacemakerd_ipcs = NULL; - - // An IPC provider is a cluster node controller connecting as a client - static GList *ipc_providers = NULL; -@@ -126,6 +127,12 @@ stonith_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) - } - - static int32_t -+pacemakerd_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) -+{ -+ return -EREMOTEIO; -+} -+ -+static int32_t - cib_proxy_accept_rw(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) - { - return ipc_proxy_accept(c, uid, gid, PCMK__SERVER_BASED_RW); -@@ -356,6 +363,14 @@ static struct qb_ipcs_service_handlers stonith_proxy_callbacks = { - .connection_destroyed = ipc_proxy_destroy - }; - -+static struct qb_ipcs_service_handlers pacemakerd_proxy_callbacks = { -+ .connection_accept = pacemakerd_proxy_accept, -+ .connection_created = NULL, -+ .msg_process = NULL, -+ .connection_closed = NULL, -+ .connection_destroyed = NULL -+}; -+ - static struct qb_ipcs_service_handlers cib_proxy_callbacks_ro = { - .connection_accept = cib_proxy_accept_ro, - .connection_created = NULL, -@@ -422,6 +437,7 @@ ipc_proxy_init(void) - &cib_proxy_callbacks_rw); - pcmk__serve_attrd_ipc(&attrd_ipcs, &attrd_proxy_callbacks); - pcmk__serve_fenced_ipc(&stonith_ipcs, &stonith_proxy_callbacks); -+ pcmk__serve_pacemakerd_ipc(&pacemakerd_ipcs, &pacemakerd_proxy_callbacks); - crmd_ipcs = pcmk__serve_controld_ipc(&crmd_proxy_callbacks); - if (crmd_ipcs == NULL) { - crm_err("Failed to create controller: exiting and inhibiting respawn"); -@@ -444,6 +460,7 @@ ipc_proxy_cleanup(void) - pcmk__stop_based_ipc(cib_ro, cib_rw, cib_shm); - qb_ipcs_destroy(attrd_ipcs); - qb_ipcs_destroy(stonith_ipcs); -+ qb_ipcs_destroy(pacemakerd_ipcs); - qb_ipcs_destroy(crmd_ipcs); - cib_ro = NULL; - cib_rw = NULL; -diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c -index 509b0f8..4572b70 100644 ---- a/daemons/pacemakerd/pacemakerd.c -+++ b/daemons/pacemakerd/pacemakerd.c -@@ -1287,11 +1287,7 @@ main(int argc, char **argv) - - // Don't build CRM_RSCTMP_DIR, pacemaker-execd will do it - -- ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, &mcp_ipc_callbacks); -- if (ipcs == NULL) { -- crm_err("Couldn't start IPC server"); -- crm_exit(CRM_EX_OSERR); -- } -+ pcmk__serve_pacemakerd_ipc(&ipcs, &mcp_ipc_callbacks); - - #ifdef SUPPORT_COROSYNC - /* Allows us to block shutdown */ -diff --git a/include/crm/common/ipc_internal.h b/include/crm/common/ipc_internal.h -index cf935f3..fb82ce1 100644 ---- 
a/include/crm/common/ipc_internal.h -+++ b/include/crm/common/ipc_internal.h -@@ -221,6 +221,8 @@ void pcmk__serve_attrd_ipc(qb_ipcs_service_t **ipcs, - struct qb_ipcs_service_handlers *cb); - void pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs, - struct qb_ipcs_service_handlers *cb); -+void pcmk__serve_pacemakerd_ipc(qb_ipcs_service_t **ipcs, -+ struct qb_ipcs_service_handlers *cb); - qb_ipcs_service_t *pcmk__serve_controld_ipc(struct qb_ipcs_service_handlers *cb); - - void pcmk__serve_based_ipc(qb_ipcs_service_t **ipcs_ro, -diff --git a/lib/common/ipc_server.c b/lib/common/ipc_server.c -index 4d3e954..b3aaf8e 100644 ---- a/lib/common/ipc_server.c -+++ b/lib/common/ipc_server.c -@@ -922,6 +922,32 @@ pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs, - } - - /*! -+ * \internal -+ * \brief Add an IPC server to the main loop for the pacemakerd API -+ * -+ * \param[in] cb IPC callbacks -+ * -+ * \note This function exits with CRM_EX_OSERR if unable to create the servers. -+ */ -+void -+pcmk__serve_pacemakerd_ipc(qb_ipcs_service_t **ipcs, -+ struct qb_ipcs_service_handlers *cb) -+{ -+ *ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, cb); -+ -+ if (*ipcs == NULL) { -+ crm_err("Couldn't start pacemakerd IPC server"); -+ crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); -+ /* sub-daemons are observed by pacemakerd. Thus we exit CRM_EX_FATAL -+ * if we want to prevent pacemakerd from restarting them. -+ * With pacemakerd we leave the exit-code shown to e.g. systemd -+ * to what it was prior to moving the code here from pacemakerd.c -+ */ -+ crm_exit(CRM_EX_OSERR); -+ } -+} -+ -+/*! - * \brief Check whether string represents a client name used by cluster daemons - * - * \param[in] name String to check -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index d4d4ac3..e58fed2 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -83,6 +83,8 @@ static gchar **processed_args = NULL; - static time_t last_refresh = 0; - crm_trigger_t *refresh_trigger = NULL; - -+static gboolean on_remote_node = FALSE; -+ - int interactive_fence_level = 0; - - static pcmk__supported_format_t formats[] = { -@@ -988,48 +990,63 @@ pacemakerd_status(void) - } - pcmk_register_ipc_callback(pacemakerd_api, pacemakerd_event_cb, (void *) &state); - rc = pcmk_connect_ipc(pacemakerd_api, pcmk_ipc_dispatch_poll); -- if (rc == pcmk_rc_ok) { -- rc = pcmk_pacemakerd_api_ping(pacemakerd_api, crm_system_name); -- if (rc == pcmk_rc_ok) { -- rc = pcmk_poll_ipc(pacemakerd_api, options.reconnect_msec/2); -+ switch (rc) { -+ case pcmk_rc_ok: -+ rc = pcmk_pacemakerd_api_ping(pacemakerd_api, crm_system_name); - if (rc == pcmk_rc_ok) { -- pcmk_dispatch_ipc(pacemakerd_api); -- rc = ENOTCONN; -- if ((output_format == mon_output_console) || -- (output_format == mon_output_plain)) { -- switch (state) { -- case pcmk_pacemakerd_state_running: -- rc = pcmk_rc_ok; -- break; -- case pcmk_pacemakerd_state_starting_daemons: -- print_as(output_format ,"Pacemaker daemons starting ...\n"); -- break; -- case pcmk_pacemakerd_state_wait_for_ping: -- print_as(output_format ,"Waiting for startup-trigger from SBD ...\n"); -- break; -- case pcmk_pacemakerd_state_shutting_down: -- print_as(output_format ,"Pacemaker daemons shutting down ...\n"); -- break; -- case pcmk_pacemakerd_state_shutdown_complete: -- /* assuming pacemakerd doesn't dispatch any pings after entering -- * that state unless it is waiting for SBD -- */ -- print_as(output_format ,"Pacemaker daemons shut down - reporting to SBD ...\n"); -- break; -- default: -- 
break; -- } -- } else { -- switch (state) { -- case pcmk_pacemakerd_state_running: -- rc = pcmk_rc_ok; -- break; -- default: -- break; -+ rc = pcmk_poll_ipc(pacemakerd_api, options.reconnect_msec/2); -+ if (rc == pcmk_rc_ok) { -+ pcmk_dispatch_ipc(pacemakerd_api); -+ rc = ENOTCONN; -+ if ((output_format == mon_output_console) || -+ (output_format == mon_output_plain)) { -+ switch (state) { -+ case pcmk_pacemakerd_state_running: -+ rc = pcmk_rc_ok; -+ break; -+ case pcmk_pacemakerd_state_starting_daemons: -+ print_as(output_format ,"Pacemaker daemons starting ...\n"); -+ break; -+ case pcmk_pacemakerd_state_wait_for_ping: -+ print_as(output_format ,"Waiting for startup-trigger from SBD ...\n"); -+ break; -+ case pcmk_pacemakerd_state_shutting_down: -+ print_as(output_format ,"Pacemaker daemons shutting down ...\n"); -+ break; -+ case pcmk_pacemakerd_state_shutdown_complete: -+ /* assuming pacemakerd doesn't dispatch any pings after entering -+ * that state unless it is waiting for SBD -+ */ -+ print_as(output_format ,"Pacemaker daemons shut down - reporting to SBD ...\n"); -+ break; -+ default: -+ break; -+ } -+ } else { -+ switch (state) { -+ case pcmk_pacemakerd_state_running: -+ rc = pcmk_rc_ok; -+ break; -+ default: -+ break; -+ } - } - } - } -- } -+ break; -+ case EREMOTEIO: -+ rc = pcmk_rc_ok; -+ on_remote_node = TRUE; -+#if CURSES_ENABLED -+ /* just show this if refresh is gonna remove all traces */ -+ if (output_format == mon_output_console) { -+ print_as(output_format , -+ "Running on remote-node waiting to be connected by cluster ...\n"); -+ } -+#endif -+ break; -+ default: -+ break; - } - pcmk_free_ipc_api(pacemakerd_api); - /* returning with ENOTCONN triggers a retry */ -@@ -1348,7 +1365,11 @@ handle_connection_failures(int rc) - pcmk_rc_str(rc)); - rc = MON_STATUS_CRIT; - } else if (rc == ENOTCONN) { -- g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: cluster is not available on this node"); -+ if (on_remote_node) { -+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: remote-node not connected to cluster"); -+ } else { -+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: cluster is not available on this node"); -+ } - rc = pcmk_rc2exitc(rc); - } else { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Connection to cluster failed: %s", pcmk_rc_str(rc)); --- -1.8.3.1 - diff --git a/SOURCES/036-crm_resource.patch b/SOURCES/036-crm_resource.patch deleted file mode 100644 index cfe7b50..0000000 --- a/SOURCES/036-crm_resource.patch +++ /dev/null @@ -1,1676 +0,0 @@ -From 82ae2f1b652c361dadacf25dece42a43340776ee Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 11 Feb 2021 09:57:21 -0500 -Subject: [PATCH 1/3] Low: tools: Rename the result of cli_resource_search. - -The result of cli_resource_search is a list of nodes, not a list of -resources. Change the variable name appropriately. 
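At this point in the series the returned GList still comes straight from the resources' running_on lists, so every element is a pe_node_t *, not a pe_resource_t *. A minimal caller that treats it that way might look like the fragment below (a sketch only; the loop body and the printed message are illustrative and not part of this patch):

    GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set);

    for (GListPtr iter = nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;   /* a node, not a resource */

        out->info(out, "%s is active on %s", options.rsc_id,
                  node->details->uname);
    }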
---- - tools/crm_resource.c | 4 ++-- - tools/crm_resource_runtime.c | 4 ++-- - 2 files changed, 4 insertions(+), 4 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 564600e..78b2246 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1873,8 +1873,8 @@ main(int argc, char **argv) - break; - - case cmd_locate: { -- GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set); -- rc = out->message(out, "resource-search-list", resources, rsc, options.rsc_id); -+ GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set); -+ rc = out->message(out, "resource-search-list", nodes, rsc, options.rsc_id); - break; - } - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index b6e4df1..adfdfba 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1780,8 +1780,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, - action = rsc_action+6; - - if(pe_rsc_is_clone(rsc)) { -- GListPtr rscs = cli_resource_search(out, rsc, requested_name, data_set); -- if(rscs != NULL && force == FALSE) { -+ GListPtr nodes = cli_resource_search(out, rsc, requested_name, data_set); -+ if(nodes != NULL && force == FALSE) { - out->err(out, "It is not safe to %s %s here: the cluster claims it is already active", - action, rsc->id); - out->err(out, "Try setting target-role=Stopped first or specifying " --- -1.8.3.1 - - -From e8b320aaaabdd60b7ac851e5b70a2a1b3c2180a3 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 11 Feb 2021 11:07:07 -0500 -Subject: [PATCH 2/3] Test: cts: Add a test for a promotable clone resource. - -Note that for the moment, the crm_resource output in -regression.tools.exp is incorrect. There's a bug in that tool, but I -wanted to get a test case working before fixing it. 
---- - cts/cli/crm_mon.xml | 32 +++- - cts/cli/regression.crm_mon.exp | 401 +++++++++++++++++++++++++++++------------ - cts/cli/regression.tools.exp | 18 ++ - cts/cts-cli.in | 20 ++ - 4 files changed, 357 insertions(+), 114 deletions(-) - -diff --git a/cts/cli/crm_mon.xml b/cts/cli/crm_mon.xml -index d8d5d35..f0f14fd 100644 ---- a/cts/cli/crm_mon.xml -+++ b/cts/cli/crm_mon.xml -@@ -1,4 +1,4 @@ -- -+ - - - -@@ -99,9 +99,25 @@ - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -+ -+ -+ -+ -+ - - - -@@ -153,6 +169,13 @@ - - - -+ -+ -+ -+ -+ -+ -+ - - - -@@ -170,7 +193,7 @@ - - - -- -+ - - - -@@ -185,6 +208,11 @@ - - - -+ -+ -+ -+ -+ - - - -diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp -index dd20116..c223b7f 100644 ---- a/cts/cli/regression.crm_mon.exp -+++ b/cts/cli/regression.crm_mon.exp -@@ -5,7 +5,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -20,6 +20,9 @@ Active Resources: - * Email (lsb:exim): Started cluster02 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster01 cluster02 ] -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * Masters: [ cluster02 ] -+ * Slaves: [ cluster01 ] - =#=#=#= End test: Basic text output - OK (0) =#=#=#= - * Passed: crm_mon - Basic text output - =#=#=#= Begin test: XML output =#=#=#= -@@ -30,12 +33,12 @@ Active Resources: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -112,6 +115,17 @@ Active Resources: - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -142,6 +156,12 @@ Active Resources: - - - -+ -+ -+ -+ -+ -+ - - - -@@ -150,7 +170,7 @@ Active Resources: - - - -- -+ - - - -@@ -159,6 +179,10 @@ Active Resources: - - - -+ -+ -+ -+ - - - -@@ -175,7 +199,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Active Resources: - * Clone Set: ping-clone [ping]: -@@ -187,6 +211,9 @@ Active Resources: - * Email (lsb:exim): Started cluster02 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster01 cluster02 ] -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * Masters: [ cluster02 ] -+ * Slaves: [ cluster01 ] - =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#= - * Passed: crm_mon - Basic text output without node section - =#=#=#= Begin test: XML output without the node section =#=#=#= -@@ -197,7 +224,7 @@ Active Resources: - - - -- -+ - - - -@@ -272,6 +299,17 @@ Active Resources: - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -302,6 +340,12 @@ Active Resources: - - - -+ -+ -+ -+ -+ -+ - - - -@@ -310,7 +354,7 @@ Active Resources: - - - -- -+ - - - -@@ -319,6 +363,10 @@ Active Resources: - - - -+ -+ -+ -+ - - - -@@ -340,7 +388,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -355,6 +403,9 @@ Active Resources: - * Email (lsb:exim): Started cluster02 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster01 cluster02 ] -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * Masters: [ cluster02 ] -+ * Slaves: [ cluster01 ] - - Node Attributes: - * Node: cluster01: -@@ -378,18 +429,26 @@ Operations: - * mysql-proxy: 
migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (4) monitor: interval="10000ms" -+ * (5) cancel: interval="10000ms" -+ * (6) promote -+ * (7) monitor: interval="5000ms" - * Node: cluster01: - * ping: migration-threshold=1000000: - * (17) start - * (18) monitor: interval="10000ms" - * Fencing: migration-threshold=1000000: - * (15) start -- * (19) monitor: interval="60000ms" -+ * (20) monitor: interval="60000ms" - * dummy: migration-threshold=1000000: - * (16) stop - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (2) start -+ * (4) monitor: interval="10000ms" - - Negative Location Constraints: - * not-on-cluster1 prevents dummy from running on cluster01 -@@ -402,7 +461,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 (1) cluster02 (2) ] -@@ -427,6 +486,12 @@ Active Resources: - * mysql-proxy (lsb:mysql-proxy): Stopped - * Resource Group: mysql-group:4: - * mysql-proxy (lsb:mysql-proxy): Stopped -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * promotable-rsc (ocf::pacemaker:Stateful): Master cluster02 -+ * promotable-rsc (ocf::pacemaker:Stateful): Slave cluster01 -+ * promotable-rsc (ocf::pacemaker:Stateful): Stopped -+ * promotable-rsc (ocf::pacemaker:Stateful): Stopped -+ * promotable-rsc (ocf::pacemaker:Stateful): Stopped - - Node Attributes: - * Node: cluster01 (1): -@@ -450,18 +515,26 @@ Operations: - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (4) monitor: interval="10000ms" -+ * (5) cancel: interval="10000ms" -+ * (6) promote -+ * (7) monitor: interval="5000ms" - * Node: cluster01 (1): - * ping: migration-threshold=1000000: - * (17) start - * (18) monitor: interval="10000ms" - * Fencing: migration-threshold=1000000: - * (15) start -- * (19) monitor: interval="60000ms" -+ * (20) monitor: interval="60000ms" - * dummy: migration-threshold=1000000: - * (16) stop - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (2) start -+ * (4) monitor: interval="10000ms" - - Negative Location Constraints: - * not-on-cluster1 prevents dummy from running on cluster01 (1) -@@ -474,7 +547,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -489,6 +562,9 @@ Active Resources: - * 1/1 (ocf::heartbeat:IPaddr): Active cluster02 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster01 cluster02 ] -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * Masters: [ cluster02 ] -+ * Slaves: [ cluster01 ] - - Node Attributes: - * Node: cluster01: -@@ -512,18 +588,26 @@ Operations: - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (4) monitor: interval="10000ms" -+ * (5) cancel: interval="10000ms" -+ * (6) promote -+ * (7) monitor: interval="5000ms" - * Node: cluster01: - * ping: migration-threshold=1000000: - * (17) start - * (18) 
monitor: interval="10000ms" - * Fencing: migration-threshold=1000000: - * (15) start -- * (19) monitor: interval="60000ms" -+ * (20) monitor: interval="60000ms" - * dummy: migration-threshold=1000000: - * (16) stop - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (2) start -+ * (4) monitor: interval="10000ms" - - Negative Location Constraints: - * not-on-cluster1 prevents dummy from running on cluster01 -@@ -536,7 +620,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Node cluster01: online: -@@ -544,6 +628,7 @@ Node List: - * ping (ocf::pacemaker:ping): Started - * Fencing (stonith:fence_xvm): Started - * mysql-proxy (lsb:mysql-proxy): Started -+ * promotable-rsc (ocf::pacemaker:Stateful): Slave - * Node cluster02: online: - * Resources: - * ping (ocf::pacemaker:ping): Started -@@ -551,6 +636,7 @@ Node List: - * Public-IP (ocf::heartbeat:IPaddr): Started - * Email (lsb:exim): Started - * mysql-proxy (lsb:mysql-proxy): Started -+ * promotable-rsc (ocf::pacemaker:Stateful): Master - * GuestNode httpd-bundle-0@: OFFLINE: - * Resources: - * GuestNode httpd-bundle-1@: OFFLINE: -@@ -580,18 +666,26 @@ Operations: - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (4) monitor: interval="10000ms" -+ * (5) cancel: interval="10000ms" -+ * (6) promote -+ * (7) monitor: interval="5000ms" - * Node: cluster01: - * ping: migration-threshold=1000000: - * (17) start - * (18) monitor: interval="10000ms" - * Fencing: migration-threshold=1000000: - * (15) start -- * (19) monitor: interval="60000ms" -+ * (20) monitor: interval="60000ms" - * dummy: migration-threshold=1000000: - * (16) stop - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (2) start -+ * (4) monitor: interval="10000ms" - - Negative Location Constraints: - * not-on-cluster1 prevents dummy from running on cluster01 -@@ -604,12 +698,13 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Node cluster01: online: - * Resources: - * 1 (lsb:mysql-proxy): Active -+ * 1 (ocf::pacemaker:Stateful): Active - * 1 (ocf::pacemaker:ping): Active - * 1 (stonith:fence_xvm): Active - * Node cluster02: online: -@@ -618,6 +713,7 @@ Node List: - * 1 (lsb:mysql-proxy): Active - * 1 (ocf::heartbeat:IPaddr): Active - * 1 (ocf::pacemaker:Dummy): Active -+ * 1 (ocf::pacemaker:Stateful): Active - * 1 (ocf::pacemaker:ping): Active - - Node Attributes: -@@ -642,18 +738,26 @@ Operations: - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (4) monitor: interval="10000ms" -+ * (5) cancel: interval="10000ms" -+ * (6) promote -+ * (7) monitor: interval="5000ms" - * Node: cluster01: - * ping: migration-threshold=1000000: - * (17) start - * (18) monitor: interval="10000ms" - * Fencing: migration-threshold=1000000: - * (15) start -- * (19) monitor: interval="60000ms" -+ * (20) monitor: interval="60000ms" - * dummy: migration-threshold=1000000: - * (16) stop - * mysql-proxy: 
migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (2) start -+ * (4) monitor: interval="10000ms" - - Negative Location Constraints: - * not-on-cluster1 prevents dummy from running on cluster01 -@@ -667,11 +771,11 @@ Negative Location Constraints: - - - -- -+ - - - -- -+ - - - -@@ -681,8 +785,11 @@ Negative Location Constraints: - - - -+ -+ -+ - -- -+ - - - -@@ -698,6 +805,9 @@ Negative Location Constraints: - - - -+ -+ -+ - - - -@@ -753,6 +863,17 @@ Negative Location Constraints: - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -783,6 +904,12 @@ Negative Location Constraints: - - - -+ -+ -+ -+ -+ -+ - - - -@@ -791,7 +918,7 @@ Negative Location Constraints: - - - -- -+ - - - -@@ -800,6 +927,10 @@ Negative Location Constraints: - - - -+ -+ -+ -+ - - - -@@ -816,7 +947,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 ] -@@ -827,6 +958,8 @@ Active Resources: - * Fencing (stonith:fence_xvm): Started cluster01 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster01 ] -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * Slaves: [ cluster01 ] - - Node Attributes: - * Node: cluster01: -@@ -840,12 +973,15 @@ Operations: - * (18) monitor: interval="10000ms" - * Fencing: migration-threshold=1000000: - * (15) start -- * (19) monitor: interval="60000ms" -+ * (20) monitor: interval="60000ms" - * dummy: migration-threshold=1000000: - * (16) stop - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (2) start -+ * (4) monitor: interval="10000ms" - - Negative Location Constraints: - * not-on-cluster1 prevents dummy from running on cluster01 -@@ -859,11 +995,11 @@ Negative Location Constraints: - - - -- -+ - - - -- -+ - - - -@@ -918,6 +1054,14 @@ Negative Location Constraints: - - - -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -933,7 +1077,7 @@ Negative Location Constraints: - - - -- -+ - - - -@@ -942,6 +1086,10 @@ Negative Location Constraints: - - - -+ -+ -+ -+ - - - -@@ -958,7 +1106,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster02 ] -@@ -972,6 +1120,8 @@ Active Resources: - * Email (lsb:exim): Started cluster02 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster02 ] -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * Masters: [ cluster02 ] - - Node Attributes: - * Node: cluster02: -@@ -992,6 +1142,11 @@ Operations: - * mysql-proxy: migration-threshold=1000000: - * (2) start - * (3) monitor: interval="10000ms" -+ * promotable-rsc: migration-threshold=1000000: -+ * (4) monitor: interval="10000ms" -+ * (5) cancel: interval="10000ms" -+ * (6) promote -+ * (7) monitor: interval="5000ms" - - Negative Location Constraints: - * not-on-cluster1 prevents dummy from running on cluster01 -@@ -1005,11 +1160,11 @@ Negative Location Constraints: - - - -- -+ - - - -- -+ - - - -@@ -1072,6 +1227,14 @@ Negative Location Constraints: - - - -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -1098,6 +1261,12 @@ Negative Location Constraints: - - - -+ -+ -+ -+ -+ -+ - - - -@@ -1114,7 +1283,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 
27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1133,7 +1302,7 @@ Operations: - * Node: cluster01: - * Fencing: migration-threshold=1000000: - * (15) start -- * (19) monitor: interval="60000ms" -+ * (20) monitor: interval="60000ms" - =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#= - * Passed: crm_mon - Complete text output filtered by resource tag - =#=#=#= Begin test: XML output filtered by resource tag =#=#=#= -@@ -1144,12 +1313,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1172,7 +1341,7 @@ Operations: - - - -- -+ - - - -@@ -1187,7 +1356,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Active Resources: - * No active resources -@@ -1201,7 +1370,7 @@ Active Resources: - - - -- -+ - - - -@@ -1249,7 +1418,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1273,6 +1442,9 @@ Full List of Resources: - * Email (lsb:exim): Started cluster02 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster01 cluster02 ] -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * Masters: [ cluster02 ] -+ * Slaves: [ cluster01 ] - =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#= - * Passed: crm_mon - Basic text output with inactive resources - =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#= -@@ -1282,7 +1454,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster02 ] -@@ -1305,6 +1477,8 @@ Full List of Resources: - * Email (lsb:exim): Started cluster02 - * Clone Set: mysql-clone-group [mysql-group]: - * Started: [ cluster02 ] -+ * Clone Set: promotable-clone [promotable-rsc] (promotable): -+ * Masters: [ cluster02 ] - =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#= - * Passed: crm_mon - Basic text output with inactive resources, filtered by node - =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#= -@@ -1314,7 +1488,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1333,7 +1507,7 @@ Operations: - * Node: cluster01: - * Fencing: migration-threshold=1000000: - * (15) start -- * (19) monitor: interval="60000ms" -+ * (20) monitor: interval="60000ms" - =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#= - * Passed: crm_mon - Complete text output filtered by primitive resource - =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#= -@@ -1344,12 +1518,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1372,7 +1546,7 @@ Operations: - - - -- -+ - - - -@@ -1387,7 +1561,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ 
cluster01 cluster02 ] -@@ -1420,12 +1594,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1470,7 +1644,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1500,12 +1674,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1544,7 +1718,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1579,12 +1753,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1633,7 +1807,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1668,12 +1842,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1722,7 +1896,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 (1) cluster02 (2) ] -@@ -1757,12 +1931,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1808,7 +1982,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1825,12 +1999,12 @@ Active Resources: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1857,7 +2031,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1877,7 +2051,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1897,12 +2071,12 @@ Full List of Resources: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -1950,7 +2124,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -1969,12 +2143,12 @@ Full List of Resources: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -2007,7 +2181,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -2026,12 +2200,12 @@ Full List of Resources: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -2064,7 +2238,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 cluster02 ] -@@ -2083,12 +2257,12 @@ Full List of Resources: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -2121,7 +2295,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ 
cluster01 cluster02 ] -@@ -2144,12 +2318,12 @@ Full List of Resources: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -2188,7 +2362,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 (1) cluster02 (2) ] -@@ -2232,12 +2406,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -2279,7 +2453,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 (1) cluster02 (2) ] -@@ -2323,12 +2497,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -2390,7 +2564,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 (1) cluster02 (2) ] -@@ -2426,12 +2600,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -2479,7 +2653,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 (1) cluster02 (2) ] -@@ -2523,12 +2697,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -2590,7 +2764,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - Node List: - * Online: [ cluster01 (1) cluster02 (2) ] -@@ -2626,12 +2800,12 @@ Operations: - - - -- -+ - - - -- -- -+ -+ - - - -@@ -3083,7 +3257,7 @@ Cluster Summary: - * Last updated: - * Last change: - * 5 nodes configured -- * 27 resource instances configured (4 DISABLED) -+ * 32 resource instances configured (4 DISABLED) - - *** Resource management is DISABLED *** - The cluster will not attempt to start, stop or recover services -@@ -3114,5 +3288,8 @@ Full List of Resources: - * mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged) - * Resource Group: mysql-group:1 (unmanaged): - * mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged) -+ * Clone Set: promotable-clone [promotable-rsc] (promotable) (unmanaged): -+ * promotable-rsc (ocf::pacemaker:Stateful): Master cluster02 (unmanaged) -+ * promotable-rsc (ocf::pacemaker:Stateful): Slave cluster01 (unmanaged) - =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#= - * Passed: crm_mon - Text output of all resources with maintenance-mode enabled -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 1afe596..708c340 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -4077,3 +4077,21 @@ Resources colocated with clone: - 5 - =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#= - * Passed: crmadmin - List guest,remote nodes -+=#=#=#= Begin test: List a promotable clone resource =#=#=#= -+resource promotable-clone is running on: cluster02 -+resource promotable-clone is running on: cluster01 -+=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#= -+* Passed: crm_resource - List a promotable clone resource -+=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#= -+resource promotable-rsc is running on: cluster02 Master -+resource promotable-rsc is running on: cluster01 Master -+=#=#=#= 
End test: List the primitive of a promotable clone resource - OK (0) =#=#=#= -+* Passed: crm_resource - List the primitive of a promotable clone resource -+=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#= -+resource promotable-rsc:0 is running on: cluster02 Master -+=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#= -+* Passed: crm_resource - List a single instance of a promotable clone resource -+=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#= -+resource promotable-rsc:1 is running on: cluster01 -+=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#= -+* Passed: crm_resource - List another instance of a promotable clone resource -diff --git a/cts/cts-cli.in b/cts/cts-cli.in -index 8e2dbe5..6f7eb80 100755 ---- a/cts/cts-cli.in -+++ b/cts/cts-cli.in -@@ -831,6 +831,26 @@ function test_tools() { - test_assert $CRM_EX_OK 0 - - unset CIB_file -+ -+ export CIB_file="$test_home/cli/crm_mon.xml" -+ -+ desc="List a promotable clone resource" -+ cmd="crm_resource --locate -r promotable-clone" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="List the primitive of a promotable clone resource" -+ cmd="crm_resource --locate -r promotable-rsc" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="List a single instance of a promotable clone resource" -+ cmd="crm_resource --locate -r promotable-rsc:0" -+ test_assert $CRM_EX_OK 0 -+ -+ desc="List another instance of a promotable clone resource" -+ cmd="crm_resource --locate -r promotable-rsc:1" -+ test_assert $CRM_EX_OK 0 -+ -+ unset CIB_file - } - - INVALID_PERIODS=( --- -1.8.3.1 - - -From d1bb0758726c09fd78efbc30c7eb46559e9c10e2 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Thu, 11 Feb 2021 15:09:54 -0500 -Subject: [PATCH 3/3] Fix: Correct output of "crm_resource --locate" in case of - clones. - -For non-clone resources, the rsc parameter passed to -resource_search_list_* is accurate - it is the resource object for the -name given on the command line. For clone resources, this parameter is -incorrect. It will be a single instance of the clone resource, no -matter which instance might have been asked for on the command line. - -This typically doesn't matter, but results in incorrect output when -promotable clones are searched for. For promotable clones, the "Master" -text may not appear for any of the instances. This is because the -single instance passed in as the rsc parameter might not be the master, -but each iteration through the loop will use that same parameter. - -The fix is to change cli_resource_search to return a list of -node/promoted pairs so we we already have all the information we need. -Printing is then a simple matter of just walking that list. - -The referenced bug has a much better explanation of the cause of the -problem. 
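Condensed, the promoted check moves from print time, where only the single rsc argument is available, to the point where the node list is built and each clone child is in hand. Schematically (both fragments are condensed from the hunks below; ni is the new node_info_t entry and child the clone instance being walked):

    /* before: one state check on the rsc passed in, reused for every node printed */
    if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
        state = " Master";
    }

    /* after: the promoted flag is recorded per clone instance as the list is built */
    ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) &&
                   child->fns->state(child, TRUE) == RSC_ROLE_MASTER;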
- -See: rhbz#1925681 ---- - cts/cli/regression.tools.exp | 4 ++-- - tools/crm_resource.c | 3 ++- - tools/crm_resource.h | 7 +++++- - tools/crm_resource_print.c | 23 +++++++------------- - tools/crm_resource_runtime.c | 51 +++++++++++++++++++++++++++++++------------- - 5 files changed, 54 insertions(+), 34 deletions(-) - -diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp -index 708c340..b3f16fa 100644 ---- a/cts/cli/regression.tools.exp -+++ b/cts/cli/regression.tools.exp -@@ -4078,13 +4078,13 @@ Resources colocated with clone: - =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#= - * Passed: crmadmin - List guest,remote nodes - =#=#=#= Begin test: List a promotable clone resource =#=#=#= --resource promotable-clone is running on: cluster02 - resource promotable-clone is running on: cluster01 -+resource promotable-clone is running on: cluster02 Master - =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#= - * Passed: crm_resource - List a promotable clone resource - =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#= -+resource promotable-rsc is running on: cluster01 - resource promotable-rsc is running on: cluster02 Master --resource promotable-rsc is running on: cluster01 Master - =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#= - * Passed: crm_resource - List the primitive of a promotable clone resource - =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#= -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 78b2246..7d2f0f6 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1874,7 +1874,8 @@ main(int argc, char **argv) - - case cmd_locate: { - GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set); -- rc = out->message(out, "resource-search-list", nodes, rsc, options.rsc_id); -+ rc = out->message(out, "resource-search-list", nodes, options.rsc_id); -+ g_list_free_full(nodes, free); - break; - } - -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 5bfadb7..777490a 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. 
- * -@@ -23,6 +23,11 @@ - #include - #include - -+typedef struct node_info_s { -+ const char *node_name; -+ bool promoted; -+} node_info_t; -+ - enum resource_check_flags { - rsc_remain_stopped = (1 << 0), - rsc_unpromotable = (1 << 1), -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 398fef0..053f806 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -276,12 +276,11 @@ resource_check_list_xml(pcmk__output_t *out, va_list args) { - return rc; - } - --PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *") -+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "gchar *") - static int - resource_search_list_default(pcmk__output_t *out, va_list args) - { - GList *nodes = va_arg(args, GList *); -- pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gchar *requested_name = va_arg(args, gchar *); - - bool printed = false; -@@ -293,7 +292,7 @@ resource_search_list_default(pcmk__output_t *out, va_list args) - } - - for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) { -- pe_node_t *node = (pe_node_t *) lpc->data; -+ node_info_t *ni = (node_info_t *) lpc->data; - - if (!printed) { - out->begin_list(out, NULL, NULL, "Nodes"); -@@ -302,15 +301,10 @@ resource_search_list_default(pcmk__output_t *out, va_list args) - } - - if (out->is_quiet(out)) { -- out->list_item(out, "node", "%s", node->details->uname); -+ out->list_item(out, "node", "%s", ni->node_name); - } else { -- const char *state = ""; -- -- if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { -- state = " Master"; -- } - out->list_item(out, "node", "resource %s is running on: %s%s", -- requested_name, node->details->uname, state); -+ requested_name, ni->node_name, ni->promoted ? " Master" : ""); - } - } - -@@ -321,12 +315,11 @@ resource_search_list_default(pcmk__output_t *out, va_list args) - return rc; - } - --PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *") -+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "gchar *") - static int - resource_search_list_xml(pcmk__output_t *out, va_list args) - { - GList *nodes = va_arg(args, GList *); -- pe_resource_t *rsc = va_arg(args, pe_resource_t *); - gchar *requested_name = va_arg(args, gchar *); - - pcmk__output_xml_create_parent(out, "nodes", -@@ -334,10 +327,10 @@ resource_search_list_xml(pcmk__output_t *out, va_list args) - NULL); - - for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) { -- pe_node_t *node = (pe_node_t *) lpc->data; -- xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname); -+ node_info_t *ni = (node_info_t *) lpc->data; -+ xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", ni->node_name); - -- if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { -+ if (ni->promoted) { - crm_xml_add(sub_node, "state", "promoted"); - } - } -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index adfdfba..1769042 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -41,20 +41,37 @@ cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed) - return rc; - } - -+static GListPtr -+build_node_info_list(pe_resource_t *rsc) -+{ -+ GListPtr retval = NULL; -+ -+ for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { -+ pe_resource_t *child = (pe_resource_t *) iter->data; -+ -+ for (GListPtr iter2 = child->running_on; iter2 != NULL; iter2 = iter2->next) { -+ pe_node_t *node = (pe_node_t *) 
iter2->data; -+ node_info_t *ni = calloc(1, sizeof(node_info_t)); -+ ni->node_name = node->details->uname; -+ ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) && -+ child->fns->state(child, TRUE) == RSC_ROLE_MASTER; -+ -+ retval = g_list_prepend(retval, ni); -+ } -+ } -+ -+ return retval; -+} -+ - GListPtr - cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, - pe_working_set_t *data_set) - { -- GListPtr found = NULL; -+ GListPtr retval = NULL; - pe_resource_t *parent = uber_parent(rsc); - - if (pe_rsc_is_clone(rsc)) { -- for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { -- GListPtr extra = ((pe_resource_t *) iter->data)->running_on; -- if (extra != NULL) { -- found = g_list_concat(found, extra); -- } -- } -+ retval = build_node_info_list(rsc); - - /* The anonymous clone children's common ID is supplied */ - } else if (pe_rsc_is_clone(parent) -@@ -63,18 +80,20 @@ cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *request - && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei) - && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) { - -- for (GListPtr iter = parent->children; iter; iter = iter->next) { -- GListPtr extra = ((pe_resource_t *) iter->data)->running_on; -- if (extra != NULL) { -- found = g_list_concat(found, extra); -- } -- } -+ retval = build_node_info_list(parent); - - } else if (rsc->running_on != NULL) { -- found = g_list_concat(found, rsc->running_on); -+ for (GListPtr iter = rsc->running_on; iter != NULL; iter = iter->next) { -+ pe_node_t *node = (pe_node_t *) iter->data; -+ node_info_t *ni = calloc(1, sizeof(node_info_t)); -+ ni->node_name = node->details->uname; -+ ni->promoted = rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER; -+ -+ retval = g_list_prepend(retval, ni); -+ } - } - -- return found; -+ return retval; - } - - #define XPATH_MAX 1024 -@@ -1788,6 +1807,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, - "the force option"); - return CRM_EX_UNSAFE; - } -+ -+ g_list_free_full(nodes, free); - } - - } else { --- -1.8.3.1 - diff --git a/SOURCES/037-scheduler.patch b/SOURCES/037-scheduler.patch deleted file mode 100644 index d349116..0000000 --- a/SOURCES/037-scheduler.patch +++ /dev/null @@ -1,6493 +0,0 @@ -From 8d06d1493ed774ab502b63375421f788ebe9a10c Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 15 Feb 2021 15:52:09 -0600 -Subject: [PATCH 1/3] Log: scheduler: add trace messages when routing remote - actions - -This area of code has had a few bugs, and this will be helpful when diagnosing -similar ones. ---- - lib/pacemaker/pcmk_sched_graph.c | 95 +++++++++++++++++++++++++--------------- - 1 file changed, 59 insertions(+), 36 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c -index f0d1f47..b3b088c 100644 ---- a/lib/pacemaker/pcmk_sched_graph.c -+++ b/lib/pacemaker/pcmk_sched_graph.c -@@ -1,5 +1,5 @@ - /* -- * Copyright 2004-2020 the Pacemaker project contributors -+ * Copyright 2004-2021 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * -@@ -783,7 +783,6 @@ get_router_node(pe_action_t *action) - { - pe_node_t *began_on = NULL; - pe_node_t *ended_on = NULL; -- pe_node_t *router_node = NULL; - bool partial_migration = FALSE; - const char *task = action->task; - -@@ -802,52 +801,76 @@ get_router_node(pe_action_t *action) - partial_migration = TRUE; - } - -- /* if there is only one location to choose from, -- * this is easy. 
Check for those conditions first */ -- if (!began_on || !ended_on) { -- /* remote rsc is either shutting down or starting up */ -- return began_on ? began_on : ended_on; -- } else if (began_on->details == ended_on->details) { -- /* remote rsc didn't move nodes. */ -+ if (began_on == NULL) { -+ crm_trace("Routing %s for %s through remote connection's " -+ "next node %s (starting)%s", -+ action->task, (action->rsc? action->rsc->id : "no resource"), -+ (ended_on? ended_on->details->uname : "none"), -+ partial_migration? " (partial migration)" : ""); -+ return ended_on; -+ } -+ -+ if (ended_on == NULL) { -+ crm_trace("Routing %s for %s through remote connection's " -+ "current node %s (stopping)%s", -+ action->task, (action->rsc? action->rsc->id : "no resource"), -+ (began_on? began_on->details->uname : "none"), -+ partial_migration? " (partial migration)" : ""); - return began_on; - } - -- /* If we have get here, we know the remote resource -- * began on one node and is moving to another node. -- * -- * This means some actions will get routed through the cluster -- * node the connection rsc began on, and others are routed through -- * the cluster node the connection rsc ends up on. -- * -- * 1. stop, demote, migrate actions of resources living in the remote -- * node _MUST_ occur _BEFORE_ the connection can move (these actions -- * are all required before the remote rsc stop action can occur.) In -- * this case, we know these actions have to be routed through the initial -- * cluster node the connection resource lived on before the move takes place. -- * The exception is a partial migration of a (non-guest) remote -- * connection resource; in that case, all actions (even these) will be -- * ordered after the connection's pseudo-start on the migration target, -- * so the target is the router node. -- * -- * 2. Everything else (start, promote, monitor, probe, refresh, clear failcount -- * delete ....) must occur after the resource starts on the node it is -- * moving to. -+ if (began_on->details == ended_on->details) { -+ crm_trace("Routing %s for %s through remote connection's " -+ "current node %s (not moving)%s", -+ action->task, (action->rsc? action->rsc->id : "no resource"), -+ (began_on? began_on->details->uname : "none"), -+ partial_migration? " (partial migration)" : ""); -+ return began_on; -+ } -+ -+ /* If we get here, the remote connection is moving during this transition. -+ * This means some actions for resources behind the connection will get -+ * routed through the cluster node the connection reource is currently on, -+ * and others are routed through the cluster node the connection will end up -+ * on. - */ - - if (pcmk__str_eq(task, "notify", pcmk__str_casei)) { - task = g_hash_table_lookup(action->meta, "notify_operation"); - } - -- /* 1. before connection rsc moves. */ -+ /* -+ * Stop, demote, and migration actions must occur before the connection can -+ * move (these actions are required before the remote resource can stop). In -+ * this case, we know these actions have to be routed through the initial -+ * cluster node the connection resource lived on before the move takes -+ * place. -+ * -+ * The exception is a partial migration of a (non-guest) remote connection -+ * resource; in that case, all actions (even these) will be ordered after -+ * the connection's pseudo-start on the migration target, so the target is -+ * the router node. 
-+ */ - if (pcmk__strcase_any_of(task, "stop", "demote", "migrate_from", "migrate_to", - NULL) && !partial_migration) { -- router_node = began_on; -- -- /* 2. after connection rsc moves. */ -- } else { -- router_node = ended_on; -+ crm_trace("Routing %s for %s through remote connection's " -+ "current node %s (moving)%s", -+ action->task, (action->rsc? action->rsc->id : "no resource"), -+ (began_on? began_on->details->uname : "none"), -+ partial_migration? " (partial migration)" : ""); -+ return began_on; - } -- return router_node; -+ -+ /* Everything else (start, promote, monitor, probe, refresh, -+ * clear failcount, delete, ...) must occur after the connection starts on -+ * the node it is moving to. -+ */ -+ crm_trace("Routing %s for %s through remote connection's " -+ "next node %s (moving)%s", -+ action->task, (action->rsc? action->rsc->id : "no resource"), -+ (ended_on? ended_on->details->uname : "none"), -+ partial_migration? " (partial migration)" : ""); -+ return ended_on; - } - - /*! --- -1.8.3.1 - - -From 8f5b73c07a52d74e333d21fe9658e8ff9ee9664b Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 15 Feb 2021 15:55:29 -0600 -Subject: [PATCH 2/3] Fix: scheduler: route monitor cancellations behind moving - remote connections correctly - ---- - lib/pacemaker/pcmk_sched_graph.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c -index b3b088c..466fb9f 100644 ---- a/lib/pacemaker/pcmk_sched_graph.c -+++ b/lib/pacemaker/pcmk_sched_graph.c -@@ -851,8 +851,8 @@ get_router_node(pe_action_t *action) - * the connection's pseudo-start on the migration target, so the target is - * the router node. - */ -- if (pcmk__strcase_any_of(task, "stop", "demote", "migrate_from", "migrate_to", -- NULL) && !partial_migration) { -+ if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from", -+ "migrate_to", NULL) && !partial_migration) { - crm_trace("Routing %s for %s through remote connection's " - "current node %s (moving)%s", - action->task, (action->rsc? 
action->rsc->id : "no resource"), --- -1.8.3.1 - - -From 1f1ca1586532a75d5cad184fcfa85f991d82b74a Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 15 Feb 2021 16:43:11 -0600 -Subject: [PATCH 3/3] Test: scheduler: add regression test for cancel behind - moving remote - ---- - cts/cts-scheduler.in | 2 + - cts/scheduler/cancel-behind-moving-remote.dot | 213 ++ - cts/scheduler/cancel-behind-moving-remote.exp | 1137 +++++++++ - cts/scheduler/cancel-behind-moving-remote.scores | 2559 +++++++++++++++++++++ - cts/scheduler/cancel-behind-moving-remote.summary | 235 ++ - cts/scheduler/cancel-behind-moving-remote.xml | 2108 +++++++++++++++++ - 6 files changed, 6254 insertions(+) - create mode 100644 cts/scheduler/cancel-behind-moving-remote.dot - create mode 100644 cts/scheduler/cancel-behind-moving-remote.exp - create mode 100644 cts/scheduler/cancel-behind-moving-remote.scores - create mode 100644 cts/scheduler/cancel-behind-moving-remote.summary - create mode 100644 cts/scheduler/cancel-behind-moving-remote.xml - -diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in -index 027ddf9..2cbbaa6 100644 ---- a/cts/cts-scheduler.in -+++ b/cts/cts-scheduler.in -@@ -998,6 +998,8 @@ TESTS = [ - [ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ], - [ "remote-connection-unrecoverable", - "Remote connection host must be fenced, with connection unrecoverable" ], -+ [ "cancel-behind-moving-remote", -+ "Route recurring monitor cancellations through original node of a moving remote connection" ], - ], - [ - [ "resource-discovery", "Exercises resource-discovery location constraint option" ], -diff --git a/cts/scheduler/cancel-behind-moving-remote.dot b/cts/scheduler/cancel-behind-moving-remote.dot -new file mode 100644 -index 0000000..427d487 ---- /dev/null -+++ b/cts/scheduler/cancel-behind-moving-remote.dot -@@ -0,0 +1,213 @@ -+ digraph "g" { -+"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -+"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -+"ip-172.17.1.87_monitor_10000 controller-0" [ style=bold color="green" fontcolor="black"] -+"ip-172.17.1.87_start_0 controller-0" -> "ip-172.17.1.87_monitor_10000 controller-0" [ style = bold] -+"ip-172.17.1.87_start_0 controller-0" [ style=bold color="green" fontcolor="black"] -+"nova-evacuate_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-0_clear_failcount_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold] -+"ovn-dbs-bundle-0_clear_failcount_0 controller-0" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-0_monitor_30000 controller-2" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-0_start_0 controller-2" -> "ovn-dbs-bundle-0_monitor_30000 controller-2" [ style = bold] -+"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-1_clear_failcount_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle-1_clear_failcount_0 controller-2" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-1_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-1_start_0 controller-0" -> 
"ovn-dbs-bundle-1_monitor_30000 controller-0" [ style = bold] -+"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold] -+"ovn-dbs-bundle-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovn-dbs-bundle_promoted_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovn-dbs-bundle_running_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_start_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle_stopped_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_post_notify_running_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold] -+"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_promoted_0" -> 
"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-2" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_post_notify_running_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers:0_post_notify_start_0 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style = bold] -+"ovn-dbs-bundle-master_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style = bold] -+"ovn-dbs-bundle-master_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_promote_0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_promoted_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold] -+"ovn-dbs-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_running_0" -> "ovn-dbs-bundle-master_post_notify_running_0" [ style = bold] -+"ovn-dbs-bundle-master_running_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] -+"ovn-dbs-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_start_0" -> "ovn-dbs-bundle-master_running_0" [ style = bold] -+"ovn-dbs-bundle-master_start_0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-master_start_0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] 
-+"ovn-dbs-bundle-master_stop_0" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] -+"ovn-dbs-bundle-master_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold] -+"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] -+"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] -+"ovn-dbs-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold] -+"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style = bold] -+"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle_running_0" [ style = bold] -+"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -+"ovn-dbs-bundle-podman-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style = bold] -+"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold] -+"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle-podman-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle_stopped_0" [ style = bold] -+"ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] -+"ovn-dbs-bundle_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] -+"ovn-dbs-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle_promoted_0" -> "ip-172.17.1.87_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle_running_0" -> "ovn-dbs-bundle_promote_0" [ style = bold] -+"ovn-dbs-bundle_running_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] -+"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-2" [ style = bold] -+"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold] -+"ovn-dbs-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold] -+"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold] -+"ovn-dbs-bundle_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold] -+"ovn-dbs-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] -+"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_promote_0" [ style = bold] -+"ovn-dbs-bundle_stopped_0" -> 
"ovn-dbs-bundle_start_0" [ style = bold] -+"ovn-dbs-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] -+"ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers:0_post_notify_start_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold] -+"ovndb_servers:0_post_notify_start_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] -+"ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_running_0" [ style = bold] -+"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -+"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -+"ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] -+"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] -+"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold] -+"ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold] -+"ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold] -+"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] -+"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] -+"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] -+"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] 
-+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_promoted_0" [ style = bold] -+"ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -+"ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_running_0" [ style = bold] -+"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -+"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -+"ovndb_servers_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -+"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-1_stop_0 controller-2" [ style = bold] -+"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] -+"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -+"ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -+"rabbitmq-bundle-1_monitor_30000 controller-0" [ style=dashed color="red" fontcolor="black"] -+"rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq-bundle-1_monitor_30000 controller-0" [ style = dashed] -+"rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq:1_monitor_10000 rabbitmq-bundle-1" [ style = dashed] -+"rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq:1_start_0 rabbitmq-bundle-1" [ style = dashed] -+"rabbitmq-bundle-1_start_0 controller-0" [ style=dashed color="red" fontcolor="black"] -+"rabbitmq-bundle-clone_confirmed-post_notify_running_0" -> "rabbitmq-bundle_running_0" [ style = bold] -+"rabbitmq-bundle-clone_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"] -+"rabbitmq-bundle-clone_confirmed-pre_notify_start_0" -> "rabbitmq-bundle-clone_post_notify_running_0" [ style = bold] -+"rabbitmq-bundle-clone_confirmed-pre_notify_start_0" -> "rabbitmq-bundle-clone_start_0" [ style = bold] -+"rabbitmq-bundle-clone_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -+"rabbitmq-bundle-clone_post_notify_running_0" -> "rabbitmq-bundle-clone_confirmed-post_notify_running_0" [ style = bold] -+"rabbitmq-bundle-clone_post_notify_running_0" [ style=bold color="green" fontcolor="orange"] -+"rabbitmq-bundle-clone_pre_notify_start_0" -> "rabbitmq-bundle-clone_confirmed-pre_notify_start_0" [ style = bold] -+"rabbitmq-bundle-clone_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -+"rabbitmq-bundle-clone_running_0" -> "rabbitmq-bundle-clone_post_notify_running_0" [ style = bold] -+"rabbitmq-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"rabbitmq-bundle-clone_start_0" -> "rabbitmq-bundle-clone_running_0" [ style = bold] -+"rabbitmq-bundle-clone_start_0" -> "rabbitmq:1_start_0 rabbitmq-bundle-1" [ style = dashed] -+"rabbitmq-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"rabbitmq-bundle_running_0" [ style=bold color="green" fontcolor="orange"] -+"rabbitmq-bundle_start_0" -> "rabbitmq-bundle-clone_start_0" [ style = bold] -+"rabbitmq-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -+"rabbitmq:1_monitor_10000 rabbitmq-bundle-1" [ style=dashed color="red" fontcolor="black"] 
-+"rabbitmq:1_start_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_running_0" [ style = dashed] -+"rabbitmq:1_start_0 rabbitmq-bundle-1" -> "rabbitmq:1_monitor_10000 rabbitmq-bundle-1" [ style = dashed] -+"rabbitmq:1_start_0 rabbitmq-bundle-1" [ style=dashed color="red" fontcolor="black"] -+"stonith-fence_compute-fence-nova_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400066e50_clear_failcount_0 messaging-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-52540040bb56_monitor_60000 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-52540040bb56_start_0 database-0" -> "stonith-fence_ipmilan-52540040bb56_monitor_60000 database-0" [ style = bold] -+"stonith-fence_ipmilan-52540040bb56_start_0 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-52540040bb56_stop_0 messaging-2" -> "stonith-fence_ipmilan-52540040bb56_start_0 database-0" [ style = bold] -+"stonith-fence_ipmilan-52540040bb56_stop_0 messaging-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-52540060dbba_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-52540078fb07_clear_failcount_0 messaging-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400aa1373_clear_failcount_0 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400c87cdb_clear_failcount_0 database-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400dc23e0_clear_failcount_0 database-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400e018b6_clear_failcount_0 database-0" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400e1534e_clear_failcount_0 database-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400e1534e_monitor_60000 messaging-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400e1534e_start_0 messaging-2" -> "stonith-fence_ipmilan-525400e1534e_monitor_60000 messaging-2" [ style = bold] -+"stonith-fence_ipmilan-525400e1534e_start_0 messaging-2" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400e1534e_stop_0 database-1" -> "stonith-fence_ipmilan-525400e1534e_start_0 messaging-2" [ style = bold] -+"stonith-fence_ipmilan-525400e1534e_stop_0 database-1" [ style=bold color="green" fontcolor="black"] -+"stonith-fence_ipmilan-525400ea59b0_clear_failcount_0 database-0" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/cts/scheduler/cancel-behind-moving-remote.exp b/cts/scheduler/cancel-behind-moving-remote.exp -new file mode 100644 -index 0000000..933c2be ---- /dev/null -+++ b/cts/scheduler/cancel-behind-moving-remote.exp -@@ -0,0 +1,1137 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/cts/scheduler/cancel-behind-moving-remote.scores b/cts/scheduler/cancel-behind-moving-remote.scores -new file mode 100644 -index 0000000..6813b2e ---- /dev/null -+++ b/cts/scheduler/cancel-behind-moving-remote.scores -@@ -0,0 +1,2559 @@ -+Allocation scores: -+Only 'private' parameters to nova-evacuate_monitor_10000 on database-2 changed: 0:0;280:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to nova-evacuate_start_0 on database-2 changed: 0:0;279:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_compute-fence-nova_monitor_60000 on database-1 changed: 0:0;275:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_compute-fence-nova_start_0 on database-1 changed: 0:0;273:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 
'private' parameters to stonith-fence_ipmilan-525400066e50_monitor_60000 on database-2 changed: 0:0;305:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400066e50_start_0 on database-2 changed: 0:0;304:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540040bb56_monitor_60000 on messaging-2 changed: 0:0;295:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540040bb56_start_0 on messaging-2 changed: 0:0;293:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540060dbba_monitor_60000 on database-2 changed: 0:0;307:51:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540060dbba_start_0 on database-2 changed: 0:0;306:51:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540078fb07_monitor_60000 on database-0 changed: 0:0;296:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540078fb07_start_0 on database-0 changed: 0:0;294:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400aa1373_monitor_60000 on messaging-0 changed: 0:0;284:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400aa1373_start_0 on messaging-0 changed: 0:0;282:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400addd38_monitor_60000 on messaging-0 changed: 0:0;297:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400addd38_start_0 on messaging-0 changed: 0:0;295:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400c87cdb_monitor_60000 on messaging-0 changed: 0:0;311:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400c87cdb_start_0 on messaging-0 changed: 0:0;310:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400dc23e0_monitor_60000 on messaging-2 changed: 0:0;288:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400dc23e0_start_0 on messaging-2 changed: 0:0;286:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400e018b6_monitor_60000 on database-0 changed: 0:0;312:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400e018b6_start_0 on database-0 changed: 0:0;311:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400e1534e_monitor_60000 on database-1 changed: 0:0;302:39:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400e1534e_start_0 on database-1 changed: 0:0;301:39:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400ea59b0_monitor_60000 on database-1 changed: 0:0;299:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400ea59b0_start_0 on database-1 changed: 0:0;298:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Using the original execution date of: 2021-02-15 01:40:51Z -+galera:0 promotion score on galera-bundle-0: 100 -+galera:1 promotion score on galera-bundle-1: 100 -+galera:2 promotion score on galera-bundle-2: 100 -+ovndb_servers:0 promotion score on 
ovn-dbs-bundle-0: -1 -+ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5 -+ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 -+pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: 0 
-+pcmk__bundle_allocate: galera-bundle-master allocation score on compute-0: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on compute-1: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0 -+pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0 -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on 
controller-2: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501 -+pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501 -+pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501 -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle allocation score 
on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 
allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY 
-+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on 
compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: 
haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY 
-+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 
allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 -+pcmk__bundle_allocate: openstack-cinder-volume 
allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: 
ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 10000 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 10000 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation 
score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 10000 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on 
ovn-dbs-bundle-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: 5 -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: 5 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY 
-+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 500 -+pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY -+pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501 -+pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY -+pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501 -+pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 
-INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone 
allocation score on controller-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY 
-+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 -+pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500 -+pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 -+pcmk__bundle_allocate: redis-bundle allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on 
database-1: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: 0 -+pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on compute-0: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on compute-1: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0 -+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-1: 
-INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: 0 -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501 -+pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501 -+pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501 -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on compute-0: 1 -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-0: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on compute-1: 1 -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 
allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-0: 
-INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on 
redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: 
compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: 
compute-unfence-trigger:16 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: 
compute-unfence-trigger:17 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation 
score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on compute-1: 0 
-+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-1: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: 
compute-unfence-trigger:3 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 
allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: 
compute-unfence-trigger:6 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on 
controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on compute-0: 0 -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on compute-1: 0 -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-1: -INFINITY 
-+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-0: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-1: -INFINITY -+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-2: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on compute-0: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on compute-1: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on database-0: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on database-1: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on database-2: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-0: 0 -+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-1: 0 -+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-2: 0 -+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -+pcmk__clone_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -+pcmk__clone_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on compute-0: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on compute-1: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-0: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-1: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-2: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: 0 -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: 0 -+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: 0 -+pcmk__clone_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY -+pcmk__clone_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY -+pcmk__clone_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on compute-0: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on compute-1: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone 
allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-0: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-1: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-2: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: 0 -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: 0 -+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: 0 -+pcmk__clone_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -+pcmk__clone_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -+pcmk__clone_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on compute-0: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on compute-1: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on controller-0: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on controller-1: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on controller-2: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on database-0: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on database-1: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on database-2: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-0: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-1: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-2: -INFINITY -+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-0: 0 -+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-1: 0 -+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-2: 0 -+pcmk__clone_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -+pcmk__clone_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -+pcmk__clone_allocate: redis:2 allocation score on redis-bundle-2: INFINITY -+pcmk__native_allocate: compute-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-0 allocation score on controller-0: 0 -+pcmk__native_allocate: compute-0 allocation score on controller-1: 0 -+pcmk__native_allocate: compute-0 allocation score on controller-2: 0 -+pcmk__native_allocate: compute-0 allocation score on database-0: 0 -+pcmk__native_allocate: compute-0 allocation score on database-1: 0 -+pcmk__native_allocate: compute-0 allocation score on database-2: 0 -+pcmk__native_allocate: compute-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: compute-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: compute-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: compute-1 allocation score on compute-0: -INFINITY 
-+pcmk__native_allocate: compute-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-1 allocation score on controller-0: 0 -+pcmk__native_allocate: compute-1 allocation score on controller-1: 0 -+pcmk__native_allocate: compute-1 allocation score on controller-2: 0 -+pcmk__native_allocate: compute-1 allocation score on database-0: 0 -+pcmk__native_allocate: compute-1 allocation score on database-1: 0 -+pcmk__native_allocate: compute-1 allocation score on database-2: 0 -+pcmk__native_allocate: compute-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: compute-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: compute-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on compute-0: 1 -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on compute-1: 0 -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on compute-1: 1 -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-1: -INFINITY 
-+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 
allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: 
compute-unfence-trigger:12 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score 
on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: 
compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on 
database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: 
compute-unfence-trigger:18 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-1: -INFINITY 
-+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 
allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: 
compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-2: -INFINITY 
-+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-1: 
-INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-1: 
-INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation 
score on compute-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-2: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-0: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-1: -INFINITY -+pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-2: -INFINITY -+pcmk__native_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-0 allocation score on controller-0: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on controller-1: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on controller-2: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on database-0: 10000 -+pcmk__native_allocate: galera-bundle-0 allocation score on database-1: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: galera-bundle-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-1 allocation score on controller-0: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on controller-1: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on controller-2: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on database-0: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on database-1: 10000 
-+pcmk__native_allocate: galera-bundle-1 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: galera-bundle-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-2 allocation score on controller-0: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on controller-1: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on controller-2: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on database-0: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on database-1: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on database-2: 10000 -+pcmk__native_allocate: galera-bundle-2 allocation score on messaging-0: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on messaging-1: 0 -+pcmk__native_allocate: galera-bundle-2 allocation score on messaging-2: 0 -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-0: 0 -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-1: 0 -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-1: 0 -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-2: 
-INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-2: 0 -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -+pcmk__native_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -+pcmk__native_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 
allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-1: 0 -+pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-2: INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-1: INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-2: 0 -+pcmk__native_allocate: ip-172.17.1.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-1: INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-2: 0 -+pcmk__native_allocate: ip-172.17.1.151 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-172.17.1.87 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on 
database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.1.87 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-1: INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-2: 0 -+pcmk__native_allocate: ip-172.17.3.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-1: 0 -+pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-2: INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-0: 0 -+pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-1: INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-2: 0 -+pcmk__native_allocate: ip-192.168.24.150 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: nova-evacuate allocation score on compute-0: -INFINITY -+pcmk__native_allocate: nova-evacuate allocation score on compute-1: -INFINITY -+pcmk__native_allocate: nova-evacuate allocation score on controller-0: 0 -+pcmk__native_allocate: nova-evacuate allocation score on controller-1: 0 -+pcmk__native_allocate: nova-evacuate allocation score on controller-2: 0 -+pcmk__native_allocate: nova-evacuate allocation score on database-0: 0 -+pcmk__native_allocate: nova-evacuate allocation score on database-1: 0 
-+pcmk__native_allocate: nova-evacuate allocation score on database-2: 0 -+pcmk__native_allocate: nova-evacuate allocation score on messaging-0: 0 -+pcmk__native_allocate: nova-evacuate allocation score on messaging-1: 0 -+pcmk__native_allocate: nova-evacuate allocation score on messaging-2: 0 -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 10000 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 10000 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 10000 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 
allocation score on database-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 
allocation score on compute-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY -+pcmk__native_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY -+pcmk__native_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 10000 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation 
score on messaging-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0 -+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 10000 -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 -+pcmk__native_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -+pcmk__native_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -+pcmk__native_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -+pcmk__native_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY 
-+pcmk__native_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-0 allocation score on controller-0: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on controller-2: 10000 -+pcmk__native_allocate: redis-bundle-0 allocation score on database-0: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on database-1: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on database-2: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on messaging-0: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on messaging-1: 0 -+pcmk__native_allocate: redis-bundle-0 allocation score on messaging-2: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-1 allocation score on controller-0: 10000 -+pcmk__native_allocate: redis-bundle-1 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on controller-2: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on database-0: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on database-1: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on database-2: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on messaging-0: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on messaging-1: 0 -+pcmk__native_allocate: redis-bundle-1 allocation score on messaging-2: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-2 allocation score on controller-0: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on controller-1: 10000 -+pcmk__native_allocate: redis-bundle-2 allocation score on controller-2: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on database-0: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on database-1: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on database-2: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on messaging-0: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on messaging-1: 0 -+pcmk__native_allocate: redis-bundle-2 allocation score on messaging-2: 0 -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-0: 0 -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation 
score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-1: 0 -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY -+pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY -+pcmk__native_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -+pcmk__native_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -+pcmk__native_allocate: redis:2 allocation score on redis-bundle-2: INFINITY -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on 
controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on database-1: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400066e50 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540040bb56 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on controller-2: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540060dbba allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation 
score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on messaging-1: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-52540078fb07 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400aa1373 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400addd38 allocation score on messaging-2: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on controller-0: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400c87cdb allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: 
stonith-fence_ipmilan-525400dc23e0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on database-0: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400dc23e0 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on controller-1: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e018b6 allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on controller-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on database-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on messaging-0: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400e1534e allocation score on messaging-2: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on compute-0: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on compute-1: -INFINITY -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on controller-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on controller-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on controller-2: 0 
-+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on database-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on database-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on database-2: -10000 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on messaging-0: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on messaging-1: 0 -+pcmk__native_allocate: stonith-fence_ipmilan-525400ea59b0 allocation score on messaging-2: 0 -+redis:0 promotion score on redis-bundle-0: 1 -+redis:1 promotion score on redis-bundle-1: 1 -+redis:2 promotion score on redis-bundle-2: 1 -diff --git a/cts/scheduler/cancel-behind-moving-remote.summary b/cts/scheduler/cancel-behind-moving-remote.summary -new file mode 100644 -index 0000000..b725ef4 ---- /dev/null -+++ b/cts/scheduler/cancel-behind-moving-remote.summary -@@ -0,0 +1,235 @@ -+Using the original execution date of: 2021-02-15 01:40:51Z -+ -+Current cluster status: -+Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ] -+OFFLINE: [ messaging-1 ] -+RemoteOnline: [ compute-0 compute-1 ] -+GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 galera-bundle-1:galera-bundle-podman-1 galera-bundle-2:galera-bundle-podman-2 ovn-dbs-bundle-1:ovn-dbs-bundle-podman-1 ovn-dbs-bundle-2:ovn-dbs-bundle-podman-2 rabbitmq-bundle-0:rabbitmq-bundle-podman-0 rabbitmq-bundle-2:rabbitmq-bundle-podman-2 redis-bundle-0:redis-bundle-podman-0 redis-bundle-1:redis-bundle-podman-1 redis-bundle-2:redis-bundle-podman-2 ] -+ -+ compute-0 (ocf::pacemaker:remote): Started controller-1 -+ compute-1 (ocf::pacemaker:remote): Started controller-2 -+ Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest] -+ galera-bundle-0 (ocf::heartbeat:galera): Master database-0 -+ galera-bundle-1 (ocf::heartbeat:galera): Master database-1 -+ galera-bundle-2 (ocf::heartbeat:galera): Master database-2 -+ Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest] -+ rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started messaging-0 -+ rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Stopped -+ rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started messaging-2 -+ Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest] -+ redis-bundle-0 (ocf::heartbeat:redis): Master controller-2 -+ redis-bundle-1 (ocf::heartbeat:redis): Slave controller-0 -+ redis-bundle-2 (ocf::heartbeat:redis): Slave controller-1 -+ ip-192.168.24.150 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-10.0.0.150 (ocf::heartbeat:IPaddr2): Started controller-2 -+ ip-172.17.1.151 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-172.17.1.150 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-172.17.3.150 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-172.17.4.150 (ocf::heartbeat:IPaddr2): Started controller-2 -+ Container bundle set: haproxy-bundle [cluster.common.tag/rhosp16-openstack-haproxy:pcmklatest] -+ haproxy-bundle-podman-0 (ocf::heartbeat:podman): Started controller-2 -+ haproxy-bundle-podman-1 (ocf::heartbeat:podman): Started controller-0 -+ haproxy-bundle-podman-2 (ocf::heartbeat:podman): Started controller-1 -+ Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest] -+ ovn-dbs-bundle-0 (ocf::ovn:ovndb-servers): Stopped -+ ovn-dbs-bundle-1 
(ocf::ovn:ovndb-servers): Slave controller-2 -+ ovn-dbs-bundle-2 (ocf::ovn:ovndb-servers): Slave controller-1 -+ ip-172.17.1.87 (ocf::heartbeat:IPaddr2): Stopped -+ stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 -+ Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger] -+ Started: [ compute-0 compute-1 ] -+ Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] -+ nova-evacuate (ocf::openstack:NovaEvacuate): Started database-2 -+ stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0 -+ stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2 -+ stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started messaging-2 -+ stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0 -+ stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0 -+ stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1 -+ stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2 -+ stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started database-1 -+ stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2 -+ stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0 -+ stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started messaging-0 -+ Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest] -+ openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-2 -+ -+Only 'private' parameters to stonith-fence_compute-fence-nova_start_0 on database-1 changed: 0:0;273:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_compute-fence-nova_monitor_60000 on database-1 changed: 0:0;275:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400ea59b0_start_0 on database-1 changed: 0:0;298:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400ea59b0_monitor_60000 on database-1 changed: 0:0;299:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400e1534e_start_0 on database-1 changed: 0:0;301:39:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400e1534e_monitor_60000 on database-1 changed: 0:0;302:39:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540078fb07_start_0 on database-0 changed: 0:0;294:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540078fb07_monitor_60000 on database-0 changed: 0:0;296:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400e018b6_start_0 on database-0 changed: 0:0;311:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400e018b6_monitor_60000 on database-0 changed: 0:0;312:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400aa1373_start_0 on messaging-0 changed: 0:0;282:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400aa1373_monitor_60000 on messaging-0 changed: 0:0;284:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400addd38_start_0 on messaging-0 changed: 
0:0;295:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400addd38_monitor_60000 on messaging-0 changed: 0:0;297:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400c87cdb_start_0 on messaging-0 changed: 0:0;310:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400c87cdb_monitor_60000 on messaging-0 changed: 0:0;311:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400dc23e0_start_0 on messaging-2 changed: 0:0;286:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400dc23e0_monitor_60000 on messaging-2 changed: 0:0;288:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540040bb56_start_0 on messaging-2 changed: 0:0;293:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540040bb56_monitor_60000 on messaging-2 changed: 0:0;295:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to nova-evacuate_start_0 on database-2 changed: 0:0;279:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to nova-evacuate_monitor_10000 on database-2 changed: 0:0;280:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400066e50_start_0 on database-2 changed: 0:0;304:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-525400066e50_monitor_60000 on database-2 changed: 0:0;305:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540060dbba_start_0 on database-2 changed: 0:0;306:51:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Only 'private' parameters to stonith-fence_ipmilan-52540060dbba_monitor_60000 on database-2 changed: 0:0;307:51:0:ef0c178d-d0fc-4118-9005-2571eab8a55d -+Transition Summary: -+ * Start rabbitmq-bundle-1 ( controller-0 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) -+ * Start rabbitmq:1 ( rabbitmq-bundle-1 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) -+ * Start ovn-dbs-bundle-podman-0 ( controller-2 ) -+ * Start ovn-dbs-bundle-0 ( controller-2 ) -+ * Start ovndb_servers:0 ( ovn-dbs-bundle-0 ) -+ * Move ovn-dbs-bundle-podman-1 ( controller-2 -> controller-0 ) -+ * Move ovn-dbs-bundle-1 ( controller-2 -> controller-0 ) -+ * Restart ovndb_servers:1 ( Slave -> Master ovn-dbs-bundle-1 ) due to required ovn-dbs-bundle-podman-1 start -+ * Start ip-172.17.1.87 ( controller-0 ) -+ * Move stonith-fence_ipmilan-52540040bb56 ( messaging-2 -> database-0 ) -+ * Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 ) -+ -+Executing cluster transition: -+ * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 -+ * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 -+ * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 -+ * Cluster action: clear_failcount for ovn-dbs-bundle-0 on controller-0 -+ * Cluster action: clear_failcount for ovn-dbs-bundle-1 on controller-2 -+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0 -+ * Cluster action: clear_failcount for nova-evacuate on messaging-0 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-525400dc23e0 on database-2 -+ * Resource action: stonith-fence_ipmilan-52540040bb56 stop on 
messaging-2 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-52540078fb07 on messaging-2 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-525400ea59b0 on database-0 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-525400066e50 on messaging-2 -+ * Resource action: stonith-fence_ipmilan-525400e1534e stop on database-1 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e1534e on database-2 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0 -+ * Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2 -+ * Pseudo action: ovn-dbs-bundle_stop_0 -+ * Pseudo action: rabbitmq-bundle_start_0 -+ * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 -+ * Pseudo action: rabbitmq-bundle-clone_start_0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -+ * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0 -+ * Pseudo action: ovn-dbs-bundle-master_stop_0 -+ * Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0 -+ * Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2 -+ * Pseudo action: rabbitmq-bundle-clone_running_0 -+ * Resource action: ovndb_servers stop on ovn-dbs-bundle-1 -+ * Pseudo action: ovn-dbs-bundle-master_stopped_0 -+ * Resource action: ovn-dbs-bundle-1 stop on controller-2 -+ * Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0 -+ * Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2 -+ * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 -+ * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0 -+ * Resource action: ovn-dbs-bundle-podman-1 stop on controller-2 -+ * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -+ * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0 -+ * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 -+ * Pseudo action: ovn-dbs-bundle_stopped_0 -+ * Pseudo action: ovn-dbs-bundle_start_0 -+ * Pseudo action: rabbitmq-bundle_running_0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -+ * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 -+ * Pseudo action: ovn-dbs-bundle-master_start_0 -+ * Resource action: ovn-dbs-bundle-podman-0 start on controller-2 -+ * Resource action: ovn-dbs-bundle-0 start on controller-2 -+ * Resource action: ovn-dbs-bundle-podman-1 start on controller-0 -+ * Resource action: ovn-dbs-bundle-1 start on controller-0 -+ * Resource action: ovndb_servers start on ovn-dbs-bundle-0 -+ * Resource action: ovndb_servers start on ovn-dbs-bundle-1 -+ * Pseudo action: ovn-dbs-bundle-master_running_0 -+ * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-2 -+ * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-2 -+ * Resource action: ovn-dbs-bundle-podman-1 monitor=60000 on controller-0 -+ * Resource action: ovn-dbs-bundle-1 monitor=30000 on controller-0 -+ * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -+ * Pseudo action: 
ovn-dbs-bundle-master_confirmed-post_notify_running_0 -+ * Pseudo action: ovn-dbs-bundle_running_0 -+ * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0 -+ * Pseudo action: ovn-dbs-bundle_promote_0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -+ * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0 -+ * Pseudo action: ovn-dbs-bundle-master_promote_0 -+ * Resource action: ovndb_servers promote on ovn-dbs-bundle-1 -+ * Pseudo action: ovn-dbs-bundle-master_promoted_0 -+ * Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 -+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 -+ * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0 -+ * Pseudo action: ovn-dbs-bundle_promoted_0 -+ * Resource action: ovndb_servers monitor=30000 on ovn-dbs-bundle-0 -+ * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1 -+ * Resource action: ip-172.17.1.87 start on controller-0 -+ * Resource action: ip-172.17.1.87 monitor=10000 on controller-0 -+Using the original execution date of: 2021-02-15 01:40:51Z -+ -+Revised cluster status: -+Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ] -+OFFLINE: [ messaging-1 ] -+RemoteOnline: [ compute-0 compute-1 ] -+GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 galera-bundle-1:galera-bundle-podman-1 galera-bundle-2:galera-bundle-podman-2 ovn-dbs-bundle-0:ovn-dbs-bundle-podman-0 ovn-dbs-bundle-1:ovn-dbs-bundle-podman-1 ovn-dbs-bundle-2:ovn-dbs-bundle-podman-2 rabbitmq-bundle-0:rabbitmq-bundle-podman-0 rabbitmq-bundle-2:rabbitmq-bundle-podman-2 redis-bundle-0:redis-bundle-podman-0 redis-bundle-1:redis-bundle-podman-1 redis-bundle-2:redis-bundle-podman-2 ] -+ -+ compute-0 (ocf::pacemaker:remote): Started controller-1 -+ compute-1 (ocf::pacemaker:remote): Started controller-2 -+ Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest] -+ galera-bundle-0 (ocf::heartbeat:galera): Master database-0 -+ galera-bundle-1 (ocf::heartbeat:galera): Master database-1 -+ galera-bundle-2 (ocf::heartbeat:galera): Master database-2 -+ Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest] -+ rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started messaging-0 -+ rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Stopped -+ rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started messaging-2 -+ Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest] -+ redis-bundle-0 (ocf::heartbeat:redis): Master controller-2 -+ redis-bundle-1 (ocf::heartbeat:redis): Slave controller-0 -+ redis-bundle-2 (ocf::heartbeat:redis): Slave controller-1 -+ ip-192.168.24.150 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-10.0.0.150 (ocf::heartbeat:IPaddr2): Started controller-2 -+ ip-172.17.1.151 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-172.17.1.150 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-172.17.3.150 (ocf::heartbeat:IPaddr2): Started controller-1 -+ ip-172.17.4.150 (ocf::heartbeat:IPaddr2): Started controller-2 -+ Container bundle set: haproxy-bundle [cluster.common.tag/rhosp16-openstack-haproxy:pcmklatest] -+ haproxy-bundle-podman-0 (ocf::heartbeat:podman): Started 
controller-2 -+ haproxy-bundle-podman-1 (ocf::heartbeat:podman): Started controller-0 -+ haproxy-bundle-podman-2 (ocf::heartbeat:podman): Started controller-1 -+ Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest] -+ ovn-dbs-bundle-0 (ocf::ovn:ovndb-servers): Slave controller-2 -+ ovn-dbs-bundle-1 (ocf::ovn:ovndb-servers): Master controller-0 -+ ovn-dbs-bundle-2 (ocf::ovn:ovndb-servers): Slave controller-1 -+ ip-172.17.1.87 (ocf::heartbeat:IPaddr2): Started controller-0 -+ stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 -+ Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger] -+ Started: [ compute-0 compute-1 ] -+ Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] -+ nova-evacuate (ocf::openstack:NovaEvacuate): Started database-2 -+ stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0 -+ stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2 -+ stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started database-0 -+ stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0 -+ stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0 -+ stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1 -+ stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2 -+ stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started messaging-2 -+ stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2 -+ stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0 -+ stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started messaging-0 -+ Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest] -+ openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-2 -+ -diff --git a/cts/scheduler/cancel-behind-moving-remote.xml b/cts/scheduler/cancel-behind-moving-remote.xml -new file mode 100644 -index 0000000..d52d9a4 ---- /dev/null -+++ b/cts/scheduler/cancel-behind-moving-remote.xml -@@ -0,0 +1,2108 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ --- -1.8.3.1 - diff --git a/SOURCES/038-feature-set.patch b/SOURCES/038-feature-set.patch deleted file mode 100644 index 01af1ee..0000000 --- a/SOURCES/038-feature-set.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 98589d8e1ef9b57d806702b9968ff7e5560e9c8f Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Fri, 12 Feb 2021 11:51:16 -0500 -Subject: [PATCH] Low: Fix a problem with crm_resource exit code handling. - -If no output is produced but an error message is printed (like, when an -inactive resource is provided on the command line), don't print the -error message for the pcmk_rc_no_output error code. It's weird to see -output and a message about no output at the same time. - -Similarly, don't print an "Error: OK" message when usage is printed. ---- - tools/crm_resource.c | 13 +++++++++---- - 1 file changed, 9 insertions(+), 4 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 7d2f0f6..29b0a04 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -1534,9 +1534,9 @@ main(int argc, char **argv) - - rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); - if (rc != pcmk_rc_ok) { -- fprintf(stderr, "Error creating output format %s: %s\n", -- args->output_ty, pcmk_rc_str(rc)); - exit_code = CRM_EX_ERROR; -+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s", -+ args->output_ty, pcmk_rc_str(rc)); - goto done; - } - -@@ -2039,7 +2039,12 @@ main(int argc, char **argv) - */ - - done: -- if (rc != pcmk_rc_ok) { -+ /* Don't do any of this for pcmk_rc_no_output (doesn't make sense to show an -+ * error message for no output) or for CRM_EX_USAGE (we don't want to show -+ * an "error: OK" message from pcmk_rc_str). 
-+ */ -+ if ((rc != pcmk_rc_ok && rc != pcmk_rc_no_output) || -+ (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE)) { - if (rc == pcmk_rc_no_quorum) { - g_prefix_error(&error, "To ignore quorum, use the force option.\n"); - } -@@ -2054,10 +2059,10 @@ done: - g_set_error(&error, PCMK__RC_ERROR, rc, - "Error performing operation: %s", pcmk_rc_str(rc)); - } -+ } - -- if (exit_code == CRM_EX_OK) { -- exit_code = pcmk_rc2exitc(rc); -- } -+ if (exit_code == CRM_EX_OK) { -+ exit_code = pcmk_rc2exitc(rc); - } - - g_free(options.host_uname); --- -1.8.3.1 - diff --git a/SOURCES/039-crm_mon.patch b/SOURCES/039-crm_mon.patch deleted file mode 100644 index e3d525f..0000000 --- a/SOURCES/039-crm_mon.patch +++ /dev/null @@ -1,760 +0,0 @@ -From bd4f396f267d8ef8f9c9bcbf286a77dc78d4e1b0 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 Mar 2021 10:26:13 -0500 -Subject: [PATCH 1/3] Med: Generate xml/crm_mon.rng from the contents of - xml/crm_mon*. - -This prevents the version reference in it from getting out of sync. - -See: rhbz#1931332 ---- - xml/Makefile.am | 28 +++++++++++++++++++++++----- - xml/crm_mon.rng | 16 ---------------- - 3 files changed, 24 insertions(+), 21 deletions(-) - delete mode 100644 xml/crm_mon.rng - -diff --git a/xml/Makefile.am b/xml/Makefile.am -index cb6cfa0..c52b968 100644 ---- a/xml/Makefile.am -+++ b/xml/Makefile.am -@@ -76,22 +76,24 @@ CIB_abs_xsl = $(abs_srcdir)/upgrade-1.3.xsl \ - $(abs_srcdir)/upgrade-2.10.xsl \ - $(wildcard $(abs_srcdir)/upgrade-*enter.xsl) \ - $(wildcard $(abs_srcdir)/upgrade-*leave.xsl) --MON_abs_files = $(abs_srcdir)/crm_mon.rng -+MON_abs_files = $(abs_srcdir)/crm_mon.rng - API_files = $(foreach base,$(API_base),$(wildcard $(srcdir)/api/$(base)*.rng)) - CIB_files = $(foreach base,$(CIB_base),$(wildcard $(srcdir)/$(base).rng $(srcdir)/$(base)-*.rng)) - CIB_xsl = $(srcdir)/upgrade-1.3.xsl \ - $(srcdir)/upgrade-2.10.xsl \ - $(wildcard $(srcdir)/upgrade-*enter.xsl) \ - $(wildcard $(srcdir)/upgrade-*leave.xsl) --MON_files = $(srcdir)/crm_mon.rng -+MON_files = $(srcdir)/crm_mon.rng - - # Sorted lists of all numeric schema versions - API_numeric_versions = $(call numeric_versions,${API_files}) - CIB_numeric_versions = $(call numeric_versions,${CIB_files}) -+MON_numeric_versions = $(call numeric_versions,$(wildcard $(srcdir)/api/crm_mon*.rng)) - - # The highest numeric schema version - API_max ?= $(lastword $(API_numeric_versions)) - CIB_max ?= $(lastword $(CIB_numeric_versions)) -+MON_max ?= $(lastword $(MON_numeric_versions)) - - # Sorted lists of all schema versions (including "next") - API_versions = next $(API_numeric_versions) -@@ -100,11 +102,12 @@ CIB_versions = next $(CIB_numeric_versions) - # Build tree locations of static schema files and transforms (for VPATH builds) - API_build_copies = $(foreach f,$(API_abs_files),$(subst $(abs_srcdir),$(abs_builddir),$(f))) - CIB_build_copies = $(foreach f,$(CIB_abs_files) $(CIB_abs_xsl),$(subst $(abs_srcdir),$(abs_builddir),$(f))) --MON_build_copies = $(foreach f,$(MON_abs_files),$(subst $(abs_srcdir),$(abs_builddir),$(f))) -+MON_build_copies = $(foreach f,$(MON_abs_files),$(subst $(abs_srcdir),$(abs_builddir),$(f))) - - # Dynamically generated schema files - API_generated = api/api-result.rng $(foreach base,$(API_versions),api/api-result-$(base).rng) - CIB_generated = pacemaker.rng $(foreach base,$(CIB_versions),pacemaker-$(base).rng) versions.rng -+MON_generated = crm_mon.rng - - CIB_version_pairs = $(call version_pairs,${CIB_numeric_versions}) - CIB_version_pairs_cnt = $(words 
${CIB_version_pairs}) -@@ -112,10 +115,10 @@ CIB_version_pairs_last = $(call version_pairs_last,${CIB_version_pairs_cnt},${C - - dist_API_DATA = $(API_files) - dist_CIB_DATA = $(CIB_files) $(CIB_xsl) --dist_MON_DATA = $(MON_files) - - nodist_API_DATA = $(API_generated) - nodist_CIB_DATA = $(CIB_generated) -+nodist_MON_DATA = $(MON_generated) - - EXTRA_DIST = Readme.md \ - best-match.sh \ -@@ -162,6 +165,21 @@ api/api-result-%.rng: $(API_build_copies) best-match.sh Makefile.am - $(AM_V_at)echo ' ' >> $@ - $(AM_V_SCHEMA)echo '' >> $@ - -+crm_mon.rng: api/crm_mon-$(MON_max).rng -+ $(AM_V_at)echo '' > $@ -+ $(AM_V_at)echo '> $@ -+ $(AM_V_at)echo ' datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_at)echo ' ' >> $@ -+ $(AM_V_SCHEMA)echo '' >> $@ -+ - # Dynamically generated top-level CIB schema - pacemaker.rng: pacemaker-$(CIB_max).rng - $(AM_V_SCHEMA)cp $(top_builddir)/xml/$< $@ -@@ -256,7 +274,7 @@ fulldiff: best-match.sh - @echo "# Comparing all changes across all the subsequent increments" - $(call version_diff,${CIB_version_pairs}) - --CLEANFILES = $(API_generated) $(CIB_generated) -+CLEANFILES = $(API_generated) $(CIB_generated) $(MON_generated) - - clean-local: - if [ "x$(srcdir)" != "x$(builddir)" ]; then \ -diff --git a/xml/crm_mon.rng b/xml/crm_mon.rng -deleted file mode 100644 -index be87fba..0000000 ---- a/xml/crm_mon.rng -+++ /dev/null -@@ -1,16 +0,0 @@ -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --- -1.8.3.1 - - -From 0cbc5b0a66ac0bf206ff45f36206253a60620e07 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 Mar 2021 10:53:17 -0500 -Subject: [PATCH 2/3] Med: Copy crm_mon.rng and crm_resource.rng in preparation - for updates. 
- -See: rhbz#1931332 ---- - xml/api/crm_mon-2.7.rng | 311 +++++++++++++++++++++++++++++++++++++++++++ - xml/api/crm_resource-2.7.rng | 238 +++++++++++++++++++++++++++++++++ - 2 files changed, 549 insertions(+) - create mode 100644 xml/api/crm_mon-2.7.rng - create mode 100644 xml/api/crm_resource-2.7.rng - -diff --git a/xml/api/crm_mon-2.7.rng b/xml/api/crm_mon-2.7.rng -new file mode 100644 -index 0000000..88973a4 ---- /dev/null -+++ b/xml/api/crm_mon-2.7.rng -@@ -0,0 +1,311 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ unknown -+ member -+ remote -+ ping -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ granted -+ revoked -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/xml/api/crm_resource-2.7.rng b/xml/api/crm_resource-2.7.rng -new file mode 100644 -index 0000000..b49e24c ---- /dev/null -+++ b/xml/api/crm_resource-2.7.rng -@@ -0,0 +1,238 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ promoted -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ ocf -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ true -+ false -+ -+ -+ -+ true -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ Stopped -+ Started -+ Master -+ Slave -+ -+ -+ --- -1.8.3.1 - - -From 9b6ee6eb5aa1008beebae9d9f6c3889c81c3bbb6 Mon Sep 17 00:00:00 2001 -From: Chris Lumens -Date: Tue, 2 Mar 2021 10:58:15 -0500 -Subject: [PATCH 3/3] Med: Change the schema type of 'expected' and 'call' to - integer. - -Regression in 2.0.3. 
- -See: rhbz#1931332 ---- - xml/api/crm_mon-2.7.rng | 4 ++-- - xml/api/crm_resource-2.7.rng | 2 +- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/xml/api/crm_mon-2.7.rng b/xml/api/crm_mon-2.7.rng -index 88973a4..8e6792b 100644 ---- a/xml/api/crm_mon-2.7.rng -+++ b/xml/api/crm_mon-2.7.rng -@@ -198,7 +198,7 @@ - - - -- -+ - - - -@@ -269,7 +269,7 @@ - - - -- -+ - - - -diff --git a/xml/api/crm_resource-2.7.rng b/xml/api/crm_resource-2.7.rng -index b49e24c..8e386db 100644 ---- a/xml/api/crm_resource-2.7.rng -+++ b/xml/api/crm_resource-2.7.rng -@@ -217,7 +217,7 @@ - - - -- -+ - - - --- -1.8.3.1 - diff --git a/SOURCES/040-crm_mon-shutdown.patch b/SOURCES/040-crm_mon-shutdown.patch deleted file mode 100644 index 78a20f1..0000000 --- a/SOURCES/040-crm_mon-shutdown.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 49ebe4ce934b7bcc81b77a61e7ba3cf76f4ce911 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Mon, 12 Apr 2021 15:06:09 +0200 -Subject: [PATCH] Fix: crm_mon: try to connect CIB while pacemakerd shutting - down - -actually while resources are evacuated from the node. But atm -there is no clean and easy way to tell when this is done and -pacemakerd is just shutting down leftover daemons. So try to -connect anyway. ---- - tools/crm_mon.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index f77268a..8b6e174 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1014,6 +1014,8 @@ pacemakerd_status(void) - break; - case pcmk_pacemakerd_state_shutting_down: - print_as(output_format ,"Pacemaker daemons shutting down ...\n"); -+ /* try our luck maybe CIB is still accessible */ -+ rc = pcmk_rc_ok; - break; - case pcmk_pacemakerd_state_shutdown_complete: - /* assuming pacemakerd doesn't dispatch any pings after entering --- -1.8.3.1 - diff --git a/SOURCES/041-crm_mon-shutdown.patch b/SOURCES/041-crm_mon-shutdown.patch deleted file mode 100644 index 2273370..0000000 --- a/SOURCES/041-crm_mon-shutdown.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 46d6edd5a7eb6079925ed69576c754ab46ab3f1d Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Tue, 13 Apr 2021 09:59:00 +0200 -Subject: [PATCH] Fix: crm_mon: try to connect CIB while pacemakerd shutting - down - -we need to do this in all output modes (xml specifically) ---- - tools/crm_mon.c | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 8b6e174..95adef8 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -1031,6 +1031,10 @@ pacemakerd_status(void) - case pcmk_pacemakerd_state_running: - rc = pcmk_rc_ok; - break; -+ case pcmk_pacemakerd_state_shutting_down: -+ /* try our luck maybe CIB is still accessible */ -+ rc = pcmk_rc_ok; -+ break; - default: - break; - } --- -1.8.3.1 - diff --git a/SOURCES/042-unfencing-loop.patch b/SOURCES/042-unfencing-loop.patch deleted file mode 100644 index 2bae706..0000000 --- a/SOURCES/042-unfencing-loop.patch +++ /dev/null @@ -1,733 +0,0 @@ -From 6dcd6b51d7d3993bc483588d6ed75077518ed600 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Jun 2021 16:30:55 -0500 -Subject: [PATCH 01/11] Low: controller: check whether unfenced node was remote - node - -... so the controller can indicate the node is remote (if known at that point, -which is not guaranteed) when setting unfencing-related node attributes. 
---- - daemons/controld/controld_fencing.c | 21 ++++++++++++++++++--- - 1 file changed, 18 insertions(+), 3 deletions(-) - -diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c -index 23dff28..0fba661 100644 ---- a/daemons/controld/controld_fencing.c -+++ b/daemons/controld/controld_fencing.c -@@ -757,15 +757,30 @@ tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data) - if (pcmk__str_eq("on", op, pcmk__str_casei)) { - const char *value = NULL; - char *now = crm_ttoa(time(NULL)); -+ gboolean is_remote_node = FALSE; -+ -+ /* This check is not 100% reliable, since this node is not -+ * guaranteed to have the remote node cached. However, it -+ * doesn't have to be reliable, since the attribute manager can -+ * learn a node's "remoteness" by other means sooner or later. -+ * This allows it to learn more quickly if this node does have -+ * the information. -+ */ -+ if (g_hash_table_lookup(crm_remote_peer_cache, uuid) != NULL) { -+ is_remote_node = TRUE; -+ } - -- update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, FALSE); -+ update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, -+ is_remote_node); - free(now); - - value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_ALL); -- update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, FALSE); -+ update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, -+ is_remote_node); - - value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_SECURE); -- update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, FALSE); -+ update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, -+ is_remote_node); - - } else if (action->sent_update == FALSE) { - send_stonith_update(action, target, uuid); --- -1.8.3.1 - - -From 3ef6d9403f68ab8559c45cc99f5a8da05ca6420b Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 7 Jun 2021 10:50:36 -0500 -Subject: [PATCH 02/11] Refactor: pacemaker-attrd: functionize adding remote - node to cache - -... for future reuse ---- - daemons/attrd/attrd_commands.c | 34 +++++++++++++++++++++++----------- - 1 file changed, 23 insertions(+), 11 deletions(-) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index 731c243..93a165b 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -102,6 +102,28 @@ free_attribute(gpointer data) - } - } - -+/*! -+ * \internal -+ * \brief Ensure a Pacemaker Remote node is in the correct peer cache -+ * -+ * \param[in] -+ */ -+static void -+cache_remote_node(const char *node_name) -+{ -+ /* If we previously assumed this node was an unseen cluster node, -+ * remove its entry from the cluster peer cache. -+ */ -+ crm_node_t *dup = crm_find_peer(0, node_name); -+ -+ if (dup && (dup->uuid == NULL)) { -+ reap_crm_member(0, node_name); -+ } -+ -+ // Ensure node is in the remote peer cache -+ CRM_ASSERT(crm_remote_peer_get(node_name) != NULL); -+} -+ - static xmlNode * - build_attribute_xml( - xmlNode *parent, const char *name, const char *set, const char *uuid, unsigned int timeout_ms, const char *user, -@@ -709,17 +731,7 @@ attrd_lookup_or_create_value(GHashTable *values, const char *host, xmlNode *xml) - - crm_element_value_int(xml, PCMK__XA_ATTR_IS_REMOTE, &is_remote); - if (is_remote) { -- /* If we previously assumed this node was an unseen cluster node, -- * remove its entry from the cluster peer cache. 
-- */ -- crm_node_t *dup = crm_find_peer(0, host); -- -- if (dup && (dup->uuid == NULL)) { -- reap_crm_member(0, host); -- } -- -- /* Ensure this host is in the remote peer cache */ -- CRM_ASSERT(crm_remote_peer_get(host) != NULL); -+ cache_remote_node(host); - } - - if (v == NULL) { --- -1.8.3.1 - - -From 6fac2c71bc2c56870ac828d7cd7b7c799279c47e Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 7 Jun 2021 10:39:34 -0500 -Subject: [PATCH 03/11] Refactor: pacemaker-attrd: don't try to remove votes - for remote nodes - -Remote nodes never vote. - -This has no effect in practice since the removal would simply do nothing, -but we might as well not waste time trying. ---- - daemons/attrd/attrd_commands.c | 11 ++++++----- - 1 file changed, 6 insertions(+), 5 deletions(-) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index 93a165b..dbe777e 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -976,7 +976,8 @@ attrd_election_cb(gpointer user_data) - void - attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) - { -- bool remove_voter = FALSE; -+ bool gone = false; -+ bool is_remote = pcmk_is_set(peer->flags, crm_remote_node); - - switch (kind) { - case crm_status_uname: -@@ -984,7 +985,7 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da - - case crm_status_processes: - if (!pcmk_is_set(peer->processes, crm_get_cluster_proc())) { -- remove_voter = TRUE; -+ gone = true; - } - break; - -@@ -1000,13 +1001,13 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da - } else { - // Remove all attribute values associated with lost nodes - attrd_peer_remove(peer->uname, FALSE, "loss"); -- remove_voter = TRUE; -+ gone = true; - } - break; - } - -- // In case an election is in progress, remove any vote by the node -- if (remove_voter) { -+ // Remove votes from cluster nodes that leave, in case election in progress -+ if (gone && !is_remote) { - attrd_remove_voter(peer); - } - } --- -1.8.3.1 - - -From 54089fc663d6aaf10ca164c6c94b3b17237788de Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 7 Jun 2021 10:40:06 -0500 -Subject: [PATCH 04/11] Low: pacemaker-attrd: check for remote nodes in peer - update callback - -If a remote node was started before the local cluster node joined the cluster, -the cluster node will assume its node attributes are for a cluster node until -it learns otherwise. Check for remoteness in the peer update callback, to have -another way we can learn it. 
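The two attrd commit messages above boil down to one branching rule for peer changes: election votes are only removed for cluster nodes that leave (remote nodes never vote), while remote nodes that appear are added to the remote-node cache. A condensed standalone sketch of that rule; remove_vote, cache_remote_node and handle_peer_change are illustrative stand-ins rather than the real attrd functions.

/* Illustrative sketch only -- not part of the removed patch. */
#include <stdbool.h>
#include <stdio.h>

static void
remove_vote(const char *node)
{
    printf("removing election vote of %s\n", node);
}

static void
cache_remote_node(const char *node)
{
    printf("caching %s as a Pacemaker Remote node\n", node);
}

static void
handle_peer_change(const char *node, bool gone, bool is_remote)
{
    if (gone && !is_remote) {
        remove_vote(node);          /* only cluster nodes ever voted */
    } else if (!gone && is_remote) {
        cache_remote_node(node);    /* learn "remoteness" as early as possible */
    }
}

int
main(void)
{
    handle_peer_change("node-a", true, false);     /* cluster node left */
    handle_peer_change("remote-1", false, true);   /* remote node came up */
    return 0;
}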
---- - daemons/attrd/attrd_commands.c | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index dbe777e..5f6a754 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -1009,6 +1009,10 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da - // Remove votes from cluster nodes that leave, in case election in progress - if (gone && !is_remote) { - attrd_remove_voter(peer); -+ -+ // Ensure remote nodes that come up are in the remote node cache -+ } else if (!gone && is_remote) { -+ cache_remote_node(peer->uname); - } - } - --- -1.8.3.1 - - -From 8c048df0312d0d9c857d87b570a352429a710928 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 7 Jun 2021 11:29:12 -0500 -Subject: [PATCH 05/11] Log: pacemaker-attrd: log peer status changes - ---- - daemons/attrd/attrd_commands.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index 5f6a754..d6d179b 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -972,6 +972,7 @@ attrd_election_cb(gpointer user_data) - return FALSE; - } - -+#define state_text(state) ((state)? (const char *)(state) : "in unknown state") - - void - attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) -@@ -981,15 +982,23 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da - - switch (kind) { - case crm_status_uname: -+ crm_debug("%s node %s is now %s", -+ (is_remote? "Remote" : "Cluster"), -+ peer->uname, state_text(peer->state)); - break; - - case crm_status_processes: - if (!pcmk_is_set(peer->processes, crm_get_cluster_proc())) { - gone = true; - } -+ crm_debug("Node %s is %s a peer", -+ peer->uname, (gone? "no longer" : "now")); - break; - - case crm_status_nstate: -+ crm_debug("%s node %s is now %s (was %s)", -+ (is_remote? "Remote" : "Cluster"), -+ peer->uname, state_text(peer->state), state_text(data)); - if (pcmk__str_eq(peer->state, CRM_NODE_MEMBER, pcmk__str_casei)) { - /* If we're the writer, send new peers a list of all attributes - * (unless it's a remote node, which doesn't run its own attrd) --- -1.8.3.1 - - -From 1dcc8dee4990cf0dbdec0e14db6d9a3ad67a41d5 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 7 Jun 2021 11:13:53 -0500 -Subject: [PATCH 06/11] Low: pacemaker-attrd: ensure node ID is only set for - attributes when known - -In most cases, attribute updates contained the node ID, and the node ID was -used by other code, only if known (i.e. positive). However a couple places did -not check this, so add that. - -I am unsure whether the missing check caused problems in practice, but there -appears to be the possibility that a remote node would wrongly be added to the -cluster node cache. 
---- - daemons/attrd/attrd_commands.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index d6d179b..b3f441c 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -136,7 +136,9 @@ build_attribute_xml( - crm_xml_add(xml, PCMK__XA_ATTR_UUID, uuid); - crm_xml_add(xml, PCMK__XA_ATTR_USER, user); - crm_xml_add(xml, PCMK__XA_ATTR_NODE_NAME, peer); -- crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, peerid); -+ if (peerid > 0) { -+ crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, peerid); -+ } - crm_xml_add(xml, PCMK__XA_ATTR_VALUE, value); - crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, timeout_ms/1000); - crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, is_private); -@@ -937,7 +939,7 @@ attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter) - /* If this is a cluster node whose node ID we are learning, remember it */ - if ((v->nodeid == 0) && (v->is_remote == FALSE) - && (crm_element_value_int(xml, PCMK__XA_ATTR_NODE_ID, -- (int*)&v->nodeid) == 0)) { -+ (int*)&v->nodeid) == 0) && (v->nodeid > 0)) { - - crm_node_t *known_peer = crm_get_peer(v->nodeid, host); - --- -1.8.3.1 - - -From 8d12490e88b558d01db37a38f7d35175c6d2d69a Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 10 Jun 2021 17:25:57 -0500 -Subject: [PATCH 07/11] Refactor: pacemaker-attrd: functionize processing a - sync response - -... for code isolation, and because we need to add more to it ---- - daemons/attrd/attrd_commands.c | 59 ++++++++++++++++++++++++++++-------------- - 1 file changed, 39 insertions(+), 20 deletions(-) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index b3f441c..d02d3e6 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -572,6 +572,43 @@ attrd_peer_clear_failure(crm_node_t *peer, xmlNode *xml) - } - - /*! -+ * \internal -+ * \brief Load attributes from a peer sync response -+ * -+ * \param[in] peer Peer that sent clear request -+ * \param[in] peer_won Whether peer is the attribute writer -+ * \param[in] xml Request XML -+ */ -+static void -+process_peer_sync_response(crm_node_t *peer, bool peer_won, xmlNode *xml) -+{ -+ crm_info("Processing " PCMK__ATTRD_CMD_SYNC_RESPONSE " from %s", -+ peer->uname); -+ -+ if (peer_won) { -+ /* Initialize the "seen" flag for all attributes to cleared, so we can -+ * detect attributes that local node has but the writer doesn't. -+ */ -+ clear_attribute_value_seen(); -+ } -+ -+ // Process each attribute update in the sync response -+ for (xmlNode *child = pcmk__xml_first_child(xml); child != NULL; -+ child = pcmk__xml_next(child)) { -+ attrd_peer_update(peer, child, -+ crm_element_value(child, PCMK__XA_ATTR_NODE_NAME), -+ TRUE); -+ } -+ -+ if (peer_won) { -+ /* If any attributes are still not marked as seen, the writer doesn't -+ * know about them, so send all peers an update with them. -+ */ -+ attrd_current_only_attribute_update(peer, xml); -+ } -+} -+ -+/*! 
- \internal - \brief Broadcast private attribute for local node with protocol version - */ -@@ -596,7 +633,7 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml) - const char *op = crm_element_value(xml, PCMK__XA_TASK); - const char *election_op = crm_element_value(xml, F_CRM_TASK); - const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); -- bool peer_won = FALSE; -+ bool peer_won = false; - - if (election_op) { - attrd_handle_election_op(peer, xml); -@@ -631,25 +668,7 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml) - - } else if (pcmk__str_eq(op, PCMK__ATTRD_CMD_SYNC_RESPONSE, pcmk__str_casei) - && !pcmk__str_eq(peer->uname, attrd_cluster->uname, pcmk__str_casei)) { -- xmlNode *child = NULL; -- -- crm_info("Processing %s from %s", op, peer->uname); -- -- /* Clear the seen flag for attribute processing held only in the own node. */ -- if (peer_won) { -- clear_attribute_value_seen(); -- } -- -- for (child = pcmk__xml_first_child(xml); child != NULL; -- child = pcmk__xml_next(child)) { -- host = crm_element_value(child, PCMK__XA_ATTR_NODE_NAME); -- attrd_peer_update(peer, child, host, TRUE); -- } -- -- if (peer_won) { -- /* Synchronize if there is an attribute held only by own node that Writer does not have. */ -- attrd_current_only_attribute_update(peer, xml); -- } -+ process_peer_sync_response(peer, peer_won, xml); - } - } - --- -1.8.3.1 - - -From a890a0e5bbbcabf907f51ed0460868035f72464d Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 11 Jun 2021 14:40:39 -0500 -Subject: [PATCH 08/11] Refactor: pacemaker-attrd: functionize broadcasting - local override - -... for code isolation ---- - daemons/attrd/attrd_commands.c | 42 +++++++++++++++++++++++++++++------------- - 1 file changed, 29 insertions(+), 13 deletions(-) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index d02d3e6..4783427 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -804,6 +804,34 @@ attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml) - free_xml(sync); - } - -+/*! -+ * \internal -+ * \brief Override an attribute sync with a local value -+ * -+ * Broadcast the local node's value for an attribute that's different from the -+ * value provided in a peer's attribute synchronization response. This ensures a -+ * node's values for itself take precedence and all peers are kept in sync. 
-+ * -+ * \param[in] a Attribute entry to override -+ * -+ * \return Local instance of attribute value -+ */ -+static attribute_value_t * -+broadcast_local_value(attribute_t *a) -+{ -+ attribute_value_t *v = g_hash_table_lookup(a->values, attrd_cluster->uname); -+ xmlNode *sync = create_xml_node(NULL, __func__); -+ -+ crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); -+ build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, -+ a->user, a->is_private, v->nodename, v->nodeid, -+ v->current, FALSE); -+ attrd_xml_add_writer(sync); -+ send_attrd_message(NULL, sync); -+ free_xml(sync); -+ return v; -+} -+ - void - attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter) - { -@@ -899,21 +927,9 @@ attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter) - if (filter && !pcmk__str_eq(v->current, value, pcmk__str_casei) - && pcmk__str_eq(host, attrd_cluster->uname, pcmk__str_casei)) { - -- xmlNode *sync = create_xml_node(NULL, __func__); -- - crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s", - attr, host, v->current, value, peer->uname); -- -- crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); -- v = g_hash_table_lookup(a->values, host); -- build_attribute_xml(sync, attr, a->set, a->uuid, a->timeout_ms, a->user, -- a->is_private, v->nodename, v->nodeid, v->current, FALSE); -- -- attrd_xml_add_writer(sync); -- -- /* Broadcast in case any other nodes had the inconsistent value */ -- send_attrd_message(NULL, sync); -- free_xml(sync); -+ v = broadcast_local_value(a); - - } else if (!pcmk__str_eq(v->current, value, pcmk__str_casei)) { - crm_notice("Setting %s[%s]: %s -> %s " CRM_XS " from %s", --- -1.8.3.1 - - -From f6f65e3dab070f1bbdf6d1383f4d6173a8840bc9 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 11 Jun 2021 14:50:29 -0500 -Subject: [PATCH 09/11] Log: pacemaker-attrd: improve messages when - broadcasting local-only values - -The traces aren't necessary since build_attribute_xml() already logs the same -info at debug. Also, rename function for clarity, and make static. ---- - daemons/attrd/attrd_commands.c | 35 ++++++++++++++++------------------- - 1 file changed, 16 insertions(+), 19 deletions(-) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index 4783427..356defb 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -51,11 +51,12 @@ GHashTable *attributes = NULL; - - void write_attribute(attribute_t *a, bool ignore_delay); - void write_or_elect_attribute(attribute_t *a); --void attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml); - void attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter); - void attrd_peer_sync(crm_node_t *peer, xmlNode *xml); - void attrd_peer_remove(const char *host, gboolean uncache, const char *source); - -+static void broadcast_unseen_local_values(crm_node_t *peer, xmlNode *xml); -+ - static gboolean - send_attrd_message(crm_node_t * node, xmlNode * data) - { -@@ -604,7 +605,7 @@ process_peer_sync_response(crm_node_t *peer, bool peer_won, xmlNode *xml) - /* If any attributes are still not marked as seen, the writer doesn't - * know about them, so send all peers an update with them. 
- */ -- attrd_current_only_attribute_update(peer, xml); -+ broadcast_unseen_local_values(peer, xml); - } - } - -@@ -768,40 +769,36 @@ attrd_lookup_or_create_value(GHashTable *values, const char *host, xmlNode *xml) - return(v); - } - --void --attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml) -+void -+broadcast_unseen_local_values(crm_node_t *peer, xmlNode *xml) - { - GHashTableIter aIter; - GHashTableIter vIter; -- attribute_t *a; -+ attribute_t *a = NULL; - attribute_value_t *v = NULL; -- xmlNode *sync = create_xml_node(NULL, __func__); -- gboolean build = FALSE; -- -- crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); -+ xmlNode *sync = NULL; - - g_hash_table_iter_init(&aIter, attributes); - while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { - g_hash_table_iter_init(&vIter, a->values); - while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { -- if (pcmk__str_eq(v->nodename, attrd_cluster->uname, pcmk__str_casei) && v->seen == FALSE) { -- crm_trace("Syncing %s[%s] = %s to everyone.(from local only attributes)", a->id, v->nodename, v->current); -- -- build = TRUE; -+ if (!(v->seen) && pcmk__str_eq(v->nodename, attrd_cluster->uname, -+ pcmk__str_casei)) { -+ if (sync == NULL) { -+ sync = create_xml_node(NULL, __func__); -+ crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); -+ } - build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, - v->nodename, v->nodeid, v->current, (a->timeout_ms && a->timer ? TRUE : FALSE)); -- } else { -- crm_trace("Local attribute(%s[%s] = %s) was ignore.(another host) : [%s]", a->id, v->nodename, v->current, attrd_cluster->uname); -- continue; - } - } - } - -- if (build) { -- crm_debug("Syncing values to everyone.(from local only attributes)"); -+ if (sync != NULL) { -+ crm_debug("Broadcasting local-only values"); - send_attrd_message(NULL, sync); -+ free_xml(sync); - } -- free_xml(sync); - } - - /*! --- -1.8.3.1 - - -From ab90ffb785ea018556f216b8f540f8c3429a3947 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 11 Jun 2021 15:04:20 -0500 -Subject: [PATCH 10/11] Refactor: pacemaker-attrd: simplify attribute XML - creation function - -... and rename for clarity ---- - daemons/attrd/attrd_commands.c | 48 ++++++++++++++++++++++++------------------ - 1 file changed, 27 insertions(+), 21 deletions(-) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index 356defb..5b32a77 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -125,25 +125,35 @@ cache_remote_node(const char *node_name) - CRM_ASSERT(crm_remote_peer_get(node_name) != NULL); - } - -+/*! 
-+ * \internal -+ * \brief Create an XML representation of an attribute for use in peer messages -+ * -+ * \param[in] parent Create attribute XML as child element of this element -+ * \param[in] a Attribute to represent -+ * \param[in] v Attribute value to represent -+ * \param[in] force_write If true, value should be written even if unchanged -+ * -+ * \return XML representation of attribute -+ */ - static xmlNode * --build_attribute_xml( -- xmlNode *parent, const char *name, const char *set, const char *uuid, unsigned int timeout_ms, const char *user, -- gboolean is_private, const char *peer, uint32_t peerid, const char *value, gboolean is_force_write) -+add_attribute_value_xml(xmlNode *parent, attribute_t *a, attribute_value_t *v, -+ bool force_write) - { - xmlNode *xml = create_xml_node(parent, __func__); - -- crm_xml_add(xml, PCMK__XA_ATTR_NAME, name); -- crm_xml_add(xml, PCMK__XA_ATTR_SET, set); -- crm_xml_add(xml, PCMK__XA_ATTR_UUID, uuid); -- crm_xml_add(xml, PCMK__XA_ATTR_USER, user); -- crm_xml_add(xml, PCMK__XA_ATTR_NODE_NAME, peer); -- if (peerid > 0) { -- crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, peerid); -+ crm_xml_add(xml, PCMK__XA_ATTR_NAME, a->id); -+ crm_xml_add(xml, PCMK__XA_ATTR_SET, a->set); -+ crm_xml_add(xml, PCMK__XA_ATTR_UUID, a->uuid); -+ crm_xml_add(xml, PCMK__XA_ATTR_USER, a->user); -+ crm_xml_add(xml, PCMK__XA_ATTR_NODE_NAME, v->nodename); -+ if (v->nodeid > 0) { -+ crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, v->nodeid); - } -- crm_xml_add(xml, PCMK__XA_ATTR_VALUE, value); -- crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, timeout_ms/1000); -- crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, is_private); -- crm_xml_add_int(xml, PCMK__XA_ATTR_FORCE, is_force_write); -+ crm_xml_add(xml, PCMK__XA_ATTR_VALUE, v->current); -+ crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, a->timeout_ms / 1000); -+ crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, a->is_private); -+ crm_xml_add_int(xml, PCMK__XA_ATTR_FORCE, force_write); - - return xml; - } -@@ -695,8 +705,7 @@ attrd_peer_sync(crm_node_t *peer, xmlNode *xml) - g_hash_table_iter_init(&vIter, a->values); - while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { - crm_debug("Syncing %s[%s] = %s to %s", a->id, v->nodename, v->current, peer?peer->uname:"everyone"); -- build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, -- v->nodename, v->nodeid, v->current, FALSE); -+ add_attribute_value_xml(sync, a, v, false); - } - } - -@@ -788,8 +797,7 @@ broadcast_unseen_local_values(crm_node_t *peer, xmlNode *xml) - sync = create_xml_node(NULL, __func__); - crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); - } -- build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, -- v->nodename, v->nodeid, v->current, (a->timeout_ms && a->timer ? 
TRUE : FALSE)); -+ add_attribute_value_xml(sync, a, v, a->timeout_ms && a->timer); - } - } - } -@@ -820,9 +828,7 @@ broadcast_local_value(attribute_t *a) - xmlNode *sync = create_xml_node(NULL, __func__); - - crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); -- build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, -- a->user, a->is_private, v->nodename, v->nodeid, -- v->current, FALSE); -+ add_attribute_value_xml(sync, a, v, false); - attrd_xml_add_writer(sync); - send_attrd_message(NULL, sync); - free_xml(sync); --- -1.8.3.1 - - -From 540d74130c5c8d9c626d6c50475e4dc4f64234e7 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Fri, 4 Jun 2021 16:34:26 -0500 -Subject: [PATCH 11/11] Fix: pacemaker-attrd: avoid repeated unfencing of - remote nodes - -The attribute manager can't record a remote node's attributes to the CIB until -it knows the node is remote. Normally, this is learned when the remote node -starts, because the controller clears the CRM_OP_PROBED attribute and indicates -that it is for a remote node. - -However, if a cluster node is down when a remote node starts, and later comes -up, it learns the remote node's existing attributes as part of the attribute -sync. Previously, this did not include whether each value is for a cluster or -remote node, so the newly joined attribute manager couldn't write out remote -nodes' attributes until it learned that via some other event -- which might not -happen before the node becomes DC, in which case its scheduler will not see any -unfencing-related node attributes and may wrongly schedule unfencing. - -The sync response handling already calls attrd_lookup_or_create_value(), which -checks PCMK__XA_ATTR_IS_REMOTE, so all we need to do is add that to the sync -response. ---- - daemons/attrd/attrd_commands.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c -index 5b32a77..0142383 100644 ---- a/daemons/attrd/attrd_commands.c -+++ b/daemons/attrd/attrd_commands.c -@@ -43,8 +43,9 @@ - * 1 1.1.15 PCMK__ATTRD_CMD_UPDATE_BOTH, - * PCMK__ATTRD_CMD_UPDATE_DELAY - * 2 1.1.17 PCMK__ATTRD_CMD_CLEAR_FAILURE -+ * 3 2.1.1 PCMK__ATTRD_CMD_SYNC_RESPONSE indicates remote nodes - */ --#define ATTRD_PROTOCOL_VERSION "2" -+#define ATTRD_PROTOCOL_VERSION "3" - - int last_cib_op_done = 0; - GHashTable *attributes = NULL; -@@ -150,6 +151,9 @@ add_attribute_value_xml(xmlNode *parent, attribute_t *a, attribute_value_t *v, - if (v->nodeid > 0) { - crm_xml_add_int(xml, PCMK__XA_ATTR_NODE_ID, v->nodeid); - } -+ if (v->is_remote != 0) { -+ crm_xml_add_int(xml, PCMK__XA_ATTR_IS_REMOTE, 1); -+ } - crm_xml_add(xml, PCMK__XA_ATTR_VALUE, v->current); - crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, a->timeout_ms / 1000); - crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, a->is_private); --- -1.8.3.1 - diff --git a/SOURCES/043-retry-metadata.patch b/SOURCES/043-retry-metadata.patch deleted file mode 100644 index f66817f..0000000 --- a/SOURCES/043-retry-metadata.patch +++ /dev/null @@ -1,176 +0,0 @@ -From 5c2d8665773254ff8b9676ac359a1210e34640e3 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Mon, 1 Mar 2021 14:02:52 +0100 -Subject: [PATCH] API: add pcmk__mainloop_timer_get_period() to internal API - ---- - include/crm/common/internal.h | 1 + - lib/common/mainloop.c | 34 +++++++++++++++++++++++++--------- - 2 files changed, 26 insertions(+), 9 deletions(-) - -diff --git a/include/crm/common/internal.h b/include/crm/common/internal.h -index f69abe8..63bfd2c 
100644 ---- a/include/crm/common/internal.h -+++ b/include/crm/common/internal.h -@@ -96,6 +96,7 @@ pcmk__open_devnull(int flags) - int pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata, - struct ipc_client_callbacks *callbacks, - mainloop_io_t **source); -+guint pcmk__mainloop_timer_get_period(mainloop_timer_t *timer); - - - /* internal messaging utilities (from messages.c) */ -diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c -index 2f00e31..75f24e2 100644 ---- a/lib/common/mainloop.c -+++ b/lib/common/mainloop.c -@@ -49,6 +49,15 @@ struct trigger_s { - - }; - -+struct mainloop_timer_s { -+ guint id; -+ guint period_ms; -+ bool repeat; -+ char *name; -+ GSourceFunc cb; -+ void *userdata; -+}; -+ - static gboolean - crm_trigger_prepare(GSource * source, gint * timeout) - { -@@ -875,6 +884,22 @@ pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata, - return pcmk_rc_ok; - } - -+/*! -+ * \brief Get period for mainloop timer -+ * -+ * \param[in] timer Timer -+ * -+ * \return Period in ms -+ */ -+guint -+pcmk__mainloop_timer_get_period(mainloop_timer_t *timer) -+{ -+ if (timer) { -+ return timer->period_ms; -+ } -+ return 0; -+} -+ - mainloop_io_t * - mainloop_add_ipc_client(const char *name, int priority, size_t max_size, - void *userdata, struct ipc_client_callbacks *callbacks) -@@ -1252,15 +1277,6 @@ mainloop_child_add(pid_t pid, int timeout, const char *desc, void *privatedata, - mainloop_child_add_with_flags(pid, timeout, desc, privatedata, 0, callback); - } - --struct mainloop_timer_s { -- guint id; -- guint period_ms; -- bool repeat; -- char *name; -- GSourceFunc cb; -- void *userdata; --}; -- - static gboolean - mainloop_timer_cb(gpointer user_data) - { --- -1.8.3.1 - -From 1d33712201e42f0e8ee108999cd4cb8fa0eeca95 Mon Sep 17 00:00:00 2001 -From: Oyvind Albrigtsen -Date: Fri, 19 Feb 2021 12:34:04 +0100 -Subject: [PATCH] Feature: fenced: retry getting metadata until we get it - ---- - daemons/fenced/fenced_commands.c | 35 +++++++++++++++++++++++++++++++++++ - daemons/fenced/pacemaker-fenced.h | 1 + - 2 files changed, 36 insertions(+) - -diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c -index 41901e5..65b41c5 100644 ---- a/daemons/fenced/fenced_commands.c -+++ b/daemons/fenced/fenced_commands.c -@@ -69,6 +69,9 @@ static void stonith_send_reply(xmlNode * reply, int call_options, const char *re - static void search_devices_record_result(struct device_search_s *search, const char *device, - gboolean can_fence); - -+static xmlNode * get_agent_metadata(const char *agent); -+static void read_action_metadata(stonith_device_t *device); -+ - typedef struct async_command_s { - - int id; -@@ -323,6 +326,25 @@ fork_cb(GPid pid, gpointer user_data) - cmd->activating_on = NULL; - } - -+static int -+get_agent_metadata_cb(gpointer data) { -+ stonith_device_t *device = data; -+ -+ device->agent_metadata = get_agent_metadata(device->agent); -+ if (device->agent_metadata) { -+ read_action_metadata(device); -+ stonith__device_parameter_flags(&(device->flags), device->id, -+ device->agent_metadata); -+ return G_SOURCE_REMOVE; -+ } else { -+ guint period_ms = pcmk__mainloop_timer_get_period(device->timer); -+ if (period_ms < 160 * 1000) { -+ mainloop_timer_set_period(device->timer, 2 * period_ms); -+ } -+ return G_SOURCE_CONTINUE; -+ } -+} -+ - static gboolean - stonith_device_execute(stonith_device_t * device) - { -@@ -569,6 +591,11 @@ free_device(gpointer data) - - g_list_free_full(device->targets, free); - -+ if (device->timer) { -+ 
mainloop_timer_stop(device->timer); -+ mainloop_timer_del(device->timer); -+ } -+ - mainloop_destroy_trigger(device->work); - - free_xml(device->agent_metadata); -@@ -916,6 +943,14 @@ build_device_from_xml(xmlNode * msg) - read_action_metadata(device); - stonith__device_parameter_flags(&(device->flags), device->id, - device->agent_metadata); -+ } else { -+ if (device->timer == NULL) { -+ device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000, -+ TRUE, get_agent_metadata_cb, device); -+ } -+ if (!mainloop_timer_running(device->timer)) { -+ mainloop_timer_start(device->timer); -+ } - } - - value = g_hash_table_lookup(device->params, "nodeid"); -diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h -index 13cf6dc..e342692 100644 ---- a/daemons/fenced/pacemaker-fenced.h -+++ b/daemons/fenced/pacemaker-fenced.h -@@ -41,6 +41,7 @@ typedef struct stonith_device_s { - GHashTable *params; - GHashTable *aliases; - GList *pending_ops; -+ mainloop_timer_t *timer; - crm_trigger_t *work; - xmlNode *agent_metadata; - --- -1.8.3.1 - diff --git a/SOURCES/044-sbd.patch b/SOURCES/044-sbd.patch deleted file mode 100644 index f4c7358..0000000 --- a/SOURCES/044-sbd.patch +++ /dev/null @@ -1,1633 +0,0 @@ -From 30c04b0f6d717ad27601477eb4b4c47402f46b57 Mon Sep 17 00:00:00 2001 -From: Kazunori INOUE -Date: Fri, 29 Jan 2021 11:28:20 +0900 -Subject: [PATCH] Fix: fencing: remove any devices that are not installed - ---- - daemons/fenced/fenced_commands.c | 2 ++ - daemons/fenced/pacemaker-fenced.c | 37 ++++++++++++++++++++++--------------- - daemons/fenced/pacemaker-fenced.h | 1 + - 3 files changed, 25 insertions(+), 15 deletions(-) - -diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c -index 2729d0d..a4f92cc 100644 ---- a/daemons/fenced/fenced_commands.c -+++ b/daemons/fenced/fenced_commands.c -@@ -1173,6 +1173,8 @@ stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib) - g_hash_table_size(device_list)); - free_device(device); - device = dup; -+ dup = g_hash_table_lookup(device_list, device->id); -+ dup->dirty = FALSE; - - } else { - stonith_device_t *old = g_hash_table_lookup(device_list, device->id); -diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c -index 5390d66..edfd407 100644 ---- a/daemons/fenced/pacemaker-fenced.c -+++ b/daemons/fenced/pacemaker-fenced.c -@@ -583,11 +583,8 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - const char *value = NULL; - const char *rclass = NULL; - pe_node_t *parent = NULL; -- gboolean remove = TRUE; - -- /* If this is a complex resource, check children rather than this resource itself. -- * TODO: Mark each installed device and remove if untouched when this process finishes. -- */ -+ /* If this is a complex resource, check children rather than this resource itself. */ - if(rsc->children) { - GListPtr gIter = NULL; - for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { -@@ -606,10 +603,10 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - return; - } - -- /* If this STONITH resource is disabled, just remove it. */ -+ /* If this STONITH resource is disabled, remove it. 
*/ - if (pe__resource_is_disabled(rsc)) { - crm_info("Device %s has been disabled", rsc->id); -- goto update_done; -+ return; - } - - /* Check whether our node is allowed for this resource (and its parent if in a group) */ -@@ -628,7 +625,7 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - crm_trace("Available: %s = %d", node->details->uname, node->weight); - } - -- goto update_done; -+ return; - - } else if(node->weight < 0 || (parent && parent->weight < 0)) { - /* Our node (or its group) is disallowed by score, so remove the device */ -@@ -637,7 +634,7 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - crm_info("Device %s has been disabled on %s: score=%s", rsc->id, stonith_our_uname, score); - free(score); - -- goto update_done; -+ return; - - } else { - /* Our node is allowed, so update the device information */ -@@ -666,7 +663,6 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - crm_trace(" %s=%s", name, value); - } - -- remove = FALSE; - data = create_device_registration_xml(rsc_name(rsc), st_namespace_any, - agent, params, rsc_provides); - stonith_key_value_freeall(params, 1, 1); -@@ -674,12 +670,6 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - CRM_ASSERT(rc == pcmk_ok); - free_xml(data); - } -- --update_done: -- -- if(remove && g_hash_table_lookup(device_list, rsc_name(rsc))) { -- stonith_device_remove(rsc_name(rsc), TRUE); -- } - } - - /*! -@@ -690,6 +680,8 @@ static void - cib_devices_update(void) - { - GListPtr gIter = NULL; -+ GHashTableIter iter; -+ stonith_device_t *device = NULL; - - crm_info("Updating devices to version %s.%s.%s", - crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN), -@@ -705,9 +697,24 @@ cib_devices_update(void) - cluster_status(fenced_data_set); - pcmk__schedule_actions(fenced_data_set, NULL, NULL); - -+ g_hash_table_iter_init(&iter, device_list); -+ while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { -+ if (device->cib_registered) { -+ device->dirty = TRUE; -+ } -+ } -+ - for (gIter = fenced_data_set->resources; gIter != NULL; gIter = gIter->next) { - cib_device_update(gIter->data, fenced_data_set); - } -+ -+ g_hash_table_iter_init(&iter, device_list); -+ while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { -+ if (device->dirty) { -+ g_hash_table_iter_remove(&iter); -+ } -+ } -+ - fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it - pe_reset_working_set(fenced_data_set); - } -diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h -index ed2817f..13cf6dc 100644 ---- a/daemons/fenced/pacemaker-fenced.h -+++ b/daemons/fenced/pacemaker-fenced.h -@@ -50,6 +50,7 @@ typedef struct stonith_device_s { - - gboolean cib_registered; - gboolean api_registered; -+ gboolean dirty; - } stonith_device_t; - - /* These values are used to index certain arrays by "phase". 
Usually an --- -1.8.3.1 - -From 66a88740105bde4de358f9c1e774ebd5eef3bb68 Mon Sep 17 00:00:00 2001 -From: Kazunori INOUE -Date: Mon, 10 May 2021 12:16:23 +0900 -Subject: [PATCH] Fix: fencing: register/remove the watchdog device - ---- - daemons/fenced/pacemaker-fenced.c | 108 ++++++++++++++++++++++---------------- - 1 file changed, 62 insertions(+), 46 deletions(-) - -diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c -index 2c61423..b05b085 100644 ---- a/daemons/fenced/pacemaker-fenced.c -+++ b/daemons/fenced/pacemaker-fenced.c -@@ -578,6 +578,66 @@ our_node_allowed_for(pe_resource_t *rsc) - return node; - } - -+static void -+watchdog_device_update(xmlNode *cib) -+{ -+ xmlNode *stonith_enabled_xml = NULL; -+ const char *stonith_enabled_s = NULL; -+ long timeout_ms = 0; -+ -+ stonith_enabled_xml = get_xpath_object("//nvpair[@name='stonith-enabled']", -+ cib, LOG_NEVER); -+ if (stonith_enabled_xml) { -+ stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE); -+ } -+ -+ if (stonith_enabled_s == NULL || crm_is_true(stonith_enabled_s)) { -+ xmlNode *stonith_watchdog_xml = NULL; -+ const char *value = NULL; -+ -+ stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", -+ cib, LOG_NEVER); -+ if (stonith_watchdog_xml) { -+ value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE); -+ } -+ if (value) { -+ timeout_ms = crm_get_msec(value); -+ } -+ -+ if (timeout_ms < 0) { -+ timeout_ms = pcmk__auto_watchdog_timeout(); -+ } -+ } -+ -+ if (timeout_ms != stonith_watchdog_timeout_ms) { -+ crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, stonith_watchdog_timeout_ms/1000); -+ stonith_watchdog_timeout_ms = timeout_ms; -+ -+ if (stonith_watchdog_timeout_ms > 0) { -+ int rc; -+ xmlNode *xml; -+ stonith_key_value_t *params = NULL; -+ -+ params = stonith_key_value_add(params, PCMK_STONITH_HOST_LIST, -+ stonith_our_uname); -+ -+ xml = create_device_registration_xml("watchdog", st_namespace_internal, -+ STONITH_WATCHDOG_AGENT, params, -+ NULL); -+ stonith_key_value_freeall(params, 1, 1); -+ rc = stonith_device_register(xml, NULL, FALSE); -+ free_xml(xml); -+ if (rc != pcmk_ok) { -+ crm_crit("Cannot register watchdog pseudo fence agent"); -+ crm_exit(CRM_EX_FATAL); -+ } -+ -+ } else { -+ stonith_device_remove("watchdog", FALSE); -+ } -+ } -+} -+ - /*! 
- * \internal - * \brief If a resource or any of its children are STONITH devices, update their -@@ -1012,7 +1072,6 @@ update_cib_cache_cb(const char *event, xmlNode * msg) - { - int rc = pcmk_ok; - xmlNode *stonith_enabled_xml = NULL; -- xmlNode *stonith_watchdog_xml = NULL; - const char *stonith_enabled_s = NULL; - static gboolean stonith_enabled_saved = TRUE; - -@@ -1076,31 +1135,7 @@ update_cib_cache_cb(const char *event, xmlNode * msg) - stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE); - } - -- if (stonith_enabled_s == NULL || crm_is_true(stonith_enabled_s)) { -- long timeout_ms = 0; -- const char *value = NULL; -- -- stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", -- local_cib, LOG_NEVER); -- if (stonith_watchdog_xml) { -- value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE); -- } -- -- if(value) { -- timeout_ms = crm_get_msec(value); -- } -- if (timeout_ms < 0) { -- timeout_ms = pcmk__auto_watchdog_timeout(); -- } -- -- if(timeout_ms != stonith_watchdog_timeout_ms) { -- crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, stonith_watchdog_timeout_ms/1000); -- stonith_watchdog_timeout_ms = timeout_ms; -- } -- -- } else { -- stonith_watchdog_timeout_ms = 0; -- } -+ watchdog_device_update(local_cib); - - if (stonith_enabled_s && crm_is_true(stonith_enabled_s) == FALSE) { - crm_trace("Ignoring cib updates while stonith is disabled"); -@@ -1130,6 +1165,7 @@ init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *us - crm_peer_caches_refresh(local_cib); - - fencing_topology_init(); -+ watchdog_device_update(local_cib); - cib_devices_update(); - } - -@@ -1535,26 +1571,6 @@ main(int argc, char **argv) - init_device_list(); - init_topology_list(); - -- if(stonith_watchdog_timeout_ms > 0) { -- int rc; -- xmlNode *xml; -- stonith_key_value_t *params = NULL; -- -- params = stonith_key_value_add(params, PCMK_STONITH_HOST_LIST, -- stonith_our_uname); -- -- xml = create_device_registration_xml("watchdog", st_namespace_internal, -- STONITH_WATCHDOG_AGENT, params, -- NULL); -- stonith_key_value_freeall(params, 1, 1); -- rc = stonith_device_register(xml, NULL, FALSE); -- free_xml(xml); -- if (rc != pcmk_ok) { -- crm_crit("Cannot register watchdog pseudo fence agent"); -- crm_exit(CRM_EX_FATAL); -- } -- } -- - pcmk__serve_fenced_ipc(&ipcs, &ipc_callbacks); - - /* Create the mainloop and run it... */ --- -1.8.3.1 - -From b49f49576ef9d801a48ce7a01a78c72e65be7880 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Fri, 30 Jul 2021 18:07:25 +0200 -Subject: [PATCH 1/3] Fix, Refactor: fenced: add return value to - get_agent_metadata - -Used to distinguish between empty metadata per design, -case of failed getting metadata that might succeed on a -retry and fatal failure. -Fixes as well regression that leads to endless retries getting -metadata for #watchdog - not superserious as it happens with -delays in between but still undesirable. 
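The return-value split described above (OK / retry later / give up) is what drives the metadata retry timer introduced in 043-retry-metadata.patch: a transient failure is retried with a doubling delay up to a cap instead of retrying forever. Below is a minimal standalone GLib sketch of that backoff shape; fetch_metadata is a stand-in for the real agent query, and the 10/160 progression mirrors the seconds used in the removed code but is in milliseconds here so the demo exits quickly.

/* Illustrative sketch only -- not part of the removed patch. */
#include <glib.h>

#define START_PERIOD_MS 10
#define MAX_PERIOD_MS   160   /* removed code: 10s doubling up to 160s */

typedef struct {
    guint period_ms;
    GMainLoop *loop;
} retry_ctx_t;

static gboolean
fetch_metadata(void)
{
    static int attempts = 0;

    return (++attempts >= 3);   /* pretend the third attempt succeeds */
}

static gboolean
retry_cb(gpointer data)
{
    retry_ctx_t *ctx = data;

    if (fetch_metadata()) {
        g_print("metadata retrieved\n");
        g_main_loop_quit(ctx->loop);
        return G_SOURCE_REMOVE;
    }
    if (ctx->period_ms < MAX_PERIOD_MS) {
        ctx->period_ms *= 2;                       /* exponential backoff */
    }
    g_print("retrying in %u ms\n", ctx->period_ms);
    g_timeout_add(ctx->period_ms, retry_cb, ctx);  /* re-arm with the new period */
    return G_SOURCE_REMOVE;                        /* drop the old-period source */
}

int
main(void)
{
    retry_ctx_t ctx = { START_PERIOD_MS, g_main_loop_new(NULL, FALSE) };

    g_timeout_add(ctx.period_ms, retry_cb, &ctx);
    g_main_loop_run(ctx.loop);
    g_main_loop_free(ctx.loop);
    return 0;
}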
---- - daemons/fenced/fenced_commands.c | 92 +++++++++++++++++++------------- - 1 file changed, 55 insertions(+), 37 deletions(-) - -diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c -index a778801b1..cd9968f1a 100644 ---- a/daemons/fenced/fenced_commands.c -+++ b/daemons/fenced/fenced_commands.c -@@ -69,7 +69,7 @@ static void stonith_send_reply(xmlNode * reply, int call_options, const char *re - static void search_devices_record_result(struct device_search_s *search, const char *device, - gboolean can_fence); - --static xmlNode * get_agent_metadata(const char *agent); -+static int get_agent_metadata(const char *agent, xmlNode **metadata); - static void read_action_metadata(stonith_device_t *device); - - typedef struct async_command_s { -@@ -323,19 +323,26 @@ fork_cb(GPid pid, gpointer user_data) - static int - get_agent_metadata_cb(gpointer data) { - stonith_device_t *device = data; -+ guint period_ms; - -- device->agent_metadata = get_agent_metadata(device->agent); -- if (device->agent_metadata) { -- read_action_metadata(device); -- stonith__device_parameter_flags(&(device->flags), device->id, -+ switch (get_agent_metadata(device->agent, &device->agent_metadata)) { -+ case pcmk_rc_ok: -+ if (device->agent_metadata) { -+ read_action_metadata(device); -+ stonith__device_parameter_flags(&(device->flags), device->id, - device->agent_metadata); -- return G_SOURCE_REMOVE; -- } else { -- guint period_ms = pcmk__mainloop_timer_get_period(device->timer); -- if (period_ms < 160 * 1000) { -- mainloop_timer_set_period(device->timer, 2 * period_ms); -- } -- return G_SOURCE_CONTINUE; -+ } -+ return G_SOURCE_REMOVE; -+ -+ case EAGAIN: -+ period_ms = pcmk__mainloop_timer_get_period(device->timer); -+ if (period_ms < 160 * 1000) { -+ mainloop_timer_set_period(device->timer, 2 * period_ms); -+ } -+ return G_SOURCE_CONTINUE; -+ -+ default: -+ return G_SOURCE_REMOVE; - } - } - -@@ -700,38 +707,41 @@ init_metadata_cache(void) { - } - } - --static xmlNode * --get_agent_metadata(const char *agent) -+int -+get_agent_metadata(const char *agent, xmlNode ** metadata) - { -- xmlNode *xml = NULL; - char *buffer = NULL; - -+ if (metadata == NULL) { -+ return EINVAL; -+ } -+ *metadata = NULL; -+ if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT, pcmk__str_none)) { -+ return pcmk_rc_ok; -+ } - init_metadata_cache(); - buffer = g_hash_table_lookup(metadata_cache, agent); -- if(pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT, pcmk__str_casei)) { -- return NULL; -- -- } else if(buffer == NULL) { -+ if (buffer == NULL) { - stonith_t *st = stonith_api_new(); - int rc; - - if (st == NULL) { - crm_warn("Could not get agent meta-data: " - "API memory allocation failed"); -- return NULL; -+ return EAGAIN; - } -- rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10); -+ rc = st->cmds->metadata(st, st_opt_sync_call, agent, -+ NULL, &buffer, 10); - stonith_api_delete(st); - if (rc || !buffer) { - crm_err("Could not retrieve metadata for fencing agent %s", agent); -- return NULL; -+ return EAGAIN; - } - g_hash_table_replace(metadata_cache, strdup(agent), buffer); - } - -- xml = string2xml(buffer); -- -- return xml; -+ *metadata = string2xml(buffer); -+ return pcmk_rc_ok; - } - - static gboolean -@@ -962,19 +972,27 @@ build_device_from_xml(xmlNode * msg) - value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_MAP); - device->aliases = build_port_aliases(value, &(device->targets)); - -- device->agent_metadata = get_agent_metadata(device->agent); -- if (device->agent_metadata) { -- 
read_action_metadata(device); -- stonith__device_parameter_flags(&(device->flags), device->id, -- device->agent_metadata); -- } else { -- if (device->timer == NULL) { -- device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000, -+ switch (get_agent_metadata(device->agent, &device->agent_metadata)) { -+ case pcmk_rc_ok: -+ if (device->agent_metadata) { -+ read_action_metadata(device); -+ stonith__device_parameter_flags(&(device->flags), device->id, -+ device->agent_metadata); -+ } -+ break; -+ -+ case EAGAIN: -+ if (device->timer == NULL) { -+ device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000, - TRUE, get_agent_metadata_cb, device); -- } -- if (!mainloop_timer_running(device->timer)) { -- mainloop_timer_start(device->timer); -- } -+ } -+ if (!mainloop_timer_running(device->timer)) { -+ mainloop_timer_start(device->timer); -+ } -+ break; -+ -+ default: -+ break; - } - - value = g_hash_table_lookup(device->params, "nodeid"); --- -2.27.0 - - -From 5dd1e4459335764e0adf5fa78d81c875ae2332e9 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Fri, 30 Jul 2021 18:15:10 +0200 -Subject: [PATCH 2/3] feature: watchdog-fencing: allow restriction to certain - nodes - -Bump CRM_FEATURE_SET to 3.11.0 to encourage cluster being -fully upgraded to a version that supports the feature -before explicitly adding a watchdog-fence-device. ---- - configure.ac | 1 + - daemons/controld/controld_control.c | 2 +- - daemons/controld/controld_fencing.c | 14 ++ - daemons/controld/controld_fencing.h | 1 + - daemons/fenced/Makefile.am | 2 +- - daemons/fenced/fence_watchdog.in | 283 ++++++++++++++++++++++++++++ - daemons/fenced/fenced_commands.c | 141 +++++++++++--- - daemons/fenced/fenced_remote.c | 71 ++++--- - daemons/fenced/pacemaker-fenced.c | 131 +++++++++---- - daemons/fenced/pacemaker-fenced.h | 5 +- - include/crm/crm.h | 2 +- - include/crm/fencing/internal.h | 8 +- - lib/fencing/st_client.c | 61 ++++++ - lib/lrmd/lrmd_client.c | 6 +- - rpm/pacemaker.spec.in | 3 + - 16 files changed, 635 insertions(+), 97 deletions(-) - create mode 100755 daemons/fenced/fence_watchdog.in - -diff --git a/configure.ac b/configure.ac -index 436100c81..013562e46 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -1950,6 +1950,7 @@ - AC_CONFIG_FILES([cts/fence_dummy], [chmod +x cts/fence_dummy]) - AC_CONFIG_FILES([cts/pacemaker-cts-dummyd], [chmod +x cts/pacemaker-cts-dummyd]) - AC_CONFIG_FILES([daemons/fenced/fence_legacy], [chmod +x daemons/fenced/fence_legacy]) -+AC_CONFIG_FILES([daemons/fenced/fence_watchdog], [chmod +x daemons/fenced/fence_watchdog]) - AC_CONFIG_FILES([doc/abi-check], [chmod +x doc/abi-check]) - AC_CONFIG_FILES([extra/resources/ClusterMon], [chmod +x extra/resources/ClusterMon]) - AC_CONFIG_FILES([extra/resources/HealthSMART], [chmod +x extra/resources/HealthSMART]) -diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c -index 45a70bb92..b5da6a46c 100644 ---- a/daemons/controld/controld_control.c -+++ b/daemons/controld/controld_control.c -@@ -615,7 +615,7 @@ static pcmk__cluster_option_t crmd_opts[] = { - }, - { - "stonith-watchdog-timeout", NULL, "time", NULL, -- "0", pcmk__valid_sbd_timeout, -+ "0", controld_verify_stonith_watchdog_timeout, - "How long to wait before we can assume nodes are safely down " - "when watchdog-based self-fencing via SBD is in use", - "If nonzero, along with `have-watchdog=true` automatically set by the " -diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c -index 0fba6613b..6c2a6c550 100644 
---- a/daemons/controld/controld_fencing.c -+++ b/daemons/controld/controld_fencing.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -886,6 +887,19 @@ te_fence_node(crm_graph_t *graph, crm_action_t *action) - return TRUE; - } - -+bool -+controld_verify_stonith_watchdog_timeout(const char *value) -+{ -+ gboolean rv = TRUE; -+ -+ if (stonith_api && (stonith_api->state != stonith_disconnected) && -+ stonith__watchdog_fencing_enabled_for_node_api(stonith_api, -+ fsa_our_uname)) { -+ rv = pcmk__valid_sbd_timeout(value); -+ } -+ return rv; -+} -+ - /* end stonith API client functions */ - - -diff --git a/daemons/controld/controld_fencing.h b/daemons/controld/controld_fencing.h -index d0ecc8234..ef68a0c83 100644 ---- a/daemons/controld/controld_fencing.h -+++ b/daemons/controld/controld_fencing.h -@@ -24,6 +24,7 @@ void update_stonith_max_attempts(const char* value); - void controld_trigger_fencer_connect(void); - void controld_disconnect_fencer(bool destroy); - gboolean te_fence_node(crm_graph_t *graph, crm_action_t *action); -+bool controld_verify_stonith_watchdog_timeout(const char *value); - - // stonith cleanup list - void add_stonith_cleanup(const char *target); -diff --git a/daemons/fenced/Makefile.am b/daemons/fenced/Makefile.am -index 43413e11d..2923d7c9b 100644 ---- a/daemons/fenced/Makefile.am -+++ b/daemons/fenced/Makefile.am -@@ -15,7 +15,7 @@ halibdir = $(CRM_DAEMON_DIR) - - halib_PROGRAMS = pacemaker-fenced cts-fence-helper - --sbin_SCRIPTS = fence_legacy -+sbin_SCRIPTS = fence_legacy fence_watchdog - - noinst_HEADERS = pacemaker-fenced.h - -diff --git a/daemons/fenced/fence_watchdog.in b/daemons/fenced/fence_watchdog.in -new file mode 100755 -index 000000000..c83304f1d ---- /dev/null -+++ b/daemons/fenced/fence_watchdog.in -@@ -0,0 +1,284 @@ -+#!@PYTHON@ -+"""Dummy watchdog fence agent for providing meta-data for the pacemaker internal agent -+""" -+ -+__copyright__ = "Copyright 2012-2021 the Pacemaker project contributors" -+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" -+ -+import io -+import os -+import re -+import sys -+import atexit -+import getopt -+ -+AGENT_VERSION = "1.0.0" -+SHORT_DESC = "Dummy watchdog fence agent" -+LONG_DESC = """fence_watchdog just provides -+meta-data - actual fencing is done by the pacemaker internal watchdog agent.""" -+ -+ALL_OPT = { -+ "version" : { -+ "getopt" : "V", -+ "longopt" : "version", -+ "help" : "-V, --version Display version information and exit", -+ "required" : "0", -+ "shortdesc" : "Display version information and exit", -+ "order" : 53 -+ }, -+ "help" : { -+ "getopt" : "h", -+ "longopt" : "help", -+ "help" : "-h, --help Display this help and exit", -+ "required" : "0", -+ "shortdesc" : "Display help and exit", -+ "order" : 54 -+ }, -+ "action" : { -+ "getopt" : "o:", -+ "longopt" : "action", -+ "help" : "-o, --action=[action] Action: metadata", -+ "required" : "1", -+ "shortdesc" : "Fencing Action", -+ "default" : "metadata", -+ "order" : 1 -+ }, -+ "nodename" : { -+ "getopt" : "N:", -+ "longopt" : "nodename", -+ "help" : "-N, --nodename Node name of fence victim (ignored)", -+ "required" : "0", -+ "shortdesc" : "Ignored", -+ "order" : 2 -+ }, -+ "plug" : { -+ "getopt" : "n:", -+ "longopt" : "plug", -+ "help" : "-n, --plug=[id] Physical plug number on device (ignored)", -+ "required" : "1", -+ "shortdesc" : "Ignored", -+ "order" : 4 -+ } -+} -+ -+ -+def agent(): -+ """ Return name this file was run as. 
""" -+ -+ return os.path.basename(sys.argv[0]) -+ -+ -+def fail_usage(message): -+ """ Print a usage message and exit. """ -+ -+ sys.exit("%s\nPlease use '-h' for usage" % message) -+ -+ -+def show_docs(options): -+ """ Handle informational options (display info and exit). """ -+ -+ device_opt = options["device_opt"] -+ -+ if "-h" in options: -+ usage(device_opt) -+ sys.exit(0) -+ -+ if "-o" in options and options["-o"].lower() == "metadata": -+ metadata(device_opt, options) -+ sys.exit(0) -+ -+ if "-V" in options: -+ print(AGENT_VERSION) -+ sys.exit(0) -+ -+ -+def sorted_options(avail_opt): -+ """ Return a list of all options, in their internally specified order. """ -+ -+ sorted_list = [(key, ALL_OPT[key]) for key in avail_opt] -+ sorted_list.sort(key=lambda x: x[1]["order"]) -+ return sorted_list -+ -+ -+def usage(avail_opt): -+ """ Print a usage message. """ -+ print(LONG_DESC) -+ print() -+ print("Usage:") -+ print("\t" + agent() + " [options]") -+ print("Options:") -+ -+ for dummy, value in sorted_options(avail_opt): -+ if len(value["help"]) != 0: -+ print(" " + value["help"]) -+ -+ -+def metadata(avail_opt, options): -+ """ Print agent metadata. """ -+ -+ print(""" -+ -+%s -+""" % (agent(), SHORT_DESC, LONG_DESC)) -+ -+ for option, dummy in sorted_options(avail_opt): -+ if "shortdesc" in ALL_OPT[option]: -+ print(' ') -+ -+ default = "" -+ default_name_arg = "-" + ALL_OPT[option]["getopt"][:-1] -+ default_name_no_arg = "-" + ALL_OPT[option]["getopt"] -+ -+ if "default" in ALL_OPT[option]: -+ default = 'default="%s"' % str(ALL_OPT[option]["default"]) -+ elif default_name_arg in options: -+ if options[default_name_arg]: -+ try: -+ default = 'default="%s"' % options[default_name_arg] -+ except TypeError: -+ ## @todo/@note: Currently there is no clean way how to handle lists -+ ## we can create a string from it but we can't set it on command line -+ default = 'default="%s"' % str(options[default_name_arg]) -+ elif default_name_no_arg in options: -+ default = 'default="true"' -+ -+ mixed = ALL_OPT[option]["help"] -+ ## split it between option and help text -+ res = re.compile(r"^(.*--\S+)\s+", re.IGNORECASE | re.S).search(mixed) -+ if None != res: -+ mixed = res.group(1) -+ mixed = mixed.replace("<", "<").replace(">", ">") -+ print(' ') -+ -+ if ALL_OPT[option]["getopt"].count(":") > 0: -+ print(' ') -+ else: -+ print(' ') -+ -+ print(' ' + ALL_OPT[option]["shortdesc"] + '') -+ print(' ') -+ -+ print(' \n ') -+ print(' ') -+ print(' ') -+ print(' ') -+ print(' ') -+ print(' ') -+ print(' ') -+ print(' ') -+ print('') -+ -+ -+def option_longopt(option): -+ """ Return the getopt-compatible long-option name of the given option. """ -+ -+ if ALL_OPT[option]["getopt"].endswith(":"): -+ return ALL_OPT[option]["longopt"] + "=" -+ else: -+ return ALL_OPT[option]["longopt"] -+ -+ -+def opts_from_command_line(argv, avail_opt): -+ """ Read options from command-line arguments. 
""" -+ -+ # Prepare list of options for getopt -+ getopt_string = "" -+ longopt_list = [] -+ for k in avail_opt: -+ if k in ALL_OPT: -+ getopt_string += ALL_OPT[k]["getopt"] -+ else: -+ fail_usage("Parse error: unknown option '" + k + "'") -+ -+ if k in ALL_OPT and "longopt" in ALL_OPT[k]: -+ longopt_list.append(option_longopt(k)) -+ -+ try: -+ opt, dummy = getopt.gnu_getopt(argv, getopt_string, longopt_list) -+ except getopt.GetoptError as error: -+ fail_usage("Parse error: " + error.msg) -+ -+ # Transform longopt to short one which are used in fencing agents -+ old_opt = opt -+ opt = {} -+ for old_option in dict(old_opt).keys(): -+ if old_option.startswith("--"): -+ for option in ALL_OPT.keys(): -+ if "longopt" in ALL_OPT[option] and "--" + ALL_OPT[option]["longopt"] == old_option: -+ opt["-" + ALL_OPT[option]["getopt"].rstrip(":")] = dict(old_opt)[old_option] -+ else: -+ opt[old_option] = dict(old_opt)[old_option] -+ -+ return opt -+ -+ -+def opts_from_stdin(avail_opt): -+ """ Read options from standard input. """ -+ -+ opt = {} -+ name = "" -+ for line in sys.stdin.readlines(): -+ line = line.strip() -+ if line.startswith("#") or (len(line) == 0): -+ continue -+ -+ (name, value) = (line + "=").split("=", 1) -+ value = value[:-1] -+ -+ if name not in avail_opt: -+ print("Parse error: Ignoring unknown option '%s'" % line, -+ file=sys.stderr) -+ continue -+ -+ if ALL_OPT[name]["getopt"].endswith(":"): -+ opt["-"+ALL_OPT[name]["getopt"].rstrip(":")] = value -+ elif value.lower() in ["1", "yes", "on", "true"]: -+ opt["-"+ALL_OPT[name]["getopt"]] = "1" -+ -+ return opt -+ -+ -+def process_input(avail_opt): -+ """ Set standard environment variables, and parse all options. """ -+ -+ # Set standard environment -+ os.putenv("LANG", "C") -+ os.putenv("LC_ALL", "C") -+ -+ # Read options from command line or standard input -+ if len(sys.argv) > 1: -+ return opts_from_command_line(sys.argv[1:], avail_opt) -+ else: -+ return opts_from_stdin(avail_opt) -+ -+ -+def atexit_handler(): -+ """ Close stdout on exit. """ -+ -+ try: -+ sys.stdout.close() -+ os.close(1) -+ except IOError: -+ sys.exit("%s failed to close standard output" % agent()) -+ -+ -+def main(): -+ """ Make it so! 
""" -+ -+ device_opt = ALL_OPT.keys() -+ -+ ## Defaults for fence agent -+ atexit.register(atexit_handler) -+ options = process_input(device_opt) -+ options["device_opt"] = device_opt -+ show_docs(options) -+ -+ print("Watchdog fencing may be initiated only by the cluster, not this agent.", -+ file=sys.stderr) -+ -+ sys.exit(1) -+ -+ -+if __name__ == "__main__": -+ main() -diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c -index cd9968f1a..9470ea2c1 100644 ---- a/daemons/fenced/fenced_commands.c -+++ b/daemons/fenced/fenced_commands.c -@@ -397,15 +397,13 @@ stonith_device_execute(stonith_device_t * device) - return TRUE; - } - -- if(pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT, pcmk__str_casei)) { -- if(pcmk__str_eq(cmd->action, "reboot", pcmk__str_casei)) { -- pcmk__panic(__func__); -- goto done; -- -- } else if(pcmk__str_eq(cmd->action, "off", pcmk__str_casei)) { -- pcmk__panic(__func__); -- goto done; -- -+ if (pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, -+ STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { -+ if (pcmk__str_any_of(cmd->action, "reboot", "off", NULL)) { -+ if (node_does_watchdog_fencing(stonith_our_uname)) { -+ pcmk__panic(__func__); -+ goto done; -+ } - } else { - crm_info("Faking success for %s watchdog operation", cmd->action); - cmd->done_cb(0, 0, NULL, cmd); -@@ -716,7 +714,7 @@ get_agent_metadata(const char *agent, xmlNode ** metadata) - return EINVAL; - } - *metadata = NULL; -- if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT, pcmk__str_none)) { -+ if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT_INTERNAL, pcmk__str_none)) { - return pcmk_rc_ok; - } - init_metadata_cache(); -@@ -1050,24 +1048,6 @@ schedule_internal_command(const char *origin, - schedule_stonith_command(cmd, device); - } - --gboolean --string_in_list(GListPtr list, const char *item) --{ -- int lpc = 0; -- int max = g_list_length(list); -- -- for (lpc = 0; lpc < max; lpc++) { -- const char *value = g_list_nth_data(list, lpc); -- -- if (pcmk__str_eq(item, value, pcmk__str_casei)) { -- return TRUE; -- } else { -- crm_trace("%d: '%s' != '%s'", lpc, item, value); -- } -- } -- return FALSE; --} -- - static void - status_search_cb(GPid pid, int rc, const char *output, gpointer user_data) - { -@@ -1144,7 +1124,7 @@ dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data) - if (!alias) { - alias = search->host; - } -- if (string_in_list(dev->targets, alias)) { -+ if (pcmk__strcase_in_list(dev->targets, alias)) { - can_fence = TRUE; - } - } -@@ -1215,8 +1195,61 @@ stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib) - stonith_device_t *dup = NULL; - stonith_device_t *device = build_device_from_xml(msg); -+ int rv = pcmk_ok; - - CRM_CHECK(device != NULL, return -ENOMEM); - -+ /* do we have a watchdog-device? 
*/ -+ if (pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none) || -+ pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, -+ STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) do { -+ if (stonith_watchdog_timeout_ms <= 0) { -+ crm_err("Ignoring watchdog fence device without " -+ "stonith-watchdog-timeout set."); -+ rv = -ENODEV; -+ /* fall through to cleanup & return */ -+ } else if (!pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, -+ STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { -+ crm_err("Ignoring watchdog fence device with unknown " -+ "agent '%s' unequal '" STONITH_WATCHDOG_AGENT "'.", -+ device->agent?device->agent:""); -+ rv = -ENODEV; -+ /* fall through to cleanup & return */ -+ } else if (!pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, -+ pcmk__str_none)) { -+ crm_err("Ignoring watchdog fence device " -+ "named %s !='"STONITH_WATCHDOG_ID"'.", -+ device->id?device->id:""); -+ rv = -ENODEV; -+ /* fall through to cleanup & return */ -+ } else { -+ if (pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT, -+ pcmk__str_none)) { -+ /* this either has an empty list or the targets -+ configured for watchdog-fencing -+ */ -+ g_list_free_full(stonith_watchdog_targets, free); -+ stonith_watchdog_targets = device->targets; -+ device->targets = NULL; -+ } -+ if (node_does_watchdog_fencing(stonith_our_uname)) { -+ g_list_free_full(device->targets, free); -+ device->targets = stonith__parse_targets(stonith_our_uname); -+ g_hash_table_replace(device->params, -+ strdup(PCMK_STONITH_HOST_LIST), -+ strdup(stonith_our_uname)); -+ /* proceed as with any other stonith-device */ -+ break; -+ } -+ -+ crm_debug("Skip registration of watchdog fence device on node not in host-list."); -+ /* cleanup and fall through to more cleanup and return */ -+ device->targets = NULL; -+ stonith_device_remove(device->id, from_cib); -+ } -+ free_device(device); -+ return rv; -+ } while (0); -+ - dup = device_has_duplicate(device); - if (dup) { - crm_debug("Device '%s' already existed in device list (%d active devices)", device->id, -@@ -1598,6 +1631,39 @@ stonith_level_remove(xmlNode *msg, char **desc) - * (CIB registration is not sufficient), because monitor should not be - * possible unless the device is "started" (API registered). 
- */ -+ -+static char * -+list_to_string(GList *list, const char *delim, gboolean terminate_with_delim) -+{ -+ int max = g_list_length(list); -+ size_t delim_len = delim?strlen(delim):0; -+ size_t alloc_size = 1 + (max?((max-1+(terminate_with_delim?1:0))*delim_len):0); -+ char *rv; -+ GList *gIter; -+ -+ for (gIter = list; gIter != NULL; gIter = gIter->next) { -+ const char *value = (const char *) gIter->data; -+ -+ alloc_size += strlen(value); -+ } -+ rv = calloc(alloc_size, sizeof(char)); -+ if (rv) { -+ char *pos = rv; -+ const char *lead_delim = ""; -+ -+ for (gIter = list; gIter != NULL; gIter = gIter->next) { -+ const char *value = (const char *) gIter->data; -+ -+ pos = &pos[sprintf(pos, "%s%s", lead_delim, value)]; -+ lead_delim = delim; -+ } -+ if (max && terminate_with_delim) { -+ sprintf(pos, "%s", delim); -+ } -+ } -+ return rv; -+} -+ - static int - stonith_device_action(xmlNode * msg, char **output) - { -@@ -1615,6 +1681,19 @@ stonith_device_action(xmlNode * msg, char **output) - return -EPROTO; - } - -+ if (pcmk__str_eq(id, STONITH_WATCHDOG_ID, pcmk__str_none)) { -+ if (stonith_watchdog_timeout_ms <= 0) { -+ return -ENODEV; -+ } else { -+ if (pcmk__str_eq(action, "list", pcmk__str_casei)) { -+ *output = list_to_string(stonith_watchdog_targets, "\n", TRUE); -+ return pcmk_ok; -+ } else if (pcmk__str_eq(action, "monitor", pcmk__str_casei)) { -+ return pcmk_ok; -+ } -+ } -+ } -+ - device = g_hash_table_lookup(device_list, id); - if ((device == NULL) - || (!device->api_registered && !strcmp(action, "monitor"))) { -@@ -1742,7 +1821,7 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc - * Only use if all hosts on which the device can be active can always fence all listed hosts - */ - -- if (string_in_list(dev->targets, host)) { -+ if (pcmk__strcase_in_list(dev->targets, host)) { - can = TRUE; - } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP) - && g_hash_table_lookup(dev->aliases, host)) { -@@ -1763,7 +1842,7 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc - return; - } - -- if (string_in_list(dev->targets, alias)) { -+ if (pcmk__strcase_in_list(dev->targets, alias)) { - can = TRUE; - } - -diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c -index cf91acaed..224f2baba 100644 ---- a/daemons/fenced/fenced_remote.c -+++ b/daemons/fenced/fenced_remote.c -@@ -1522,6 +1522,25 @@ advance_topology_device_in_level(remote_fencing_op_t *op, const char *device, - } - } - -+static gboolean -+check_watchdog_fencing_and_wait(remote_fencing_op_t * op) -+{ -+ if (node_does_watchdog_fencing(op->target)) { -+ -+ crm_notice("Waiting %lds for %s to self-fence (%s) for " -+ "client %s " CRM_XS " id=%.8s", -+ (stonith_watchdog_timeout_ms / 1000), -+ op->target, op->action, op->client_name, op->id); -+ op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, -+ remote_op_watchdog_done, op); -+ return TRUE; -+ } else { -+ crm_debug("Skipping fallback to watchdog-fencing as %s is " -+ "not in host-list", op->target); -+ } -+ return FALSE; -+} -+ - void - call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer, int rc) - { -@@ -1592,26 +1611,33 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer, int rc) - g_source_remove(op->op_timer_one); - } - -- if(stonith_watchdog_timeout_ms > 0 && device && pcmk__str_eq(device, "watchdog", pcmk__str_casei)) { -- crm_notice("Waiting %lds for %s to self-fence (%s) for client %s.%.8s", -- 
stonith_watchdog_timeout_ms/1000, op->target, op->action, -- op->client_name, op->id); -- op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op); -- -- /* TODO check devices to verify watchdog will be in use */ -- } else if(stonith_watchdog_timeout_ms > 0 -- && pcmk__str_eq(peer->host, op->target, pcmk__str_casei) -- && !pcmk__str_eq(op->action, "on", pcmk__str_casei)) { -- crm_notice("Waiting %lds for %s to self-fence (%s) for client %s.%.8s", -- stonith_watchdog_timeout_ms/1000, op->target, op->action, -- op->client_name, op->id); -- op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op); -- -- } else { -+ if (!(stonith_watchdog_timeout_ms > 0 && ( -+ (pcmk__str_eq(device, STONITH_WATCHDOG_ID, -+ pcmk__str_none)) || -+ (pcmk__str_eq(peer->host, op->target, pcmk__str_casei) -+ && !pcmk__str_eq(op->action, "on", pcmk__str_casei))) && -+ check_watchdog_fencing_and_wait(op))) { -+ -+ /* Some thoughts about self-fencing cases reaching this point: -+ - Actually check in check_watchdog_fencing_and_wait -+ shouldn't fail if STONITH_WATCHDOG_ID is -+ chosen as fencing-device and it being present implies -+ watchdog-fencing is enabled anyway -+ - If watchdog-fencing is disabled either in general or for -+ a specific target - detected in check_watchdog_fencing_and_wait - -+ for some other kind of self-fencing we can't expect -+ a success answer but timeout is fine if the node doesn't -+ come back in between -+ - Delicate might be the case where we have watchdog-fencing -+ enabled for a node but the watchdog-fencing-device isn't -+ explicitly chosen for suicide. Local pe-execution in sbd -+ may detect the node as unclean and lead to timely suicide. -+ Otherwise the selection of stonith-watchdog-timeout at -+ least is questionable. -+ */ - op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op); - } - -- - send_cluster_message(crm_get_peer(0, peer->host), crm_msg_stonith_ng, remote_op, FALSE); - peer->tried = TRUE; - free_xml(remote_op); -@@ -1645,13 +1671,11 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer, int rc) - * but we have all the expected replies, then no devices - * are available to execute the fencing operation. 
*/ - -- if(stonith_watchdog_timeout_ms && pcmk__str_eq(device, "watchdog", pcmk__str_null_matches | pcmk__str_casei)) { -- crm_notice("Waiting %lds for %s to self-fence (%s) for client %s.%.8s", -- stonith_watchdog_timeout_ms/1000, op->target, -- op->action, op->client_name, op->id); -- -- op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op); -- return; -+ if(stonith_watchdog_timeout_ms > 0 && pcmk__str_eq(device, -+ STONITH_WATCHDOG_ID, pcmk__str_null_matches)) { -+ if (check_watchdog_fencing_and_wait(op)) { -+ return; -+ } - } - - if (op->state == st_query) { -diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c -index 39738d8be..7f8b427d9 100644 ---- a/daemons/fenced/pacemaker-fenced.c -+++ b/daemons/fenced/pacemaker-fenced.c -@@ -42,7 +42,8 @@ - - char *stonith_our_uname = NULL; - char *stonith_our_uuid = NULL; - long stonith_watchdog_timeout_ms = 0; -+GList *stonith_watchdog_targets = NULL; - - static GMainLoop *mainloop = NULL; - -@@ -578,7 +579,44 @@ our_node_allowed_for(pe_resource_t *rsc) - } - - static void --watchdog_device_update(xmlNode *cib) -+watchdog_device_update(void) -+{ -+ if (stonith_watchdog_timeout_ms > 0) { -+ if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) && -+ !stonith_watchdog_targets) { -+ /* getting here watchdog-fencing enabled, no device there yet -+ and reason isn't stonith_watchdog_targets preventing that -+ */ -+ int rc; -+ xmlNode *xml; -+ -+ xml = create_device_registration_xml( -+ STONITH_WATCHDOG_ID, -+ st_namespace_internal, -+ STONITH_WATCHDOG_AGENT, -+ NULL, /* stonith_device_register will add our -+ own name as PCMK_STONITH_HOST_LIST param -+ so we can skip that here -+ */ -+ NULL); -+ rc = stonith_device_register(xml, NULL, TRUE); -+ free_xml(xml); -+ if (rc != pcmk_ok) { -+ crm_crit("Cannot register watchdog pseudo fence agent"); -+ crm_exit(CRM_EX_FATAL); -+ } -+ } -+ -+ } else { -+ /* be silent if no device - todo parameter to stonith_device_remove */ -+ if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID)) { -+ stonith_device_remove(STONITH_WATCHDOG_ID, TRUE); -+ } -+ } -+} -+ -+static void -+update_stonith_watchdog_timeout_ms(xmlNode *cib) - { - xmlNode *stonith_enabled_xml = NULL; - const char *stonith_enabled_s = NULL; -@@ -608,33 +646,7 @@ watchdog_device_update(xmlNode *cib) - } - } - -- if (timeout_ms != stonith_watchdog_timeout_ms) { -- crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, stonith_watchdog_timeout_ms/1000); -- stonith_watchdog_timeout_ms = timeout_ms; -- -- if (stonith_watchdog_timeout_ms > 0) { -- int rc; -- xmlNode *xml; -- stonith_key_value_t *params = NULL; -- -- params = stonith_key_value_add(params, PCMK_STONITH_HOST_LIST, -- stonith_our_uname); -- -- xml = create_device_registration_xml("watchdog", st_namespace_internal, -- STONITH_WATCHDOG_AGENT, params, -- NULL); -- stonith_key_value_freeall(params, 1, 1); -- rc = stonith_device_register(xml, NULL, FALSE); -- free_xml(xml); -- if (rc != pcmk_ok) { -- crm_crit("Cannot register watchdog pseudo fence agent"); -- crm_exit(CRM_EX_FATAL); -- } -- -- } else { -- stonith_device_remove("watchdog", FALSE); -- } -- } -+ stonith_watchdog_timeout_ms = timeout_ms; - } - - /*! 
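Note: the guard shown in the fenced_remote.c hunks above (call_remote_stonith()) is written as a single negated compound test, which is hard to scan. The same decision in positive form, as a hedged, self-contained C sketch: should_wait_for_watchdog() and target_in_watchdog_list are hypothetical stand-ins, not Pacemaker symbols, and the real check_watchdog_fencing_and_wait() both makes this decision (via node_does_watchdog_fencing()) and starts the self-fence timer, while the sketch covers only the decision.

    #include <stdbool.h>
    #include <string.h>
    #include <strings.h>

    #define WATCHDOG_ID "watchdog"  /* stand-in for STONITH_WATCHDOG_ID */

    /* Waiting for watchdog self-fencing is appropriate when the watchdog
     * timeout is enabled, the chosen device is the watchdog device OR the
     * target is being asked to fence itself with something other than an
     * "on" action, and the target is permitted to use watchdog fencing
     * (the host-list check that check_watchdog_fencing_and_wait() performs).
     */
    bool
    should_wait_for_watchdog(long watchdog_timeout_ms, const char *device,
                             const char *peer_host, const char *target,
                             const char *action, bool target_in_watchdog_list)
    {
        if (watchdog_timeout_ms <= 0) {
            return false;
        }
        if ((device != NULL) && (strcmp(device, WATCHDOG_ID) == 0)) {
            return target_in_watchdog_list;
        }
        if ((peer_host != NULL) && (target != NULL)
            && (strcasecmp(peer_host, target) == 0)
            && (strcasecmp(action, "on") != 0)) {
            return target_in_watchdog_list;
        }
        return false;
    }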
-@@ -677,6 +689,16 @@ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) - return; - } - -+ /* if watchdog-fencing is disabled handle any watchdog-fence -+ resource as if it was disabled -+ */ -+ if ((stonith_watchdog_timeout_ms <= 0) && -+ pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) { -+ crm_info("Watchdog-fencing disabled thus handling " -+ "device %s as disabled", rsc->id); -+ return; -+ } -+ - /* Check whether our node is allowed for this resource (and its parent if in a group) */ - node = our_node_allowed_for(rsc); - if (rsc->parent && (rsc->parent->variant == pe_group)) { -@@ -772,6 +794,12 @@ cib_devices_update(void) - } - } - -+ /* have list repopulated if cib has a watchdog-fencing-resource -+ TODO: keep a cached list for queries happening while we are refreshing -+ */ -+ g_list_free_full(stonith_watchdog_targets, free); -+ stonith_watchdog_targets = NULL; -+ - for (gIter = fenced_data_set->resources; gIter != NULL; gIter = gIter->next) { - cib_device_update(gIter->data, fenced_data_set); - } -@@ -825,6 +853,8 @@ update_cib_stonith_devices_v2(const char *event, xmlNode * msg) - if (search != NULL) { - *search = 0; - stonith_device_remove(rsc_id, TRUE); -+ /* watchdog_device_update called afterwards -+ to fall back to implicit definition if needed */ - } else { - crm_warn("Ignoring malformed CIB update (resource deletion)"); - } -@@ -968,6 +998,24 @@ node_has_attr(const char *node, const char *name, const char *value) - return (match != NULL); - } - -+/*! -+ * \internal -+ * \brief Check whether a node does watchdog-fencing -+ * -+ * \param[in] node Name of node to check -+ * -+ * \return TRUE if node found in stonith_watchdog_targets -+ * or stonith_watchdog_targets is empty indicating -+ * all nodes are doing watchdog-fencing -+ */ -+gboolean -+node_does_watchdog_fencing(const char *node) -+{ -+ return ((stonith_watchdog_targets == NULL) || -+ pcmk__strcase_in_list(stonith_watchdog_targets, node)); -+} -+ -+ - static void - update_fencing_topology(const char *event, xmlNode * msg) - { -@@ -1073,6 +1121,8 @@ update_cib_cache_cb(const char *event, xmlNode * msg) - xmlNode *stonith_enabled_xml = NULL; - const char *stonith_enabled_s = NULL; - static gboolean stonith_enabled_saved = TRUE; -+ long timeout_ms_saved = stonith_watchdog_timeout_ms; -+ gboolean need_full_refresh = FALSE; - - if(!have_cib_devices) { - crm_trace("Skipping updates until we get a full dump"); -@@ -1127,6 +1177,7 @@ update_cib_cache_cb(const char *event, xmlNode * msg) - } - - crm_peer_caches_refresh(local_cib); -+ update_stonith_watchdog_timeout_ms(local_cib); - - stonith_enabled_xml = get_xpath_object("//nvpair[@name='stonith-enabled']", - local_cib, LOG_NEVER); -@@ -1134,22 +1185,29 @@ update_cib_cache_cb(const char *event, xmlNode * msg) - stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE); - } - -- watchdog_device_update(local_cib); -- - if (stonith_enabled_s && crm_is_true(stonith_enabled_s) == FALSE) { - crm_trace("Ignoring cib updates while stonith is disabled"); - stonith_enabled_saved = FALSE; -- return; - - } else if (stonith_enabled_saved == FALSE) { - crm_info("Updating stonith device and topology lists now that stonith is enabled"); - stonith_enabled_saved = TRUE; -- fencing_topology_init(); -- cib_devices_update(); -+ need_full_refresh = TRUE; - - } else { -- update_fencing_topology(event, msg); -- update_cib_stonith_devices(event, msg); -+ if (timeout_ms_saved != stonith_watchdog_timeout_ms) { -+ need_full_refresh = TRUE; 
-+ } else { -+ update_fencing_topology(event, msg); -+ update_cib_stonith_devices(event, msg); -+ watchdog_device_update(); -+ } -+ } -+ -+ if (need_full_refresh) { -+ fencing_topology_init(); -+ cib_devices_update(); -+ watchdog_device_update(); - } - } - -@@ -1162,10 +1220,11 @@ init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *us - local_cib = copy_xml(output); - - crm_peer_caches_refresh(local_cib); -+ update_stonith_watchdog_timeout_ms(local_cib); - - fencing_topology_init(); -- watchdog_device_update(local_cib); - cib_devices_update(); -+ watchdog_device_update(); - } - - static void -diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h -index d330fda4d..14e085e98 100644 ---- a/daemons/fenced/pacemaker-fenced.h -+++ b/daemons/fenced/pacemaker-fenced.h -@@ -260,14 +260,15 @@ bool fencing_peer_active(crm_node_t *peer); - - int stonith_manual_ack(xmlNode * msg, remote_fencing_op_t * op); - --gboolean string_in_list(GListPtr list, const char *item); -- - gboolean node_has_attr(const char *node, const char *name, const char *value); - -+gboolean node_does_watchdog_fencing(const char *node); -+ - extern char *stonith_our_uname; - extern gboolean stand_alone; - extern GHashTable *device_list; - extern GHashTable *topology; - extern long stonith_watchdog_timeout_ms; -+extern GList *stonith_watchdog_targets; - - extern GHashTable *stonith_remote_op_list; -diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h -index 8bcb544d8..f222edba3 100644 ---- a/include/crm/fencing/internal.h -+++ b/include/crm/fencing/internal.h -@@ -164,7 +164,10 @@ void stonith__device_parameter_flags(uint32_t *device_flags, - # define STONITH_OP_LEVEL_ADD "st_level_add" - # define STONITH_OP_LEVEL_DEL "st_level_remove" - --# define STONITH_WATCHDOG_AGENT "#watchdog" -+# define STONITH_WATCHDOG_AGENT "fence_watchdog" -+/* Don't change 2 below as it would break rolling upgrade */ -+# define STONITH_WATCHDOG_AGENT_INTERNAL "#watchdog" -+# define STONITH_WATCHDOG_ID "watchdog" - - # ifdef HAVE_STONITH_STONITH_H - // utilities from st_lha.c -@@ -211,4 +214,7 @@ stonith__event_state_pending(stonith_history_t *history, void *user_data) - bool stonith__event_state_eq(stonith_history_t *history, void *user_data); - bool stonith__event_state_neq(stonith_history_t *history, void *user_data); - -+gboolean stonith__watchdog_fencing_enabled_for_node(const char *node); -+gboolean stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node); -+ - #endif -diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c -index e285f51e2..0ff98157b 100644 ---- a/lib/fencing/st_client.c -+++ b/lib/fencing/st_client.c -@@ -195,6 +195,67 @@ stonith_get_namespace(const char *agent, const char *namespace_s) - return st_namespace_invalid; - } - -+gboolean -+stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node) -+{ -+ gboolean rv = FALSE; -+ stonith_t *stonith_api = st?st:stonith_api_new(); -+ char *list = NULL; -+ -+ if(stonith_api) { -+ if (stonith_api->state == stonith_disconnected) { -+ int rc = stonith_api->cmds->connect(stonith_api, "stonith-api", NULL); -+ -+ if (rc != pcmk_ok) { -+ crm_err("Failed connecting to Stonith-API for watchdog-fencing-query."); -+ } -+ } -+ -+ if (stonith_api->state != stonith_disconnected) { -+ /* caveat!!! 
-+ * this might fail when when stonithd is just updating the device-list -+ * probably something we should fix as well for other api-calls */ -+ int rc = stonith_api->cmds->list(stonith_api, st_opt_sync_call, STONITH_WATCHDOG_ID, &list, 0); -+ if ((rc != pcmk_ok) || (list == NULL)) { -+ /* due to the race described above it can happen that -+ * we drop in here - so as not to make remote nodes -+ * panic on that answer -+ */ -+ crm_warn("watchdog-fencing-query failed"); -+ } else if (list[0] == '\0') { -+ crm_warn("watchdog-fencing-query returned an empty list - any node"); -+ rv = TRUE; -+ } else { -+ GList *targets = stonith__parse_targets(list); -+ rv = pcmk__strcase_in_list(targets, node); -+ g_list_free_full(targets, free); -+ } -+ free(list); -+ if (!st) { -+ /* if we're provided the api we still might have done the -+ * connection - but let's assume the caller won't bother -+ */ -+ stonith_api->cmds->disconnect(stonith_api); -+ } -+ } -+ -+ if (!st) { -+ stonith_api_delete(stonith_api); -+ } -+ } else { -+ crm_err("Stonith-API for watchdog-fencing-query couldn't be created."); -+ } -+ crm_trace("Pacemaker assumes node %s %sto do watchdog-fencing.", -+ node, rv?"":"not "); -+ return rv; -+} -+ -+gboolean -+stonith__watchdog_fencing_enabled_for_node(const char *node) -+{ -+ return stonith__watchdog_fencing_enabled_for_node_api(NULL, node); -+} -+ - static void - log_action(stonith_action_t *action, pid_t pid) - { -diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c -index 87d050ed1..bf4bceb42 100644 ---- a/lib/lrmd/lrmd_client.c -+++ b/lib/lrmd/lrmd_client.c -@@ -34,6 +34,7 @@ - #include - - #include -+#include - - #ifdef HAVE_GNUTLS_GNUTLS_H - # undef KEYFILE -@@ -934,7 +935,10 @@ lrmd__validate_remote_settings(lrmd_t *lrmd, GHashTable *hash) - crm_xml_add(data, F_LRMD_ORIGIN, __func__); - - value = g_hash_table_lookup(hash, "stonith-watchdog-timeout"); -- crm_xml_add(data, F_LRMD_WATCHDOG, value); -+ if ((value) && -+ (stonith__watchdog_fencing_enabled_for_node(native->remote_nodename))) { -+ crm_xml_add(data, F_LRMD_WATCHDOG, value); -+ } - - rc = lrmd_send_command(lrmd, LRMD_OP_CHECK, data, NULL, 0, 0, - (native->type == pcmk__client_ipc)); -diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in -index 79e78ede9..f58357a77 100644 ---- a/rpm/pacemaker.spec.in -+++ b/rpm/pacemaker.spec.in -@@ -718,5 +718,6 @@ exit 0 - %{_sbindir}/crm_attribute - %{_sbindir}/crm_master - %{_sbindir}/fence_legacy -+%{_sbindir}/fence_watchdog - - %doc %{_mandir}/man7/pacemaker-controld.* -@@ -744,6 +744,7 @@ exit 0 - %doc %{_mandir}/man8/crm_attribute.* - %doc %{_mandir}/man8/crm_master.* - %doc %{_mandir}/man8/fence_legacy.* -+%doc %{_mandir}/man8/fence_watchdog.* - %doc %{_mandir}/man8/pacemakerd.* - - %doc %{_datadir}/pacemaker/alerts -@@ -822,6 +824,7 @@ exit 0 - %exclude %{_mandir}/man8/crm_attribute.* - %exclude %{_mandir}/man8/crm_master.* - %exclude %{_mandir}/man8/fence_legacy.* -+%exclude %{_mandir}/man8/fence_watchdog.* - %exclude %{_mandir}/man8/pacemakerd.* - %exclude %{_mandir}/man8/pacemaker-remoted.* - --- -2.27.0 - - -From 53dd360f096e5f005e3221e8d44d82d3654b5172 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Wed, 4 Aug 2021 15:57:23 +0200 -Subject: [PATCH 3/3] Fix: watchdog-fencing: Silence warning without node - restriction - ---- - lib/fencing/st_client.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c -index 0ff98157b..14fa7b2a6 100644 ---- a/lib/fencing/st_client.c -+++ b/lib/fencing/st_client.c -@@ -223,7 
+223,6 @@ stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node) - */ - crm_warn("watchdog-fencing-query failed"); - } else if (list[0] == '\0') { -- crm_warn("watchdog-fencing-query returned an empty list - any node"); - rv = TRUE; - } else { - GList *targets = stonith__parse_targets(list); --- -2.27.0 - ---- a/include/crm/common/strings_internal.h -+++ b/include/crm/common/strings_internal.h -@@ -42,6 +42,7 @@ - - int pcmk__parse_ll_range(const char *srcstring, long long *start, long long *end); - gboolean pcmk__str_in_list(GList *lst, const gchar *s); -+gboolean pcmk__strcase_in_list(GList *lst, const gchar *s); - - bool pcmk__strcase_any_of(const char *s, ...) G_GNUC_NULL_TERMINATED; - bool pcmk__str_any_of(const char *s, ...) G_GNUC_NULL_TERMINATED; ---- a/lib/common/strings.c -+++ b/lib/common/strings.c -@@ -804,6 +804,20 @@ - return g_list_find_custom(lst, s, (GCompareFunc) strcmp) != NULL; - } - -+gboolean -+pcmk__strcase_in_list(GList *lst, const gchar *s) -+{ -+ if (lst == NULL) { -+ return FALSE; -+ } -+ -+ if (strcmp(lst->data, "*") == 0 && lst->next == NULL) { -+ return TRUE; -+ } -+ -+ return g_list_find_custom(lst, s, (GCompareFunc) strcasecmp) != NULL; -+} -+ - static bool - str_any_of(bool casei, const char *s, va_list args) - { diff --git a/SOURCES/045-controller-attribute.patch b/SOURCES/045-controller-attribute.patch deleted file mode 100644 index 0271906..0000000 --- a/SOURCES/045-controller-attribute.patch +++ /dev/null @@ -1,122 +0,0 @@ -From ee7eba6a7a05bdf0a12d60ebabb334d8ee021101 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 9 Aug 2021 14:48:57 -0500 -Subject: [PATCH] Fix: controller: ensure lost node's transient attributes are - cleared without DC - -Previously, peer_update_callback() cleared a lost node's transient attributes -if either the local node is DC, or there is no DC. - -However, that left the possibility of the DC being lost at the same time as -another node -- the local node would still have fsa_our_dc set while processing -the leave notifications, so no node would clear the attributes for the non-DC -node. - -Now, the controller has its own CPG configuration change callback, which sets a -global boolean before calling the usual one, so that peer_update_callback() can -know when the DC has been lost. ---- - daemons/controld/controld_callbacks.c | 4 ++- - daemons/controld/controld_corosync.c | 57 ++++++++++++++++++++++++++++++++++- - 2 files changed, 59 insertions(+), 2 deletions(-) - -diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c -index af24856..e564b3d 100644 ---- a/daemons/controld/controld_callbacks.c -+++ b/daemons/controld/controld_callbacks.c -@@ -99,6 +99,8 @@ node_alive(const crm_node_t *node) - - #define state_text(state) ((state)? (const char *)(state) : "in unknown state") - -+bool controld_dc_left = false; -+ - void - peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data) - { -@@ -217,7 +219,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d - cib_scope_local); - } - -- } else if (AM_I_DC || (fsa_our_dc == NULL)) { -+ } else if (AM_I_DC || controld_dc_left || (fsa_our_dc == NULL)) { - /* This only needs to be done once, so normally the DC should do - * it. However if there is no DC, every node must do it, since - * there is no other way to ensure some one node does it. 
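Note: the watchdog host-list handling shown in the patch content above builds on pcmk__strcase_in_list() (lib/common/strings.c hunk earlier in this diff): a list whose only entry is "*" matches anything, other matches are case-insensitive, and node_does_watchdog_fencing() additionally treats an unset list as "every node". A hedged, self-contained sketch of those semantics; strcase_in_list() and does_watchdog_fencing() are local illustrative copies, not the Pacemaker symbols, and the program only needs GLib to build.

    #include <glib.h>
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* Local copy of the semantics of pcmk__strcase_in_list() shown above */
    static gboolean
    strcase_in_list(GList *lst, const gchar *s)
    {
        if (lst == NULL) {
            return FALSE;
        }
        if ((strcmp(lst->data, "*") == 0) && (lst->next == NULL)) {
            return TRUE;  /* a list holding only "*" matches any name */
        }
        return g_list_find_custom(lst, s, (GCompareFunc) strcasecmp) != NULL;
    }

    /* Mirrors node_does_watchdog_fencing(): no host list configured means
     * every node may self-fence via the watchdog device
     */
    static gboolean
    does_watchdog_fencing(GList *watchdog_targets, const char *node)
    {
        return (watchdog_targets == NULL)
               || strcase_in_list(watchdog_targets, node);
    }

    int
    main(void)
    {
        GList *targets = NULL;

        printf("unset list, node1: %d\n", does_watchdog_fencing(targets, "node1")); /* 1 */

        targets = g_list_append(targets, (gpointer) "node1");
        targets = g_list_append(targets, (gpointer) "NODE3");

        printf("node3: %d\n", does_watchdog_fencing(targets, "node3"));  /* 1, case-insensitive */
        printf("node2: %d\n", does_watchdog_fencing(targets, "node2"));  /* 0 */

        g_list_free(targets);
        return 0;
    }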
-diff --git a/daemons/controld/controld_corosync.c b/daemons/controld/controld_corosync.c -index db99630..c5ab658 100644 ---- a/daemons/controld/controld_corosync.c -+++ b/daemons/controld/controld_corosync.c -@@ -87,6 +87,61 @@ crmd_cs_destroy(gpointer user_data) - } - } - -+extern bool controld_dc_left; -+ -+/*! -+ * \brief Handle a Corosync notification of a CPG configuration change -+ * -+ * \param[in] handle CPG connection -+ * \param[in] cpg_name CPG group name -+ * \param[in] member_list List of current CPG members -+ * \param[in] member_list_entries Number of entries in \p member_list -+ * \param[in] left_list List of CPG members that left -+ * \param[in] left_list_entries Number of entries in \p left_list -+ * \param[in] joined_list List of CPG members that joined -+ * \param[in] joined_list_entries Number of entries in \p joined_list -+ */ -+static void -+cpg_membership_callback(cpg_handle_t handle, const struct cpg_name *cpg_name, -+ const struct cpg_address *member_list, -+ size_t member_list_entries, -+ const struct cpg_address *left_list, -+ size_t left_list_entries, -+ const struct cpg_address *joined_list, -+ size_t joined_list_entries) -+{ -+ /* When nodes leave CPG, the DC clears their transient node attributes. -+ * -+ * However if there is no DC, or the DC is among the nodes that left, each -+ * remaining node needs to do the clearing, to ensure it gets done. -+ * Otherwise, the attributes would persist when the nodes rejoin, which -+ * could have serious consequences for unfencing, agents that use attributes -+ * for internal logic, etc. -+ * -+ * Here, we set a global boolean if the DC is among the nodes that left, for -+ * use by the peer callback. -+ */ -+ if (fsa_our_dc != NULL) { -+ crm_node_t *peer = crm_find_peer(0, fsa_our_dc); -+ -+ if (peer != NULL) { -+ for (int i = 0; i < left_list_entries; ++i) { -+ if (left_list[i].nodeid == peer->id) { -+ controld_dc_left = true; -+ break; -+ } -+ } -+ } -+ } -+ -+ // Process the change normally, which will call the peer callback as needed -+ pcmk_cpg_membership(handle, cpg_name, member_list, member_list_entries, -+ left_list, left_list_entries, -+ joined_list, joined_list_entries); -+ -+ controld_dc_left = false; -+} -+ - extern gboolean crm_connect_corosync(crm_cluster_t * cluster); - - gboolean -@@ -95,7 +150,7 @@ crm_connect_corosync(crm_cluster_t * cluster) - if (is_corosync_cluster()) { - crm_set_status_callback(&peer_update_callback); - cluster->cpg.cpg_deliver_fn = crmd_cs_dispatch; -- cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; -+ cluster->cpg.cpg_confchg_fn = cpg_membership_callback; - cluster->destroy = crmd_cs_destroy; - - if (crm_cluster_connect(cluster)) { --- -1.8.3.1 - diff --git a/SOURCES/100-default-to-syncing-with-sbd.patch b/SOURCES/100-default-to-syncing-with-sbd.patch deleted file mode 100644 index 2cde070..0000000 --- a/SOURCES/100-default-to-syncing-with-sbd.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 34b2d8ab82dcdf49535c74e6a580240455498759 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Wed, 2 Dec 2020 22:51:33 +0100 -Subject: [PATCH] default to syncing with sbd - ---- - lib/common/watchdog.c | 10 +++++++--- - 1 file changed, 7 insertions(+), 3 deletions(-) - -diff --git a/lib/common/watchdog.c b/lib/common/watchdog.c -index 03ee7f1..bf5df18 100644 ---- a/lib/common/watchdog.c -+++ b/lib/common/watchdog.c -@@ -244,12 +244,16 @@ pcmk__get_sbd_timeout(void) - bool - pcmk__get_sbd_sync_resource_startup(void) - { -- static bool sync_resource_startup = false; -+ static bool 
sync_resource_startup = true; // default overruled by env - static bool checked_sync_resource_startup = false; - - if (!checked_sync_resource_startup) { -- sync_resource_startup = -- crm_is_true(getenv("SBD_SYNC_RESOURCE_STARTUP")); -+ gboolean ret = FALSE; -+ const char *s = getenv("SBD_SYNC_RESOURCE_STARTUP"); -+ -+ if ((s != NULL) && (crm_str_to_boolean(s, &ret) > 0)) { -+ sync_resource_startup = ret; -+ } - checked_sync_resource_startup = true; - } - --- -1.8.3.1 - diff --git a/SPECS/pacemaker.spec b/SPECS/pacemaker.spec index 961fb71..65a1299 100644 --- a/SPECS/pacemaker.spec +++ b/SPECS/pacemaker.spec @@ -19,20 +19,35 @@ ## GitHub entity that distributes source (for ease of using a fork) %global github_owner ClusterLabs +## Where bug reports should be submitted +## Leave bug_url undefined to use ClusterLabs default, others define it here +%if 0%{?rhel} +%global bug_url https://bugzilla.redhat.com/ +%else +%if 0%{?fedora} +%global bug_url https://bugz.fedoraproject.org/%{name} +%endif +%endif + +## What to use as the OCF resource agent root directory +%global ocf_root %{_prefix}/lib/ocf + ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) -%global pcmkversion 2.0.5 -%global specversion 9 +%global pcmkversion 2.1.0 +%global specversion 8 ## Upstream commit (full commit ID, abbreviated commit ID, or tag) to build -%global commit ba59be71228fed04f78ab374dfac748d314d0e89 +%global commit 7c3f660707a495a1331716ad32cd3ac9d9f8ff58 + ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. +%if (0%{?fedora} >= 26) || (0%{?rhel} >= 9) +%global commit_abbrev 9 +%else %global commit_abbrev 7 - -## Python major version to use (2, 3, or 0 for auto-detect) -%global python_major 0 +%endif ## Nagios source control identifiers %global nagios_name nagios-agents-metadata @@ -45,7 +60,7 @@ ## Add option to enable support for stonith/external fencing agents %bcond_with stonithd -## Add option to disable support for storing sensitive information outside CIB +## Add option for whether to support storing sensitive information outside CIB %bcond_without cibsecrets ## Add option to create binaries suitable for use with profiling tools @@ -54,8 +69,22 @@ ## Add option to create binaries with coverage analysis %bcond_with coverage -## Add option to generate documentation (requires Publican, Asciidoc and Inkscape) +## Add option to skip (or enable, on RHEL) generating documentation +## (the build tools aren't available everywhere) +%if 0%{?rhel} %bcond_with doc +%else +%bcond_without doc +%endif + +## Add option to default to start-up synchronization with SBD. +## +## If enabled, SBD *MUST* be built to default similarly, otherwise data +## corruption could occur. Building both Pacemaker and SBD to default +## to synchronization improves safety, without requiring higher-level tools +## to be aware of the setting or requiring users to modify configurations +## after upgrading to versions that support synchronization. +%bcond_without sbd_sync ## Add option to prefix package version with "0." 
## (so later "official" packages will be considered updates) @@ -67,9 +96,12 @@ ## Add option to turn off hardening of libraries and daemon executables %bcond_without hardening -## Add option to disable links for legacy daemon names +## Add option to enable (or disable, on RHEL 8) links for legacy daemon names +%if 0%{?rhel} && 0%{?rhel} <= 8 %bcond_without legacy_links - +%else +%bcond_with legacy_links +%endif # Define globals for convenient use later @@ -89,13 +121,9 @@ %define archive_version %(c=%{commit}; echo ${c:0:%{commit_abbrev}}) %define archive_github_url %{archive_version}#/%{name}-%{archive_version}.tar.gz %endif -# RHEL always uses a simple release number +### Always use a simple release number %define pcmk_release %{specversion} -## Heuristic used to infer bleeding-edge deployments that are -## less likely to have working versions of the documentation tools -%define bleeding %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) - ## Whether this platform defaults to using systemd as an init system ## (needs to be evaluated prior to BuildRequires being enumerated and ## installed as it's intended to conditionally select some of these, and @@ -122,14 +150,6 @@ %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM} %endif -%if !%{defined _rundir} -%if 0%{?fedora} >= 15 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1200 -%define _rundir /run -%else -%define _rundir /var/run -%endif -%endif - %if 0%{?fedora} > 22 || 0%{?rhel} > 7 %global supports_recommends 1 %endif @@ -153,21 +173,31 @@ %global pkgname_gnutls_devel gnutls-devel %global pkgname_shadow_utils shadow-utils %global pkgname_procps procps-ng -%global pkgname_publican publican %global pkgname_glue_libs cluster-glue-libs %global pkgname_pcmk_libs %{name}-libs %global hacluster_id 189 %endif -# Python-related definitions +## Distro-specific configuration choices + +### Use 2.0-style output when other distro packages don't support current output +%if 0%{?fedora} || ( 0%{?rhel} && 0%{?rhel} <= 8 ) +%global compat20 --enable-compat-2.0 +%endif -## Use Python 3 on certain platforms if major version not specified -%if %{?python_major} == 0 -%if 0%{?fedora} > 26 || 0%{?rhel} > 7 -%global python_major 3 +### Default concurrent-fencing to true when distro prefers that +%if 0%{?rhel} >= 7 +%global concurrent_fencing --with-concurrent-fencing-default=true %endif + +### Default resource-stickiness to 1 when distro prefers that +%if 0%{?fedora} >= 35 || 0%{?rhel} >= 9 +%global resource_stickiness --with-resource-stickiness-default=1 %endif + +# Python-related definitions + ## Turn off auto-compilation of Python files outside Python specific paths, ## so there's no risk that unexpected "__python" macro gets picked to do the ## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected) @@ -181,39 +211,25 @@ sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) %endif -## Values that differ by Python major version -%if 0%{?python_major} > 2 +## Prefer Python 3 definitions explicitly, in case 2 is also available +%if %{defined __python3} %global python_name python3 -%global python_path %{?__python3}%{!?__python3:/usr/bin/python%{?python3_pkgversion}%{!?python3_pkgversion:3}} +%global python_path %{__python3} %define python_site %{?python3_sitelib}%{!?python3_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else -%if 0%{?python_major} > 1 -%global python_name python2 -%global python_path 
%{?__python2}%{!?__python2:/usr/bin/python%{?python2_pkgversion}%{!?python2_pkgversion:2}} -%define python_site %{?python2_sitelib}%{!?python2_sitelib:%( - %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} +%if %{defined python_version} +%global python_name python%(echo %{python_version} | cut -d'.' -f1) +%define python_path %{?__python}%{!?__python:/usr/bin/%{python_name}} %else %global python_name python %global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}} -%define python_site %{?python_sitelib}%{!?python_sitelib:%( - python -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %endif +%define python_site %{?python_sitelib}%{!?python_sitelib:%( + %{python_name} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %endif -# Definitions for backward compatibility with older RPM versions - -## Ensure the license macro behaves consistently (older RPM will otherwise -## overwrite it once it encounters "License:"). Courtesy Jason Tibbitts: -## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77 -%if !%{defined _licensedir} -%define description %{lua: - rpm.define("license %doc") - print("%description") -} -%endif - # Keep sane profiling data if requested %if %{with profiling} @@ -226,7 +242,7 @@ Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} -Release: %{pcmk_release}%{?dist}.3 +Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else @@ -234,7 +250,6 @@ License: GPLv2+ and LGPLv2+ License: GPLv2+ and LGPLv2+ and BSD %endif Url: https://www.clusterlabs.org/ -Group: System Environment/Daemons # Example: https://codeload.github.com/ClusterLabs/pacemaker/tar.gz/e91769e # will download pacemaker-e91769e.tar.gz @@ -248,54 +263,29 @@ Source0: https://codeload.github.com/%{github_owner}/%{name}/tar.gz/%{arch Source1: nagios-agents-metadata-%{nagios_hash}.tar.gz # upstream commits -Patch1: 001-feature-set.patch -Patch2: 002-feature-set.patch -Patch3: 003-feature-set.patch -Patch4: 004-feature-set.patch -Patch5: 005-feature-set.patch -Patch6: 006-digests.patch -Patch7: 007-feature-set.patch -Patch8: 008-digests.patch -Patch9: 009-digests.patch -Patch10: 010-feature-set.patch -Patch11: 011-feature-set.patch -Patch12: 012-feature-set.patch -Patch13: 013-feature-set.patch -Patch14: 014-feature-set.patch -Patch15: 015-feature-set.patch -Patch16: 016-feature-set.patch -Patch17: 017-feature-set.patch -Patch18: 018-rhbz1907726.patch -Patch19: 019-rhbz1371576.patch -Patch20: 020-rhbz1872376.patch -Patch21: 021-rhbz1872376.patch -Patch22: 022-rhbz1872376.patch -Patch23: 023-rhbz1872376.patch -Patch24: 024-rhbz1371576.patch -Patch25: 025-feature-set.patch -Patch26: 026-tests.patch -Patch27: 027-crm_mon.patch -Patch28: 028-crm_mon.patch -Patch29: 029-crm_mon.patch -Patch30: 030-crmadmin.patch -Patch31: 031-cibsecret.patch -Patch32: 032-rhbz1898457.patch -Patch33: 033-cibsecret.patch -Patch34: 034-crm_mon.patch -Patch35: 035-crm_mon.patch -Patch36: 036-crm_resource.patch -Patch37: 037-scheduler.patch -Patch38: 038-feature-set.patch -Patch39: 039-crm_mon.patch -Patch40: 040-crm_mon-shutdown.patch -Patch41: 041-crm_mon-shutdown.patch -Patch42: 042-unfencing-loop.patch -Patch43: 043-retry-metadata.patch -Patch44: 044-sbd.patch -Patch45: 045-controller-attribute.patch +Patch1: 001-ping-agent.patch +Patch2: 002-pacemakerd-options.patch 
+Patch3: 003-pacemakerd-output.patch +Patch4: 004-check-level.patch +Patch5: 005-crm_resource.patch +Patch6: 006-crm_simulate.patch +Patch7: 007-unfencing-loop.patch +Patch8: 008-dynamic-list-fencing.patch +Patch9: 009-crm_resource-messages.patch +Patch10: 010-probe-pending.patch +Patch11: 011-crm_attribute-regression.patch +Patch12: 012-string-arguments.patch +Patch13: 013-leaks.patch +Patch14: 014-str-list.patch +Patch15: 015-sbd.patch +Patch16: 016-cts.patch +Patch17: 017-watchdog-fixes.patch +Patch18: 018-controller.patch +Patch19: 019-crm_resource.patch +Patch20: 020-fence_watchdog.patch # downstream-only commits -Patch100: 100-default-to-syncing-with-sbd.patch +#Patch1xx: 1xx-xxxx.patch Requires: resource-agents Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} @@ -310,8 +300,10 @@ Requires: psmisc %if %{defined centos} ExclusiveArch: aarch64 i686 ppc64le s390x x86_64 %{arm} %else +%if 0%{?rhel} ExclusiveArch: aarch64 i686 ppc64le s390x x86_64 %endif +%endif Requires: %{python_path} BuildRequires: %{python_name}-devel @@ -320,18 +312,35 @@ BuildRequires: %{python_name}-devel Requires: libqb >= 0.17.0 BuildRequires: libqb-devel >= 0.17.0 -# Basics required for the build (even if usually satisfied through other BRs) -BuildRequires: coreutils findutils grep sed +# Required basic build tools +BuildRequires: autoconf +BuildRequires: automake +BuildRequires: coreutils +BuildRequires: findutils +BuildRequires: gcc +BuildRequires: grep +BuildRequires: libtool +%if %{defined pkgname_libtool_devel} +BuildRequires: %{?pkgname_libtool_devel} +%endif +BuildRequires: make +BuildRequires: pkgconfig +BuildRequires: sed # Required for core functionality -BuildRequires: automake autoconf gcc libtool pkgconfig %{?pkgname_libtool_devel} -BuildRequires: pkgconfig(glib-2.0) >= 2.16 -BuildRequires: libxml2-devel libxslt-devel libuuid-devel +BuildRequires: pkgconfig(glib-2.0) >= 2.42 +BuildRequires: libxml2-devel +BuildRequires: libxslt-devel +BuildRequires: libuuid-devel BuildRequires: %{pkgname_bzip2_devel} # Enables optional functionality -BuildRequires: ncurses-devel %{pkgname_docbook_xsl} -BuildRequires: help2man %{pkgname_gnutls_devel} pam-devel pkgconfig(dbus-1) +BuildRequires: pkgconfig(dbus-1) +BuildRequires: %{pkgname_docbook_xsl} +BuildRequires: %{pkgname_gnutls_devel} +BuildRequires: help2man +BuildRequires: ncurses-devel +BuildRequires: pam-devel %if %{systemd_native} BuildRequires: pkgconfig(systemd) @@ -347,11 +356,10 @@ BuildRequires: corosync-devel >= 2.0.0 BuildRequires: %{pkgname_glue_libs}-devel %endif -## (note no avoiding effect when building through non-customized mock) -%if !%{bleeding} %if %{with doc} -BuildRequires: inkscape asciidoc %{?pkgname_publican} -%endif +BuildRequires: asciidoc +BuildRequires: inkscape +BuildRequires: %{python_name}-sphinx %endif Provides: pcmk-cluster-manager = %{version}-%{release} @@ -375,20 +383,15 @@ when related resources fail and can be configured to periodically check resource health. 
Available rpmbuild rebuild options: - --with(out) : cibsecrets coverage doc stonithd hardening pre_release - profiling + --with(out) : cibsecrets coverage doc hardening pre_release profiling stonithd %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters -Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} -%if 0%{?supports_recommends} -#Recommends: pcmk-cluster-manager = %{version}-%{release} # For crm_report Requires: tar Requires: bzip2 -%endif Requires: perl-TimeDate Requires: %{pkgname_procps} Requires: psmisc @@ -405,13 +408,11 @@ be part of the cluster. %package -n %{pkgname_pcmk_libs} License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries -Group: System Environment/Daemons Requires(pre): %{pkgname_shadow_utils} Requires: %{name}-schemas = %{version}-%{release} # sbd 1.4.0+ supports the libpe_status API for pe_working_set_t # sbd 1.4.2+ supports startup/shutdown handshake via pacemakerd-api -# and handshake defaults to enabled with rhel-builds -# applying 100-default-to-syncing-with-sbd.patch +# and handshake defaults to enabled in this spec Conflicts: sbd < 1.4.2 %description -n %{pkgname_pcmk_libs} @@ -424,7 +425,6 @@ nodes and those just running the CLI tools. %package cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker -Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %description cluster-libs @@ -441,8 +441,7 @@ License: GPLv2+ and LGPLv2+ # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif -Summary: Pacemaker remote daemon for non-cluster nodes -Group: System Environment/Daemons +Summary: Pacemaker remote executor daemon for non-cluster nodes Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents @@ -465,14 +464,18 @@ nodes not running the full corosync/cluster stack. %package -n %{pkgname_pcmk_libs}-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package -Group: Development/Libraries Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} -Requires: libuuid-devel%{?_isa} %{?pkgname_libtool_devel_arch} -Requires: libxml2-devel%{?_isa} libxslt-devel%{?_isa} -Requires: %{pkgname_bzip2_devel}%{?_isa} glib2-devel%{?_isa} -Requires: libqb-devel%{?_isa} +Requires: %{pkgname_bzip2_devel}%{?_isa} Requires: corosync-devel >= 2.0.0 +Requires: glib2-devel%{?_isa} +Requires: libqb-devel%{?_isa} +%if %{defined pkgname_libtool_devel_arch} +Requires: %{?pkgname_libtool_devel_arch} +%endif +Requires: libuuid-devel%{?_isa} +Requires: libxml2-devel%{?_isa} +Requires: libxslt-devel%{?_isa} %description -n %{pkgname_pcmk_libs}-devel Pacemaker is an advanced, scalable High-Availability cluster resource @@ -484,7 +487,6 @@ for developing tools for Pacemaker. 
%package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker -Group: System Environment/Daemons Requires: %{python_path} Requires: %{pkgname_pcmk_libs} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} @@ -492,17 +494,11 @@ Requires: %{pkgname_procps} Requires: psmisc BuildArch: noarch -# systemd python bindings are separate package in some distros +# systemd Python bindings are a separate package in some distros %if %{defined systemd_requires} - %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Requires: %{python_name}-systemd -%else -%if 0%{?fedora} > 20 || 0%{?rhel} > 6 -Requires: systemd-python -%endif %endif - %endif %description cts @@ -511,7 +507,6 @@ Test framework for cluster-related technologies like Pacemaker %package doc License: CC-BY-SA-4.0 Summary: Documentation for Pacemaker -Group: Documentation BuildArch: noarch %description doc @@ -534,7 +529,6 @@ manager. %package nagios-plugins-metadata License: GPLv3 Summary: Pacemaker Nagios Metadata -Group: System Environment/Daemons # NOTE below are the plugins this metadata uses. # These packages are not requirements because RHEL does not ship these plugins. # This metadata provides third-party support for nagios. Users may install the @@ -559,9 +553,6 @@ monitor resources. export systemdsystemunitdir=%{?_unitdir}%{!?_unitdir:no} -# RHEL changes pacemaker's concurrent-fencing default to true -export CPPFLAGS="-DDEFAULT_CONCURRENT_FENCING_TRUE" - %if %{with hardening} # prefer distro-provided hardening flags in case they are defined # through _hardening_{c,ld}flags macros, configure script will @@ -579,16 +570,21 @@ export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %{configure} \ PYTHON=%{python_path} \ %{!?with_hardening: --disable-hardening} \ - %{!?with_legacy_links: --disable-legacy-links} \ + %{?with_legacy_links: --enable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ %{?with_cibsecrets: --with-cibsecrets} \ - %{!?with_doc: --with-brand=} \ + %{?with_sbd_sync: --with-sbd-sync-default="true"} \ %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ + %{?bug_url: --with-bug-url=%{bug_url}} \ + %{?ocf_root: --with-ocfdir=%{ocf_root}} \ + %{?concurrent_fencing} \ + %{?resource_stickiness} \ + %{?compat20} \ + --disable-static \ --with-initdir=%{_initrddir} \ --with-runstatedir=%{_rundir} \ --localstatedir=%{_var} \ - --with-bug-url=https://bugzilla.redhat.com/ \ --with-nagios \ --with-nagios-metadata-dir=%{_datadir}/pacemaker/nagios/plugins-metadata/ \ --with-nagios-plugin-dir=%{_libdir}/nagios/plugins/ \ @@ -620,10 +616,6 @@ make install \ DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \ %{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}} -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig -install -m 644 daemons/pacemakerd/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker -install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon - %if %{with upstart_job} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf @@ -640,8 +632,7 @@ done mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} %endif -# Don't package static libs -find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f +# Don't package libtool archives find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # Do not package these either @@ -807,9 +798,7 @@ exit 0 
%exclude %{_libexecdir}/pacemaker/cts-log-watcher %exclude %{_libexecdir}/pacemaker/cts-support %exclude %{_sbindir}/pacemaker-remoted -%if %{with legacy_links} %exclude %{_sbindir}/pacemaker_remoted -%endif %exclude %{_datadir}/pacemaker/nagios %{_libexecdir}/pacemaker/* @@ -835,8 +824,8 @@ exit 0 %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine -/usr/lib/ocf/resource.d/pacemaker/controld -/usr/lib/ocf/resource.d/pacemaker/remote +%{ocf_root}/resource.d/pacemaker/controld +%{ocf_root}/resource.d/pacemaker/remote %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/pacemaker.conf @@ -883,12 +872,12 @@ exit 0 # XXX "dirname" is not owned by any prerequisite %{_datadir}/snmp/mibs/PCMK-MIB.txt -%exclude /usr/lib/ocf/resource.d/pacemaker/controld -%exclude /usr/lib/ocf/resource.d/pacemaker/remote +%exclude %{ocf_root}/resource.d/pacemaker/controld +%exclude %{ocf_root}/resource.d/pacemaker/remote -%dir /usr/lib/ocf -%dir /usr/lib/ocf/resource.d -/usr/lib/ocf/resource.d/pacemaker +%dir %{ocf_root} +%dir %{ocf_root}/resource.d +%{ocf_root}/resource.d/pacemaker %doc %{_mandir}/man7/* %exclude %{_mandir}/man7/pacemaker-controld.* @@ -945,9 +934,7 @@ exit 0 %endif %{_sbindir}/pacemaker-remoted -%if %{with legacy_links} %{_sbindir}/pacemaker_remoted -%endif %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING @@ -993,25 +980,63 @@ exit 0 %license %{nagios_name}-%{nagios_hash}/COPYING %changelog -* Mon Aug 9 2021 Klaus Wenninger - 2.0.5-9.3 -- retry fence-agent metadata -- assure transient attributes of lost node are cleared -- added configurable watchdog-fencing feature -- Resolves: rhbz1992014 -- Resolves: rhbz1989622 -- Resolves: rhbz1993891 - -* Thu Jun 24 2021 Ken Gaillot - 2.0.5-9.2 +* Fri Aug 20 2021 Ken Gaillot - 2.1.0-8 +- Fix XML issue in fence_watchdog meta-data +- Resolves: rhbz1443666 + +* Thu Aug 12 2021 Ken Gaillot - 2.1.0-7 +- Fix minor issue with crm_resource error message change +- Resolves: rhbz1447918 + +* Tue Aug 10 2021 Ken Gaillot - 2.1.0-6 +- Fix watchdog agent version information +- Ensure transient attributes are cleared when multiple nodes are lost +- Resolves: rhbz1443666 +- Resolves: rhbz1986998 + +* Fri Aug 06 2021 Ken Gaillot - 2.1.0-5 +- Allow configuring specific nodes to use watchdog-only sbd for fencing +- Resolves: rhbz1443666 + +* Fri Jul 30 2021 Ken Gaillot - 2.1.0-4 +- Show better error messages in crm_resource with invalid resource types +- Avoid selecting wrong device when dynamic-list fencing is used with host map +- Do not schedule probes of unmanaged resources on pending nodes +- Fix regressions in crm_attribute and crm_master argument handling +- Resolves: rhbz1447918 +- Resolves: rhbz1978010 +- Resolves: rhbz1982453 +- Resolves: rhbz1984120 + +* Tue Jun 22 2021 Ken Gaillot - 2.1.0-3 +- crm_resource now supports XML output from resource agent actions +- Correct output for crm_simulate --show-failcounts - Avoid remote node unfencing loop -- Resolves: rhbz1972273 - -* Mon Apr 19 2021 Ken Gaillot - 2.0.5-9.1 -- Fix regression in crm_mon during cluster shutdown that affects ocf:heartbeat:pgsql agent -- Resolves: rhbz1951098 - -* Tue Mar 2 2021 Ken Gaillot - 2.0.5-9 -- Avoid pcs failures when Pacemaker records negative call ID in history -- Resolves: rhbz1931332 +- Resolves: rhbz1644628 +- Resolves: rhbz1686426 +- Resolves: rhbz1961857 + +* Wed Jun 9 2021 Ken Gaillot - 2.1.0-2 +- Rebase on upstream 2.1.0 final release +- Correct schema for crm_resource 
XML output +- Resolves: rhbz1935464 +- Resolves: rhbz1967087 + +* Thu May 20 2021 Ken Gaillot - 2.1.0-1 +- Add crm_simulate --show-attrs and --show-failcounts options +- Retry getting fence agent meta-data after initial failure +- Add debug option for more verbose ocf:pacemaker:ping logs +- Rebase on upstream 2.1.0-rc2 release +- Support OCF Resource Agent API 1.1 standard +- Fix crm_mon regression that could cause certain agents to fail at shutdown +- Allow setting OCF check level for crm_resource --validate and --force-check +- Resolves: rhbz1686426 +- Resolves: rhbz1797579 +- Resolves: rhbz1843177 +- Resolves: rhbz1935464 +- Resolves: rhbz1936696 +- Resolves: rhbz1948620 +- Resolves: rhbz1955792 * Mon Feb 15 2021 Ken Gaillot - 2.0.5-8 - Route cancellations through correct node when remote connection is moving