From 82ae2f1b652c361dadacf25dece42a43340776ee Mon Sep 17 00:00:00 2001 From: Chris Lumens Date: Thu, 11 Feb 2021 09:57:21 -0500 Subject: [PATCH 1/3] Low: tools: Rename the result of cli_resource_search. The result of cli_resource_search is a list of nodes, not a list of resources. Change the variable name appropriately. --- tools/crm_resource.c | 4 ++-- tools/crm_resource_runtime.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 564600e..78b2246 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1873,8 +1873,8 @@ main(int argc, char **argv) break; case cmd_locate: { - GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set); - rc = out->message(out, "resource-search-list", resources, rsc, options.rsc_id); + GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set); + rc = out->message(out, "resource-search-list", nodes, rsc, options.rsc_id); break; } diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index b6e4df1..adfdfba 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -1780,8 +1780,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, action = rsc_action+6; if(pe_rsc_is_clone(rsc)) { - GListPtr rscs = cli_resource_search(out, rsc, requested_name, data_set); - if(rscs != NULL && force == FALSE) { + GListPtr nodes = cli_resource_search(out, rsc, requested_name, data_set); + if(nodes != NULL && force == FALSE) { out->err(out, "It is not safe to %s %s here: the cluster claims it is already active", action, rsc->id); out->err(out, "Try setting target-role=Stopped first or specifying " -- 1.8.3.1 From e8b320aaaabdd60b7ac851e5b70a2a1b3c2180a3 Mon Sep 17 00:00:00 2001 From: Chris Lumens Date: Thu, 11 Feb 2021 11:07:07 -0500 Subject: [PATCH 2/3] Test: cts: Add a test for a promotable clone resource. Note that for the moment, the crm_resource output in regression.tools.exp is incorrect. There's a bug in that tool, but I wanted to get a test case working before fixing it. 
--- cts/cli/crm_mon.xml | 32 +++- cts/cli/regression.crm_mon.exp | 401 +++++++++++++++++++++++++++++------------ cts/cli/regression.tools.exp | 18 ++ cts/cts-cli.in | 20 ++ 4 files changed, 357 insertions(+), 114 deletions(-) diff --git a/cts/cli/crm_mon.xml b/cts/cli/crm_mon.xml index d8d5d35..f0f14fd 100644 --- a/cts/cli/crm_mon.xml +++ b/cts/cli/crm_mon.xml @@ -1,4 +1,4 @@ - + @@ -99,9 +99,25 @@ + + + + + + + + + + + + + + + + @@ -153,6 +169,13 @@ + + + + + + + @@ -170,7 +193,7 @@ - + @@ -185,6 +208,11 @@ + + + + + diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp index dd20116..c223b7f 100644 --- a/cts/cli/regression.crm_mon.exp +++ b/cts/cli/regression.crm_mon.exp @@ -5,7 +5,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -20,6 +20,9 @@ Active Resources: * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * Masters: [ cluster02 ] + * Slaves: [ cluster01 ] =#=#=#= End test: Basic text output - OK (0) =#=#=#= * Passed: crm_mon - Basic text output =#=#=#= Begin test: XML output =#=#=#= @@ -30,12 +33,12 @@ Active Resources: - + - - + + @@ -112,6 +115,17 @@ Active Resources: + + + + + + + + + + + @@ -142,6 +156,12 @@ Active Resources: + + + + + + @@ -150,7 +170,7 @@ Active Resources: - + @@ -159,6 +179,10 @@ Active Resources: + + + + @@ -175,7 +199,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Active Resources: * Clone Set: ping-clone [ping]: @@ -187,6 +211,9 @@ Active Resources: * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * Masters: [ cluster02 ] + * Slaves: [ cluster01 ] =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#= * Passed: crm_mon - Basic text output without node section =#=#=#= Begin test: XML output without the node section =#=#=#= @@ -197,7 +224,7 @@ Active Resources: - + @@ -272,6 +299,17 @@ Active Resources: + + + + + + + + + + + @@ -302,6 +340,12 @@ Active Resources: + + + + + + @@ -310,7 +354,7 @@ Active Resources: - + @@ -319,6 +363,10 @@ Active Resources: + + + + @@ -340,7 +388,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -355,6 +403,9 @@ Active Resources: * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * Masters: [ cluster02 ] + * Slaves: [ cluster01 ] Node Attributes: * Node: cluster01: @@ -378,18 +429,26 @@ Operations: * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (4) monitor: interval="10000ms" + * (5) cancel: interval="10000ms" + * (6) promote + * (7) monitor: interval="5000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start - * (19) monitor: 
interval="60000ms" + * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (2) start + * (4) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 @@ -402,7 +461,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] @@ -427,6 +486,12 @@ Active Resources: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:4: * mysql-proxy (lsb:mysql-proxy): Stopped + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * promotable-rsc (ocf::pacemaker:Stateful): Master cluster02 + * promotable-rsc (ocf::pacemaker:Stateful): Slave cluster01 + * promotable-rsc (ocf::pacemaker:Stateful): Stopped + * promotable-rsc (ocf::pacemaker:Stateful): Stopped + * promotable-rsc (ocf::pacemaker:Stateful): Stopped Node Attributes: * Node: cluster01 (1): @@ -450,18 +515,26 @@ Operations: * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (4) monitor: interval="10000ms" + * (5) cancel: interval="10000ms" + * (6) promote + * (7) monitor: interval="5000ms" * Node: cluster01 (1): * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start - * (19) monitor: interval="60000ms" + * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (2) start + * (4) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 (1) @@ -474,7 +547,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -489,6 +562,9 @@ Active Resources: * 1/1 (ocf::heartbeat:IPaddr): Active cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * Masters: [ cluster02 ] + * Slaves: [ cluster01 ] Node Attributes: * Node: cluster01: @@ -512,18 +588,26 @@ Operations: * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (4) monitor: interval="10000ms" + * (5) cancel: interval="10000ms" + * (6) promote + * (7) monitor: interval="5000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start - * (19) monitor: interval="60000ms" + * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (2) start + * (4) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 @@ -536,7 +620,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource 
instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01: online: @@ -544,6 +628,7 @@ Node List: * ping (ocf::pacemaker:ping): Started * Fencing (stonith:fence_xvm): Started * mysql-proxy (lsb:mysql-proxy): Started + * promotable-rsc (ocf::pacemaker:Stateful): Slave * Node cluster02: online: * Resources: * ping (ocf::pacemaker:ping): Started @@ -551,6 +636,7 @@ Node List: * Public-IP (ocf::heartbeat:IPaddr): Started * Email (lsb:exim): Started * mysql-proxy (lsb:mysql-proxy): Started + * promotable-rsc (ocf::pacemaker:Stateful): Master * GuestNode httpd-bundle-0@: OFFLINE: * Resources: * GuestNode httpd-bundle-1@: OFFLINE: @@ -580,18 +666,26 @@ Operations: * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (4) monitor: interval="10000ms" + * (5) cancel: interval="10000ms" + * (6) promote + * (7) monitor: interval="5000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start - * (19) monitor: interval="60000ms" + * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (2) start + * (4) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 @@ -604,12 +698,13 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01: online: * Resources: * 1 (lsb:mysql-proxy): Active + * 1 (ocf::pacemaker:Stateful): Active * 1 (ocf::pacemaker:ping): Active * 1 (stonith:fence_xvm): Active * Node cluster02: online: @@ -618,6 +713,7 @@ Node List: * 1 (lsb:mysql-proxy): Active * 1 (ocf::heartbeat:IPaddr): Active * 1 (ocf::pacemaker:Dummy): Active + * 1 (ocf::pacemaker:Stateful): Active * 1 (ocf::pacemaker:ping): Active Node Attributes: @@ -642,18 +738,26 @@ Operations: * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (4) monitor: interval="10000ms" + * (5) cancel: interval="10000ms" + * (6) promote + * (7) monitor: interval="5000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start - * (19) monitor: interval="60000ms" + * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (2) start + * (4) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 @@ -667,11 +771,11 @@ Negative Location Constraints: - + - + @@ -681,8 +785,11 @@ Negative Location Constraints: + + + - + @@ -698,6 +805,9 @@ Negative Location Constraints: + + + @@ -753,6 +863,17 @@ Negative Location Constraints: + + + + + + + + + + + @@ -783,6 +904,12 @@ Negative Location Constraints: + + + + + + @@ -791,7 +918,7 @@ Negative Location Constraints: - + @@ -800,6 +927,10 @@ Negative Location Constraints: + + + + @@ -816,7 +947,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 
resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 ] @@ -827,6 +958,8 @@ Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * Slaves: [ cluster01 ] Node Attributes: * Node: cluster01: @@ -840,12 +973,15 @@ Operations: * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start - * (19) monitor: interval="60000ms" + * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (2) start + * (4) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 @@ -859,11 +995,11 @@ Negative Location Constraints: - + - + @@ -918,6 +1054,14 @@ Negative Location Constraints: + + + + + + + + @@ -933,7 +1077,7 @@ Negative Location Constraints: - + @@ -942,6 +1086,10 @@ Negative Location Constraints: + + + + @@ -958,7 +1106,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster02 ] @@ -972,6 +1120,8 @@ Active Resources: * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * Masters: [ cluster02 ] Node Attributes: * Node: cluster02: @@ -992,6 +1142,11 @@ Operations: * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" + * promotable-rsc: migration-threshold=1000000: + * (4) monitor: interval="10000ms" + * (5) cancel: interval="10000ms" + * (6) promote + * (7) monitor: interval="5000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 @@ -1005,11 +1160,11 @@ Negative Location Constraints: - + - + @@ -1072,6 +1227,14 @@ Negative Location Constraints: + + + + + + + + @@ -1098,6 +1261,12 @@ Negative Location Constraints: + + + + + + @@ -1114,7 +1283,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1133,7 +1302,7 @@ Operations: * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start - * (19) monitor: interval="60000ms" + * (20) monitor: interval="60000ms" =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by resource tag =#=#=#= Begin test: XML output filtered by resource tag =#=#=#= @@ -1144,12 +1313,12 @@ Operations: - + - - + + @@ -1172,7 +1341,7 @@ Operations: - + @@ -1187,7 +1356,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Active Resources: * No active resources @@ -1201,7 +1370,7 @@ Active Resources: - + @@ -1249,7 +1418,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1273,6 +1442,9 @@ Full List of Resources: * Email 
(lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * Masters: [ cluster02 ] + * Slaves: [ cluster01 ] =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#= @@ -1282,7 +1454,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster02 ] @@ -1305,6 +1477,8 @@ Full List of Resources: * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * Masters: [ cluster02 ] =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by node =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#= @@ -1314,7 +1488,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1333,7 +1507,7 @@ Operations: * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start - * (19) monitor: interval="60000ms" + * (20) monitor: interval="60000ms" =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by primitive resource =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#= @@ -1344,12 +1518,12 @@ Operations: - + - - + + @@ -1372,7 +1546,7 @@ Operations: - + @@ -1387,7 +1561,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1420,12 +1594,12 @@ Operations: - + - - + + @@ -1470,7 +1644,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1500,12 +1674,12 @@ Operations: - + - - + + @@ -1544,7 +1718,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1579,12 +1753,12 @@ Operations: - + - - + + @@ -1633,7 +1807,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1668,12 +1842,12 @@ Operations: - + - - + + @@ -1722,7 +1896,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] @@ -1757,12 +1931,12 @@ Operations: - + - - + + @@ -1808,7 +1982,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ 
cluster01 cluster02 ] @@ -1825,12 +1999,12 @@ Active Resources: - + - - + + @@ -1857,7 +2031,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1877,7 +2051,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1897,12 +2071,12 @@ Full List of Resources: - + - - + + @@ -1950,7 +2124,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -1969,12 +2143,12 @@ Full List of Resources: - + - - + + @@ -2007,7 +2181,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -2026,12 +2200,12 @@ Full List of Resources: - + - - + + @@ -2064,7 +2238,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -2083,12 +2257,12 @@ Full List of Resources: - + - - + + @@ -2121,7 +2295,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] @@ -2144,12 +2318,12 @@ Full List of Resources: - + - - + + @@ -2188,7 +2362,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] @@ -2232,12 +2406,12 @@ Operations: - + - - + + @@ -2279,7 +2453,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] @@ -2323,12 +2497,12 @@ Operations: - + - - + + @@ -2390,7 +2564,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] @@ -2426,12 +2600,12 @@ Operations: - + - - + + @@ -2479,7 +2653,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] @@ -2523,12 +2697,12 @@ Operations: - + - - + + @@ -2590,7 +2764,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] @@ -2626,12 +2800,12 @@ Operations: - + - - + + @@ -3083,7 +3257,7 @@ Cluster Summary: * Last updated: * Last change: * 5 nodes configured - * 27 resource instances configured (4 DISABLED) + * 32 resource instances configured (4 DISABLED) *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services @@ 
-3114,5 +3288,8 @@ Full List of Resources: * mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged) * Resource Group: mysql-group:1 (unmanaged): * mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged) + * Clone Set: promotable-clone [promotable-rsc] (promotable) (unmanaged): + * promotable-rsc (ocf::pacemaker:Stateful): Master cluster02 (unmanaged) + * promotable-rsc (ocf::pacemaker:Stateful): Slave cluster01 (unmanaged) =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#= * Passed: crm_mon - Text output of all resources with maintenance-mode enabled diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp index 1afe596..708c340 100644 --- a/cts/cli/regression.tools.exp +++ b/cts/cli/regression.tools.exp @@ -4077,3 +4077,21 @@ Resources colocated with clone: 5 =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#= * Passed: crmadmin - List guest,remote nodes +=#=#=#= Begin test: List a promotable clone resource =#=#=#= +resource promotable-clone is running on: cluster02 +resource promotable-clone is running on: cluster01 +=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#= +* Passed: crm_resource - List a promotable clone resource +=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#= +resource promotable-rsc is running on: cluster02 Master +resource promotable-rsc is running on: cluster01 Master +=#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#= +* Passed: crm_resource - List the primitive of a promotable clone resource +=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#= +resource promotable-rsc:0 is running on: cluster02 Master +=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#= +* Passed: crm_resource - List a single instance of a promotable clone resource +=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#= +resource promotable-rsc:1 is running on: cluster01 +=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#= +* Passed: crm_resource - List another instance of a promotable clone resource diff --git a/cts/cts-cli.in b/cts/cts-cli.in index 8e2dbe5..6f7eb80 100755 --- a/cts/cts-cli.in +++ b/cts/cts-cli.in @@ -831,6 +831,26 @@ function test_tools() { test_assert $CRM_EX_OK 0 unset CIB_file + + export CIB_file="$test_home/cli/crm_mon.xml" + + desc="List a promotable clone resource" + cmd="crm_resource --locate -r promotable-clone" + test_assert $CRM_EX_OK 0 + + desc="List the primitive of a promotable clone resource" + cmd="crm_resource --locate -r promotable-rsc" + test_assert $CRM_EX_OK 0 + + desc="List a single instance of a promotable clone resource" + cmd="crm_resource --locate -r promotable-rsc:0" + test_assert $CRM_EX_OK 0 + + desc="List another instance of a promotable clone resource" + cmd="crm_resource --locate -r promotable-rsc:1" + test_assert $CRM_EX_OK 0 + + unset CIB_file } INVALID_PERIODS=( -- 1.8.3.1 From d1bb0758726c09fd78efbc30c7eb46559e9c10e2 Mon Sep 17 00:00:00 2001 From: Chris Lumens Date: Thu, 11 Feb 2021 15:09:54 -0500 Subject: [PATCH 3/3] Fix: Correct output of "crm_resource --locate" in case of clones. For non-clone resources, the rsc parameter passed to resource_search_list_* is accurate - it is the resource object for the name given on the command line. For clone resources, this parameter is incorrect. 
It will be a single instance of the clone resource, no matter which instance might have been asked for on the command line. This typically doesn't matter, but results in incorrect output when promotable clones are searched for. For promotable clones, the "Master" text may not appear for any of the instances. This is because the single instance passed in as the rsc parameter might not be the master, but each iteration through the loop will use that same parameter. The fix is to change cli_resource_search to return a list of node/promoted pairs so we already have all the information we need. Printing is then a simple matter of walking that list. The referenced bug has a much better explanation of the cause of the problem. See: rhbz#1925681 --- cts/cli/regression.tools.exp | 4 ++-- tools/crm_resource.c | 3 ++- tools/crm_resource.h | 7 +++++- tools/crm_resource_print.c | 23 +++++++------------- tools/crm_resource_runtime.c | 51 +++++++++++++++++++++++++++++++------------- 5 files changed, 54 insertions(+), 34 deletions(-) diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp index 708c340..b3f16fa 100644 --- a/cts/cli/regression.tools.exp +++ b/cts/cli/regression.tools.exp @@ -4078,13 +4078,13 @@ Resources colocated with clone: =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#= * Passed: crmadmin - List guest,remote nodes =#=#=#= Begin test: List a promotable clone resource =#=#=#= -resource promotable-clone is running on: cluster02 resource promotable-clone is running on: cluster01 +resource promotable-clone is running on: cluster02 Master =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#= +resource promotable-rsc is running on: cluster01 resource promotable-rsc is running on: cluster02 Master -resource promotable-rsc is running on: cluster01 Master =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#= diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 78b2246..7d2f0f6 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1874,7 +1874,8 @@ main(int argc, char **argv) case cmd_locate: { GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set); - rc = out->message(out, "resource-search-list", nodes, rsc, options.rsc_id); + rc = out->message(out, "resource-search-list", nodes, options.rsc_id); + g_list_free_full(nodes, free); break; } diff --git a/tools/crm_resource.h b/tools/crm_resource.h index 5bfadb7..777490a 100644 --- a/tools/crm_resource.h +++ b/tools/crm_resource.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2020 the Pacemaker project contributors + * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details.
* @@ -23,6 +23,11 @@ #include #include +typedef struct node_info_s { + const char *node_name; + bool promoted; +} node_info_t; + enum resource_check_flags { rsc_remain_stopped = (1 << 0), rsc_unpromotable = (1 << 1), diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c index 398fef0..053f806 100644 --- a/tools/crm_resource_print.c +++ b/tools/crm_resource_print.c @@ -276,12 +276,11 @@ resource_check_list_xml(pcmk__output_t *out, va_list args) { return rc; } -PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *") +PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "gchar *") static int resource_search_list_default(pcmk__output_t *out, va_list args) { GList *nodes = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); gchar *requested_name = va_arg(args, gchar *); bool printed = false; @@ -293,7 +292,7 @@ resource_search_list_default(pcmk__output_t *out, va_list args) } for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) { - pe_node_t *node = (pe_node_t *) lpc->data; + node_info_t *ni = (node_info_t *) lpc->data; if (!printed) { out->begin_list(out, NULL, NULL, "Nodes"); @@ -302,15 +301,10 @@ resource_search_list_default(pcmk__output_t *out, va_list args) } if (out->is_quiet(out)) { - out->list_item(out, "node", "%s", node->details->uname); + out->list_item(out, "node", "%s", ni->node_name); } else { - const char *state = ""; - - if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { - state = " Master"; - } out->list_item(out, "node", "resource %s is running on: %s%s", - requested_name, node->details->uname, state); + requested_name, ni->node_name, ni->promoted ? " Master" : ""); } } @@ -321,12 +315,11 @@ resource_search_list_default(pcmk__output_t *out, va_list args) return rc; } -PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *") +PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "gchar *") static int resource_search_list_xml(pcmk__output_t *out, va_list args) { GList *nodes = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); gchar *requested_name = va_arg(args, gchar *); pcmk__output_xml_create_parent(out, "nodes", @@ -334,10 +327,10 @@ resource_search_list_xml(pcmk__output_t *out, va_list args) NULL); for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) { - pe_node_t *node = (pe_node_t *) lpc->data; - xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname); + node_info_t *ni = (node_info_t *) lpc->data; + xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", ni->node_name); - if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) { + if (ni->promoted) { crm_xml_add(sub_node, "state", "promoted"); } } diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index adfdfba..1769042 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -41,20 +41,37 @@ cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed) return rc; } +static GListPtr +build_node_info_list(pe_resource_t *rsc) +{ + GListPtr retval = NULL; + + for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { + pe_resource_t *child = (pe_resource_t *) iter->data; + + for (GListPtr iter2 = child->running_on; iter2 != NULL; iter2 = iter2->next) { + pe_node_t *node = (pe_node_t *) iter2->data; + node_info_t *ni = calloc(1, sizeof(node_info_t)); + ni->node_name = node->details->uname; + ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) 
&& + child->fns->state(child, TRUE) == RSC_ROLE_MASTER; + + retval = g_list_prepend(retval, ni); + } + } + + return retval; +} + GListPtr cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, pe_working_set_t *data_set) { - GListPtr found = NULL; + GListPtr retval = NULL; pe_resource_t *parent = uber_parent(rsc); if (pe_rsc_is_clone(rsc)) { - for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { - GListPtr extra = ((pe_resource_t *) iter->data)->running_on; - if (extra != NULL) { - found = g_list_concat(found, extra); - } - } + retval = build_node_info_list(rsc); /* The anonymous clone children's common ID is supplied */ } else if (pe_rsc_is_clone(parent) @@ -63,18 +80,20 @@ cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *request && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei) && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) { - for (GListPtr iter = parent->children; iter; iter = iter->next) { - GListPtr extra = ((pe_resource_t *) iter->data)->running_on; - if (extra != NULL) { - found = g_list_concat(found, extra); - } - } + retval = build_node_info_list(parent); } else if (rsc->running_on != NULL) { - found = g_list_concat(found, rsc->running_on); + for (GListPtr iter = rsc->running_on; iter != NULL; iter = iter->next) { + pe_node_t *node = (pe_node_t *) iter->data; + node_info_t *ni = calloc(1, sizeof(node_info_t)); + ni->node_name = node->details->uname; + ni->promoted = rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER; + + retval = g_list_prepend(retval, ni); + } } - return found; + return retval; } #define XPATH_MAX 1024 @@ -1788,6 +1807,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, "the force option"); return CRM_EX_UNSAFE; } + + g_list_free_full(nodes, free); } } else { -- 1.8.3.1
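
A minimal standalone sketch (not part of the patch series) of the node_info_t pattern that the third patch introduces, for readers who want to experiment with it outside the Pacemaker tree. It compiles against GLib alone; sample_search_results() and the node names are made up stand-ins for cli_resource_search(), while node_info_t, the " Master" suffix, and the g_list_free_full(nodes, free) cleanup mirror the patch. Because node_name is a borrowed pointer (in the patch it aliases node->details->uname), freeing each struct with plain free() is sufficient.

/* node_info_sketch.c - illustrative only; names mirror the patch,
 * sample data is invented. */
#include <glib.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct node_info_s {
    const char *node_name;  /* borrowed pointer, not owned by the struct */
    bool promoted;
} node_info_t;

static GList *
sample_search_results(void)
{
    /* Stand-in for cli_resource_search(): one entry per running
     * instance, each carrying the node name and promotion state. */
    static const struct { const char *name; bool promoted; } data[] = {
        { "cluster01", false },
        { "cluster02", true },
    };
    GList *retval = NULL;

    for (size_t i = 0; i < G_N_ELEMENTS(data); i++) {
        node_info_t *ni = calloc(1, sizeof(node_info_t));

        ni->node_name = data[i].name;
        ni->promoted = data[i].promoted;
        retval = g_list_prepend(retval, ni);
    }
    return retval;
}

int
main(void)
{
    GList *nodes = sample_search_results();

    /* Printing no longer needs the rsc parameter: the promotion state
     * was recorded per instance when the list was built. */
    for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
        node_info_t *ni = lpc->data;

        printf("resource promotable-rsc is running on: %s%s\n",
               ni->node_name, ni->promoted ? " Master" : "");
    }

    /* node_name is borrowed, so freeing each struct is all the cleanup
     * required -- the same reason the patch can use
     * g_list_free_full(nodes, free). */
    g_list_free_full(nodes, free);
    return 0;
}

Build with something like: gcc -o node_info_sketch node_info_sketch.c $(pkg-config --cflags --libs glib-2.0)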