diff --git a/attrd/commands.c b/attrd/commands.c
index 18c0523..c6586c7 100644
--- a/attrd/commands.c
+++ b/attrd/commands.c
@@ -832,7 +832,6 @@ attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *u
}
}
done:
- free(name);
if(a && a->changed && election_state(writer) == election_won) {
write_attribute(a);
}
@@ -1019,8 +1018,10 @@ write_attribute(attribute_t *a)
crm_info("Sent update %d with %d changes for %s, id=%s, set=%s",
a->update, cib_updates, a->id, (a->uuid? a->uuid : "<n/a>"), a->set);
- the_cib->cmds->register_callback(
- the_cib, a->update, 120, FALSE, strdup(a->id), "attrd_cib_callback", attrd_cib_callback);
+ the_cib->cmds->register_callback_full(the_cib, a->update, 120, FALSE,
+ strdup(a->id),
+ "attrd_cib_callback",
+ attrd_cib_callback, free);
}
free_xml(xml_top);
}
diff --git a/attrd/legacy.c b/attrd/legacy.c
index 4aae4c4..8a18c38 100644
--- a/attrd/legacy.c
+++ b/attrd/legacy.c
@@ -635,6 +635,20 @@ struct attrd_callback_s {
char *value;
};
+/*!
+ * \internal
+ * \brief Free an attrd callback structure
+ */
+static void
+free_attrd_callback(void *user_data)
+{
+ struct attrd_callback_s *data = user_data;
+
+ free(data->attr);
+ free(data->value);
+ free(data);
+}
+
static void
attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
@@ -646,7 +660,7 @@ attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *u
} else if (call_id < 0) {
crm_warn("Update %s=%s failed: %s", data->attr, data->value, pcmk_strerror(call_id));
- goto cleanup;
+ return;
}
switch (rc) {
@@ -674,10 +688,6 @@ attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *u
crm_err("Update %d for %s=%s failed: %s",
call_id, data->attr, data->value, pcmk_strerror(rc));
}
- cleanup:
- free(data->value);
- free(data->attr);
- free(data);
}
void
@@ -749,8 +759,10 @@ attrd_perform_update(attr_hash_entry_t * hash_entry)
if (hash_entry->value != NULL) {
data->value = strdup(hash_entry->value);
}
- cib_conn->cmds->register_callback(cib_conn, rc, 120, FALSE, data, "attrd_cib_callback",
- attrd_cib_callback);
+ cib_conn->cmds->register_callback_full(cib_conn, rc, 120, FALSE, data,
+ "attrd_cib_callback",
+ attrd_cib_callback,
+ free_attrd_callback);
return;
}
diff --git a/bumplibs.sh b/bumplibs.sh
index 68f2f58..2044efa 100755
--- a/bumplibs.sh
+++ b/bumplibs.sh
@@ -3,6 +3,7 @@
declare -A headers
headers[crmcommon]="include/crm/common include/crm/crm.h"
headers[crmcluster]="include/crm/cluster.h"
+headers[crmservice]="include/crm/services.h"
headers[transitioner]="include/crm/transition.h"
headers[cib]="include/crm/cib.h include/crm/cib/util.h"
headers[pe_rules]="include/crm/pengine/rules.h"
@@ -11,8 +12,17 @@ headers[pengine]="include/crm/pengine/common.h include/crm/pengine/complex.h i
headers[stonithd]="include/crm/stonith-ng.h"
headers[lrmd]="include/crm/lrmd.h"
-LAST_RELEASE=`test -e /Volumes || git tag -l | grep Pacemaker | grep -v rc | sort -Vr | head -n 1`
-for lib in crmcommon crmcluster transitioner cib pe_rules pe_status stonithd pengine lrmd; do
+if [ -n "$1" ]; then
+ LAST_RELEASE=$1
+else
+ LAST_RELEASE=`test -e /Volumes || git tag -l | grep Pacemaker | grep -v rc | sort -Vr | head -n 1`
+fi
+libs=$(find . -name "*.am" -exec grep "lib.*_la_LDFLAGS.*version-info" \{\} \; | sed -e s/_la_LDFLAGS.*// -e s/^lib//)
+for lib in $libs; do
+ if [ -z "${headers[$lib]}" ]; then
+ echo "Unknown headers for lib$lib"
+ exit 0
+ fi
git diff -w $LAST_RELEASE..HEAD ${headers[$lib]}
echo ""
@@ -27,6 +37,7 @@ for lib in crmcommon crmcluster transitioner cib pe_rules pe_status stonithd pen
fi
sources=`grep "lib${lib}_la_SOURCES" $am | sed s/.*=// | sed 's:$(top_builddir)/::' | sed 's:$(top_srcdir)/::' | sed 's:\\\::' | sed 's:$(libpe_rules_la_SOURCES):rules.c\ common.c:'`
+
full_sources=""
for f in $sources; do
if
@@ -48,6 +59,11 @@ for lib in crmcommon crmcluster transitioner cib pe_rules pe_status stonithd pen
echo ""
echo "New arguments to functions or changes to the middle of structs are incompatible additions"
echo ""
+ echo "Where possible:"
+ echo "- move new fields to the end of structs"
+ echo "- use bitfields instead of booleans"
+ echo "- when adding arguments, create new functions that the old version can call"
+ echo ""
read -p "Are the changes to lib$lib: [a]dditions, [i]ncompatible additions, [r]emovals or [f]ixes? [None]: " CHANGE
git show $LAST_RELEASE:$am | grep version-info
diff --git a/cib/callbacks.c b/cib/callbacks.c
index 1452ded..28844b8 100644
--- a/cib/callbacks.c
+++ b/cib/callbacks.c
@@ -1570,7 +1570,7 @@ static gboolean
cib_force_exit(gpointer data)
{
crm_notice("Forcing exit!");
- terminate_cib(__FUNCTION__, TRUE);
+ terminate_cib(__FUNCTION__, -1);
return FALSE;
}
@@ -1656,7 +1656,7 @@ initiate_exit(void)
active = crm_active_peers();
if (active < 2) {
- terminate_cib(__FUNCTION__, FALSE);
+ terminate_cib(__FUNCTION__, 0);
return;
}
@@ -1675,9 +1675,19 @@ initiate_exit(void)
extern int remote_fd;
extern int remote_tls_fd;
+/*!
+ * \internal
+ * \brief Close remote sockets, free the global CIB and quit
+ *
+ * \param[in] caller Name of calling function (for log message)
+ * \param[in] fast If 1, skip disconnect and let main-loop exit clean up; if -1, exit immediately with error status
+ */
void
-terminate_cib(const char *caller, gboolean fast)
+terminate_cib(const char *caller, int fast)
{
+ crm_info("%s: Exiting%s...", caller,
+ (fast < 0)? " fast" : mainloop ? " from mainloop" : "");
+
if (remote_fd > 0) {
close(remote_fd);
remote_fd = 0;
@@ -1687,27 +1697,29 @@ terminate_cib(const char *caller, gboolean fast)
remote_tls_fd = 0;
}
- if (!fast) {
- crm_info("%s: Disconnecting from cluster infrastructure", caller);
- crm_cluster_disconnect(&crm_cluster);
- }
-
uninitializeCib();
- crm_info("%s: Exiting%s...", caller, fast ? " fast" : mainloop ? " from mainloop" : "");
+ if (fast < 0) {
+ /* Quit fast on error */
+ cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm);
+ crm_exit(EINVAL);
- if (fast == FALSE && mainloop != NULL && g_main_is_running(mainloop)) {
+ } else if ((mainloop != NULL) && g_main_is_running(mainloop)) {
+ /* Quit via returning from the main loop. If fast == 1, we skip the
+ * disconnect here, and it will be done when the main loop returns
+ * (this allows the peer status callback to avoid messing with the
+ * peer caches).
+ */
+ if (fast == 0) {
+ crm_cluster_disconnect(&crm_cluster);
+ }
g_main_quit(mainloop);
} else {
- qb_ipcs_destroy(ipcs_ro);
- qb_ipcs_destroy(ipcs_rw);
- qb_ipcs_destroy(ipcs_shm);
-
- if (fast) {
- crm_exit(EINVAL);
- } else {
- crm_exit(pcmk_ok);
- }
+ /* Quit via clean exit. Even the peer status callback can disconnect
+ * here, because we're not returning control to the caller. */
+ crm_cluster_disconnect(&crm_cluster);
+ cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm);
+ crm_exit(pcmk_ok);
}
}
diff --git a/cib/callbacks.h b/cib/callbacks.h
index bca9992..a49428e 100644
--- a/cib/callbacks.h
+++ b/cib/callbacks.h
@@ -71,7 +71,7 @@ extern void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op
void cib_shutdown(int nsig);
void initiate_exit(void);
-void terminate_cib(const char *caller, gboolean fast);
+void terminate_cib(const char *caller, int fast);
extern gboolean cib_legacy_mode(void);
diff --git a/cib/main.c b/cib/main.c
index e20a2b6..cbaf7b5 100644
--- a/cib/main.c
+++ b/cib/main.c
@@ -71,8 +71,6 @@ gboolean cib_register_ha(ll_cluster_t * hb_cluster, const char *client_name);
void *hb_conn = NULL;
#endif
-extern void terminate_cib(const char *caller, gboolean fast);
-
GMainLoop *mainloop = NULL;
const char *cib_root = NULL;
char *cib_our_uname = NULL;
@@ -414,7 +412,7 @@ cib_cs_destroy(gpointer user_data)
crm_info("Corosync disconnection complete");
} else {
crm_err("Corosync connection lost! Exiting.");
- terminate_cib(__FUNCTION__, TRUE);
+ terminate_cib(__FUNCTION__, -1);
}
}
#endif
@@ -422,30 +420,29 @@ cib_cs_destroy(gpointer user_data)
static void
cib_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
{
- if ((type == crm_status_processes) && legacy_mode
- && is_not_set(node->processes, crm_get_cluster_proc())) {
- uint32_t old = 0;
-
- if (data) {
- old = *(const uint32_t *)data;
- }
+ switch (type) {
+ case crm_status_processes:
+ if (legacy_mode && is_not_set(node->processes, crm_get_cluster_proc())) {
+ uint32_t old = data? *(const uint32_t *)data : 0;
+
+ if ((node->processes ^ old) & crm_proc_cpg) {
+ crm_info("Attempting to disable legacy mode after %s left the cluster",
+ node->uname);
+ legacy_mode = FALSE;
+ }
+ }
+ break;
- if ((node->processes ^ old) & crm_proc_cpg) {
- crm_info("Attempting to disable legacy mode after %s left the cluster", node->uname);
- legacy_mode = FALSE;
- }
- }
+ case crm_status_uname:
+ case crm_status_rstate:
+ case crm_status_nstate:
+ if (cib_shutdown_flag && (crm_active_peers() < 2)
+ && crm_hash_table_size(client_connections) == 0) {
- if (cib_shutdown_flag && crm_active_peers() < 2 && crm_hash_table_size(client_connections) == 0) {
- crm_info("No more peers");
- /* @TODO
- * terminate_cib() calls crm_cluster_disconnect() which calls
- * crm_peer_destroy() which destroys the peer caches, which a peer
- * status callback shouldn't do. For now, there is a workaround in
- * crm_update_peer_proc(), but CIB should be refactored to avoid
- * destroying the peer caches here.
- */
- terminate_cib(__FUNCTION__, FALSE);
+ crm_info("No more peers");
+ terminate_cib(__FUNCTION__, 1);
+ }
+ break;
}
}
@@ -455,10 +452,10 @@ cib_ha_connection_destroy(gpointer user_data)
{
if (cib_shutdown_flag) {
crm_info("Heartbeat disconnection complete... exiting");
- terminate_cib(__FUNCTION__, FALSE);
+ terminate_cib(__FUNCTION__, 0);
} else {
crm_err("Heartbeat connection lost! Exiting.");
- terminate_cib(__FUNCTION__, TRUE);
+ terminate_cib(__FUNCTION__, -1);
}
}
#endif
@@ -541,8 +538,12 @@ cib_init(void)
/* Create the mainloop and run it... */
mainloop = g_main_new(FALSE);
crm_info("Starting %s mainloop", crm_system_name);
-
g_main_run(mainloop);
+
+ /* If main loop returned, clean up and exit. We disconnect in case
+ * terminate_cib() was called with fast=1.
+ */
+ crm_cluster_disconnect(&crm_cluster);
cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm);
return crm_exit(pcmk_ok);
diff --git a/cib/messages.c b/cib/messages.c
index 363562c..eca63b9 100644
--- a/cib/messages.c
+++ b/cib/messages.c
@@ -87,7 +87,7 @@ cib_process_shutdown_req(const char *op, int options, const char *section, xmlNo
} else if (cib_shutdown_flag) {
crm_info("Shutdown ACK from %s", host);
- terminate_cib(__FUNCTION__, FALSE);
+ terminate_cib(__FUNCTION__, 0);
return pcmk_ok;
} else {
diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h
index 78ccad2..78214bf 100644
--- a/crmd/crmd_utils.h
+++ b/crmd/crmd_utils.h
@@ -102,11 +102,14 @@ gboolean too_many_st_failures(void);
void st_fail_count_reset(const char * target);
void crmd_peer_down(crm_node_t *peer, bool full);
+/* Convenience macro for registering a CIB callback
+ * (assumes that data can be freed with free())
+ */
# define fsa_register_cib_callback(id, flag, data, fn) do { \
CRM_ASSERT(fsa_cib_conn); \
- fsa_cib_conn->cmds->register_callback( \
+ fsa_cib_conn->cmds->register_callback_full( \
fsa_cib_conn, id, 10 * (1 + crm_active_peers()), \
- flag, data, #fn, fn); \
+ flag, data, #fn, fn, free); \
} while(0)
# define start_transition(state) do { \
diff --git a/crmd/join_client.c b/crmd/join_client.c
index 286cd92..65e3bed 100644
--- a/crmd/join_client.c
+++ b/crmd/join_client.c
@@ -116,8 +116,8 @@ do_cl_join_offer_respond(long long action,
/* we only ever want the last one */
if (query_call_id > 0) {
- /* Calling remove_cib_op_callback() would result in a memory leak of the data field */
crm_trace("Cancelling previous join query: %d", query_call_id);
+ remove_cib_op_callback(query_call_id, FALSE);
query_call_id = 0;
}
@@ -173,7 +173,6 @@ join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *
done:
free_xml(generation);
- free(join_id);
}
/* A_CL_JOIN_RESULT */
diff --git a/crmd/join_dc.c b/crmd/join_dc.c
index f777296..5280b6e 100644
--- a/crmd/join_dc.c
+++ b/crmd/join_dc.c
@@ -452,8 +452,6 @@ finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, voi
crm_debug("No longer the DC in S_FINALIZE_JOIN: %s/%s",
AM_I_DC ? "DC" : "CRMd", fsa_state2string(fsa_state));
}
-
- free(user_data);
}
static void
diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c
index 162ad03..c03fa0b 100644
--- a/crmd/lrm_state.c
+++ b/crmd/lrm_state.c
@@ -490,7 +490,7 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg)
if (remote_proxy_new(lrm_state->node_name, session, channel) == NULL) {
remote_proxy_notify_destroy(lrmd, session);
}
- crm_info("new remote proxy client established to %s, session id %s", channel, session);
+ crm_trace("new remote proxy client established to %s, session id %s", channel, session);
} else if (safe_str_eq(op, "destroy")) {
remote_proxy_end_session(session);
@@ -534,7 +534,16 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg)
}
} else if(is_set(flags, crm_ipc_proxied)) {
- int rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL);
+ const char *type = crm_element_value(request, F_TYPE);
+ int rc = 0;
+
+ if (safe_str_eq(type, T_ATTRD)
+ && crm_element_value(request, F_ATTRD_HOST) == NULL) {
+ crm_xml_add(request, F_ATTRD_HOST, proxy->node_name);
+ crm_xml_add_int(request, F_ATTRD_HOST_ID, get_local_nodeid(0));
+ }
+
+ rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL);
if(rc < 0) {
xmlNode *op_reply = create_xml_node(NULL, "nack");
diff --git a/crmd/membership.c b/crmd/membership.c
index 447e6a8..27ae710 100644
--- a/crmd/membership.c
+++ b/crmd/membership.c
@@ -200,7 +200,6 @@ remove_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE,
"Deletion of the unknown conflicting node \"%s\": %s (rc=%d)",
node_uuid, pcmk_strerror(rc), rc);
- free(node_uuid);
}
static void
@@ -215,11 +214,9 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
crm_notice("Searching conflicting nodes for %s failed: %s (%d)",
new_node_uuid, pcmk_strerror(rc), rc);
}
- free(new_node_uuid);
return;
} else if (output == NULL) {
- free(new_node_uuid);
return;
}
@@ -283,8 +280,6 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
free_xml(node_state_xml);
}
}
-
- free(new_node_uuid);
}
static void
diff --git a/crmd/pengine.c b/crmd/pengine.c
index c9544a9..46df648 100644
--- a/crmd/pengine.c
+++ b/crmd/pengine.c
@@ -77,8 +77,6 @@ save_cib_contents(xmlNode * msg, int call_id, int rc, xmlNode * output, void *us
free(filename);
}
-
- free(id);
}
static void
@@ -320,9 +318,10 @@ do_pe_invoke_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void
crm_debug("Discarding PE request in state: %s", fsa_state2string(fsa_state));
return;
- } else if (num_cib_op_callbacks() != 0) {
- crm_debug("Re-asking for the CIB: %d peer updates still pending", num_cib_op_callbacks());
-
+ /* this callback counts as 1 */
+ } else if (num_cib_op_callbacks() > 1) {
+ crm_debug("Re-asking for the CIB: %d other peer updates still pending",
+ (num_cib_op_callbacks() - 1));
sleep(1);
register_fsa_action(A_PE_INVOKE);
return;
diff --git a/crmd/te_callbacks.c b/crmd/te_callbacks.c
index 68742c2..c22b273 100644
--- a/crmd/te_callbacks.c
+++ b/crmd/te_callbacks.c
@@ -294,6 +294,49 @@ static char *get_node_from_xpath(const char *xpath)
return nodeid;
}
+static char *extract_node_uuid(const char *xpath)
+{
+ char *mutable_path = strdup(xpath);
+ char *node_uuid = NULL;
+ char *search = NULL;
+ char *match = NULL;
+
+ match = strstr(mutable_path, "node_state[@id=\'") + strlen("node_state[@id=\'");
+ search = strchr(match, '\'');
+ search[0] = 0;
+
+ node_uuid = strdup(match);
+ free(mutable_path);
+ return node_uuid;
+}
+
+static void abort_unless_down(const char *xpath, const char *op, xmlNode *change, const char *reason)
+{
+ char *node_uuid = NULL;
+ crm_action_t *down = NULL;
+
+ if(safe_str_neq(op, "delete")) {
+ abort_transition(INFINITY, tg_restart, reason, change);
+ return;
+ }
+
+ node_uuid = extract_node_uuid(xpath);
+ if(node_uuid == NULL) {
+ crm_err("Could not extract node ID from %s", xpath);
+ abort_transition(INFINITY, tg_restart, reason, change);
+ return;
+ }
+
+ down = match_down_event(0, node_uuid, NULL, FALSE);
+ if(down == NULL || down->executed == false) {
+ crm_trace("Not expecting %s to be down (%s)", node_uuid, xpath);
+ abort_transition(INFINITY, tg_restart, reason, change);
+ } else {
+ crm_trace("Expecting changes to %s (%s)", node_uuid, xpath);
+ }
+ free(node_uuid);
+}
+
void
te_update_diff(const char *event, xmlNode * msg)
{
@@ -388,27 +431,22 @@ te_update_diff(const char *event, xmlNode * msg)
break; /* Wont be packaged with any resource operations we may be waiting for */
} else if(strstr(xpath, "/"XML_TAG_TRANSIENT_NODEATTRS"[") || safe_str_eq(name, XML_TAG_TRANSIENT_NODEATTRS)) {
- abort_transition(INFINITY, tg_restart, "Transient attribute change", change);
+ abort_unless_down(xpath, op, change, "Transient attribute change");
break; /* Wont be packaged with any resource operations we may be waiting for */
} else if(strstr(xpath, "/"XML_LRM_TAG_RSC_OP"[") && safe_str_eq(op, "delete")) {
crm_action_t *cancel = NULL;
char *mutable_key = strdup(xpath);
- char *mutable_node = strdup(xpath);
char *search = NULL;
const char *key = NULL;
- const char *node_uuid = NULL;
+ char *node_uuid = extract_node_uuid(xpath);
search = strrchr(mutable_key, '\'');
search[0] = 0;
key = strrchr(mutable_key, '\'') + 1;
- node_uuid = strstr(mutable_node, "node_state[@id=\'") + strlen("node_state[@id=\'");
- search = strchr(node_uuid, '\'');
- search[0] = 0;
-
cancel = get_cancel_action(key, node_uuid);
if (cancel == NULL) {
abort_transition(INFINITY, tg_restart, "Resource operation removal", change);
@@ -422,14 +460,14 @@ te_update_diff(const char *event, xmlNode * msg)
trigger_graph();
}
- free(mutable_node);
free(mutable_key);
+ free(node_uuid);
} else if(strstr(xpath, "/"XML_CIB_TAG_LRM"[") && safe_str_eq(op, "delete")) {
- abort_transition(INFINITY, tg_restart, "Resource state removal", change);
+ abort_unless_down(xpath, op, change, "Resource state removal");
} else if(strstr(xpath, "/"XML_CIB_TAG_STATE"[") && safe_str_eq(op, "delete")) {
- abort_transition(INFINITY, tg_restart, "Node state removal", change);
+ abort_unless_down(xpath, op, change, "Node state removal");
} else if(name == NULL) {
crm_debug("No result for %s operation to %s", op, xpath);
@@ -717,7 +755,6 @@ cib_fencing_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *
} else {
crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data);
}
- free(user_data);
}
void
diff --git a/crmd/utils.c b/crmd/utils.c
index 5ca4b9d..4fe3a49 100644
--- a/crmd/utils.c
+++ b/crmd/utils.c
@@ -999,7 +999,6 @@ erase_xpath_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void
do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE,
"Deletion of \"%s\": %s (rc=%d)", xpath, pcmk_strerror(rc), rc);
- free(xpath);
}
void
diff --git a/cts/CIB.py b/cts/CIB.py
index 82d02d7..8fbba6c 100644
--- a/cts/CIB.py
+++ b/cts/CIB.py
@@ -105,7 +105,7 @@ class CIB11(ConfigBase):
if not name:
name = "r%s%d" % (self.CM.Env["IPagent"], self.counter)
self.counter = self.counter + 1
- r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
+ r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
r.add_op("monitor", "5s")
return r
@@ -387,7 +387,7 @@ class ConfigFactory:
"""register a constructor"""
_args = [constructor]
_args.extend(args)
- setattr(self, methodName, apply(ConfigFactoryItem,_args, kargs))
+ setattr(self, methodName, ConfigFactoryItem(*_args, **kargs))
def unregister(self, methodName):
"""unregister a constructor"""
@@ -415,7 +415,6 @@ class ConfigFactory:
class ConfigFactoryItem:
def __init__(self, function, *args, **kargs):
- assert callable(function), "function should be a callable obj"
self._function = function
self._args = args
self._kargs = kargs
@@ -426,7 +425,7 @@ class ConfigFactoryItem:
_args.extend(args)
_kargs = self._kargs.copy()
_kargs.update(kargs)
- return apply(self._function,_args,_kargs)
+ return self._function(*_args,**_kargs)
# Basic Sanity Testing
if __name__ == '__main__':
@@ -449,4 +448,4 @@ if __name__ == '__main__':
CibFactory = ConfigFactory(manager)
cib = CibFactory.createConfig("pacemaker-1.1")
- print cib.contents()
+ print(cib.contents())
diff --git a/cts/CM_ais.py b/cts/CM_ais.py
index a34f9b1..d2e2c1f 100644
--- a/cts/CM_ais.py
+++ b/cts/CM_ais.py
@@ -80,7 +80,7 @@ class crm_ais(crm_lha):
# Processes running under valgrind can't be shot with "killall -9 processname",
# so don't include them in the returned list
vgrind = self.Env["valgrind-procs"].split()
- for key in self.fullcomplist.keys():
+ for key in list(self.fullcomplist.keys()):
if self.Env["valgrind-tests"]:
if key in vgrind:
self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
diff --git a/cts/CM_lha.py b/cts/CM_lha.py
index b192272..28742d9 100755
--- a/cts/CM_lha.py
+++ b/cts/CM_lha.py
@@ -92,7 +92,7 @@ class crm_lha(ClusterManager):
self.log("Node %s is not up." % node)
return None
- if not self.CIBsync.has_key(node) and self.Env["ClobberCIB"] == 1:
+ if not node in self.CIBsync and self.Env["ClobberCIB"] == 1:
self.CIBsync[node] = 1
self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
diff --git a/cts/CTS.py b/cts/CTS.py
index 9f9a291..634348a 100644
--- a/cts/CTS.py
+++ b/cts/CTS.py
@@ -69,7 +69,7 @@ function status() {
function start() {
# Is it already running?
if
- status
+ status
then
return
fi
@@ -94,20 +94,20 @@ case $action in
nohup $0 $f start >/dev/null 2>&1 </dev/null &
;;
stop)
- killpid
- ;;
+ killpid
+ ;;
delete)
- killpid
- rm -f $f
- ;;
+ killpid
+ rm -f $f
+ ;;
mark)
- uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
- echo " $*" >> $f
+ uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
+ echo " $*" >> $f
start
- ;;
+ ;;
*)
- echo "Unknown action: $action."
- ;;
+ echo "Unknown action: $action."
+ ;;
esac
"""
@@ -157,7 +157,7 @@ class CtsLab:
self.Env.dump()
def has_key(self, key):
- return self.Env.has_key(key)
+ return key in self.Env.keys()
def __getitem__(self, key):
return self.Env[key]
@@ -275,7 +275,7 @@ class ClusterManager(UserDict):
None
def _finalConditions(self):
- for key in self.keys():
+ for key in list(self.keys()):
if self[key] == None:
raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.")
@@ -299,14 +299,14 @@ class ClusterManager(UserDict):
if key == "Name":
return self.name
- print "FIXME: Getting %s from %s" % (key, repr(self))
- if self.data.has_key(key):
+ print("FIXME: Getting %s from %s" % (key, repr(self)))
+ if key in self.data:
return self.data[key]
return self.templates.get_patterns(self.Env["Name"], key)
def __setitem__(self, key, value):
- print "FIXME: Setting %s=%s on %s" % (key, value, repr(self))
+ print("FIXME: Setting %s=%s on %s" % (key, value, repr(self)))
self.data[key] = value
def key_for_node(self, node):
@@ -333,7 +333,7 @@ class ClusterManager(UserDict):
def prepare(self):
'''Finish the Initialization process. Prepare to test...'''
- print repr(self)+"prepare"
+ print(repr(self)+"prepare")
for node in self.Env["nodes"]:
if self.StataCM(node):
self.ShouldBeStatus[node] = "up"
@@ -387,11 +387,11 @@ class ClusterManager(UserDict):
return None
if not self.templates["Pat:Fencing_start"]:
- print "No start pattern"
+ print("No start pattern")
return None
if not self.templates["Pat:Fencing_ok"]:
- print "No ok pattern"
+ print("No ok pattern")
return None
stonith = None
@@ -500,7 +500,7 @@ class ClusterManager(UserDict):
else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
ret = 1
- if not self.ShouldBeStatus.has_key(node):
+ if not node in self.ShouldBeStatus:
self.ShouldBeStatus[node] = "down"
if self.ShouldBeStatus[node] != "down":
@@ -871,13 +871,13 @@ class ClusterManager(UserDict):
for host in self.Env["nodes"]:
log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
- if has_log_stats.has_key(host):
+ if host in has_log_stats:
self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file))
(rc, lines) = self.rsh(host, '''cat %s''' % log_stats_file, stdout=2)
self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
fname = "cts-stats-%d-nodes-%s.csv" % (len(self.Env["nodes"]), host)
- print "Extracted stats: %s" % fname
+ print("Extracted stats: %s" % fname)
fd = open(fname, "a")
fd.writelines(lines)
fd.close()
@@ -891,7 +891,7 @@ class ClusterManager(UserDict):
for host in self.Env["nodes"]:
log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
- if not has_log_stats.has_key(host):
+ if not host in has_log_stats:
global log_stats
global log_stats_bin
@@ -986,7 +986,7 @@ class Process(Component):
self.CM = cm
self.badnews_ignore = badnews_ignore
self.badnews_ignore.extend(common_ignore)
- self.triggersreboot = triggersreboot
+ self.triggersreboot = triggersreboot
if process:
self.proc = str(process)
diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py
index 8d52062..e8663f2 100755
--- a/cts/CTSaudits.py
+++ b/cts/CTSaudits.py
@@ -108,7 +108,7 @@ class LogAudit(ClusterAudit):
self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node))
for k in self.kinds:
- if watch.has_key(k):
+ if k in watch:
w = watch[k]
if watch_pref == "any": self.CM.log("Testing for %s logs" % (k))
w.lookforall(silent=True)
@@ -118,7 +118,7 @@ class LogAudit(ClusterAudit):
self.CM.Env["LogWatcher"] = w.kind
return 1
- for k in watch.keys():
+ for k in list(watch.keys()):
w = watch[k]
if w.unmatched:
for regex in w.unmatched:
@@ -226,7 +226,7 @@ class FileAudit(ClusterAudit):
self.known.append(line)
self.CM.log("Warning: Corosync core file on %s: %s" % (node, line))
- if self.CM.ShouldBeStatus.has_key(node) and self.CM.ShouldBeStatus[node] == "down":
+ if node in self.CM.ShouldBeStatus and self.CM.ShouldBeStatus[node] == "down":
clean = 0
(rc, lsout) = self.CM.rsh(node, "ls -al /dev/shm | grep qb-", None)
for line in lsout:
@@ -532,7 +532,7 @@ class CrmdStateAudit(ClusterAudit):
, "auditfail":0}
def has_key(self, key):
- return self.Stats.has_key(key)
+ return key in self.Stats
def __setitem__(self, key, value):
self.Stats[key] = value
@@ -542,7 +542,7 @@ class CrmdStateAudit(ClusterAudit):
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
- if not self.Stats.has_key(name):
+ if not name in self.Stats:
self.Stats[name] = 0
self.Stats[name] = self.Stats[name]+1
@@ -601,7 +601,7 @@ class CIBAudit(ClusterAudit):
, "auditfail":0}
def has_key(self, key):
- return self.Stats.has_key(key)
+ return key in self.Stats
def __setitem__(self, key, value):
self.Stats[key] = value
@@ -611,7 +611,7 @@ class CIBAudit(ClusterAudit):
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
- if not self.Stats.has_key(name):
+ if not name in self.Stats:
self.Stats[name] = 0
self.Stats[name] = self.Stats[name]+1
@@ -726,7 +726,7 @@ class PartitionAudit(ClusterAudit):
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
- if not self.Stats.has_key(name):
+ if not name in self.Stats:
self.Stats[name] = 0
self.Stats[name] = self.Stats[name]+1
diff --git a/cts/CTSscenarios.py b/cts/CTSscenarios.py
index 2f3a69b..cc6e67e 100644
--- a/cts/CTSscenarios.py
+++ b/cts/CTSscenarios.py
@@ -124,7 +124,7 @@ A partially set up scenario is torn down if it fails during setup.
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
- if not self.Stats.has_key(name):
+ if not name in self.Stats:
self.Stats[name] = 0
self.Stats[name] = self.Stats[name]+1
@@ -176,7 +176,7 @@ A partially set up scenario is torn down if it fails during setup.
elapsed_time = stoptime - starttime
test_time = stoptime - test.get_timer()
- if not test.has_key("min_time"):
+ if not test["min_time"]:
test["elapsed_time"] = elapsed_time
test["min_time"] = test_time
test["max_time"] = test_time
@@ -211,7 +211,7 @@ A partially set up scenario is torn down if it fails during setup.
}
self.ClusterManager.log("Test Summary")
for test in self.Tests:
- for key in stat_filter.keys():
+ for key in list(stat_filter.keys()):
stat_filter[key] = test.Stats[key]
self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
@@ -387,7 +387,7 @@ According to the manual page for ping:
'''Start the PingFest!'''
self.PingSize = 1024
- if CM.Env.has_key("PingSize"):
+ if "PingSize" in CM.Env.keys():
self.PingSize = CM.Env["PingSize"]
CM.log("Starting %d byte flood pings" % self.PingSize)
@@ -550,7 +550,7 @@ Test a rolling upgrade between two versions of the stack
return self.install(node, self.CM.Env["previous-version"])
def SetUp(self, CM):
- print repr(self)+"prepare"
+ print(repr(self)+"prepare")
CM.prepare()
# Clear out the cobwebs
diff --git a/cts/CTStests.py b/cts/CTStests.py
index f817004..00fcd13 100644
--- a/cts/CTStests.py
+++ b/cts/CTStests.py
@@ -97,13 +97,18 @@ class CTSTest:
self.logger.debug(args)
def has_key(self, key):
- return self.Stats.has_key(key)
+ return key in self.Stats
def __setitem__(self, key, value):
self.Stats[key] = value
def __getitem__(self, key):
- return self.Stats[key]
+ if str(key) == "0":
+ raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead")
+
+ if key in self.Stats:
+ return self.Stats[key]
+ return None
def log_mark(self, msg):
self.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
@@ -128,7 +133,7 @@ class CTSTest:
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
- if not self.Stats.has_key(name):
+ if not name in self.Stats:
self.Stats[name] = 0
self.Stats[name] = self.Stats[name]+1
@@ -534,7 +539,7 @@ class StonithdTest(CTSTest):
if not self.is_applicable_common():
return 0
- if self.Env.has_key("DoFencing"):
+ if "DoFencing" in self.Env.keys():
return self.Env["DoFencing"]
return 1
@@ -1048,7 +1053,7 @@ class BandwidthTest(CTSTest):
T1 = linesplit[0]
timesplit = string.split(T1,":")
time2split = string.split(timesplit[2],".")
- time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
+ time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
break
while count < 100:
@@ -1070,7 +1075,7 @@ class BandwidthTest(CTSTest):
T2 = linessplit[0]
timesplit = string.split(T2,":")
time2split = string.split(timesplit[2],".")
- time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
+ time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
time = time2-time1
if (time <= 0):
return 0
@@ -1105,7 +1110,7 @@ class MaintenanceMode(CTSTest):
# fail the resource right after turning Maintenance mode on
# verify it is not recovered until maintenance mode is turned off
if action == "On":
- pats.append("pengine.*: warning:.* Processing failed op %s for %s on" % (self.action, self.rid))
+ pats.append(r"pengine.*:\s+warning:.*Processing failed op %s for %s on" % (self.action, self.rid))
else:
pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0"))
pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "start_0"))
@@ -1314,7 +1319,7 @@ class ResourceRecover(CTSTest):
self.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id))
pats = []
- pats.append(r"pengine.*: warning:.* Processing failed op %s for (%s|%s) on" % (self.action,
+ pats.append(r"pengine.*:\s+warning:.*Processing failed op %s for (%s|%s) on" % (self.action,
rsc.id, rsc.clone_id))
if rsc.managed():
@@ -1574,7 +1579,7 @@ class SplitBrainTest(CTSTest):
p_max = len(self.Env["nodes"])
for node in self.Env["nodes"]:
p = self.Env.RandomGen.randint(1, p_max)
- if not partitions.has_key(p):
+ if not p in partitions:
partitions[p] = []
partitions[p].append(node)
p_max = len(partitions.keys())
@@ -1583,13 +1588,13 @@ class SplitBrainTest(CTSTest):
# else, try again
self.debug("Created %d partitions" % p_max)
- for key in partitions.keys():
+ for key in list(partitions.keys()):
self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
# Disabling STONITH to reduce test complexity for now
self.rsh(node, "crm_attribute -V -n stonith-enabled -v false")
- for key in partitions.keys():
+ for key in list(partitions.keys()):
self.isolate_partition(partitions[key])
count = 30
@@ -1612,7 +1617,7 @@ class SplitBrainTest(CTSTest):
self.CM.partitions_expected = 1
# And heal them again
- for key in partitions.keys():
+ for key in list(partitions.keys()):
self.heal_partition(partitions[key])
# Wait for a single partition to form
@@ -2247,11 +2252,11 @@ class RollingUpgradeTest(CTSTest):
if not self.is_applicable_common():
return None
- if not self.Env.has_key("rpm-dir"):
+ if not "rpm-dir" in self.Env.keys():
return None
- if not self.Env.has_key("current-version"):
+ if not "current-version" in self.Env.keys():
return None
- if not self.Env.has_key("previous-version"):
+ if not "previous-version" in self.Env.keys():
return None
return 1
@@ -2305,7 +2310,7 @@ class BSC_AddResource(CTSTest):
if ":" in ip:
fields = ip.rpartition(":")
fields[2] = str(hex(int(fields[2], 16)+1))
- print str(hex(int(f[2], 16)+1))
+ print(str(hex(int(f[2], 16)+1)))
else:
fields = ip.rpartition('.')
fields[2] = str(int(fields[2])+1)
@@ -3109,7 +3114,7 @@ class RemoteStonithd(CTSTest):
if not self.driver.is_applicable():
return False
- if self.Env.has_key("DoFencing"):
+ if "DoFencing" in self.Env.keys():
return self.Env["DoFencing"]
return True
diff --git a/cts/OCFIPraTest.py b/cts/OCFIPraTest.py
index 9900a62..03d964b 100755
--- a/cts/OCFIPraTest.py
+++ b/cts/OCFIPraTest.py
@@ -28,13 +28,13 @@ from cts.CTSvars import *
def usage():
- print "usage: " + sys.argv[0] \
+ print("usage: " + sys.argv[0] \
+ " [-2]"\
+ " [--ipbase|-i first-test-ip]"\
+ " [--ipnum|-n test-ip-num]"\
+ " [--help|-h]"\
+ " [--perform|-p op]"\
- + " [number-of-iterations]"
+ + " [number-of-iterations]")
sys.exit(1)
@@ -71,7 +71,7 @@ def log(towrite):
t = time.strftime("%Y/%m/%d_%H:%M:%S\t", time.localtime(time.time()))
logstr = t + " "+str(towrite)
syslog.syslog(logstr)
- print logstr
+ print(logstr)
if __name__ == '__main__':
ra = "IPaddr"
diff --git a/cts/cib_xml.py b/cts/cib_xml.py
index 0bd963b..3d8f8d4 100644
--- a/cts/cib_xml.py
+++ b/cts/cib_xml.py
@@ -19,7 +19,7 @@ class XmlBase(CibBase):
text = '''<%s''' % self.tag
if self.name:
text += ''' id="%s"''' % (self.name)
- for k in self.kwargs.keys():
+ for k in list(self.kwargs.keys()):
text += ''' %s="%s"''' % (k, self.kwargs[k])
if not self.children:
@@ -149,22 +149,22 @@ class Resource(XmlBase):
def constraints(self):
text = "<constraints>"
- for k in self.scores.keys():
+ for k in list(self.scores.keys()):
text += '''<rsc_location id="prefer-%s" rsc="%s">''' % (k, self.name)
text += self.scores[k].show()
text += '''</rsc_location>'''
- for k in self.needs.keys():
+ for k in list(self.needs.keys()):
text += '''<rsc_order id="%s-after-%s" first="%s" then="%s"''' % (self.name, k, k, self.name)
kargs = self.needs[k]
- for kw in kargs.keys():
+ for kw in list(kargs.keys()):
text += ''' %s="%s"''' % (kw, kargs[kw])
text += '''/>'''
- for k in self.coloc.keys():
+ for k in list(self.coloc.keys()):
text += '''<rsc_colocation id="%s-with-%s" rsc="%s" with-rsc="%s"''' % (self.name, k, self.name, k)
kargs = self.coloc[k]
- for kw in kargs.keys():
+ for kw in list(kargs.keys()):
text += ''' %s="%s"''' % (kw, kargs[kw])
text += '''/>'''
@@ -179,13 +179,13 @@ class Resource(XmlBase):
if len(self.meta) > 0:
text += '''<meta_attributes id="%s-meta">''' % self.name
- for p in self.meta.keys():
+ for p in list(self.meta.keys()):
text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.meta[p])
text += '''</meta_attributes>'''
if len(self.param) > 0:
text += '''<instance_attributes id="%s-params">''' % self.name
- for p in self.param.keys():
+ for p in list(self.param.keys()):
text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.param[p])
text += '''</instance_attributes>'''
@@ -219,7 +219,7 @@ class Group(Resource):
if len(self.meta) > 0:
text += '''<meta_attributes id="%s-meta">''' % self.name
- for p in self.meta.keys():
+ for p in list(self.meta.keys()):
text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.meta[p])
text += '''</meta_attributes>'''
diff --git a/cts/environment.py b/cts/environment.py
index 61d4211..4ed5ced 100644
--- a/cts/environment.py
+++ b/cts/environment.py
@@ -92,7 +92,7 @@ class Environment:
def dump(self):
keys = []
- for key in self.data.keys():
+ for key in list(self.data.keys()):
keys.append(key)
keys.sort()
@@ -106,16 +106,19 @@ class Environment:
if key == "nodes":
return True
- return self.data.has_key(key)
+ return key in self.data
def __getitem__(self, key):
+ if str(key) == "0":
+ raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
+
if key == "nodes":
return self.Nodes
elif key == "Name":
return self.get_stack_short()
- elif self.data.has_key(key):
+ elif key in self.data:
return self.data[key]
else:
@@ -175,12 +178,12 @@ class Environment:
self.data["Stack"] = "corosync (plugin v0)"
else:
- print "Unknown stack: "+name
+ raise ValueError("Unknown stack: "+name)
sys.exit(1)
def get_stack_short(self):
# Create the Cluster Manager object
- if not self.data.has_key("Stack"):
+ if not "Stack" in self.data:
return "unknown"
elif self.data["Stack"] == "heartbeat":
@@ -202,12 +205,12 @@ class Environment:
return "crm-plugin-v0"
else:
- LogFactory().log("Unknown stack: "+self.data["stack"])
- sys.exit(1)
+ LogFactory().log("Unknown stack: "+self["stack"])
+ raise ValueError("Unknown stack: "+self["stack"])
def detect_syslog(self):
# Detect syslog variant
- if not self.has_key("syslogd"):
+ if not "syslogd" in self.data:
if self["have_systemd"]:
# Systemd
self["syslogd"] = self.rsh(self.target, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", stdout=1).strip()
@@ -215,13 +218,13 @@ class Environment:
# SYS-V
self["syslogd"] = self.rsh(self.target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", stdout=1).strip()
- if not self.has_key("syslogd") or not self["syslogd"]:
+ if not "syslogd" in self.data or not self["syslogd"]:
# default
self["syslogd"] = "rsyslog"
def detect_at_boot(self):
# Detect if the cluster starts at boot
- if not self.has_key("at-boot"):
+ if not "at-boot" in self.data:
atboot = 0
if self["have_systemd"]:
@@ -237,7 +240,7 @@ class Environment:
def detect_ip_offset(self):
# Try to determin an offset for IPaddr resources
- if self["CIBResource"] and not self.has_key("IPBase"):
+ if self["CIBResource"] and not "IPBase" in self.data:
network=self.rsh(self.target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", stdout=1).strip()
self["IPBase"] = self.rsh(self.target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, stdout=1).strip()
if not self["IPBase"]:
@@ -261,7 +264,7 @@ class Environment:
def validate(self):
if len(self["nodes"]) < 1:
- print "No nodes specified!"
+ print("No nodes specified!")
sys.exit(1)
def discover(self):
@@ -276,7 +279,7 @@ class Environment:
break;
self["cts-master"] = master
- if not self.has_key("have_systemd"):
+ if not "have_systemd" in self.data:
self["have_systemd"] = not self.rsh(self.target, "systemctl list-units")
self.detect_syslog()
@@ -390,7 +393,7 @@ class Environment:
self["DoStonith"]=1
self["stonith-type"] = "fence_openstack"
- print "Obtaining OpenStack credentials from the current environment"
+ print("Obtaining OpenStack credentials from the current environment")
self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
os.environ['OS_REGION_NAME'],
os.environ['OS_TENANT_NAME'],
@@ -403,7 +406,7 @@ class Environment:
self["DoStonith"]=1
self["stonith-type"] = "fence_rhevm"
- print "Obtaining RHEV-M credentials from the current environment"
+ print("Obtaining RHEV-M credentials from the current environment")
self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
os.environ['RHEVM_USERNAME'],
os.environ['RHEVM_PASSWORD'],
@@ -442,7 +445,7 @@ class Environment:
try:
float(args[i+1])
except ValueError:
- print ("--xmit-loss parameter should be float")
+ print("--xmit-loss parameter should be float")
self.usage(args[i+1])
skipthis=1
self["XmitLoss"] = args[i+1]
@@ -451,7 +454,7 @@ class Environment:
try:
float(args[i+1])
except ValueError:
- print ("--recv-loss parameter should be float")
+ print("--recv-loss parameter should be float")
self.usage(args[i+1])
skipthis=1
self["RecvLoss"] = args[i+1]
@@ -503,7 +506,7 @@ class Environment:
self["DoStonith"]=1
self["stonith-type"] = "fence_rhevm"
- print "Obtaining RHEV-M credentials from the current environment"
+ print("Obtaining RHEV-M credentials from the current environment")
self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
os.environ['RHEVM_USERNAME'],
os.environ['RHEVM_PASSWORD'],
@@ -605,7 +608,7 @@ class Environment:
skipthis=1
(name, value) = args[i+1].split('=')
self[name] = value
- print "Setting %s = %s" % (name, value)
+ print("Setting %s = %s" % (name, value))
elif args[i] == "--help":
self.usage(args[i], 0)
@@ -622,52 +625,52 @@ class Environment:
def usage(self, arg, status=1):
if status:
- print "Illegal argument %s" % arg
- print "usage: " + sys.argv[0] +" [options] number-of-iterations"
- print "\nCommon options: "
- print "\t [--nodes 'node list'] list of cluster nodes separated by whitespace"
- print "\t [--group | -g 'name'] use the nodes listed in the named DSH group (~/.dsh/groups/$name)"
- print "\t [--limit-nodes max] only use the first 'max' cluster nodes supplied with --nodes"
- print "\t [--stack (v0|v1|cman|corosync|heartbeat|openais)] which cluster stack is installed"
- print "\t [--list-tests] list the valid tests"
- print "\t [--benchmark] add the timing information"
- print "\t "
- print "Options that CTS will usually auto-detect correctly: "
- print "\t [--logfile path] where should the test software look for logs from cluster nodes"
- print "\t [--syslog-facility name] which syslog facility should the test software log to"
- print "\t [--at-boot (1|0)] does the cluster software start at boot time"
- print "\t [--test-ip-base ip] offset for generated IP address resources"
- print "\t "
- print "Options for release testing: "
- print "\t [--populate-resources | -r] generate a sample configuration"
- print "\t [--choose name] run only the named test"
- print "\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]"
- print "\t [--once] run all valid tests once"
- print "\t "
- print "Additional (less common) options: "
- print "\t [--clobber-cib | -c ] erase any existing configuration"
- print "\t [--outputfile path] optional location for the test software to write logs to"
- print "\t [--trunc] truncate logfile before starting"
- print "\t [--xmit-loss lost-rate(0.0-1.0)]"
- print "\t [--recv-loss lost-rate(0.0-1.0)]"
- print "\t [--standby (1 | 0 | yes | no)]"
- print "\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]"
- print "\t [--stonith-type type]"
- print "\t [--stonith-args name=value]"
- print "\t [--bsc]"
- print "\t [--no-loop-tests] dont run looping/time-based tests"
- print "\t [--no-unsafe-tests] dont run tests that are unsafe for use with ocfs2/drbd"
- print "\t [--valgrind-tests] include tests using valgrind"
- print "\t [--experimental-tests] include experimental tests"
- print "\t [--container-tests] include pacemaker_remote tests that run in lxc container resources"
- print "\t [--oprofile 'node list'] list of cluster nodes to run oprofile on]"
- print "\t [--qarsh] use the QARSH backdoor to access nodes instead of SSH"
- print "\t [--docker] Indicates nodes are docker nodes."
- print "\t [--seed random_seed]"
- print "\t [--set option=value]"
- print "\t "
- print "\t Example: "
- print "\t python sys.argv[0] -g virt1 --stack cs -r --stonith ssh --schema pacemaker-1.0 500"
+ print("Illegal argument %s" % arg)
+ print("usage: " + sys.argv[0] +" [options] number-of-iterations")
+ print("\nCommon options: ")
+ print("\t [--nodes 'node list'] list of cluster nodes separated by whitespace")
+ print("\t [--group | -g 'name'] use the nodes listed in the named DSH group (~/.dsh/groups/$name)")
+ print("\t [--limit-nodes max] only use the first 'max' cluster nodes supplied with --nodes")
+ print("\t [--stack (v0|v1|cman|corosync|heartbeat|openais)] which cluster stack is installed")
+ print("\t [--list-tests] list the valid tests")
+ print("\t [--benchmark] add the timing information")
+ print("\t ")
+ print("Options that CTS will usually auto-detect correctly: ")
+ print("\t [--logfile path] where should the test software look for logs from cluster nodes")
+ print("\t [--syslog-facility name] which syslog facility should the test software log to")
+ print("\t [--at-boot (1|0)] does the cluster software start at boot time")
+ print("\t [--test-ip-base ip] offset for generated IP address resources")
+ print("\t ")
+ print("Options for release testing: ")
+ print("\t [--populate-resources | -r] generate a sample configuration")
+ print("\t [--choose name] run only the named test")
+ print("\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]")
+ print("\t [--once] run all valid tests once")
+ print("\t ")
+ print("Additional (less common) options: ")
+ print("\t [--clobber-cib | -c ] erase any existing configuration")
+ print("\t [--outputfile path] optional location for the test software to write logs to")
+ print("\t [--trunc] truncate logfile before starting")
+ print("\t [--xmit-loss lost-rate(0.0-1.0)]")
+ print("\t [--recv-loss lost-rate(0.0-1.0)]")
+ print("\t [--standby (1 | 0 | yes | no)]")
+ print("\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]")
+ print("\t [--stonith-type type]")
+ print("\t [--stonith-args name=value]")
+ print("\t [--bsc]")
+ print("\t [--no-loop-tests] don't run looping/time-based tests")
+ print("\t [--no-unsafe-tests] don't run tests that are unsafe for use with ocfs2/drbd")
+ print("\t [--valgrind-tests] include tests using valgrind")
+ print("\t [--experimental-tests] include experimental tests")
+ print("\t [--container-tests] include pacemaker_remote tests that run in lxc container resources")
+ print("\t [--oprofile 'node list'] list of cluster nodes to run oprofile on")
+ print("\t [--qarsh] use the QARSH backdoor to access nodes instead of SSH")
+ print("\t [--docker] Indicates nodes are docker nodes.")
+ print("\t [--seed random_seed]")
+ print("\t [--set option=value]")
+ print("\t ")
+ print("\t Example: ")
+ print("\t python sys.argv[0] -g virt1 --stack cs -r --stonith ssh --schema pacemaker-1.0 500")
sys.exit(status)
diff --git a/cts/logging.py b/cts/logging.py
index 8afa611..08da44a 100644
--- a/cts/logging.py
+++ b/cts/logging.py
@@ -22,7 +22,7 @@ Licensed under the GNU GPL.
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-import types, string, sys, time, os
+import string, sys, time, os
class Logger:
TimeFormat = "%b %d %H:%M:%S\t"
@@ -47,7 +47,7 @@ class StdErrLog(Logger):
def __call__(self, lines):
t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
- if isinstance(lines, types.StringType):
+ if isinstance(lines, basestring):
sys.__stderr__.writelines([t, lines, "\n"])
else:
for line in lines:
@@ -71,7 +71,7 @@ class FileLog(Logger):
fd = open(self.logfile, "a")
t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
- if isinstance(lines, types.StringType):
+ if isinstance(lines, basestring):
fd.writelines([t, self.hostname, self.source, lines, "\n"])
else:
for line in lines:
diff --git a/cts/patterns.py b/cts/patterns.py
index 493b690..3cdce2f 100644
--- a/cts/patterns.py
+++ b/cts/patterns.py
@@ -67,9 +67,9 @@ class BasePatterns:
}
def get_component(self, key):
- if self.components.has_key(key):
+ if key in self.components:
return self.components[key]
- print "Unknown component '%s' for %s" % (key, self.name)
+ print("Unknown component '%s' for %s" % (key, self.name))
return []
def get_patterns(self, key):
@@ -87,12 +87,12 @@ class BasePatterns:
def __getitem__(self, key):
if key == "Name":
return self.name
- elif self.commands.has_key(key):
+ elif key in self.commands:
return self.commands[key]
- elif self.search.has_key(key):
+ elif key in self.search:
return self.search[key]
else:
- print "Unknown template '%s' for %s" % (key, self.name)
+ print("Unknown template '%s' for %s" % (key, self.name))
return None
class crm_lha(BasePatterns):
@@ -489,9 +489,9 @@ class PatternSelector:
crm_mcp_docker(name)
def get_variant(self, variant):
- if patternvariants.has_key(variant):
+ if variant in patternvariants:
return patternvariants[variant]
- print "defaulting to crm-base for %s" % variant
+ print("defaulting to crm-base for %s" % variant)
return self.base
def get_patterns(self, variant, kind):
@@ -532,7 +532,7 @@ if __name__ == '__main__':
template = args[i+1]
else:
- print "Illegal argument " + args[i]
+ print("Illegal argument " + args[i])
- print PatternSelector(kind)[template]
+ print(PatternSelector(kind)[template])
diff --git a/cts/remote.py b/cts/remote.py
index b32b028..040b48a 100644
--- a/cts/remote.py
+++ b/cts/remote.py
@@ -147,7 +147,7 @@ class RemoteExec:
sysname = args[0]
command = args[1]
- #print "sysname: %s, us: %s" % (sysname, self.OurNode)
+ #print("sysname: %s, us: %s" % (sysname, self.OurNode))
if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost":
ret = command
else:
@@ -164,7 +164,7 @@ class RemoteExec:
self.logger.debug(args)
def call_async(self, node, command, completionDelegate=None):
- #if completionDelegate: print "Waiting for %d on %s: %s" % (proc.pid, node, command)
+ #if completionDelegate: print("Waiting for %d on %s: %s" % (proc.pid, node, command))
aproc = AsyncRemoteCmd(node, self._cmd([node, command]), completionDelegate=completionDelegate)
aproc.start()
return aproc
@@ -186,7 +186,7 @@ class RemoteExec:
proc = Popen(self._cmd([node, command]),
stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)
- #if completionDelegate: print "Waiting for %d on %s: %s" % (proc.pid, node, command)
+ #if completionDelegate: print("Waiting for %d on %s: %s" % (proc.pid, node, command))
if not synchronous and proc.pid > 0 and not self.silent:
aproc = AsyncWaitProc(proc, node, command, completionDelegate=completionDelegate)
aproc.start()
@@ -257,14 +257,14 @@ class RemoteFactory:
return RemoteExec(RemoteFactory.rsh, silent)
def enable_docker(self):
- print "Using DOCKER backend for connections to cluster nodes"
+ print("Using DOCKER backend for connections to cluster nodes")
RemoteFactory.rsh.Command = "/usr/libexec/phd/docker/phd_docker_remote_cmd "
RemoteFactory.rsh.CpCommand = "/usr/libexec/phd/docker/phd_docker_cp"
def enable_qarsh(self):
# http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/
- print "Using QARSH for connections to cluster nodes"
+ print("Using QARSH for connections to cluster nodes")
RemoteFactory.rsh.Command = "qarsh -t 300 -l root"
RemoteFactory.rsh.CpCommand = "qacp -q"
diff --git a/cts/watcher.py b/cts/watcher.py
index 1182c8b..de032f7 100644
--- a/cts/watcher.py
+++ b/cts/watcher.py
@@ -73,7 +73,7 @@ for i in range(0, len(args)):
skipthis=1
if not os.access(filename, os.R_OK):
- print prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0)
+ print(prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0))
sys.exit(1)
logfile=open(filename, 'r')
@@ -85,7 +85,7 @@ if offset != 'EOF':
if newsize >= offset:
logfile.seek(offset)
else:
- print prefix + ('File truncated from %d to %d' % (offset, newsize))
+ print(prefix + ('File truncated from %d to %d' % (offset, newsize)))
if (newsize*1.05) < offset:
logfile.seek(0)
# else: we probably just lost a few logs after a fencing op
@@ -103,10 +103,10 @@ while True:
line = logfile.readline()
if not line: break
- print line.strip()
+ print(line.strip())
count += 1
-print prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count)
+print(prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count))
logfile.close()
"""
@@ -158,7 +158,7 @@ class FileObj(SearchObj):
SearchObj.__init__(self, filename, host, name)
if host is not None:
- if not has_log_watcher.has_key(host):
+ if not host in has_log_watcher:
global log_watcher
global log_watcher_bin
@@ -381,7 +381,7 @@ class LogWatcher(RemoteExec):
else:
self.file_list.append(FileObj(self.filename))
- # print "%s now has %d files" % (self.name, len(self.file_list))
+ # print("%s now has %d files" % (self.name, len(self.file_list)))
def __del__(self):
if self.debug_level > 1: self.debug("Destroy")
@@ -406,7 +406,7 @@ class LogWatcher(RemoteExec):
raise ValueError("No sources to read from")
pending = []
- #print "%s waiting for %d operations" % (self.name, self.pending)
+ #print("%s waiting for %d operations" % (self.name, self.pending))
for f in self.file_list:
t = f.harvest_async(self)
if t:
@@ -418,7 +418,7 @@ class LogWatcher(RemoteExec):
self.logger.log("%s: Aborting after 20s waiting for %s logging commands" % (self.name, repr(t)))
return
- #print "Got %d lines" % len(self.line_cache)
+ #print("Got %d lines" % len(self.line_cache))
def end(self):
for f in self.file_list:
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt
index 5d5fa33..b0115fb 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt
@@ -643,6 +643,16 @@ indexterm:[Action,Property,on-fail]
indexterm:[enabled,Action Property]
indexterm:[Action,Property,enabled]
+|role
+|
+|This option only makes sense for recurring operations. It restricts
 the operation to a specific role. The truly paranoid can even
+ specify +role=Stopped+ which allows the cluster to detect an admin
+ that manually started cluster services.
+ Allowed values: +Stopped+, +Started+, +Slave+, +Master+.
+ indexterm:[role,Action Property]
+ indexterm:[Action,Property,role]
+
|=========================================================
[[s-operation-defaults]]
diff --git a/fencing/commands.c b/fencing/commands.c
index 0d2d614..bd3b27d 100644
--- a/fencing/commands.c
+++ b/fencing/commands.c
@@ -124,17 +124,7 @@ static xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char
static gboolean
is_action_required(const char *action, stonith_device_t *device)
{
- if(device == NULL) {
- return FALSE;
-
- } else if (device->required_actions == NULL) {
- return FALSE;
-
- } else if (strstr(device->required_actions, action)) {
- return TRUE;
- }
-
- return FALSE;
+ return device && device->automatic_unfencing && safe_str_eq(action, "on");
}
static int
@@ -449,7 +439,6 @@ free_device(gpointer data)
free_xml(device->agent_metadata);
free(device->namespace);
free(device->on_target_actions);
- free(device->required_actions);
free(device->agent);
free(device->id);
free(device);
@@ -713,8 +702,6 @@ read_action_metadata(stonith_device_t *device)
for (lpc = 0; lpc < max; lpc++) {
const char *on_target = NULL;
const char *action = NULL;
- const char *automatic = NULL;
- const char *required = NULL;
xmlNode *match = getXpathResult(xpath, lpc);
CRM_LOG_ASSERT(match != NULL);
@@ -722,8 +709,6 @@ read_action_metadata(stonith_device_t *device)
on_target = crm_element_value(match, "on_target");
action = crm_element_value(match, "name");
- automatic = crm_element_value(match, "automatic");
- required = crm_element_value(match, "required");
if(safe_str_eq(action, "list")) {
set_bit(device->flags, st_device_supports_list);
@@ -731,17 +716,21 @@ read_action_metadata(stonith_device_t *device)
set_bit(device->flags, st_device_supports_status);
} else if(safe_str_eq(action, "reboot")) {
set_bit(device->flags, st_device_supports_reboot);
- } else if(safe_str_eq(action, "on") && (crm_is_true(automatic))) {
- /* this setting implies required=true for unfencing */
- required = "true";
+ } else if (safe_str_eq(action, "on")) {
+ /* "automatic" means the cluster will unfence node when it joins */
+ const char *automatic = crm_element_value(match, "automatic");
+
+ /* "required" is a deprecated synonym for "automatic" */
+ const char *required = crm_element_value(match, "required");
+
+ if (crm_is_true(automatic) || crm_is_true(required)) {
+ device->automatic_unfencing = TRUE;
+ }
}
if (action && crm_is_true(on_target)) {
device->on_target_actions = add_action(device->on_target_actions, action);
}
- if (action && crm_is_true(required)) {
- device->required_actions = add_action(device->required_actions, action);
- }
}
freeXpathObject(xpath);
@@ -778,8 +767,7 @@ build_device_from_xml(xmlNode * msg)
value = crm_element_value(dev, "rsc_provides");
if (safe_str_eq(value, "unfencing")) {
- /* if this agent requires unfencing, 'on' is considered a required action */
- device->required_actions = add_action(device->required_actions, "on");
+ device->automatic_unfencing = TRUE;
}
if (is_action_required("on", device)) {
@@ -1224,7 +1212,6 @@ stonith_device_action(xmlNode * msg, char **output)
} else if (device) {
cmd = create_async_command(msg);
if (cmd == NULL) {
- free_device(device);
return -EPROTO;
}
diff --git a/fencing/internal.h b/fencing/internal.h
index 5fb8f9c..0f418ec 100644
--- a/fencing/internal.h
+++ b/fencing/internal.h
@@ -26,12 +26,13 @@ typedef struct stonith_device_s {
/*! list of actions that must execute on the target node. Used for unfencing */
char *on_target_actions;
- char *required_actions;
GListPtr targets;
time_t targets_age;
gboolean has_attr_map;
/* should nodeid parameter for victim be included in agent arguments */
gboolean include_nodeid;
+ /* whether the cluster should automatically unfence nodes with the device */
+ gboolean automatic_unfencing;
guint priority;
guint active_pid;
@@ -59,7 +60,8 @@ typedef struct stonith_device_s {
enum st_remap_phase {
st_phase_requested = 0,
st_phase_off = 1,
- st_phase_on = 2
+ st_phase_on = 2,
+ st_phase_max = 3
};
typedef struct remote_fencing_op_s {
@@ -128,15 +130,9 @@ typedef struct remote_fencing_op_s {
/*! The current operation phase being executed */
enum st_remap_phase phase;
- /* For phase 0 or 1 (requested action or a remapped "off"), required devices
- * will be executed regardless of what topology level is being executed
- * currently. For phase 1 (remapped "on"), required devices will not be
- * attempted, because the cluster will execute them automatically when the
- * node next joins the cluster.
- */
- /*! Lists of devices marked as required for each phase */
- GListPtr required_list[3];
- /*! The device list of all the devices at the current executing topology level. */
+ /*! Devices with automatic unfencing (always run if "on" requested, never if remapped) */
+ GListPtr automatic_list;
+ /*! List of all devices at the currently executing topology level */
GListPtr devices_list;
/*! Current entry in the topology device list */
GListPtr devices;
diff --git a/fencing/main.c b/fencing/main.c
index 46d7352..c48e12d 100644
--- a/fencing/main.c
+++ b/fencing/main.c
@@ -553,7 +553,7 @@ remove_fencing_topology(xmlXPathObjectPtr xpathObj)
}
static void
-register_fencing_topology(xmlXPathObjectPtr xpathObj, gboolean force)
+register_fencing_topology(xmlXPathObjectPtr xpathObj)
{
int max = numXpathResults(xpathObj), lpc = 0;
@@ -584,7 +584,7 @@ register_fencing_topology(xmlXPathObjectPtr xpathObj, gboolean force)
*/
static void
-fencing_topology_init(xmlNode * msg)
+fencing_topology_init()
{
xmlXPathObjectPtr xpathObj = NULL;
const char *xpath = "//" XML_TAG_FENCING_LEVEL;
@@ -598,7 +598,7 @@ fencing_topology_init(xmlNode * msg)
/* Grab everything */
xpathObj = xpath_search(local_cib, xpath);
- register_fencing_topology(xpathObj, TRUE);
+ register_fencing_topology(xpathObj);
freeXpathObject(xpathObj);
}
@@ -931,7 +931,7 @@ update_fencing_topology(const char *event, xmlNode * msg)
xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
xpathObj = xpath_search(msg, xpath);
- register_fencing_topology(xpathObj, FALSE);
+ register_fencing_topology(xpathObj);
freeXpathObject(xpathObj);
} else if(format == 2) {
@@ -969,7 +969,7 @@ update_fencing_topology(const char *event, xmlNode * msg)
/* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
op, add[0], add[1], add[2], xpath);
- fencing_topology_init(NULL);
+ fencing_topology_init();
return;
}
@@ -977,7 +977,7 @@ update_fencing_topology(const char *event, xmlNode * msg)
/* Change to the topology in general */
crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s",
op, add[0], add[1], add[2], xpath);
- fencing_topology_init(NULL);
+ fencing_topology_init();
return;
} else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
@@ -989,7 +989,7 @@ update_fencing_topology(const char *event, xmlNode * msg)
} else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
op, add[0], add[1], add[2], xpath);
- fencing_topology_init(NULL);
+ fencing_topology_init();
return;
}
@@ -1098,7 +1098,7 @@ update_cib_cache_cb(const char *event, xmlNode * msg)
} else if (stonith_enabled_saved == FALSE) {
crm_info("Updating stonith device and topology lists now that stonith is enabled");
stonith_enabled_saved = TRUE;
- fencing_topology_init(NULL);
+ fencing_topology_init();
cib_devices_update();
} else {
@@ -1114,7 +1114,7 @@ init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *us
have_cib_devices = TRUE;
local_cib = copy_xml(output);
- fencing_topology_init(msg);
+ fencing_topology_init();
cib_devices_update();
}
@@ -1239,7 +1239,7 @@ st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void
* This is a hack until we can send to a nodeid and/or we fix node name lookups
* These messages are ignored in stonith_peer_callback()
*/
- xmlNode *query = query = create_xml_node(NULL, "stonith_command");
+ xmlNode *query = create_xml_node(NULL, "stonith_command");
crm_xml_add(query, F_XML_TAGNAME, "stonith_command");
crm_xml_add(query, F_TYPE, T_STONITH_NG);
diff --git a/fencing/remote.c b/fencing/remote.c
index 2c00b5f..d741672 100644
--- a/fencing/remote.c
+++ b/fencing/remote.c
@@ -60,13 +60,13 @@ typedef struct device_properties_s {
/* The remaining members are indexed by the operation's "phase" */
/* Whether this device has been executed in each phase */
- gboolean executed[3];
+ gboolean executed[st_phase_max];
/* Whether this device is disallowed from executing in each phase */
- gboolean disallowed[3];
+ gboolean disallowed[st_phase_max];
/* Action-specific timeout for each phase */
- int custom_action_timeout[3];
+ int custom_action_timeout[st_phase_max];
/* Action-specific maximum random delay for each phase */
- int delay_max[3];
+ int delay_max[st_phase_max];
} device_properties_t;
typedef struct st_query_result_s {
@@ -207,22 +207,6 @@ grab_peer_device(const remote_fencing_op_t *op, st_query_result_t *peer,
return TRUE;
}
-/*
- * \internal
- * \brief Free the list of required devices for a particular phase
- *
- * \param[in,out] op Operation to modify
- * \param[in] phase Phase to modify
- */
-static void
-free_required_list(remote_fencing_op_t *op, enum st_remap_phase phase)
-{
- if (op->required_list[phase]) {
- g_list_free_full(op->required_list[phase], free);
- op->required_list[phase] = NULL;
- }
-}
-
static void
clear_remote_op_timers(remote_fencing_op_t * op)
{
@@ -268,9 +252,7 @@ free_remote_op(gpointer data)
g_list_free_full(op->devices_list, free);
op->devices_list = NULL;
}
- free_required_list(op, st_phase_requested);
- free_required_list(op, st_phase_off);
- free_required_list(op, st_phase_on);
+ g_list_free_full(op->automatic_list, free);
free(op);
}
@@ -323,10 +305,10 @@ op_phase_on(remote_fencing_op_t *op)
op->phase = st_phase_on;
strcpy(op->action, "on");
- /* Any devices that are required for "on" will be automatically executed by
- * the cluster when the node next joins, so we skip them here.
+ /* Skip devices with automatic unfencing, because the cluster will handle it
+ * when the node rejoins.
*/
- for (iter = op->required_list[op->phase]; iter != NULL; iter = iter->next) {
+ for (iter = op->automatic_list; iter != NULL; iter = iter->next) {
GListPtr match = g_list_find_custom(op->devices_list, iter->data,
sort_strings);
@@ -334,12 +316,8 @@ op_phase_on(remote_fencing_op_t *op)
op->devices_list = g_list_remove(op->devices_list, match->data);
}
}
-
- /* We know this level will succeed, because phase 1 completed successfully
- * and we ignore any errors from phase 2. So we can free the required list,
- * which will keep them from being executed after the device list is done.
- */
- free_required_list(op, op->phase);
+ g_list_free_full(op->automatic_list, free);
+ op->automatic_list = NULL;
/* Rewind device list pointer */
op->devices = op->devices_list;
@@ -659,28 +637,25 @@ topology_is_empty(stonith_topology_t *tp)
/*
* \internal
- * \brief Add a device to the required list for a particular phase
+ * \brief Add a device to an operation's automatic unfencing list
*
* \param[in,out] op Operation to modify
- * \param[in] phase Phase to modify
* \param[in] device Device ID to add
*/
static void
-add_required_device(remote_fencing_op_t *op, enum st_remap_phase phase,
- const char *device)
+add_required_device(remote_fencing_op_t *op, const char *device)
{
- GListPtr match = g_list_find_custom(op->required_list[phase], device,
+ GListPtr match = g_list_find_custom(op->automatic_list, device,
sort_strings);
if (!match) {
- op->required_list[phase] = g_list_prepend(op->required_list[phase],
- strdup(device));
+ op->automatic_list = g_list_prepend(op->automatic_list, strdup(device));
}
}
/*
* \internal
- * \brief Remove a device from the required list for the current phase
+ * \brief Remove a device from the automatic unfencing list
*
* \param[in,out] op Operation to modify
* \param[in] device Device ID to remove
@@ -688,12 +663,11 @@ add_required_device(remote_fencing_op_t *op, enum st_remap_phase phase,
static void
remove_required_device(remote_fencing_op_t *op, const char *device)
{
- GListPtr match = g_list_find_custom(op->required_list[op->phase], device,
+ GListPtr match = g_list_find_custom(op->automatic_list, device,
sort_strings);
if (match) {
- op->required_list[op->phase] = g_list_remove(op->required_list[op->phase],
- match->data);
+ op->automatic_list = g_list_remove(op->automatic_list, match->data);
}
}
@@ -938,7 +912,7 @@ create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer)
op = calloc(1, sizeof(remote_fencing_op_t));
- crm_element_value_int(request, F_STONITH_TIMEOUT, (int *)&(op->base_timeout));
+ crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout));
if (peer && dev) {
op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID);
@@ -974,7 +948,7 @@ create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer)
crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
op->call_options = call_options;
- crm_element_value_int(request, F_STONITH_CALLID, (int *)&(op->client_callid));
+ crm_element_value_int(request, F_STONITH_CALLID, &(op->client_callid));
crm_trace("%s new stonith op: %s - %s of %s for %s",
(peer
@@ -1352,14 +1326,17 @@ advance_op_topology(remote_fencing_op_t *op, const char *device, xmlNode *msg,
op->devices = op->devices->next;
}
- /* If this device was required, it's not anymore */
- remove_required_device(op, device);
+ /* Handle automatic unfencing if an "on" action was requested */
+ if ((op->phase == st_phase_requested) && safe_str_eq(op->action, "on")) {
+ /* If the device we just executed was required, it's not anymore */
+ remove_required_device(op, device);
- /* If there are no more devices at this topology level,
- * run through any required devices not already executed
- */
- if (op->devices == NULL) {
- op->devices = op->required_list[op->phase];
+ /* If there are no more devices at this topology level, run through any
+ * remaining devices with automatic unfencing
+ */
+ if (op->devices == NULL) {
+ op->devices = op->automatic_list;
+ }
}
if ((op->devices == NULL) && (op->phase == st_phase_off)) {
@@ -1613,8 +1590,6 @@ parse_action_specific(xmlNode *xml, const char *peer, const char *device,
const char *action, remote_fencing_op_t *op,
enum st_remap_phase phase, device_properties_t *props)
{
- int required;
-
props->custom_action_timeout[phase] = 0;
crm_element_value_int(xml, F_STONITH_ACTION_TIMEOUT,
&props->custom_action_timeout[phase]);
@@ -1630,20 +1605,16 @@ parse_action_specific(xmlNode *xml, const char *peer, const char *device,
peer, device, props->delay_max[phase], action);
}
- required = 0;
- crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required);
- if (required) {
- /* If the action is marked as required, add the device to the
- * operation's list of required devices for this phase. We use this
- * for unfencing when executing a topology. In phase 0 (requested
- * action) or phase 1 (remapped "off"), required devices get executed
- * regardless of their topology level; in phase 2 (remapped "on"),
- * required devices are not attempted, because the cluster will
- * execute them automatically later.
- */
- crm_trace("Peer %s requires device %s to execute for action %s",
- peer, device, action);
- add_required_device(op, phase, device);
+ /* Handle devices with automatic unfencing */
+ if (safe_str_eq(action, "on")) {
+ int required = 0;
+
+ crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required);
+ if (required) {
+ crm_trace("Peer %s requires device %s to execute for action %s",
+ peer, device, action);
+ add_required_device(op, device);
+ }
}
/* If a reboot is remapped to off+on, it's possible that a node is allowed
diff --git a/include/crm/cib.h b/include/crm/cib.h
index cb465bf..306706e 100644
--- a/include/crm/cib.h
+++ b/include/crm/cib.h
@@ -136,6 +136,13 @@ typedef struct cib_api_operations_s {
void *user_data, const char *callback_name,
void (*callback) (xmlNode *, int, int, xmlNode *, void *));
+ gboolean (*register_callback_full)(cib_t *cib, int call_id, int timeout,
+ gboolean only_success, void *user_data,
+ const char *callback_name,
+ void (*callback)(xmlNode *, int, int,
+ xmlNode *, void *),
+ void (*free_func)(void *));
+
} cib_api_operations_t;
struct cib_s {
diff --git a/include/crm/cib/internal.h b/include/crm/cib/internal.h
index 431a2bd..adc2faf 100644
--- a/include/crm/cib/internal.h
+++ b/include/crm/cib/internal.h
@@ -106,7 +106,7 @@ typedef struct cib_callback_client_s {
void *user_data;
gboolean only_success;
struct timer_rec_s *timer;
-
+ void (*free_func)(void *);
} cib_callback_client_t;
struct timer_rec_s {
@@ -137,6 +137,13 @@ int cib_native_register_notification(cib_t * cib, const char *callback, int enab
gboolean cib_client_register_callback(cib_t * cib, int call_id, int timeout, gboolean only_success,
void *user_data, const char *callback_name,
void (*callback) (xmlNode *, int, int, xmlNode *, void *));
+gboolean cib_client_register_callback_full(cib_t *cib, int call_id,
+ int timeout, gboolean only_success,
+ void *user_data,
+ const char *callback_name,
+ void (*callback)(xmlNode *, int, int,
+ xmlNode *, void *),
+ void (*free_func)(void *));
int cib_process_query(const char *op, int options, const char *section, xmlNode * req,
xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib,
diff --git a/include/crm/common/ipc.h b/include/crm/common/ipc.h
index db83b09..d6ceda2 100644
--- a/include/crm/common/ipc.h
+++ b/include/crm/common/ipc.h
@@ -75,7 +75,7 @@ long crm_ipc_read(crm_ipc_t * client);
const char *crm_ipc_buffer(crm_ipc_t * client);
uint32_t crm_ipc_buffer_flags(crm_ipc_t * client);
const char *crm_ipc_name(crm_ipc_t * client);
-int crm_ipc_default_buffer_size(void);
+unsigned int crm_ipc_default_buffer_size(void);
/* Utils */
xmlNode *create_hello_message(const char *uuid, const char *client_name,
diff --git a/include/crm/common/ipcs.h b/include/crm/common/ipcs.h
index b43fc53..d825912 100644
--- a/include/crm/common/ipcs.h
+++ b/include/crm/common/ipcs.h
@@ -110,7 +110,7 @@ void crm_ipcs_send_ack(crm_client_t * c, uint32_t request, uint32_t flags,
const char *tag, const char *function, int line);
/* when max_send_size is 0, default ipc buffer size is used */
-ssize_t crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec **result, int32_t max_send_size);
+ssize_t crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, uint32_t max_send_size);
ssize_t crm_ipcs_send(crm_client_t * c, uint32_t request, xmlNode * message, enum crm_ipc_flags flags);
ssize_t crm_ipcs_sendv(crm_client_t * c, struct iovec *iov, enum crm_ipc_flags flags);
xmlNode *crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t * flags);
diff --git a/lib/cib/cib_client.c b/lib/cib/cib_client.c
index b13323e..f7a19b8 100644
--- a/lib/cib/cib_client.c
+++ b/lib/cib/cib_client.c
@@ -198,6 +198,11 @@ cib_destroy_op_callback(gpointer data)
g_source_remove(blob->timer->ref);
}
free(blob->timer);
+
+ if (blob->user_data && blob->free_func) {
+ blob->free_func(blob->user_data);
+ }
+
free(blob);
}
@@ -327,10 +332,15 @@ cib_new(void)
return cib_native_new();
}
-/* this is backwards...
- cib_*_new should call this not the other way around
+/*
+ * \internal
+ * \brief Create a generic CIB connection instance
+ *
+ * \return Newly allocated and initialized cib_t instance
+ *
+ * \note This is called by each variant's cib_*_new() function before setting
+ * variant-specific values.
*/
-
cib_t *
cib_new_variant(void)
{
@@ -364,6 +374,7 @@ cib_new_variant(void)
new_cib->cmds->add_notify_callback = cib_client_add_notify_callback;
new_cib->cmds->del_notify_callback = cib_client_del_notify_callback;
new_cib->cmds->register_callback = cib_client_register_callback;
+ new_cib->cmds->register_callback_full = cib_client_register_callback_full;
new_cib->cmds->noop = cib_client_noop;
new_cib->cmds->ping = cib_client_ping;
@@ -545,6 +556,19 @@ cib_client_register_callback(cib_t * cib, int call_id, int timeout, gboolean onl
void *user_data, const char *callback_name,
void (*callback) (xmlNode *, int, int, xmlNode *, void *))
{
+ return cib_client_register_callback_full(cib, call_id, timeout,
+ only_success, user_data,
+ callback_name, callback, NULL);
+}
+
+gboolean
+cib_client_register_callback_full(cib_t *cib, int call_id, int timeout,
+ gboolean only_success, void *user_data,
+ const char *callback_name,
+ void (*callback)(xmlNode *, int, int,
+ xmlNode *, void *),
+ void (*free_func)(void *))
+{
cib_callback_client_t *blob = NULL;
if (call_id < 0) {
@@ -553,6 +577,9 @@ cib_client_register_callback(cib_t * cib, int call_id, int timeout, gboolean onl
} else {
crm_warn("CIB call failed: %s", pcmk_strerror(call_id));
}
+ if (user_data && free_func) {
+ free_func(user_data);
+ }
return FALSE;
}
@@ -561,6 +588,7 @@ cib_client_register_callback(cib_t * cib, int call_id, int timeout, gboolean onl
blob->only_success = only_success;
blob->user_data = user_data;
blob->callback = callback;
+ blob->free_func = free_func;
if (timeout > 0) {
struct timer_rec_s *async_timer = NULL;
diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c
index d321517..4dc65aa 100644
--- a/lib/cib/cib_utils.c
+++ b/lib/cib/cib_utils.c
@@ -624,12 +624,6 @@ cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc)
{
xmlNode *output = NULL;
cib_callback_client_t *blob = NULL;
- cib_callback_client_t local_blob;
-
- local_blob.id = NULL;
- local_blob.callback = NULL;
- local_blob.user_data = NULL;
- local_blob.only_success = FALSE;
if (msg != NULL) {
crm_element_value_int(msg, F_CIB_RC, &rc);
@@ -638,16 +632,8 @@ cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc)
}
blob = g_hash_table_lookup(cib_op_callback_table, GINT_TO_POINTER(call_id));
-
- if (blob != NULL) {
- local_blob = *blob;
- blob = NULL;
-
- remove_cib_op_callback(call_id, FALSE);
-
- } else {
+ if (blob == NULL) {
crm_trace("No callback found for call %d", call_id);
- local_blob.callback = NULL;
}
if (cib == NULL) {
@@ -659,15 +645,20 @@ cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc)
rc = pcmk_ok;
}
- if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) {
- crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id);
- local_blob.callback(msg, call_id, rc, output, local_blob.user_data);
+ if (blob && blob->callback && (rc == pcmk_ok || blob->only_success == FALSE)) {
+ crm_trace("Invoking callback %s for call %d", crm_str(blob->id), call_id);
+ blob->callback(msg, call_id, rc, output, blob->user_data);
} else if (cib && cib->op_callback == NULL && rc != pcmk_ok) {
crm_warn("CIB command failed: %s", pcmk_strerror(rc));
crm_log_xml_debug(msg, "Failed CIB Update");
}
+ /* This may free user_data, so do it after the callback */
+ if (blob) {
+ remove_cib_op_callback(call_id, FALSE);
+ }
+
if (cib && cib->op_callback != NULL) {
crm_trace("Invoking global callback for call %d", call_id);
cib->op_callback(msg, call_id, rc, output);
diff --git a/lib/cluster/legacy.c b/lib/cluster/legacy.c
index d93613d..e9905f6 100644
--- a/lib/cluster/legacy.c
+++ b/lib/cluster/legacy.c
@@ -52,6 +52,21 @@ void *ais_ipc_ctx = NULL;
hdb_handle_t ais_ipc_handle = 0;
+static bool valid_cman_name(const char *name, uint32_t nodeid)
+{
+ bool rc = TRUE;
+
+ /* Yes, %d, because that's what CMAN does */
+ char *fakename = crm_strdup_printf("Node%d", nodeid);
+
+ if(crm_str_eq(fakename, name, TRUE)) {
+ rc = FALSE;
+ crm_notice("Ignoring inferred name from cman: %s", fakename);
+ }
+ free(fakename);
+ return rc;
+}
+
static gboolean
plugin_get_details(uint32_t * id, char **uname)
{
@@ -361,6 +376,7 @@ cman_event_callback(cman_handle_t handle, void *privdata, int reason, int arg)
arg ? "retained" : "still lost");
}
+ memset(cman_nodes, 0, MAX_NODES * sizeof(cman_node_t));
rc = cman_get_nodes(pcmk_cman_handle, MAX_NODES, &node_count, cman_nodes);
if (rc < 0) {
crm_err("Couldn't query cman node list: %d %d", rc, errno);
@@ -369,6 +385,7 @@ cman_event_callback(cman_handle_t handle, void *privdata, int reason, int arg)
for (lpc = 0; lpc < node_count; lpc++) {
crm_node_t *peer = NULL;
+ const char *name = NULL;
if (cman_nodes[lpc].cn_nodeid == 0) {
/* Never allow node ID 0 to be considered a member #315711 */
@@ -376,7 +393,11 @@ cman_event_callback(cman_handle_t handle, void *privdata, int reason, int arg)
continue;
}
- peer = crm_get_peer(cman_nodes[lpc].cn_nodeid, cman_nodes[lpc].cn_name);
+ if(valid_cman_name(cman_nodes[lpc].cn_name, cman_nodes[lpc].cn_nodeid)) {
+ name = cman_nodes[lpc].cn_name;
+ }
+
+ peer = crm_get_peer(cman_nodes[lpc].cn_nodeid, name);
if(cman_nodes[lpc].cn_member) {
crm_update_peer_state(__FUNCTION__, peer, CRM_NODE_MEMBER, crm_peer_seq);
@@ -631,15 +652,17 @@ cman_node_name(uint32_t nodeid)
cman = cman_init(NULL);
if (cman != NULL && cman_is_active(cman)) {
- us.cn_name[0] = 0;
+
+ memset(&us, 0, sizeof(cman_node_t));
cman_get_node(cman, nodeid, &us);
- name = strdup(us.cn_name);
- crm_info("Using CMAN node name %s for %u", name, nodeid);
- }
+ if(valid_cman_name(us.cn_name, nodeid)) {
+ name = strdup(us.cn_name);
+ crm_info("Using CMAN node name %s for %u", name, nodeid);
+ }
+ }
cman_finish(cman);
# endif
-
if (name == NULL) {
crm_debug("Unable to get node name for nodeid %u", nodeid);
}
@@ -667,7 +690,6 @@ init_cs_connection_once(crm_cluster_t * cluster)
if (cluster_connect_cpg(cluster) == FALSE) {
return FALSE;
}
- cluster->uname = cman_node_name(0 /* CMAN_NODEID_US */ );
break;
case pcmk_cluster_heartbeat:
crm_info("Could not find an active corosync based cluster");
diff --git a/lib/common/ipc.c b/lib/common/ipc.c
index d71c54a..f4188ed 100644
--- a/lib/common/ipc.c
+++ b/lib/common/ipc.c
@@ -46,8 +46,8 @@ struct crm_ipc_response_header {
};
static int hdr_offset = 0;
-static int ipc_buffer_max = 0;
-static unsigned int pick_ipc_buffer(int max);
+static unsigned int ipc_buffer_max = 0;
+static unsigned int pick_ipc_buffer(unsigned int max);
static inline void
crm_ipc_init(void)
@@ -60,7 +60,7 @@ crm_ipc_init(void)
}
}
-int
+unsigned int
crm_ipc_default_buffer_size(void)
{
return pick_ipc_buffer(0);
@@ -91,7 +91,7 @@ generateReference(const char *custom1, const char *custom2)
since_epoch = calloc(1, reference_len);
if (since_epoch != NULL) {
- sprintf(since_epoch, "%s-%s-%ld-%u",
+ sprintf(since_epoch, "%s-%s-%lu-%u",
local_cust1, local_cust2, (unsigned long)time(NULL), ref_counter++);
}
@@ -431,7 +431,7 @@ crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t
unsigned int size_u = 1 + header->size_uncompressed;
uncompressed = calloc(1, size_u);
- crm_trace("Decompressing message data %d bytes into %d bytes",
+ crm_trace("Decompressing message data %u bytes into %u bytes",
header->size_compressed, size_u);
rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0);
@@ -531,9 +531,9 @@ crm_ipcs_flush_events(crm_client_t * c)
}
ssize_t
-crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, int32_t max_send_size)
+crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, uint32_t max_send_size)
{
- static int biggest = 0;
+ static unsigned int biggest = 0;
struct iovec *iov;
unsigned int total = 0;
char *compressed = NULL;
@@ -579,20 +579,18 @@ crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, int
free(buffer);
- if (header->size_compressed > biggest) {
- biggest = 2 * QB_MAX(header->size_compressed, biggest);
- }
+ biggest = QB_MAX(header->size_compressed, biggest);
} else {
ssize_t rc = -EMSGSIZE;
crm_log_xml_trace(message, "EMSGSIZE");
- biggest = 2 * QB_MAX(header->size_uncompressed, biggest);
+ biggest = QB_MAX(header->size_uncompressed, biggest);
crm_err
- ("Could not compress the message into less than the configured ipc limit (%d bytes)."
- "Set PCMK_ipc_buffer to a higher value (%d bytes suggested)", max_send_size,
- biggest);
+ ("Could not compress the message (%u bytes) into less than the configured ipc limit (%u bytes). "
+ "Set PCMK_ipc_buffer to a higher value (%u bytes suggested)",
+ header->size_uncompressed, max_send_size, 4 * biggest);
free(compressed);
free(buffer);
@@ -656,7 +654,7 @@ crm_ipcs_sendv(crm_client_t * c, struct iovec * iov, enum crm_ipc_flags flags)
rc = qb_ipcs_response_sendv(c->ipcs, iov, 2);
if (rc < header->qb.size) {
- crm_notice("Response %d to %p[%d] (%d bytes) failed: %s (%d)",
+ crm_notice("Response %d to %p[%d] (%u bytes) failed: %s (%d)",
header->qb.id, c->ipcs, c->pid, header->qb.size, pcmk_strerror(rc), rc);
} else {
@@ -747,9 +745,9 @@ struct crm_ipc_s {
};
static unsigned int
-pick_ipc_buffer(int max)
+pick_ipc_buffer(unsigned int max)
{
- static int global_max = 0;
+ static unsigned int global_max = 0;
if(global_max == 0) {
const char *env = getenv("PCMK_ipc_buffer");
@@ -925,7 +923,7 @@ crm_ipc_decompress(crm_ipc_t * client)
unsigned int new_buf_size = QB_MAX((hdr_offset + size_u), client->max_buf_size);
char *uncompressed = calloc(1, new_buf_size);
- crm_trace("Decompressing message data %d bytes into %d bytes",
+ crm_trace("Decompressing message data %u bytes into %u bytes",
header->size_compressed, size_u);
rc = BZ2_bzBuffToBuffDecompress(uncompressed + hdr_offset, &size_u,
@@ -986,7 +984,7 @@ crm_ipc_read(crm_ipc_t * client)
return -EBADMSG;
}
- crm_trace("Received %s event %d, size=%d, rc=%d, text: %.100s",
+ crm_trace("Received %s event %d, size=%u, rc=%d, text: %.100s",
client->name, header->qb.id, header->qb.size, client->msg_size,
client->buffer + hdr_offset);
@@ -1166,9 +1164,9 @@ crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, in
if(header->size_compressed) {
if(factor < 10 && (client->max_buf_size / 10) < (rc / factor)) {
- crm_notice("Compressed message exceeds %d0%% of the configured ipc limit (%d bytes), "
- "consider setting PCMK_ipc_buffer to %d or higher",
- factor, client->max_buf_size, 2*client->max_buf_size);
+ crm_notice("Compressed message exceeds %d0%% of the configured ipc limit (%u bytes), "
+ "consider setting PCMK_ipc_buffer to %u or higher",
+ factor, client->max_buf_size, 2 * client->max_buf_size);
factor++;
}
}
@@ -1211,7 +1209,7 @@ crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, in
if (rc > 0) {
struct crm_ipc_response_header *hdr = (struct crm_ipc_response_header *)(void*)client->buffer;
- crm_trace("Received response %d, size=%d, rc=%ld, text: %.200s", hdr->qb.id, hdr->qb.size,
+ crm_trace("Received response %d, size=%u, rc=%ld, text: %.200s", hdr->qb.id, hdr->qb.size,
rc, crm_ipc_buffer(client));
if (reply) {
diff --git a/lib/common/xml.c b/lib/common/xml.c
index 8eed245..299c7bf 100644
--- a/lib/common/xml.c
+++ b/lib/common/xml.c
@@ -3821,6 +3821,7 @@ crm_xml_dump(xmlNode * data, int options, char **buffer, int *offset, int *max,
if(data == NULL) {
*offset = 0;
*max = 0;
+ return;
}
#if 0
if (is_not_set(options, xml_log_option_filtered)) {
@@ -5621,7 +5622,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g
break;
} else if (known_schemas[lpc].transform == NULL) {
- crm_notice("%s-style configuration is also valid for %s",
+ crm_debug("%s-style configuration is also valid for %s",
known_schemas[lpc].name, known_schemas[next].name);
if (validate_with(xml, next, to_logs)) {
diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c
index f5e34ee..42bdf2b 100644
--- a/lib/lrmd/lrmd_client.c
+++ b/lib/lrmd/lrmd_client.c
@@ -1369,7 +1369,7 @@ lrmd_api_disconnect(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->private;
- crm_info("Disconnecting from lrmd service");
+ crm_info("Disconnecting from %d lrmd service", native->type);
switch (native->type) {
case CRM_CLIENT_IPC:
lrmd_ipc_disconnect(lrmd);
diff --git a/lib/services/dbus.c b/lib/services/dbus.c
index e2efecb..d42affe 100644
--- a/lib/services/dbus.c
+++ b/lib/services/dbus.c
@@ -329,9 +329,6 @@ pcmk_dbus_lookup_cb(DBusPendingCall *pending, void *user_data)
pcmk_dbus_lookup_result(reply, user_data);
- if(pending) {
- dbus_pending_call_unref(pending);
- }
if(reply) {
dbus_message_unref(reply);
}
diff --git a/lib/services/services.c b/lib/services/services.c
index 7e2b9f7..3f40078 100644
--- a/lib/services/services.c
+++ b/lib/services/services.c
@@ -150,6 +150,7 @@ resources_action_create(const char *name, const char *standard, const char *prov
op = calloc(1, sizeof(svc_action_t));
op->opaque = calloc(1, sizeof(svc_action_private_t));
+ op->opaque->pending = NULL;
op->rsc = strdup(name);
op->action = strdup(action);
op->interval = interval;
@@ -158,6 +159,7 @@ resources_action_create(const char *name, const char *standard, const char *prov
op->agent = strdup(agent);
op->sequence = ++operations;
op->flags = flags;
+
if (asprintf(&op->id, "%s_%s_%d", name, action, interval) == -1) {
goto return_error;
}
@@ -335,6 +337,7 @@ services_action_create_generic(const char *exec, const char *args[])
op->opaque->exec = strdup(exec);
op->opaque->args[0] = strdup(exec);
+ op->opaque->pending = NULL;
for (cur_arg = 1; args && args[cur_arg - 1]; cur_arg++) {
op->opaque->args[cur_arg] = strdup(args[cur_arg - 1]);
@@ -361,17 +364,17 @@ services_set_op_pending(svc_action_t *op, DBusPendingCall *pending)
{
if (op->opaque->pending && (op->opaque->pending != pending)) {
if (pending) {
- crm_info("Lost pending DBus call (%p)", op->opaque->pending);
+ crm_info("Lost pending %s DBus call (%p)", op->id, op->opaque->pending);
} else {
- crm_trace("Done with pending DBus call (%p)", op->opaque->pending);
+ crm_info("Done with pending %s DBus call (%p)", op->id, op->opaque->pending);
}
dbus_pending_call_unref(op->opaque->pending);
}
op->opaque->pending = pending;
if (pending) {
- crm_trace("Updated pending DBus call (%p)", pending);
+ crm_info("Updated pending %s DBus call (%p)", op->id, pending);
} else {
- crm_trace("Cleared pending DBus call");
+ crm_info("Cleared pending %s DBus call", op->id);
}
}
#endif
@@ -457,7 +460,7 @@ services_action_free(svc_action_t * op)
gboolean
cancel_recurring_action(svc_action_t * op)
{
- crm_info("Cancelling operation %s", op->id);
+ crm_info("Cancelling %s operation %s", op->standard, op->id);
if (recurring_actions) {
g_hash_table_remove(recurring_actions, op->id);
diff --git a/lib/services/systemd.c b/lib/services/systemd.c
index e1e1bc9..ca56915 100644
--- a/lib/services/systemd.c
+++ b/lib/services/systemd.c
@@ -189,16 +189,13 @@ systemd_loadunit_cb(DBusPendingCall *pending, void *user_data)
reply = dbus_pending_call_steal_reply(pending);
}
- if(op) {
- crm_trace("Got result: %p for %p for %s, %s", reply, pending, op->rsc, op->action);
- } else {
- crm_trace("Got result: %p for %p", reply, pending);
- }
+ crm_trace("Got result: %p for %p / %p for %s", reply, pending, op->opaque->pending, op->id);
+
+ CRM_LOG_ASSERT(pending == op->opaque->pending);
+ services_set_op_pending(op, NULL);
+
systemd_loadunit_result(reply, user_data);
- if(pending) {
- dbus_pending_call_unref(pending);
- }
if(reply) {
dbus_message_unref(reply);
}
@@ -209,6 +206,7 @@ systemd_unit_by_name(const gchar * arg_name, svc_action_t *op)
{
DBusMessage *msg;
DBusMessage *reply = NULL;
+ DBusPendingCall* pending = NULL;
char *name = NULL;
/*
@@ -249,7 +247,11 @@ systemd_unit_by_name(const gchar * arg_name, svc_action_t *op)
return munit;
}
- pcmk_dbus_send(msg, systemd_proxy, systemd_loadunit_cb, op, op? op->timeout : DBUS_TIMEOUT_USE_DEFAULT);
+ pending = pcmk_dbus_send(msg, systemd_proxy, systemd_loadunit_cb, op, op->timeout);
+ if(pending) {
+ services_set_op_pending(op, pending);
+ }
+
dbus_message_unref(msg);
return NULL;
}
@@ -459,23 +461,12 @@ systemd_async_dispatch(DBusPendingCall *pending, void *user_data)
reply = dbus_pending_call_steal_reply(pending);
}
- if(op) {
- crm_trace("Got result: %p for %p for %s, %s", reply, pending, op->rsc, op->action);
- if (pending == op->opaque->pending) {
- op->opaque->pending = NULL;
- } else {
- crm_info("Received unexpected reply for pending DBus call (%p vs %p)",
- op->opaque->pending, pending);
- }
- systemd_exec_result(reply, op);
+ crm_trace("Got result: %p for %p for %s, %s", reply, pending, op->rsc, op->action);
- } else {
- crm_trace("Got result: %p for %p", reply, pending);
- }
+ CRM_LOG_ASSERT(pending == op->opaque->pending);
+ services_set_op_pending(op, NULL);
+ systemd_exec_result(reply, op);
- if(pending) {
- dbus_pending_call_unref(pending);
- }
if(reply) {
dbus_message_unref(reply);
}
@@ -536,7 +527,6 @@ systemd_unit_exec_with_unit(svc_action_t * op, const char *unit)
free(state);
return op->rc == PCMK_OCF_OK;
} else if (pending) {
- dbus_pending_call_ref(pending);
services_set_op_pending(op, pending);
return TRUE;
}
diff --git a/lib/services/upstart.c b/lib/services/upstart.c
index 31b875b..eb8cfa8 100644
--- a/lib/services/upstart.c
+++ b/lib/services/upstart.c
@@ -322,10 +322,7 @@ upstart_job_check(const char *name, const char *state, void *userdata)
}
if (op->synchronous == FALSE) {
- if (op->opaque->pending) {
- dbus_pending_call_unref(op->opaque->pending);
- }
- op->opaque->pending = NULL;
+ services_set_op_pending(op, NULL);
operation_finalize(op);
}
}
@@ -392,6 +389,7 @@ upstart_async_dispatch(DBusPendingCall *pending, void *user_data)
if(pending) {
reply = dbus_pending_call_steal_reply(pending);
}
+
if(pcmk_dbus_find_error(op->action, pending, reply, &error)) {
/* ignore "already started" or "not running" errors */
@@ -419,11 +417,10 @@ upstart_async_dispatch(DBusPendingCall *pending, void *user_data)
}
}
+ CRM_LOG_ASSERT(pending == op->opaque->pending);
+ services_set_op_pending(op, NULL);
operation_finalize(op);
- if(pending) {
- dbus_pending_call_unref(pending);
- }
if(reply) {
dbus_message_unref(reply);
}
@@ -483,8 +480,7 @@ upstart_job_exec(svc_action_t * op, gboolean synchronous)
free(state);
return op->rc == PCMK_OCF_OK;
} else if (pending) {
- dbus_pending_call_ref(pending);
- op->opaque->pending = pending;
+ services_set_op_pending(op, pending);
return TRUE;
}
return FALSE;
@@ -527,8 +523,7 @@ upstart_job_exec(svc_action_t * op, gboolean synchronous)
free(job);
if(pending) {
- dbus_pending_call_ref(pending);
- op->opaque->pending = pending;
+ services_set_op_pending(op, pending);
return TRUE;
}
return FALSE;
diff --git a/lrmd/ipc_proxy.c b/lrmd/ipc_proxy.c
index 72d83c4..9427393 100644
--- a/lrmd/ipc_proxy.c
+++ b/lrmd/ipc_proxy.c
@@ -165,14 +165,14 @@ ipc_proxy_forward_client(crm_client_t *ipc_proxy, xmlNode *xml)
*/
if (safe_str_eq(msg_type, "event")) {
- crm_info("Sending event to %s", ipc_client->id);
+ crm_trace("Sending event to %s", ipc_client->id);
rc = crm_ipcs_send(ipc_client, 0, msg, crm_ipc_server_event);
} else if (safe_str_eq(msg_type, "response")) {
int msg_id = 0;
crm_element_value_int(xml, F_LRMD_IPC_MSG_ID, &msg_id);
- crm_info("Sending response to %d - %s", ipc_client->request_id, ipc_client->id);
+ crm_trace("Sending response to %d - %s", ipc_client->request_id, ipc_client->id);
rc = crm_ipcs_send(ipc_client, msg_id, msg, FALSE);
CRM_LOG_ASSERT(msg_id == ipc_client->request_id);
diff --git a/lrmd/pacemaker_remote.service.in b/lrmd/pacemaker_remote.service.in
index 7ec42b4..15e61fb 100644
--- a/lrmd/pacemaker_remote.service.in
+++ b/lrmd/pacemaker_remote.service.in
@@ -9,7 +9,6 @@ WantedBy=multi-user.target
Type=simple
KillMode=process
NotifyAccess=none
-SysVStartPriority=99
EnvironmentFile=-/etc/sysconfig/pacemaker
ExecStart=@sbindir@/pacemaker_remoted
diff --git a/mcp/pacemaker.service.in b/mcp/pacemaker.service.in
index 2ef9454..9b0a824 100644
--- a/mcp/pacemaker.service.in
+++ b/mcp/pacemaker.service.in
@@ -20,7 +20,6 @@ WantedBy=multi-user.target
Type=simple
KillMode=process
NotifyAccess=main
-SysVStartPriority=99
EnvironmentFile=-@sysconfdir@/sysconfig/pacemaker
EnvironmentFile=-@sysconfdir@/sysconfig/sbd
SuccessExitStatus=100
diff --git a/pengine/allocate.c b/pengine/allocate.c
index ec5a18d..c2e56f9 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -1495,11 +1495,12 @@ stage6(pe_working_set_t * data_set)
}
}
- if (last_stonith) {
- order_actions(last_stonith, done, pe_order_implies_then);
- } else if (dc_fence) {
+ if (dc_fence) {
order_actions(dc_down, done, pe_order_implies_then);
+
+ } else if (last_stonith) {
+ order_actions(last_stonith, done, pe_order_implies_then);
}
order_actions(done, all_stopped, pe_order_implies_then);
diff --git a/pengine/test10/rec-node-14.dot b/pengine/test10/rec-node-14.dot
index 395fa89..5ceef92 100644
--- a/pengine/test10/rec-node-14.dot
+++ b/pengine/test10/rec-node-14.dot
@@ -2,9 +2,9 @@
"all_stopped" [ style=bold color="green" fontcolor="orange" ]
"stonith 'reboot' node1" -> "stonith 'reboot' node3" [ style = bold]
"stonith 'reboot' node1" [ style=bold color="green" fontcolor="black"]
+"stonith 'reboot' node2" -> "stonith_complete" [ style = bold]
"stonith 'reboot' node2" [ style=bold color="green" fontcolor="black"]
"stonith 'reboot' node3" -> "stonith 'reboot' node2" [ style = bold]
-"stonith 'reboot' node3" -> "stonith_complete" [ style = bold]
"stonith 'reboot' node3" [ style=bold color="green" fontcolor="black"]
"stonith_complete" -> "all_stopped" [ style = bold]
"stonith_complete" [ style=bold color="green" fontcolor="orange" ]
diff --git a/pengine/test10/rec-node-14.exp b/pengine/test10/rec-node-14.exp
index 58bb5ca..0e5e163 100644
--- a/pengine/test10/rec-node-14.exp
+++ b/pengine/test10/rec-node-14.exp
@@ -39,7 +39,7 @@
</action_set>
<inputs>
<trigger>
- <crm_event id="5" operation="stonith" operation_key="stonith-node3-reboot" on_node="node3" on_node_uuid="uuid3"/>
+ <crm_event id="4" operation="stonith" operation_key="stonith-node2-reboot" on_node="node2" on_node_uuid="uuid2"/>
</trigger>
</inputs>
</synapse>
diff --git a/pengine/test10/stonith-0.dot b/pengine/test10/stonith-0.dot
index 29cdd59..8ad32fd 100644
--- a/pengine/test10/stonith-0.dot
+++ b/pengine/test10/stonith-0.dot
@@ -71,13 +71,13 @@ digraph "g" {
"stonith 'reboot' c001n03" -> "ocf_192.168.100.181_stop_0 c001n03" [ style = bold]
"stonith 'reboot' c001n03" -> "ocf_192.168.100.183_stop_0 c001n03" [ style = bold]
"stonith 'reboot' c001n03" -> "rsc_c001n07_stop_0 c001n03" [ style = bold]
+"stonith 'reboot' c001n03" -> "stonith_complete" [ style = bold]
"stonith 'reboot' c001n03" [ style=bold color="green" fontcolor="black"]
"stonith 'reboot' c001n05" -> "group-1_stop_0" [ style = bold]
"stonith 'reboot' c001n05" -> "ocf_192.168.100.181_stop_0 c001n05" [ style = bold]
"stonith 'reboot' c001n05" -> "ocf_192.168.100.183_stop_0 c001n05" [ style = bold]
"stonith 'reboot' c001n05" -> "rsc_c001n05_stop_0 c001n05" [ style = bold]
"stonith 'reboot' c001n05" -> "stonith 'reboot' c001n03" [ style = bold]
-"stonith 'reboot' c001n05" -> "stonith_complete" [ style = bold]
"stonith 'reboot' c001n05" [ style=bold color="green" fontcolor="black"]
"stonith_complete" -> "all_stopped" [ style = bold]
"stonith_complete" -> "heartbeat_192.168.100.182_start_0 c001n02" [ style = bold]
diff --git a/pengine/test10/stonith-0.exp b/pengine/test10/stonith-0.exp
index 9d47215..a6695c9 100644
--- a/pengine/test10/stonith-0.exp
+++ b/pengine/test10/stonith-0.exp
@@ -394,7 +394,7 @@
</action_set>
<inputs>
<trigger>
- <crm_event id="111" operation="stonith" operation_key="stonith-c001n05-reboot" on_node="c001n05" on_node_uuid="52a5ea5e-86ee-442c-b251-0bc9825c517e"/>
+ <crm_event id="110" operation="stonith" operation_key="stonith-c001n03-reboot" on_node="c001n03" on_node_uuid="f5e1d2de-73da-432a-9d5c-37472253c2ee"/>
</trigger>
</inputs>
</synapse>
diff --git a/pengine/test10/systemhealth1.dot b/pengine/test10/systemhealth1.dot
index 28841b7..a29f519 100644
--- a/pengine/test10/systemhealth1.dot
+++ b/pengine/test10/systemhealth1.dot
@@ -1,8 +1,8 @@
digraph "g" {
"all_stopped" [ style=bold color="green" fontcolor="orange" ]
+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"]
"stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold]
-"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"]
"stonith_complete" -> "all_stopped" [ style = bold]
"stonith_complete" [ style=bold color="green" fontcolor="orange" ]
diff --git a/pengine/test10/systemhealth1.exp b/pengine/test10/systemhealth1.exp
index 80a2329..aa2afe1 100644
--- a/pengine/test10/systemhealth1.exp
+++ b/pengine/test10/systemhealth1.exp
@@ -27,7 +27,7 @@
</action_set>
<inputs>
<trigger>
- <crm_event id="4" operation="stonith" operation_key="stonith-hs21d-reboot" on_node="hs21d" on_node_uuid="737318c6-0f92-4592-9754-45967d45aff7"/>
+ <crm_event id="3" operation="stonith" operation_key="stonith-hs21c-reboot" on_node="hs21c" on_node_uuid="c97a3ee5-02d8-4fad-a9fb-a79ae2b35549"/>
</trigger>
</inputs>
</synapse>
diff --git a/pengine/test10/systemhealthm1.dot b/pengine/test10/systemhealthm1.dot
index 28841b7..a29f519 100644
--- a/pengine/test10/systemhealthm1.dot
+++ b/pengine/test10/systemhealthm1.dot
@@ -1,8 +1,8 @@
digraph "g" {
"all_stopped" [ style=bold color="green" fontcolor="orange" ]
+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"]
"stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold]
-"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"]
"stonith_complete" -> "all_stopped" [ style = bold]
"stonith_complete" [ style=bold color="green" fontcolor="orange" ]
diff --git a/pengine/test10/systemhealthm1.exp b/pengine/test10/systemhealthm1.exp
index 80a2329..aa2afe1 100644
--- a/pengine/test10/systemhealthm1.exp
+++ b/pengine/test10/systemhealthm1.exp
@@ -27,7 +27,7 @@
</action_set>
<inputs>
<trigger>
- <crm_event id="4" operation="stonith" operation_key="stonith-hs21d-reboot" on_node="hs21d" on_node_uuid="737318c6-0f92-4592-9754-45967d45aff7"/>
+ <crm_event id="3" operation="stonith" operation_key="stonith-hs21c-reboot" on_node="hs21c" on_node_uuid="c97a3ee5-02d8-4fad-a9fb-a79ae2b35549"/>
</trigger>
</inputs>
</synapse>
diff --git a/pengine/test10/systemhealthn1.dot b/pengine/test10/systemhealthn1.dot
index 28841b7..a29f519 100644
--- a/pengine/test10/systemhealthn1.dot
+++ b/pengine/test10/systemhealthn1.dot
@@ -1,8 +1,8 @@
digraph "g" {
"all_stopped" [ style=bold color="green" fontcolor="orange" ]
+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"]
"stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold]
-"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"]
"stonith_complete" -> "all_stopped" [ style = bold]
"stonith_complete" [ style=bold color="green" fontcolor="orange" ]
diff --git a/pengine/test10/systemhealthn1.exp b/pengine/test10/systemhealthn1.exp
index 80a2329..aa2afe1 100644
--- a/pengine/test10/systemhealthn1.exp
+++ b/pengine/test10/systemhealthn1.exp
@@ -27,7 +27,7 @@
</action_set>
<inputs>
<trigger>
- <crm_event id="4" operation="stonith" operation_key="stonith-hs21d-reboot" on_node="hs21d" on_node_uuid="737318c6-0f92-4592-9754-45967d45aff7"/>
+ <crm_event id="3" operation="stonith" operation_key="stonith-hs21c-reboot" on_node="hs21c" on_node_uuid="c97a3ee5-02d8-4fad-a9fb-a79ae2b35549"/>
</trigger>
</inputs>
</synapse>
diff --git a/pengine/test10/systemhealtho1.dot b/pengine/test10/systemhealtho1.dot
index 28841b7..a29f519 100644
--- a/pengine/test10/systemhealtho1.dot
+++ b/pengine/test10/systemhealtho1.dot
@@ -1,8 +1,8 @@
digraph "g" {
"all_stopped" [ style=bold color="green" fontcolor="orange" ]
+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"]
"stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold]
-"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"]
"stonith_complete" -> "all_stopped" [ style = bold]
"stonith_complete" [ style=bold color="green" fontcolor="orange" ]
diff --git a/pengine/test10/systemhealtho1.exp b/pengine/test10/systemhealtho1.exp
index 80a2329..aa2afe1 100644
--- a/pengine/test10/systemhealtho1.exp
+++ b/pengine/test10/systemhealtho1.exp
@@ -27,7 +27,7 @@
</action_set>
<inputs>
<trigger>
- <crm_event id="4" operation="stonith" operation_key="stonith-hs21d-reboot" on_node="hs21d" on_node_uuid="737318c6-0f92-4592-9754-45967d45aff7"/>
+ <crm_event id="3" operation="stonith" operation_key="stonith-hs21c-reboot" on_node="hs21c" on_node_uuid="c97a3ee5-02d8-4fad-a9fb-a79ae2b35549"/>
</trigger>
</inputs>
</synapse>
diff --git a/pengine/test10/systemhealthp1.dot b/pengine/test10/systemhealthp1.dot
index 28841b7..a29f519 100644
--- a/pengine/test10/systemhealthp1.dot
+++ b/pengine/test10/systemhealthp1.dot
@@ -1,8 +1,8 @@
digraph "g" {
"all_stopped" [ style=bold color="green" fontcolor="orange" ]
+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"]
"stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold]
-"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold]
"stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"]
"stonith_complete" -> "all_stopped" [ style = bold]
"stonith_complete" [ style=bold color="green" fontcolor="orange" ]
diff --git a/pengine/test10/systemhealthp1.exp b/pengine/test10/systemhealthp1.exp
index 80a2329..aa2afe1 100644
--- a/pengine/test10/systemhealthp1.exp
+++ b/pengine/test10/systemhealthp1.exp
@@ -27,7 +27,7 @@
</action_set>
<inputs>
<trigger>
- <crm_event id="4" operation="stonith" operation_key="stonith-hs21d-reboot" on_node="hs21d" on_node_uuid="737318c6-0f92-4592-9754-45967d45aff7"/>
+ <crm_event id="3" operation="stonith" operation_key="stonith-hs21c-reboot" on_node="hs21c" on_node_uuid="c97a3ee5-02d8-4fad-a9fb-a79ae2b35549"/>
</trigger>
</inputs>
</synapse>
diff --git a/tools/1node2heartbeat b/tools/1node2heartbeat
deleted file mode 100755
index b63a0c8..0000000
--- a/tools/1node2heartbeat
+++ /dev/null
@@ -1,326 +0,0 @@
-#!/usr/bin/python
-#
-# Program to determine current list of enabled services for init state 3
-# and create heartbeat CRM configuration for heartbeat to manage them
-#
-__copyright__='''
-Author: Alan Robertson <alanr@unix.sh>
-Copyright (C) 2006 International Business Machines
-'''
-
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-import os,re
-#
-# Here's the plan:
-# Find out the default run level
-# Find out what (additional?) services are enabled in that run level
-# Figure out which of them start after the network (or heartbeat?)
-# Ignore heartbeat :-)
-# Figure out which services supply the $services
-# Look to see if the SUSE /etc/insserv.conf file exists
-# If so, then scan it for who provides the $services
-# defined by the LSB
-# If we're on Red Hat, then make some Red Hat type assumptions
-# (whatever those might be)
-# If we're not, then make some generic assumptions...
-# Scan the init scripts for their dependencies...
-# Eliminate anything at or before 'network'.
-# Create resources corresponding to all active services
-# Include monitor actions for those services
-# that can be started after 'network'
-# Add the start-after dependencies
-#
-# Things to consider doing in the future:
-# Constrain them to only run on the local system?
-# Put them all in a convenience group (no colocation, no ordering)
-# Add start and stop timeouts
-
-ServiceKeywords = {}
-ServiceMap = {}
-ProvidesMap = {}
-RequiresMap = {}
-SkipMap = {'heartbeat': None, 'random': None}
-NoMonitor = {'microcode': None}
-PreReqs = ['network']
-IgnoreList = []
-sysname = os.uname()[1]
-InitDir = "/etc/init.d"
-
-def service_is_hb_compatible(service):
- scriptname = os.path.join(InitDir, service)
- command=scriptname + " status >/dev/null 2>&1";
- rc = os.system(command)
- return rc == 0
-
-def find_ordered_services(dir):
- allscripts = os.listdir(dir)
- allscripts.sort()
- services = []
- for entry in allscripts:
- matchobj = re.match("S[0-9]+(.*)", entry)
- if not matchobj:
- continue
- service = matchobj.group(1)
- if SkipMap.has_key(service):
- continue
- if service_is_hb_compatible(service):
- services.append(service)
- else:
- IgnoreList.append(service)
- return services
-
-
-def register_services(initdir, services):
- for service in services:
- if not ServiceMap.has_key(service):
- ServiceMap[service] = os.path.join(initdir, service)
- for service in services:
- script_dependency_scan(service, os.path.join(initdir, service), ServiceMap)
-
-#
-# From the LSB version 3.1: "Comment Conventions for Init Scripts"
-#
-### BEGIN INIT INFO
-### END INIT INFO
-#
-# The delimiter lines may contain trailing whitespace, which shall be ignored.
-# All lines inside the block shall begin with a hash character '#' in the
-# first column, so the shell interprets them as comment lines which do not
-# affect operation of the script. The lines shall be of the form:
-# {keyword}: arg1 [arg2...]
-# with exactly one space character between the '#' and the keyword, with a
-# single exception. In lines following a line containing the Description
-# keyword, and until the next keyword or block ending delimiter is seen,
-# a line where the '#' is followed by more than one space or a tab
-# character shall be treated as a continuation of the previous line.
-#
-
-# Make this a class to avoid recompiling it for each script we scan.
-class pats:
- begin=re.compile("###\s+BEGIN\s+INIT\s+INFO")
- end=re.compile("###\s+END\s+INIT\s+INFO")
- desc=re.compile("# Description:\s*(.*)", re.IGNORECASE)
- desc_continue=re.compile("#( +|\t)\s*(.*)")
- keyword=re.compile("# ([^\s:]+):\s*(.*)\s*\Z")
-
-def script_keyword_scan(filename, servicename):
- keywords = {}
- ST_START=0
- ST_INITINFO=1
- ST_DESCRIPTION=1
- description=""
- state=ST_START
-
- try:
- fd = open(filename)
- except IOError:
- return keywords
-
- while 1:
- line = fd.readline()
- if not line:
- break
-
- if state == ST_START:
- if pats.begin.match(line):
- state = ST_INITINFO
- continue
- if pats.end.match(line):
- break
-
- if state == ST_DESCRIPTION:
- match = pats.desc_continue.match(line)
- if match:
- description += ("\n" + match.group(2))
- continue
- state = ST_INITINFO
-
- match = pats.desc.match(line)
- if match:
- state = ST_DESCRIPTION
- description = match.group(1)
- continue
-
- match = pats.keyword.match(line)
- if match:
- keywords[match.group(1)] = match.group(2)
-
- # Clean up and return
- fd.close()
- if description != "":
- keywords["Description"] = description
- keywords["_PATHNAME_"] = filename
- keywords["_RESOURCENAME_"] = "R_" + sysname + "_" + servicename
- return keywords
-
-def script_dependency_scan(service, script, servicemap):
- keywords=script_keyword_scan(script, service)
- ServiceKeywords[service] = keywords
-
-SysServiceGuesses = {
- '$local_fs': ['boot.localfs'],
- '$network': ['network'],
- '$named': ['named'],
- '$portmap': ['portmap'],
- '$remote_fs': ['nfs'],
- '$syslog': ['syslog'],
- '$netdaemons': ['portmap', 'inetd'],
- '$time': ['ntp'],
-}
-
-#
-# For specific versions of Linux, there are often better ways
-# to do this...
-#
-# (e.g., for SUSE Linux, one should look at /etc/insserv.conf file)
-#
-def map_sys_services(servicemap):
- sysservicemap = {}
- for sysserv in SysServiceGuesses.keys():
- servlist = SysServiceGuesses[sysserv]
- result = []
- for service in servlist:
- if servicemap.has_key(service):
- result.append(service)
-
- sysservicemap[sysserv] = result
- return sysservicemap
-
-#
-#
-#
-def create_service_dependencies(servicekeywords, systemservicemap):
- dependencies = {}
- for service in servicekeywords.keys():
- if not dependencies.has_key(service):
- dependencies[service] = {}
- for key in ('Required-Start', 'Should-Start'):
- if not servicekeywords[service].has_key(key):
- continue
- for depserv in servicekeywords[service][key].split():
- if systemservicemap.has_key(depserv):
- sysserv = systemservicemap[depserv]
- for serv in sysserv:
- dependencies[service][serv] = None
- else:
- if servicekeywords.has_key(depserv):
- dependencies[service][depserv] = None
- if len(dependencies[service]) == 0:
- del dependencies[service]
- return dependencies
-
-#
-# Modify the service name map to include all the mappings from
-# 'Provides' services to real service script names...
-#
-def map_script_services(sysservmap, servicekeywords):
- for service in servicekeywords.keys():
- if not servicekeywords[service].has_key('Provides'):
- continue
- for provided in servicekeywords[service]['Provides'].split():
- if not sysservmap.has_key(provided):
- sysservmap[provided] = []
- sysservmap[provided].append(service)
- return sysservmap
-
-def create_cib_update(keywords, depmap):
- services = keywords.keys()
- services.sort()
- result = ""
- # Create the XML for the resources
- result += '<cib>\n'
- result += '<configuration>\n'
- result += '<crm_config/>\n'
- result += '<nodes/>\n'
- result += '<resources>\n'
- groupname="G_" + sysname + "_localinit"
- result += ' <group id="'+groupname+'" ordered="0" collocated="0">\n'
- for service in services:
- rid = keywords[service]["_RESOURCENAME_"]
- monid = "OPmon_" + sysname + '_' + service
- result += \
- ' <primitive id="' + rid + '" class="lsb" type="'+ service + \
- '">\n' + \
- ' <instance_attributes/>\n' + \
- ' <operations>\n'
- if not NoMonitor.has_key(service):
- result += \
- ' <op id="' + monid + '" name="monitor" interval="30s" timeout="30s"/>\n'
- result += \
- ' </operations>\n' \
- ' </primitive>\n'
- result += ' </group>\n'
- result += '</resources>\n'
- services = depmap.keys()
- services.sort()
- result += '<constraints>\n'
- for service in services:
- rid = keywords[service]["_RESOURCENAME_"]
- deps = depmap[service].keys()
- deps.sort()
- for dep in deps:
- if not keywords.has_key(dep):
- continue
- depid = keywords[dep]["_RESOURCENAME_"]
- orderid='O_' + sysname + '_' + service + '_' + dep
- result += ' <rsc_order id="' + orderid + '" from="' + rid + \
- '" to="' + depid + '" type="after"/>\n'
- loc_id="Loc_" + sysname + "_localinit"
- rule_id="LocRule_" + sysname + "_localinit"
- expr_id="LocExp_" + sysname + "_localinit"
-
- result += ' <rsc_location id="' + loc_id + '" rsc="' + groupname + '">\n'
- result += ' <rule id="' + rule_id + '" score="-INFINITY">\n'
- result += ' <expression attribute="#uname" id="' + expr_id + \
- '" operation="ne" value="' + sysname + '"/>\n'
- result += ' </rule>\n'
- result += ' </rsc_location>\n'
- result += '</constraints>\n'
- result += '</configuration>\n'
- result += '<status/>\n'
- result += '</cib>\n'
- return result
-
-
-
-def remove_a_prereq(service, servicemap, keywords, deps):
- if deps.has_key(service):
- parents = deps[service].keys()
- del deps[service]
- else:
- parents = []
- if servicemap.has_key(service):
- del servicemap[service]
- if keywords.has_key(service):
- del keywords[service]
- for parent in parents:
- if not deps.has_key(parent):
- continue
- remove_a_prereq(parent, servicemap, keywords, deps)
-
-
-def remove_important_prereqs(prereqs, servicemap, keywords, deps):
- # Find everything these important prereqs need and get rid of them...
- for service in prereqs:
- remove_a_prereq(service, servicemap, keywords, deps)
-
-ServiceList = find_ordered_services(os.path.join(InitDir, "rc3.d"))
-register_services(InitDir, ServiceList)
-SysServiceMap = map_sys_services(ServiceMap)
-map_script_services(SysServiceMap, ServiceKeywords)
-ServiceDependencies = create_service_dependencies(ServiceKeywords,SysServiceMap)
-remove_important_prereqs(PreReqs, SysServiceMap, ServiceKeywords, ServiceDependencies)
-
-print create_cib_update(ServiceKeywords, ServiceDependencies)
diff --git a/tools/crm_commands.py.in b/tools/crm_commands.py.in
deleted file mode 100644
index c48d82c..0000000
--- a/tools/crm_commands.py.in
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# pingd OCF Resource Agent
-# Records (in the CIB) the current number of ping nodes a
-# cluster node can connect to.
-#
-# Copyright (c) 2006 Andrew Beekhof
-# All Rights Reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of version 2 of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-#
-# Further, this software is distributed without any warranty that it is
-# free of the rightful claim of any third person regarding infringement
-# or the like. Any license provided herein, whether implied or
-# otherwise, applies only to this software file. Patent licenses, if
-# any, provided herein do not apply to combinations of this program with
-# other software, or any other product whatsoever.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-#######################################################################
-
-import crm_utils as utl
-
-class HelpRequest(Exception):
- """Exception raised when a help listing is required."""
-
-class ReparseRequest(Exception):
- """Exception raised when a command changed the command-line."""
-
-def up(*args, **cmdoptions):
- l = len(utl.topic_stack)
- if l > 1:
- utl.topic_stack.pop()
- utl.set_topic(utl.topic_stack[-1])
- else:
- utl.log_debug("Already at the top of the stack")
-
-def toggle_flag(*args, **cmdoptions):
- flag = cmdoptions["flag"]
- if utl.global_opts[flag]:
- utl.global_opts[flag] = 0
- else:
- utl.global_opts[flag] = 1
-
- return utl.global_opts[flag]
-
-def cd_(*args, **cmdoptions):
- utl.log_dev("args: %s\nopts: %s" % (repr(args), repr(cmdoptions)))
- if not cmdoptions["topic"]:
- utl.log_err("No topic specified")
- return 1
-
- if cmdoptions["topic"]:
- utl.set_topic(cmdoptions["topic"])
- if args:
- raise ReparseRequest()
- if utl.crm_topic not in utl.topic_stack:
- utl.topic_stack.append(cmdoptions["topic"])
- if not utl.global_opts["interactive"]:
- help(cmdoptions["topic"])
- return 0
-
-def exit(*args, **cmdoptions):
- sys.exit(0)
-
-def help(*args, **cmdoptions):
- if args:
- raise HelpRequest(args[0])
- raise HelpRequest(utl.crm_topic)
-
-def debugstate(*args, **cmdoptions):
- utl.log_info("Global Options: ")
- for opt in utl.global_opts.keys():
- utl.log_info(" * %s:\t%s" % (opt, utl.global_opts[opt]))
- utl.log_info("Stack: "+repr(utl.topic_stack))
- utl.log_info("Stack Head: "+utl.crm_topic)
- return 0
-
-def do_list(*args, **cmdoptions):
- topic = utl.crm_topic
- if cmdoptions.has_key("topic") and cmdoptions["topic"]:
- topic = cmdoptions["topic"]
-
- utl.log_debug("Complete '%s' listing" % topic)
- if topic == "resources":
- utl.os_system("crm_resource -l", True)
- elif topic == "nodes":
- lines = utl.os_system("cibadmin -Q -o nodes", False)
- for line in lines:
- if line.find("node ") >= 0:
- print line.rstrip()
- else:
- utl.log_err("%s: Topic %s is not (yet) supported" % ("list", topic))
- return 1
- return 0
-
-def do_status(*args, **cmdoptions):
- topic = utl.crm_topic
- if cmdoptions.has_key("topic") and cmdoptions["topic"]:
- topic = cmdoptions["topic"]
-
- if topic == "resources":
- if not args:
- utl.os_system("crm_resource -L", True)
- for rsc in args:
- utl.os_system("crm_resource -W -r %s"%rsc, True)
-
- elif topic == "nodes":
- lines = utl.os_system("cibadmin -Q -o status", False)
- for line in lines:
- line = line.rstrip()
- utl.log_dev("status line: "+line)
- if line.find("node_state ") >= 0:
- if not args:
- print line
- for node in args:
- if line.find(node) >= 0:
- print line
- else:
- utl.log_err("Topic %s is not (yet) supported" % topic)
- return 1
-
- return 0
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 0b71275..46a59d6 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -2715,6 +2715,7 @@ print_status(pe_working_set_t * data_set)
} else {
online_nodes = add_list_element(online_nodes, node_name);
}
+ free(node_name);
continue;
}
} else {
@@ -2727,6 +2728,7 @@ print_status(pe_working_set_t * data_set)
} else {
offline_nodes = add_list_element(offline_nodes, node_name);
}
+ free(node_name);
continue;
}
}
@@ -3078,6 +3080,7 @@ print_html_status(pe_working_set_t * data_set, const char *filename)
fprintf(stream, "</ul>\n");
}
fprintf(stream, "</li>\n");
+ free(node_name);
}
fprintf(stream, "</ul>\n");
diff --git a/tools/crm_node.c b/tools/crm_node.c
index c484e17..d0195e3 100644
--- a/tools/crm_node.c
+++ b/tools/crm_node.c
@@ -470,6 +470,7 @@ try_cman(int command, enum cluster_type_e stack)
case 'l':
case 'p':
+ memset(cman_nodes, 0, MAX_NODES * sizeof(cman_node_t));
rc = cman_get_nodes(cman_handle, MAX_NODES, &node_count, cman_nodes);
if (rc != 0) {
fprintf(stderr, "Couldn't query cman node list: %d %d", rc, errno);
@@ -489,6 +490,7 @@ try_cman(int command, enum cluster_type_e stack)
break;
case 'i':
+ memset(&node, 0, sizeof(cman_node_t));
rc = cman_get_node(cman_handle, CMAN_NODEID_US, &node);
if (rc != 0) {
fprintf(stderr, "Couldn't query cman node id: %d %d", rc, errno);
diff --git a/tools/crm_primitive.py.in b/tools/crm_primitive.py.in
deleted file mode 100644
index cfe0b5c..0000000
--- a/tools/crm_primitive.py.in
+++ /dev/null
@@ -1,268 +0,0 @@
-#!@PYTHON@
-
-'''Create an XML fragment describing a new resource
-'''
-
-__copyright__='''
-Author: Andrew Beekhof <andrew@beekhof.net>
-Copyright (C) 2005 Andrew Beekhof
-'''
-
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import sys,string,os
-import xml.dom.minidom
-
-print_rsc_only = 0
-rsc_name = None
-rsc_class = None
-rsc_type = None
-rsc_provider = None
-start_timeout = None
-stop_timeout = None
-monitor_interval = None
-monitor_timeout = None
-rsc_options = []
-rsc_location = []
-rsc_colocation = []
-
-def create_cib() :
- doc = xml.dom.minidom.Document()
- cib = doc.createElement("cib")
- doc.appendChild(cib)
-
- configuration = doc.createElement("configuration")
- cib.appendChild(configuration)
-
- #crm_config = doc.createElement("crm_config")
- #configuration.appendChild(crm_config)
-
- resources = doc.createElement("resources")
- configuration.appendChild(resources)
- constraints = doc.createElement("constraints")
- configuration.appendChild(constraints)
-
- return doc, resources, constraints
-
-def cib_resource(doc, id, ra_class, type, provider):
-
- params = None
-
- resource = doc.createElement("primitive")
-
- resource.setAttribute("id", id)
- resource.setAttribute("type", type)
- resource.setAttribute("class", ra_class)
-
- if ra_class == "ocf":
- if not provider:
- provider = "heartbeat"
- resource.setAttribute("provider", provider)
-
- elif ra_class != "lsb" and ra_class != "heartbeat":
- print "Unknown resource class: "+ ra_class
- return None
-
- operations = doc.createElement("operations")
- resource.appendChild(operations)
-
- if monitor_interval != None:
- op = doc.createElement("op")
- operations.appendChild(op)
- op.setAttribute("id", id + "_mon_" + monitor_interval)
- op.setAttribute("name", "monitor")
- op.setAttribute("interval", monitor_interval)
- if monitor_timeout != None:
- op.setAttribute("timeout", monitor_timeout)
-
- if start_timeout != None:
- op = doc.createElement("op")
- operations.appendChild(op)
- op.setAttribute("id", id + "_start")
- op.setAttribute("name", "start")
- op.setAttribute("timeout", start_timeout)
-
- if stop_timeout != None:
- op = doc.createElement("op")
- operations.appendChild(op)
- op.setAttribute("id", id + "_stop")
- op.setAttribute("name", "stop")
- op.setAttribute("timeout", stop_timeout)
-
- instance_attributes = doc.createElement("instance_attributes")
- instance_attributes.setAttribute("id", id)
- resource.appendChild(instance_attributes)
- attributes = doc.createElement("attributes")
- instance_attributes.appendChild(attributes)
- for i in range(0,len(rsc_options)) :
- if rsc_options[i] == None :
- continue
-
- param = string.split(rsc_options[i], "=")
- nvpair = doc.createElement("nvpair")
- nvpair.setAttribute("id", id + "_" + param[0])
- nvpair.setAttribute("name", param[0])
- nvpair.setAttribute("value", param[1])
- attributes.appendChild(nvpair)
-
- return resource
-
-def cib_rsc_location(doc, id, node, score):
- rule = doc.createElement("rule")
- rule.setAttribute("id", id+"_prefer_"+node+"_rule")
- rule.setAttribute("score", score)
- expression = doc.createElement("expression")
- expression.setAttribute("id",id+"_prefer_"+node+"_expr")
- expression.setAttribute("attribute","#uname")
- expression.setAttribute("operation","eq")
- expression.setAttribute("value", node)
- rule.appendChild(expression)
- return rule
-
-def cib_rsc_colocation(doc, id, other_resource, score):
- rsc_colocation = doc.createElement("rsc_colocation")
- rsc_colocation.setAttribute("id", id+"_colocate_with_"+other_resource)
- rsc_colocation.setAttribute("from", id)
- rsc_colocation.setAttribute("to", other_resource)
- rsc_colocation.setAttribute("score", score)
- return rsc_colocation
-
-def print_usage():
- print "usage: " \
- + sys.argv[0] \
- + " --name <string>"\
- + " --class <string>"\
- + " --type <string>"\
- + " [--provider <string>]"\
- + "\n\t"\
- + " [--start-timeout <interval>]"\
- + " [--stop-timeout <interval>]"\
- + " [--monitor <interval>]"\
- + " [--monitor-timeout <interval>]"\
- + "\n\t"\
- + " [--rsc-option name=value]*"\
- + " [--rsc-location uname=score]*"\
- + " [--rsc-colocation resource=score]*"
- print "Example:\n\t" + sys.argv[0] \
- + " --name cluster_ip_1 --type IPaddr --provider heartbeat --class ocf "\
- + "--rsc-option ip=192.168.1.101 --rsc-location node1=500 | cibadmin -C -p"
- sys.exit(1)
-
-if __name__=="__main__" :
-
- # Process arguments...
- skipthis = None
- args = sys.argv[1:]
- if len(args) == 0:
- print_usage()
-
- for i in range(0, len(args)) :
- if skipthis :
- skipthis = None
- continue
-
- elif args[i] == "--name" :
- skipthis = True
- rsc_name = args[i+1]
-
- elif args[i] == "--class" :
- skipthis = True
- rsc_class = args[i+1]
-
- elif args[i] == "--type" :
- skipthis = True
- rsc_type = args[i+1]
-
- elif args[i] == "--provider" :
- skipthis = True
- rsc_provider = args[i+1]
-
- elif args[i] == "--start-timeout" :
- skipthis = True
- start_timeout = args[i+1]
-
- elif args[i] == "--stop-timeout" :
- skipthis = True
- stop_timeout = args[i+1]
-
- elif args[i] == "--monitor" :
- skipthis = True
- monitor_interval = args[i+1]
-
- elif args[i] == "--monitor-timeout" :
- skipthis = True
- monitor_timeout = args[i+1]
-
- elif args[i] == "--rsc-option" :
- skipthis = True
- params = string.split(args[i+1], "=")
- if params[1] != None:
- rsc_options.append(args[i+1])
- else:
- print "option '"+args[i+1]+"' must be of the form name=value"
-
- elif args[i] == "--rsc-location" :
- skipthis = True
- params = string.split(args[i+1], "=")
- if params[1] != None:
- rsc_location.append(args[i+1])
- else:
- print "option '"+args[i+1]+"' must be of the form host=score"
-
- elif args[i] == "--rsc-colocation" :
- skipthis = True
- params = string.split(args[i+1], "=")
- if params[1] != None:
- rsc_colocation.append(args[i+1])
- else:
- print "option '"+args[i+1]+"' must be of the form resource=score"
-
- elif args[i] == "--rsc-only" :
- print_rsc_only = 1
- else:
- print "Unknown argument: "+ args[i]
- print_usage()
-
- cib = create_cib()
- pre_line = ""
- id_index = 1
- resource = cib_resource(cib[0], rsc_name, rsc_class, rsc_type, rsc_provider)
-
- if print_rsc_only:
- print resource.toprettyxml()
- sys.exit(0)
-
- cib[1].appendChild(resource)
-
- if rsc_location != None :
- rsc_loc = cib[0].createElement("rsc_location")
- rsc_loc.setAttribute("id", rsc_name+"_preferences")
- rsc_loc.setAttribute("rsc", rsc_name)
- for i in range(0, len(rsc_location)) :
- param = string.split(rsc_location[i], "=")
- location_rule = cib_rsc_location(cib[0], rsc_name, param[0], param[1])
- rsc_loc.appendChild(location_rule)
- cib[2].appendChild(rsc_loc)
-
- for i in range(0, len(rsc_colocation)) :
- if rsc_location[i] == None :
- continue
-
- param = string.split(rsc_colocation[i], "=")
- colocation_rule = cib_rsc_colocation(cib[0], rsc_name, param[0], param[1])
- cib[2].appendChild(colocation_rule)
-
- print cib[0].toprettyxml()
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 31136ef..2fce3b7 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -853,6 +853,7 @@ main(int argc, char **argv)
rc = -ENXIO;
goto bail;
}
+
rc = cli_resource_print_attribute(rsc_id, prop_name, &data_set);
} else if (rsc_cmd == 'p') {
@@ -883,6 +884,10 @@ main(int argc, char **argv)
} else if (rsc_cmd == 'C' && rsc_id) {
resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
+ if(do_force == FALSE) {
+ rsc = uber_parent(rsc);
+ }
+
crm_debug("Re-checking the state of %s on %s", rsc_id, host_uname);
if(rsc) {
crmd_replies_needed = 0;
@@ -891,6 +896,11 @@ main(int argc, char **argv)
rc = -ENODEV;
}
+ if(rc == pcmk_ok && BE_QUIET == FALSE) {
+ /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */
+ cli_resource_check(cib_conn, rsc);
+ }
+
if (rc == pcmk_ok) {
start_mainloop();
}
diff --git a/tools/crm_resource.h b/tools/crm_resource.h
index 49b6138..5a206e0 100644
--- a/tools/crm_resource.h
+++ b/tools/crm_resource.h
@@ -68,6 +68,7 @@ int cli_resource_print_property(const char *rsc, const char *attr, pe_working_se
int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pe_working_set_t * data_set);
/* runtime */
+void cli_resource_check(cib_t * cib, resource_t *rsc);
int cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t * data_set);
int cli_resource_search(const char *rsc, pe_working_set_t * data_set);
int cli_resource_delete(cib_t *cib_conn, crm_ipc_t * crmd_channel, const char *host_uname, resource_t * rsc, pe_working_set_t * data_set);
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index 9c3711c..946b9e3 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -352,8 +352,11 @@ cli_resource_print_attribute(const char *rsc, const char *attr, pe_working_set_t
if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) {
get_rsc_attributes(params, the_rsc, current, data_set);
+
} else if (safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
+ /* No need to redirect to the parent */
get_meta_attributes(params, the_rsc, current, data_set);
+
} else {
unpack_instance_attributes(data_set->input, the_rsc->xml, XML_TAG_UTILIZATION, NULL,
params, NULL, FALSE, data_set->now);
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 006ec08..a270cbf 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -198,6 +198,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch
int rc = pcmk_ok;
static bool need_init = TRUE;
+ char *lookup_id = NULL;
char *local_attr_id = NULL;
char *local_attr_set = NULL;
@@ -212,14 +213,39 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch
}
if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) {
- rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, XML_TAG_META_SETS, attr_set, attr_id,
+ rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, XML_TAG_META_SETS, attr_set, attr_id,
attr_name, &local_attr_id);
- if (rc == pcmk_ok) {
- printf("WARNING: There is already a meta attribute called %s (id=%s)\n", attr_name,
- local_attr_id);
+ if(rc == pcmk_ok && do_force == FALSE) {
+ if (BE_QUIET == FALSE) {
+ printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n",
+ uber_parent(rsc)->id, attr_name, local_attr_id);
+ printf(" Delete '%s' first or use --force to override\n", local_attr_id);
+ }
+ return -ENOTUNIQ;
+ }
+
+ } else if(rsc->parent) {
+
+ switch(rsc->parent->variant) {
+ case pe_group:
+ if (BE_QUIET == FALSE) {
+ printf("Updating '%s' for '%s' will not apply to its peers in '%s'\n", attr_name, rsc_id, rsc->parent->id);
+ }
+ break;
+ case pe_master:
+ case pe_clone:
+ rsc = rsc->parent;
+ if (BE_QUIET == FALSE) {
+ printf("Updating '%s' for '%s'...\n", rsc->id, rsc_id);
+ }
+ break;
+ default:
+ break;
}
}
- rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name,
+
+ lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
+ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
&local_attr_id);
if (rc == pcmk_ok) {
@@ -227,6 +253,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch
attr_id = local_attr_id;
} else if (rc != -ENXIO) {
+ free(lookup_id);
free(local_attr_id);
return rc;
@@ -250,7 +277,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch
free_xml(cib_top);
if (attr_set == NULL) {
- local_attr_set = crm_concat(rsc_id, attr_set_type, '-');
+ local_attr_set = crm_concat(lookup_id, attr_set_type, '-');
attr_set = local_attr_set;
}
if (attr_id == NULL) {
@@ -263,7 +290,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch
}
xml_top = create_xml_node(NULL, tag);
- crm_xml_add(xml_top, XML_ATTR_ID, rsc_id);
+ crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
xml_obj = create_xml_node(xml_top, attr_set_type);
crm_xml_add(xml_obj, XML_ATTR_ID, attr_set);
@@ -285,7 +312,15 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch
crm_log_xml_debug(xml_top, "Update");
rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options);
+ if (rc == pcmk_ok && BE_QUIET == FALSE) {
+ printf("Set '%s' option: id=%s%s%s%s%s=%s\n", lookup_id, local_attr_id,
+ attr_set ? " set=" : "", attr_set ? attr_set : "",
+ attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value);
+ }
+
free_xml(xml_top);
+
+ free(lookup_id);
free(local_attr_id);
free(local_attr_set);
@@ -330,6 +365,7 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch
xmlNode *xml_obj = NULL;
int rc = pcmk_ok;
+ char *lookup_id = NULL;
char *local_attr_id = NULL;
resource_t *rsc = find_rsc_or_clone(rsc_id, data_set);
@@ -337,7 +373,29 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch
return -ENXIO;
}
- rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name,
+ if(rsc->parent && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
+
+ switch(rsc->parent->variant) {
+ case pe_group:
+ if (BE_QUIET == FALSE) {
+ printf("Removing '%s' for '%s' will not apply to its peers in '%s'\n", attr_name, rsc_id, rsc->parent->id);
+ }
+ break;
+ case pe_master:
+ case pe_clone:
+ rsc = rsc->parent;
+ if (BE_QUIET == FALSE) {
+ printf("Removing '%s' from '%s' for '%s'...\n", attr_name, rsc->id, rsc_id);
+ }
+ break;
+ default:
+ break;
+ }
+
+ }
+
+ lookup_id = clone_strip(rsc->id);
+ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
&local_attr_id);
if (rc == -ENXIO) {
@@ -360,8 +418,8 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch
CRM_ASSERT(cib);
rc = cib->cmds->delete(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options);
- if (rc == pcmk_ok) {
- printf("Deleted %s option: id=%s%s%s%s%s\n", rsc_id, local_attr_id,
+ if (rc == pcmk_ok && BE_QUIET == FALSE) {
+ printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id,
attr_set ? " set=" : "", attr_set ? attr_set : "",
attr_name ? " name=" : "", attr_name ? attr_name : "");
}
@@ -493,7 +551,10 @@ cli_resource_delete(cib_t *cib_conn, crm_ipc_t * crmd_channel, const char *host_
for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
resource_t *child = (resource_t *) lpc->data;
- cli_resource_delete(cib_conn, crmd_channel, host_uname, child, data_set);
+ rc = cli_resource_delete(cib_conn, crmd_channel, host_uname, child, data_set);
+ if(rc != pcmk_ok || is_not_set(rsc->flags, pe_rsc_unique)) {
+ return rc;
+ }
}
return pcmk_ok;
@@ -514,31 +575,78 @@ cli_resource_delete(cib_t *cib_conn, crm_ipc_t * crmd_channel, const char *host_
node = pe_find_node(data_set->nodes, host_uname);
if (node && node->details->rsc_discovery_enabled) {
- printf("Cleaning up %s on %s\n", rsc->id, host_uname);
+ printf("Cleaning up %s on %s", rsc->id, host_uname);
rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc->id, TRUE, data_set);
} else {
printf("Resource discovery disabled on %s. Unable to delete lrm state.\n", host_uname);
+ rc = -EOPNOTSUPP;
}
if (rc == pcmk_ok) {
char *attr_name = NULL;
- const char *id = rsc->id;
if(node && node->details->remote_rsc == NULL && node->details->rsc_discovery_enabled) {
crmd_replies_needed++;
}
- if (rsc->clone_name) {
- id = rsc->clone_name;
+
+ if(is_not_set(rsc->flags, pe_rsc_unique)) {
+ char *id = clone_strip(rsc->id);
+ attr_name = crm_strdup_printf("fail-count-%s", id);
+ free(id);
+
+ } else if (rsc->clone_name) {
+ attr_name = crm_strdup_printf("fail-count-%s", rsc->clone_name);
+
+ } else {
+ attr_name = crm_strdup_printf("fail-count-%s", rsc->id);
}
- attr_name = crm_concat("fail-count", id, '-');
+ printf(", removing %s\n", attr_name);
rc = attrd_update_delegate(NULL, 'D', host_uname, attr_name, NULL, XML_CIB_TAG_STATUS, NULL,
NULL, NULL, node ? is_remote_node(node) : FALSE);
free(attr_name);
+
+ } else if(rc != -EOPNOTSUPP) {
+ printf(" - FAILED\n");
}
+
return rc;
}
+void
+cli_resource_check(cib_t * cib_conn, resource_t *rsc)
+{
+
+ char *role_s = NULL;
+ char *managed = NULL;
+ resource_t *parent = uber_parent(rsc);
+
+ find_resource_attr(cib_conn, XML_ATTR_ID, parent->id,
+ XML_TAG_META_SETS, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
+
+ find_resource_attr(cib_conn, XML_ATTR_ID, parent->id,
+ XML_TAG_META_SETS, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
+
+ if(managed == NULL) {
+ managed = strdup("1");
+ }
+ if(crm_is_true(managed) == FALSE) {
+ printf("\n\t*Resource %s is configured to not be managed by the cluster\n", parent->id);
+ }
+ if(role_s) {
+ enum rsc_role_e role = text2role(role_s);
+ if(role == RSC_ROLE_UNKNOWN) {
+ // Treated as if unset
+
+ } else if(role == RSC_ROLE_STOPPED) {
+ printf("\n\t* The configuration specifies that '%s' should remain stopped\n", parent->id);
+
+ } else if(parent->variant > pe_clone && role != RSC_ROLE_MASTER) {
+ printf("\n\t* The configuration specifies that '%s' should not be promoted\n", parent->id);
+ }
+ }
+}
+
int
cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname,
const char *rsc_id, pe_working_set_t * data_set)
diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c
index 0051112..7d0a8eb 100644
--- a/tools/crm_simulate.c
+++ b/tools/crm_simulate.c
@@ -59,8 +59,11 @@ char *use_date = NULL;
static void
get_date(pe_working_set_t * data_set)
{
+ int value = 0;
time_t original_date = 0;
- crm_element_value_int(data_set->input, "execution-date", (int*)&original_date);
+
+ crm_element_value_int(data_set->input, "execution-date", &value);
+ original_date = value;
if (use_date) {
data_set->now = crm_time_new(use_date);
diff --git a/tools/crm_utils.py.in b/tools/crm_utils.py.in
deleted file mode 100644
index 67d6918..0000000
--- a/tools/crm_utils.py.in
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/bin/env python
-#
-#
-# pingd OCF Resource Agent
-# Records (in the CIB) the current number of ping nodes a
-# cluster node can connect to.
-#
-# Copyright (c) 2006 Andrew Beekhof
-# All Rights Reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of version 2 of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-#
-# Further, this software is distributed without any warranty that it is
-# free of the rightful claim of any third person regarding infringement
-# or the like. Any license provided herein, whether implied or
-# otherwise, applies only to this software file. Patent licenses, if
-# any, provided herein do not apply to combinations of this program with
-# other software, or any other product whatsoever.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-#######################################################################
-
-import os
-import sys
-import getopt
-import readline
-import traceback
-from popen2 import Popen3
-
-crm_topic = "crm"
-topic_stack = [ crm_topic ]
-hist_file = os.environ.get('HOME')+"/.crm_history"
-global_opts = {}
-
-def exit_(code=0):
- if global_opts["interactive"]:
- log_info("Exiting... ")
- try:
- readline.write_history_file(hist_file)
- log_debug("Wrote history to: "+hist_file)
- except:
- log_debug("Couldnt write history to: "+hist_file)
- sys.exit(code)
-
-def log_debug(log):
- if global_opts.has_key("debug") and global_opts["debug"]:
- print log
-
-def log_dev(log):
- if global_opts.has_key("devlog") and global_opts["devlog"]:
- print log
-
-def log_info(log):
- print log
-
-def log_err(log):
- print "ERROR: "+log
-
-def set_topic(name):
- global crm_topic
- if crm_topic != name:
- log_dev("topic: %s->%s" % (crm_topic, name))
- crm_topic = name
-
-def os_system(cmd, print_raw=False):
- log_debug("Performing command: "+cmd)
- p = Popen3(cmd, None)
- p.tochild.close()
- result = p.fromchild.readlines()
- p.fromchild.close()
- p.wait()
- if print_raw:
- for line in result:
- print line.rstrip()
- return result
-
-#
-# Creates an argv-style array (that preserves quoting) for use in shell-mode
-#
-def create_argv(text):
- args = []
- word = []
- index = 0
- total = len(text)
-
- in_word = False
- in_verbatum = False
-
- while index < total:
- finish_word = False
- append_word = False
- #log_debug("processing: "+text[index])
- if text[index] == '\\':
- index = index +1
- append_word = True
-
- elif text[index].isspace():
- if in_verbatum or in_word:
- append_word = True
- else:
- finish_word = True
-
- elif text[index] == '"':
- if in_verbatum:
- append_word = True
- else:
- finish_word = True
- if in_word:
- in_word = False
- else:
- in_word = True
-
- elif text[index] == '\'':
- finish_word = True
- if in_verbatum:
- in_verbatum = False
- else:
- in_verbatum = True
- else:
- append_word = True
-
- if finish_word:
- if word:
- args.append(''.join(word))
- word = []
- elif append_word:
- word.append(text[index])
- #log_debug("Added %s to word: %s" % (text[index], str(word)))
-
- index = index +1
-
- if in_verbatum or in_word:
- text=""
- if word:
- text=" after: '%s'"%''.join(word)
- raise QuotingError("Un-matched quoting%s"%text, args)
-
- elif word:
- args.append(''.join(word))
-
- return args
-
-def init_readline(func):
- readline.set_completer(func)
- readline.parse_and_bind("tab: complete")
- readline.set_history_length(100)
-
- try:
- readline.read_history_file(hist_file)
- except:
- pass
-
-def fancyopts(args, options, state):
- long = []
- short = ''
- map = {}
- dt = {}
-
- for s, l, d, c in options:
- pl = l.replace('-', '_')
- map['-'+s] = map['--'+l] = pl
- state[pl] = d
- dt[pl] = type(d)
- if not d is None and not callable(d):
- if s: s += ':'
- if l: l += '='
- if s: short = short + s
- if l: long.append(l)
-
- opts, args = getopt.getopt(args, short, long)
-
- for opt, arg in opts:
- if dt[map[opt]] is type(fancyopts): state[map[opt]](state,map[opt],arg)
- elif dt[map[opt]] is type(1): state[map[opt]] = int(arg)
- elif dt[map[opt]] is type(''): state[map[opt]] = arg
- elif dt[map[opt]] is type([]): state[map[opt]].append(arg)
- elif dt[map[opt]] is type(None): state[map[opt]] = 1
-
- return args
diff --git a/tools/regression.acls.exp b/tools/regression.acls.exp
index ae6735a..ac7ae0c 100644
--- a/tools/regression.acls.exp
+++ b/tools/regression.acls.exp
@@ -253,10 +253,10 @@ Error performing operation: Permission denied
=#=#=#= End test: unknownguy: Set stonith-enabled - Permission denied (13) =#=#=#=
* Passed: crm_attribute - unknownguy: Set stonith-enabled
=#=#=#= Begin test: unknownguy: Create a resource =#=#=#=
-__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
-__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
-__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
-__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
Call failed: Permission denied
=#=#=#= End test: unknownguy: Create a resource - Permission denied (13) =#=#=#=
* Passed: cibadmin - unknownguy: Create a resource
@@ -273,8 +273,8 @@ Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Set stonith-enabled - Permission denied (13) =#=#=#=
* Passed: crm_attribute - l33t-haxor: Set stonith-enabled
=#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#=
-__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent
-__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent
+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
Call failed: Permission denied
=#=#=#= End test: l33t-haxor: Create a resource - Permission denied (13) =#=#=#=
* Passed: cibadmin - l33t-haxor: Create a resource
@@ -323,13 +323,13 @@ Call failed: Permission denied
=#=#=#= End test: niceguy: Query configuration - OK (0) =#=#=#=
* Passed: cibadmin - niceguy: Query configuration
=#=#=#= Begin test: niceguy: Set enable-acl =#=#=#=
-__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default
+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default
Error performing operation: Permission denied
Error setting enable-acl=false (section=crm_config, set=<null>): Permission denied
=#=#=#= End test: niceguy: Set enable-acl - Permission denied (13) =#=#=#=
* Passed: crm_attribute - niceguy: Set enable-acl
=#=#=#= Begin test: niceguy: Set stonith-enabled =#=#=#=
-__xml_acl_post_process: Creation of nvpair=cib-bootstrap-options-stonith-enabled is allowed
+__xml_acl_post_process: Creation of nvpair=cib-bootstrap-options-stonith-enabled is allowed
=#=#=#= Current cib after: niceguy: Set stonith-enabled =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="0">
<configuration>
@@ -376,8 +376,8 @@ __xml_acl_post_process: Creation of nvpair=cib-bootstrap-options-stonith-enable
=#=#=#= End test: niceguy: Set stonith-enabled - OK (0) =#=#=#=
* Passed: crm_attribute - niceguy: Set stonith-enabled
=#=#=#= Begin test: niceguy: Create a resource =#=#=#=
-__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default
-__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default
+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
Call failed: Permission denied
=#=#=#= End test: niceguy: Create a resource - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Create a resource
@@ -533,10 +533,11 @@ Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Remove a resource meta attribute - Permission denied (13) =#=#=#=
* Passed: crm_resource - l33t-haxor: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
-error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
-error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
-error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
-__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed
+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed
+Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Stopped
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="0">
<configuration>
@@ -589,9 +590,9 @@ __xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: niceguy: Query a resource meta attribute =#=#=#=
-error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
-error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
-error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Stopped
=#=#=#= Current cib after: niceguy: Query a resource meta attribute =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="0">
@@ -645,10 +646,10 @@ Stopped
=#=#=#= End test: niceguy: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Query a resource meta attribute
=#=#=#= Begin test: niceguy: Remove a resource meta attribute =#=#=#=
-error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
-error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
-error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
-Deleted dummy option: id=dummy-meta_attributes-target-role name=target-role
+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= Current cib after: niceguy: Remove a resource meta attribute =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="0">
<configuration>
@@ -699,10 +700,11 @@ Deleted dummy option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= End test: niceguy: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
-error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
-error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
-error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
-__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed
+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed
+Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Started
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
<cib epoch="12" num_updates="0" admin_epoch="0">
<configuration>
@@ -804,8 +806,8 @@ __xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - remove acls =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/acls: default
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/acls: default
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - remove acls - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - remove acls
@@ -859,9 +861,9 @@ Call failed: Permission denied
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - create resource =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default
-__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2']
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default
+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2']
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create resource - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create resource
@@ -914,8 +916,8 @@ Call failed: Permission denied
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - modify attribute (deny) =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - modify attribute (deny) - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - modify attribute (deny)
@@ -968,8 +970,8 @@ Call failed: Permission denied
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - delete attribute (deny) =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']: default
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']: default
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - delete attribute (deny) - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - delete attribute (deny)
@@ -1022,8 +1024,8 @@ Call failed: Permission denied
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - create attribute (deny) =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy'][@description]: default
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy'][@description]: default
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create attribute (deny) - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create attribute (deny)
@@ -1180,28 +1182,28 @@ Call failed: Permission denied
!#!#!#!#! Upgrading to pacemaker-2.0 and retesting !#!#!#!#!
=#=#=#= Begin test: root: Upgrade to pacemaker-2.0 =#=#=#=
-__xml_acl_post_process: Creation of acl_permission=observer-read-1 is allowed
-__xml_acl_post_process: Creation of acl_permission=observer-write-1 is allowed
-__xml_acl_post_process: Creation of acl_permission=observer-write-2 is allowed
-__xml_acl_post_process: Creation of acl_permission=admin-read-1 is allowed
-__xml_acl_post_process: Creation of acl_permission=admin-write-1 is allowed
-__xml_acl_post_process: Creation of acl_target=l33t-haxor is allowed
-__xml_acl_post_process: Creation of role=auto-l33t-haxor is allowed
-__xml_acl_post_process: Creation of acl_role=auto-l33t-haxor is allowed
-__xml_acl_post_process: Creation of acl_permission=crook-nothing is allowed
-__xml_acl_post_process: Creation of acl_target=niceguy is allowed
-__xml_acl_post_process: Creation of role=observer is allowed
-__xml_acl_post_process: Creation of acl_target=bob is allowed
-__xml_acl_post_process: Creation of role=admin is allowed
-__xml_acl_post_process: Creation of acl_target=badidea is allowed
-__xml_acl_post_process: Creation of role=auto-badidea is allowed
-__xml_acl_post_process: Creation of acl_role=auto-badidea is allowed
-__xml_acl_post_process: Creation of acl_permission=badidea-resources is allowed
-__xml_acl_post_process: Creation of acl_target=betteridea is allowed
-__xml_acl_post_process: Creation of role=auto-betteridea is allowed
-__xml_acl_post_process: Creation of acl_role=auto-betteridea is allowed
-__xml_acl_post_process: Creation of acl_permission=betteridea-nothing is allowed
-__xml_acl_post_process: Creation of acl_permission=betteridea-resources is allowed
+__xml_acl_post_process: Creation of acl_permission=observer-read-1 is allowed
+__xml_acl_post_process: Creation of acl_permission=observer-write-1 is allowed
+__xml_acl_post_process: Creation of acl_permission=observer-write-2 is allowed
+__xml_acl_post_process: Creation of acl_permission=admin-read-1 is allowed
+__xml_acl_post_process: Creation of acl_permission=admin-write-1 is allowed
+__xml_acl_post_process: Creation of acl_target=l33t-haxor is allowed
+__xml_acl_post_process: Creation of role=auto-l33t-haxor is allowed
+__xml_acl_post_process: Creation of acl_role=auto-l33t-haxor is allowed
+__xml_acl_post_process: Creation of acl_permission=crook-nothing is allowed
+__xml_acl_post_process: Creation of acl_target=niceguy is allowed
+__xml_acl_post_process: Creation of role=observer is allowed
+__xml_acl_post_process: Creation of acl_target=bob is allowed
+__xml_acl_post_process: Creation of role=admin is allowed
+__xml_acl_post_process: Creation of acl_target=badidea is allowed
+__xml_acl_post_process: Creation of role=auto-badidea is allowed
+__xml_acl_post_process: Creation of acl_role=auto-badidea is allowed
+__xml_acl_post_process: Creation of acl_permission=badidea-resources is allowed
+__xml_acl_post_process: Creation of acl_target=betteridea is allowed
+__xml_acl_post_process: Creation of role=auto-betteridea is allowed
+__xml_acl_post_process: Creation of acl_role=auto-betteridea is allowed
+__xml_acl_post_process: Creation of acl_permission=betteridea-nothing is allowed
+__xml_acl_post_process: Creation of acl_permission=betteridea-resources is allowed
=#=#=#= Current cib after: root: Upgrade to pacemaker-2.0 =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="1">
<configuration>
@@ -1271,10 +1273,10 @@ Error performing operation: Permission denied
=#=#=#= End test: unknownguy: Set stonith-enabled - Permission denied (13) =#=#=#=
* Passed: crm_attribute - unknownguy: Set stonith-enabled
=#=#=#= Begin test: unknownguy: Create a resource =#=#=#=
-__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
-__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
-__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
-__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs
Call failed: Permission denied
=#=#=#= End test: unknownguy: Create a resource - Permission denied (13) =#=#=#=
* Passed: cibadmin - unknownguy: Create a resource
@@ -1291,8 +1293,8 @@ Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Set stonith-enabled - Permission denied (13) =#=#=#=
* Passed: crm_attribute - l33t-haxor: Set stonith-enabled
=#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#=
-__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent
-__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent
+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
Call failed: Permission denied
=#=#=#= End test: l33t-haxor: Create a resource - Permission denied (13) =#=#=#=
* Passed: cibadmin - l33t-haxor: Create a resource
@@ -1351,7 +1353,7 @@ Call failed: Permission denied
=#=#=#= End test: niceguy: Query configuration - OK (0) =#=#=#=
* Passed: cibadmin - niceguy: Query configuration
=#=#=#= Begin test: niceguy: Set enable-acl =#=#=#=
-__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default
+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default
Error performing operation: Permission denied
Error setting enable-acl=false (section=crm_config, set=<null>): Permission denied
=#=#=#= End test: niceguy: Set enable-acl - Permission denied (13) =#=#=#=
@@ -1412,8 +1414,8 @@ Error setting enable-acl=false (section=crm_config, set=<null>): Permission deni
=#=#=#= End test: niceguy: Set stonith-enabled - OK (0) =#=#=#=
* Passed: crm_attribute - niceguy: Set stonith-enabled
=#=#=#= Begin test: niceguy: Create a resource =#=#=#=
-__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default
-__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default
+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
Call failed: Permission denied
=#=#=#= End test: niceguy: Create a resource - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Create a resource
@@ -1596,10 +1598,11 @@ Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Remove a resource meta attribute - Permission denied (13) =#=#=#=
* Passed: crm_resource - l33t-haxor: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
-error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
-error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
-error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
-__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed
+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed
+Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Stopped
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="0">
<configuration>
@@ -1661,9 +1664,9 @@ __xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: niceguy: Query a resource meta attribute =#=#=#=
-error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
-error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
-error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Stopped
=#=#=#= Current cib after: niceguy: Query a resource meta attribute =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="0">
@@ -1726,10 +1729,10 @@ Stopped
=#=#=#= End test: niceguy: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Query a resource meta attribute
=#=#=#= Begin test: niceguy: Remove a resource meta attribute =#=#=#=
-error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
-error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
-error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
-Deleted dummy option: id=dummy-meta_attributes-target-role name=target-role
+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= Current cib after: niceguy: Remove a resource meta attribute =#=#=#=
<cib epoch="12" num_updates="0" admin_epoch="0">
<configuration>
@@ -1789,10 +1792,11 @@ Deleted dummy option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= End test: niceguy: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
-error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
-error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
-error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
-__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed
+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed
+Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Started
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
<cib epoch="13" num_updates="0" admin_epoch="0">
<configuration>
@@ -1903,8 +1907,8 @@ __xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - remove acls =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/acls: default
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/acls: default
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - remove acls - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - remove acls
@@ -1967,9 +1971,9 @@ Call failed: Permission denied
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - create resource =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default
-__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2']
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default
+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2']
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create resource - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create resource
@@ -2031,8 +2035,8 @@ Call failed: Permission denied
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - modify attribute (deny) =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - modify attribute (deny) - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - modify attribute (deny)
@@ -2094,8 +2098,8 @@ Call failed: Permission denied
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - delete attribute (deny) =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']: default
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']: default
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - delete attribute (deny) - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - delete attribute (deny)
@@ -2157,8 +2161,8 @@ Call failed: Permission denied
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - create attribute (deny) =#=#=#=
-__xml_acl_check: 400 access denied to /cib[@epoch]: default
-__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy'][@description]: default
+__xml_acl_check: 400 access denied to /cib[@epoch]: default
+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy'][@description]: default
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create attribute (deny) - Permission denied (13) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create attribute (deny)
diff --git a/tools/regression.tools.exp b/tools/regression.tools.exp
index 287caf9..b2f4df1 100644
--- a/tools/regression.tools.exp
+++ b/tools/regression.tools.exp
@@ -626,6 +626,7 @@ Deleted nodes attribute: id=nodes-node1-standby name=standby
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
+Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
<cib epoch="15" num_updates="0" admin_epoch="1">
<configuration>
@@ -695,7 +696,7 @@ false
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
-Deleted dummy option: id=dummy-meta_attributes-is-managed name=is-managed
+Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
<cib epoch="16" num_updates="0" admin_epoch="1">
<configuration>
@@ -728,6 +729,7 @@ Deleted dummy option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
=#=#=#= Begin test: Create a resource attribute =#=#=#=
+Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
<cib epoch="17" num_updates="0" admin_epoch="1">
<configuration>
@@ -763,7 +765,7 @@ Deleted dummy option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
=#=#=#= Begin test: List the configured resources =#=#=#=
- dummy (ocf::pacemaker:Dummy): Stopped
+ dummy (ocf::pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
<cib epoch="17" num_updates="0" admin_epoch="1">
<configuration>
@@ -973,8 +975,8 @@ Error performing operation: No such device or address
Current cluster status:
Online: [ node1 ]
- dummy (ocf::pacemaker:Dummy): Stopped
- Fence (stonith:fence_true): Stopped
+ dummy (ocf::pacemaker:Dummy): Stopped
+ Fence (stonith:fence_true): Stopped
Transition Summary:
* Start dummy (node1)
@@ -990,8 +992,8 @@ Executing cluster transition:
Revised cluster status:
Online: [ node1 ]
- dummy (ocf::pacemaker:Dummy): Started node1
- Fence (stonith:fence_true): Started node1
+ dummy (ocf::pacemaker:Dummy): Started node1
+ Fence (stonith:fence_true): Started node1
=#=#=#= Current cib after: Bring resources online =#=#=#=
<cib epoch="18" num_updates="4" admin_epoch="1">
@@ -1710,8 +1712,8 @@ Error performing operation: No such device or address
Current cluster status:
Online: [ node1 ]
- dummy (ocf::pacemaker:Dummy): Started node1
- Fence (stonith:fence_true): Started node1
+ dummy (ocf::pacemaker:Dummy): Started node1
+ Fence (stonith:fence_true): Started node1
Performing requested modifications
+ Bringing node node2 online
@@ -1733,8 +1735,8 @@ Executing cluster transition:
Revised cluster status:
Online: [ node1 node2 node3 ]
- dummy (ocf::pacemaker:Dummy): Started node1
- Fence (stonith:fence_true): Started node2
+ dummy (ocf::pacemaker:Dummy): Started node1
+ Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
<cib epoch="22" num_updates="8" admin_epoch="1">
@@ -1996,8 +1998,8 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node2' with a score
Current cluster status:
Online: [ node1 node2 node3 ]
- dummy (ocf::pacemaker:Dummy): Started node1
- Fence (stonith:fence_true): Started node2
+ dummy (ocf::pacemaker:Dummy): Started node1
+ Fence (stonith:fence_true): Started node2
Transition Summary:
* Move dummy (Started node1 -> node3)
@@ -2010,8 +2012,8 @@ Executing cluster transition:
Revised cluster status:
Online: [ node1 node2 node3 ]
- dummy (ocf::pacemaker:Dummy): Started node3
- Fence (stonith:fence_true): Started node2
+ dummy (ocf::pacemaker:Dummy): Started node3
+ Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
<cib epoch="24" num_updates="2" admin_epoch="1">
diff --git a/valgrind-pcmk.suppressions b/valgrind-pcmk.suppressions
index 2e382df..0a47096 100644
--- a/valgrind-pcmk.suppressions
+++ b/valgrind-pcmk.suppressions
@@ -1,4 +1,4 @@
-# Valgrind suppressions for PE testing
+# Valgrind suppressions for Pacemaker testing
{
Valgrind bug
Memcheck:Addr8
@@ -57,6 +57,15 @@
}
{
+ Cman - Who cares if unused bytes are uninitialized
+ Memcheck:Param
+ sendmsg(msg)
+ fun:__sendmsg_nocancel
+ obj:*/libcman.so.3.0
+ obj:*/libcman.so.3.0
+}
+
+{
Cman - Jump or move depends on uninitialized values
Memcheck:Cond
obj:*/libcman.so.3.0