diff --git a/attrd/commands.c b/attrd/commands.c
index 12771ee..c5badc5 100644
--- a/attrd/commands.c
+++ b/attrd/commands.c
@@ -377,7 +377,17 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml)
attrd_peer_sync(peer, xml);
} else if(safe_str_eq(op, "peer-remove")) {
- attrd_peer_remove(0, host, TRUE, peer->uname);
+ int host_id = 0;
+ char *endptr = NULL;
+
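+ /* The host value may be a node name or a numeric node ID; if it
+ * parses as an ID, remove by ID rather than by name */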
+ errno = 0;
+ host_id = (host == NULL)? 0 : strtol(host, &endptr, 10);
+ if (host == NULL || errno != 0 || endptr == host || *endptr != '\0') {
+ host_id = 0;
+ } else {
+ host = NULL;
+ }
+ attrd_peer_remove(host_id, host, TRUE, peer->uname);
+
} else if(safe_str_eq(op, "sync-response")
&& safe_str_neq(peer->uname, attrd_cluster->uname)) {
diff --git a/attrd/legacy.c b/attrd/legacy.c
index d4733ec..d7ed53e 100644
--- a/attrd/legacy.c
+++ b/attrd/legacy.c
@@ -768,6 +768,9 @@ attrd_local_callback(xmlNode * msg)
crm_notice("Sending full refresh (origin=%s)", from);
g_hash_table_foreach(attr_hash, update_for_hash_entry, NULL);
return;
+ } else if(safe_str_eq(op, "peer-remove")) {
+ /* The legacy attrd doesn't understand this command - swallow it silently */
+ return;
}
if (host != NULL && safe_str_neq(host, attrd_uname)) {
diff --git a/cib/main.c b/cib/main.c
index 00fca9b..69a957c 100644
--- a/cib/main.c
+++ b/cib/main.c
@@ -439,6 +439,11 @@ cib_peer_update_callback(enum crm_status_type type, crm_node_t * node, const voi
crm_info("No more peers");
terminate_cib(__FUNCTION__, FALSE);
}
+
+ if(type == crm_status_nstate && node->id && safe_str_eq(node->state, CRM_NODE_LOST)) {
+ /* Avoid conflicts by limiting the membership list to active members */
+ reap_crm_member(node->id, NULL);
+ }
}
#if SUPPORT_HEARTBEAT
diff --git a/crmd/messages.c b/crmd/messages.c
index d38f2a3..eea4f7b 100644
--- a/crmd/messages.c
+++ b/crmd/messages.c
@@ -39,6 +39,7 @@
GListPtr fsa_message_queue = NULL;
extern void crm_shutdown(int nsig);
+extern crm_ipc_t *attrd_ipc;
void handle_response(xmlNode * stored_msg);
enum crmd_fsa_input handle_request(xmlNode * stored_msg, enum crmd_fsa_cause cause);
enum crmd_fsa_input handle_shutdown_request(xmlNode * stored_msg);
diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
index 0cbfeec..5852e7e 100644
--- a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
@@ -19,7 +19,7 @@ ifdef::pcs[]
Started: [ pcmk-1 pcmk-2 ]
Clone Set: WebSite-clone [WebSite]
Started: [ pcmk-1 pcmk-2 ]
-# pcs resource defaults
+# pcs resource rsc defaults
resource-stickiness: 100
# pcs resource op defaults
timeout: 240s
@@ -303,7 +303,7 @@ ifdef::pcs[]
* resource-stickiness - Specify the aversion to moving resources to other machines
[source,C]
----
-# pcs resource defaults
+# pcs resource rsc defaults
resource-stickiness: 100
----
endif::[]
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
index 5943c19..714a0d3 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
@@ -222,7 +222,7 @@ ifdef::pcs[]
WebFS (ocf::heartbeat:Filesystem) Stopped
Clone Set: dlm-clone [dlm]
Stopped: [ dlm:0 dlm:1 ]
-# pcs cluster cib-push dlm_cfg
+# pcs cluster push cib dlm_cfg
CIB updated
# pcs status
@@ -695,7 +695,7 @@ shell and watching the cluster's response
ifdef::pcs[]
[source,C]
-----
-# pcs cluster cib-push active_cfg
+# pcs cluster push cib active_cfg
# pcs resource enable WebFS
-----
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
index c91647b..7da8fca 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
@@ -656,8 +656,8 @@ resource, but it is often sufficient to change the default.
ifdef::pcs[]
[source,C]
----
-# pcs resource defaults resource-stickiness=100
-# pcs resource defaults
+# pcs resource rsc defaults resource-stickiness=100
+# pcs resource rsc defaults
resource-stickiness: 100
----
endif::[]
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
index 71777db..236bb77 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
@@ -125,7 +125,7 @@ it fails, the resource agent used by Pacemaker assumes the server-status
URL is available. Look for the following in '/etc/httpd/conf/httpd.conf'
and make sure it is not disabled or commented out:
-[source,Apache Configuration]
+[source,C]
-----
<Location /server-status>
SetHandler server-status
@@ -601,7 +601,7 @@ WebSite will be forced to move to pcmk-1.
[source,C]
-----
# pcs constraint location WebSite prefers pcmk-1=INFINITY
-# pcs constraint --full
+# pcs constraint all
Location Constraints:
Resource: WebSite
Enabled on: pcmk-1 (score:INFINITY) (id:location-WebSite-pcmk-1-INFINITY)
@@ -708,7 +708,7 @@ Ordering Constraints:
start ClusterIP then start WebSite (Mandatory) (id:order-ClusterIP-WebSite-mandatory)
Colocation Constraints:
WebSite with ClusterIP (INFINITY) (id:colocation-WebSite-ClusterIP-INFINITY)
-# pcs constraint remove location-WebSite-pcmk-1-INFINITY
+# pcs constraint rm location-WebSite-pcmk-1-INFINITY
# pcs constraint
Location Constraints:
Ordering Constraints:
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
index cf47602..aa0b4b9 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
@@ -210,11 +210,23 @@ outside world.
----
# setenforce 0
# sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config
+# systemctl disable firewalld.service
+# systemctl stop firewalld.service
+----
+
+or (on older Fedora)
+
+[source,C]
+----
+# setenforce 0
+# sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config
# systemctl disable iptables.service
# rm '/etc/systemd/system/basic.target.wants/iptables.service'
# systemctl stop iptables.service
----
+
+
=== Short Node Names ===
During installation, we filled in the machine's fully qualified domain
@@ -538,7 +550,7 @@ Password:
pcmk-1: Authorized
pcmk-2: Authorized
-# pcs cluster setup --name mycluster pcmk-1 pcmk-2
+# pcs cluster setup mycluster pcmk-1 pcmk-2
pcmk-1: Succeeded
pcmk-2: Succeeded
----
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt b/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
index cc2cec6..f6b50d9 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
@@ -334,7 +334,7 @@ cib.
[source,C]
----
-# pcs cluster cib-push drbd_cfg
+# pcs cluster push cib drbd_cfg
CIB updated
# pcs status
@@ -594,7 +594,7 @@ cluster put it into effect.
ifdef::pcs[]
[source,C]
----
-# pcs cluster cib-push fs_cfg
+# pcs cluster push cib fs_cfg
CIB updated
# pcs status
Last updated: Fri Aug 10 12:47:01 2012
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt b/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
index 9518fc2..123bd4b 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
@@ -81,7 +81,7 @@ ifdef::crmsh[]
endif::[]
ifdef::pcs[]
-. Commit the new configuration. +pcs cluster cib-push stonith_cfg+
+. Commit the new configuration. +pcs cluster push cib stonith_cfg+
endif::[]
. Once the stonith resource is running, you can test it by executing:
@@ -261,7 +261,7 @@ Now push the configuration into the cluster.
ifdef::pcs[]
[source,C]
----
-# pcs cluster cib-push stonith_cfg
+# pcs cluster push cib stonith_cfg
----
endif::[]
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt b/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt
index 530e37b..c62cae4 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt
@@ -217,6 +217,13 @@ Next, check for any ERRORs during startup - there shouldn't be any.
# grep -i error /var/log/messages
----
+or (on Fedora 20)
+
+[source,C]
+----
+# journalctl | grep -i error
+----
+
Repeat these checks on the other node. The results should be the same.
endif::[]
diff --git a/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po b/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po
index f9cc723..daefc41 100644
--- a/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po
+++ b/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po
@@ -379,7 +379,7 @@ msgstr "当有半数以上的节点在线时,这个集群就认为自己拥有
#. Tag: literallayout
#, no-c-format
msgid "total_nodes < 2 * active_nodes"
-msgstr "总节点数 < 2 * 活跃节点数"
+msgstr ""
#. Tag: para
#, no-c-format
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt b/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt
index 8498ce0..b4eaf49 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt
@@ -36,28 +36,33 @@ nothing able to run anywhere and selectively enable allowed nodes.
=== Options ===
.Options for Simple Location Constraints
-[width="95%",cols="2m,5<",options="header",align="center"]
+[width="95%",cols="2m,1,5<",options="header",align="center"]
|=========================================================
|Field
+|Default
|Description
|id
+|
|A unique name for the constraint
indexterm:[id,Location Constraints]
indexterm:[Constraints,Location,id]
|rsc
+|
|A resource name
indexterm:[rsc,Location Constraints]
indexterm:[Constraints,Location,rsc]
|node
+|
|A node's name
indexterm:[node,Location Constraints]
indexterm:[Constraints,Location,node]
|score
+|
|Positive values indicate the resource should run on this
node. Negative values indicate the resource should not run on this
node.
@@ -67,6 +72,30 @@ indexterm:[Constraints,Location,node]
indexterm:[score,Location Constraints]
indexterm:[Constraints,Location,score]
+|resource-discovery
+|+always+
+|Indicates whether Pacemaker should perform resource discovery on this
+node for the specified resource. Limiting resource discovery to the
+subset of nodes the resource is physically capable of running on can
+significantly boost performance when a large number of nodes is present.
+This option should be considered when pacemaker_remote is used to expand
+the node count into the hundreds of nodes. (See the example below the
+table.)
+
+* 'always' - Always perform resource discovery for the specified resource on this node.
+
+* 'never' - Never perform resource discovery for the specified resource on this node.
+ This option should generally be used with a -INFINITY score, although that is not
+ strictly required.
+
+* 'exclusive' - Perform resource discovery for the specified resource only on this node.
+ Multiple location constraints using 'exclusive' discovery for the same resource across
+ different nodes create the subset of nodes to which resource discovery is limited. If a
+ resource is marked for 'exclusive' discovery on one or more nodes, that resource may only
+ be placed within that subset of nodes.
+
+indexterm:[Resource Discovery,Location Constraints]
+indexterm:[Constraints,Location,Resource Discovery]
+
|=========================================================
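+
+A minimal sketch of a location constraint that opts a node out of
+discovery for one resource (the syntax mirrors the +rsc_location+
+examples used in the regression tests):
+
+[source,XML]
+----
+<rsc_location id="location-FAKE1-remote1" rsc="FAKE1" node="remote1"
+       score="-INFINITY" resource-discovery="never"/>
+----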
=== Asymmetrical "Opt-In" Clusters ===
diff --git a/fencing/commands.c b/fencing/commands.c
index 577ea95..c193a9d 100644
--- a/fencing/commands.c
+++ b/fencing/commands.c
@@ -2109,6 +2109,14 @@ handle_request(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * req
free_async_command(cmd);
free_xml(reply);
+ } else if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
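+ /* Relayed here by st_ipc_dispatch(); drop the node from the
+ * local membership cache */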
+ int id = 0;
+ const char *name = NULL;
+
+ crm_element_value_int(request, XML_ATTR_ID, &id);
+ name = crm_element_value(request, XML_ATTR_UNAME);
+ reap_crm_member(id, name);
+
} else {
crm_err("Unknown %s from %s", op, client ? client->name : remote_peer);
crm_log_xml_warn(request, "UnknownOp");
diff --git a/fencing/main.c b/fencing/main.c
index 2694452..70b5bde 100644
--- a/fencing/main.c
+++ b/fencing/main.c
@@ -97,6 +97,7 @@ st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
int call_options = 0;
xmlNode *request = NULL;
crm_client_t *c = crm_client_get(qbc);
+ const char *op = NULL;
if (c == NULL) {
crm_info("Invalid client: %p", qbc);
@@ -109,6 +110,20 @@ st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
return 0;
}
+
+ op = crm_element_value(request, F_CRM_TASK);
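+ /* Node-cache removal requests are relayed to all peers so that
+ * every stonithd instance prunes the node (see handle_request()
+ * in fencing/commands.c) */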
+ if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
+ crm_xml_add(request, F_TYPE, T_STONITH_NG);
+ crm_xml_add(request, F_STONITH_OPERATION, op);
+ crm_xml_add(request, F_STONITH_CLIENTID, c->id);
+ crm_xml_add(request, F_STONITH_CLIENTNAME, crm_client_name(c));
+ crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
+
+ send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
+ free_xml(request);
+ return 0;
+ }
+
if (c->name == NULL) {
const char *value = crm_element_value(request, F_STONITH_CLIENTNAME);
@@ -1099,6 +1114,7 @@ stonith_cleanup(void)
static struct crm_option long_options[] = {
{"stand-alone", 0, 0, 's'},
{"stand-alone-w-cpg", 0, 0, 'c'},
+ {"logfile", 1, 0, 'l'},
{"verbose", 0, 0, 'V'},
{"version", 0, 0, '$'},
{"help", 0, 0, '?'},
@@ -1200,6 +1216,9 @@ main(int argc, char **argv)
case 'V':
crm_bump_log_level(argc, argv);
break;
+ case 'l':
+ crm_add_logfile(optarg);
+ break;
case 's':
stand_alone = TRUE;
break;
diff --git a/fencing/regression.py.in b/fencing/regression.py.in
index c4cb2d8..fe6d418 100644
--- a/fencing/regression.py.in
+++ b/fencing/regression.py.in
@@ -82,24 +82,34 @@ class Test:
test.wait()
if self.verbose:
+ self.stonith_options = self.stonith_options + " -V"
print "Starting stonithd with %s" % self.stonith_options
+ if os.path.exists("/tmp/stonith-regression.log"):
+ os.remove('/tmp/stonith-regression.log')
+
self.stonith_process = subprocess.Popen(
- shlex.split("@CRM_DAEMON_DIR@/stonithd %s -V" % self.stonith_options),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ shlex.split("@CRM_DAEMON_DIR@/stonithd %s -l /tmp/stonith-regression.log" % self.stonith_options))
time.sleep(1)
def clean_environment(self):
if self.stonith_process:
self.stonith_process.terminate()
+ self.stonith_process.wait()
- self.stonith_output = self.stonith_process.communicate()[1]
+ self.stonith_output = ""
self.stonith_process = None
+ f = open('/tmp/stonith-regression.log', 'r')
+ for line in f.readlines():
+ self.stonith_output = self.stonith_output + line
+ f.close()
+
if self.verbose:
+ print "Daemon Output Start"
print self.stonith_output
+ print "Daemon Output End"
+ os.remove('/tmp/stonith-regression.log')
def add_stonith_log_pattern(self, pattern):
self.stonith_patterns.append(pattern)
@@ -953,7 +963,7 @@ if __name__ == "__main__":
self.stop_corosync()
if self.verbose and os.path.exists('/var/log/corosync.log'):
- print "Daemon output"
+ print "Corosync output"
f = open('/var/log/corosync.log', 'r')
for line in f.readlines():
print line.strip()
diff --git a/lib/cib/cib_remote.c b/lib/cib/cib_remote.c
index d9dde9b..9b98892 100644
--- a/lib/cib/cib_remote.c
+++ b/lib/cib/cib_remote.c
@@ -226,6 +226,8 @@ cib_tls_signon(cib_t * cib, crm_remote_t * connection, gboolean event_channel)
return -ENOTCONN;
}
+ connection->tcp_socket = sock;
+
if (private->encrypted) {
/* initialize GnuTls lib */
#ifdef HAVE_GNUTLS_GNUTLS_H
@@ -250,8 +252,6 @@ cib_tls_signon(cib_t * cib, crm_remote_t * connection, gboolean event_channel)
#else
return -EPROTONOSUPPORT;
#endif
- } else {
- connection->tcp_socket = sock;
}
/* login to server */
diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c
index 24700e5..70a0321 100644
--- a/lib/cluster/membership.c
+++ b/lib/cluster/membership.c
@@ -389,9 +389,15 @@ crm_find_peer(unsigned int id, const char *uname)
}
} else if(uname && by_id->uname) {
- crm_dump_peer_hash(LOG_INFO, __FUNCTION__);
- crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u %s", by_id->uname, by_name->uname, id, uname);
- crm_abort(__FILE__, __FUNCTION__, __LINE__, "member weirdness", TRUE, TRUE);
+ if(safe_str_eq(uname, by_id->uname)) {
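+ /* Same name, new ID: the node rejoined with a different nodeid,
+ * so drop the stale entry indexed by the old one */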
+ crm_notice("Node '%s' has changed its ID from %u to %u", by_id->uname, by_name->id, by_id->id);
+ g_hash_table_foreach_remove(crm_peer_cache, crm_hash_find_by_data, by_name);
+
+ } else {
+ crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u %s", by_id->uname, by_name->uname, id, uname);
+ crm_dump_peer_hash(LOG_INFO, __FUNCTION__);
+ crm_abort(__FILE__, __FUNCTION__, __LINE__, "member weirdness", TRUE, TRUE);
+ }
} else if(id && by_name->id) {
crm_warn("Node %u and %u share the same name: '%s'", by_id->id, by_name->id, uname);
diff --git a/lib/common/remote.c b/lib/common/remote.c
index e2492b9..f11ebcd 100644
--- a/lib/common/remote.c
+++ b/lib/common/remote.c
@@ -308,13 +308,16 @@ crm_remote_sendv(crm_remote_t * remote, struct iovec * iov, int iovs)
int rc = -ESOCKTNOSUPPORT;
for(; lpc < iovs; lpc++) {
- if (remote->tcp_socket) {
- rc = crm_send_plaintext(remote->tcp_socket, iov[lpc].iov_base, iov[lpc].iov_len);
-#ifdef HAVE_GNUTLS_GNUTLS_H
- } else if (remote->tls_session) {
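+ /* tcp_socket is now set even for encrypted connections (see
+ * cib_tls_signon()), so the TLS session must be checked first */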
+#ifdef HAVE_GNUTLS_GNUTLS_H
+ if (remote->tls_session) {
rc = crm_send_tls(remote->tls_session, iov[lpc].iov_base, iov[lpc].iov_len);
+ } else if (remote->tcp_socket) {
+#else
+ if (remote->tcp_socket) {
#endif
+ rc = crm_send_plaintext(remote->tcp_socket, iov[lpc].iov_base, iov[lpc].iov_len);
+
} else {
crm_err("Unsupported connection type");
}
@@ -448,14 +451,16 @@ crm_remote_ready(crm_remote_t * remote, int timeout /* ms */ )
int rc = 0;
time_t start;
- if (remote->tcp_socket) {
- sock = remote->tcp_socket;
#ifdef HAVE_GNUTLS_GNUTLS_H
- } else if (remote->tls_session) {
+ if (remote->tls_session) {
void *sock_ptr = gnutls_transport_get_ptr(*remote->tls_session);
sock = GPOINTER_TO_INT(sock_ptr);
+ } else if (remote->tcp_socket) {
+#else
+ if (remote->tcp_socket) {
#endif
+ sock = remote->tcp_socket;
} else {
crm_err("Unsupported connection type");
}
@@ -519,17 +524,8 @@ crm_remote_recv_once(crm_remote_t * remote)
CRM_ASSERT(remote->buffer != NULL);
}
- if (remote->tcp_socket) {
- errno = 0;
- rc = read(remote->tcp_socket,
- remote->buffer + remote->buffer_offset,
- remote->buffer_size - remote->buffer_offset);
- if(rc < 0) {
- rc = -errno;
- }
-
#ifdef HAVE_GNUTLS_GNUTLS_H
- } else if (remote->tls_session) {
+ if (remote->tls_session) {
rc = gnutls_record_recv(*(remote->tls_session),
remote->buffer + remote->buffer_offset,
remote->buffer_size - remote->buffer_offset);
@@ -541,7 +537,18 @@ crm_remote_recv_once(crm_remote_t * remote)
crm_debug("TLS receive failed: %s (%d)", gnutls_strerror(rc), rc);
rc = -pcmk_err_generic;
}
+ } else if (remote->tcp_socket) {
+#else
+ if (remote->tcp_socket) {
#endif
+ errno = 0;
+ rc = read(remote->tcp_socket,
+ remote->buffer + remote->buffer_offset,
+ remote->buffer_size - remote->buffer_offset);
+ if(rc < 0) {
+ rc = -errno;
+ }
+
} else {
crm_err("Unsupported connection type");
return -ESOCKTNOSUPPORT;
diff --git a/lib/common/xml.c b/lib/common/xml.c
index 58d0a00..e63a582 100644
--- a/lib/common/xml.c
+++ b/lib/common/xml.c
@@ -1281,7 +1281,10 @@ __xml_build_changes(xmlNode * xml, xmlNode *patchset)
for (pIter = crm_first_attr(xml); pIter != NULL; pIter = pIter->next) {
const char *value = crm_element_value(xml, (const char *)pIter->name);
- crm_xml_add(result, (const char *)pIter->name, value);
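+ /* Skip attributes flagged as deleted so they do not reappear
+ * in the change set */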
+ p = pIter->_private;
+ if (is_not_set(p->flags, xpf_deleted)) {
+ crm_xml_add(result, (const char *)pIter->name, value);
+ }
}
}
@@ -5715,7 +5718,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g
}
} else {
- crm_notice("Upgrading %s-style configuration to %s with %s",
+ crm_debug("Upgrading %s-style configuration to %s with %s",
known_schemas[lpc].name, known_schemas[next].name,
known_schemas[lpc].transform ? known_schemas[lpc].transform : "no-op");
@@ -5746,7 +5749,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g
}
if (*best > match) {
- crm_notice("%s the configuration from %s to %s",
+ crm_info("%s the configuration from %s to %s",
transform?"Transformed":"Upgraded",
value ? value : "<none>", known_schemas[*best].name);
crm_xml_add(xml, XML_ATTR_VALIDATION, known_schemas[*best].name);
diff --git a/lib/services/services.c b/lib/services/services.c
index 582fbe1..c7b6c89 100644
--- a/lib/services/services.c
+++ b/lib/services/services.c
@@ -305,6 +305,7 @@ services_action_create_generic(const char *exec, const char *args[])
void
services_action_cleanup(svc_action_t * op)
{
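+ /* Some of the opaque members cleaned up here exist only in
+ * DBus-enabled builds */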
+#if SUPPORT_DBUS
if(op->opaque->timerid != 0) {
crm_trace("Removing timer for call %s to %s", op->action, op->rsc);
g_source_remove(op->opaque->timerid);
@@ -330,6 +331,7 @@ services_action_cleanup(svc_action_t * op)
mainloop_del_fd(op->opaque->stdout_gsource);
op->opaque->stdout_gsource = NULL;
}
+#endif
}
void
diff --git a/lrmd/regression.py.in b/lrmd/regression.py.in
index 649c984..50e975e 100755
--- a/lrmd/regression.py.in
+++ b/lrmd/regression.py.in
@@ -435,14 +435,13 @@ if __name__ == "__main__":
os.system("cp %s/extra/resources/%s @OCF_RA_DIR@/pacemaker/%s" % (build_dir, ra, ra))
os.system("chmod a+x @OCF_RA_DIR@/pacemaker/%s" % (ra))
- else:
- # Assume it's installed
- print "Using @datadir@/@PACKAGE@/tests/cts/LSBDummy"
- os.system("cp @datadir@/@PACKAGE@/tests/cts/LSBDummy /etc/init.d/LSBDummy")
-
- os.system("chmod a+x /etc/init.d/LSBDummy")
- os.system("ls -al /etc/init.d/LSBDummy")
+ else:
+ # Assume it's installed
+ print "Using @datadir@/@PACKAGE@/tests/cts/LSBDummy"
+ os.system("cp @datadir@/@PACKAGE@/tests/cts/LSBDummy /etc/init.d/LSBDummy")
+ os.system("chmod a+x /etc/init.d/LSBDummy")
+ os.system("ls -al /etc/init.d/LSBDummy")
os.system("mkdir -p @CRM_CORE_DIR@/root")
if os.path.exists("/bin/systemctl"):
diff --git a/pengine/constraints.c b/pengine/constraints.c
index 88e382b..a2ce9c4 100644
--- a/pengine/constraints.c
+++ b/pengine/constraints.c
@@ -52,6 +52,8 @@ enum pe_order_kind {
enum pe_ordering get_flags(const char *id, enum pe_order_kind kind,
const char *action_first, const char *action_then, gboolean invert);
enum pe_ordering get_asymmetrical_flags(enum pe_order_kind kind);
+static rsc_to_node_t *generate_location_rule(resource_t * rsc, xmlNode * rule_xml,
+ const char *discovery, pe_working_set_t * data_set);
gboolean
unpack_constraints(xmlNode * xml_constraints, pe_working_set_t * data_set)
@@ -687,7 +689,7 @@ unpack_rsc_location(xmlNode * xml_obj, resource_t * rsc_lh, const char * role,
if (crm_str_eq((const char *)rule_xml->name, XML_TAG_RULE, TRUE)) {
empty = FALSE;
crm_trace("Unpacking %s/%s", id, ID(rule_xml));
- generate_location_rule(rsc_lh, rule_xml, data_set);
+ generate_location_rule(rsc_lh, rule_xml, discovery, data_set);
}
}
@@ -917,8 +919,8 @@ get_node_score(const char *rule, const char *score, gboolean raw, node_t * node)
return score_f;
}
-rsc_to_node_t *
-generate_location_rule(resource_t * rsc, xmlNode * rule_xml, pe_working_set_t * data_set)
+static rsc_to_node_t *
+generate_location_rule(resource_t * rsc, xmlNode * rule_xml, const char *discovery, pe_working_set_t * data_set)
{
const char *rule_id = NULL;
const char *score = NULL;
@@ -960,7 +962,7 @@ generate_location_rule(resource_t * rsc, xmlNode * rule_xml, pe_working_set_t *
do_and = FALSE;
}
- location_rule = rsc2node_new(rule_id, rsc, 0, NULL, NULL, data_set);
+ location_rule = rsc2node_new(rule_id, rsc, 0, discovery, NULL, data_set);
if (location_rule == NULL) {
return NULL;
diff --git a/pengine/test10/resource-discovery.xml b/pengine/test10/resource-discovery.xml
index 8b517df..1692cdb 100644
--- a/pengine/test10/resource-discovery.xml
+++ b/pengine/test10/resource-discovery.xml
@@ -97,7 +97,13 @@
<rsc_location id="location-FAKE1-remote1" node="remote1" resource-discovery="never" rsc="FAKE1" score="-INFINITY"/>
<rsc_location id="location-FAKE1-18node1" node="18node1" resource-discovery="never" rsc="FAKE1" score="-INFINITY"/>
<rsc_location id="location-FAKE1-18node2" node="18node2" resource-discovery="never" rsc="FAKE1" score="-INFINITY"/>
- <rsc_location id="location-FAKE1-18node3" node="18node3" resource-discovery="never" rsc="FAKE1" score="-INFINITY"/>
+
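+ <!-- Equivalent to the node= constraints above, expressed as a rule
+ so that resource-discovery on rule-based constraints is exercised -->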
+ <rsc_location id="location-FAKE1-18node3" resource-discovery="never" rsc="FAKE1" >
+ <rule score="-INFINITY" id="vlan003-on-cluster-nodes-rule">
+ <expression attribute="#uname" operation="eq" value="18node3" id="vlan003-on-cluster-nodes-rule-expression"/>
+ </rule>
+ </rsc_location>
+
<rsc_location id="location-FAKE2-18node1" node="18node1" resource-discovery="exclusive" rsc="FAKE2" score="10"/>
<rsc_location id="location-FAKE2-18node2" node="18node2" resource-discovery="exclusive" rsc="FAKE2" score="100"/>
<rsc_location id="location-FAKE3-18node3--INFINITY" node="18node3" resource-discovery="exclusive" rsc="FAKE3" score="INFINITY"/>
diff --git a/pengine/utils.h b/pengine/utils.h
index 5142e68..270d32a 100644
--- a/pengine/utils.h
+++ b/pengine/utils.h
@@ -39,9 +39,6 @@ extern gboolean rsc_ticket_new(const char *id, resource_t * rsc_lh, ticket_t * t
const char *state_lh, const char *loss_policy,
pe_working_set_t * data_set);
-extern rsc_to_node_t *generate_location_rule(resource_t * rsc, xmlNode * location_rule,
- pe_working_set_t * data_set);
-
extern gint sort_node_weight(gconstpointer a, gconstpointer b, gpointer data_set);
extern gboolean can_run_resources(const node_t * node);
diff --git a/tools/crm_node.c b/tools/crm_node.c
index 98847f3..5932f98 100644
--- a/tools/crm_node.c
+++ b/tools/crm_node.c
@@ -34,6 +34,7 @@
#include <crm/common/mainloop.h>
#include <crm/msg_xml.h>
#include <crm/cib.h>
+#include <crm/attrd.h>
int command = 0;
int ccm_fd = 0;
@@ -92,7 +93,7 @@ cib_remove_node(uint32_t id, const char *name)
crm_trace("Removing %s from the CIB", name);
/* TODO: Use 'id' instead */
- if(name == NULL) {
+ if(name == NULL && id == 0) {
return -ENOTUNIQ;
}
@@ -101,17 +102,24 @@ cib_remove_node(uint32_t id, const char *name)
crm_xml_add(node, XML_ATTR_UNAME, name);
crm_xml_add(node_state, XML_ATTR_UNAME, name);
+ if(id) {
+ char buffer[64];
+ if(snprintf(buffer, 63, "%u", id) > 0) {
+ crm_xml_add(node, XML_ATTR_ID, buffer);
+ crm_xml_add(node_state, XML_ATTR_ID, buffer);
+ }
+ }
cib = cib_new();
cib->cmds->signon(cib, crm_system_name, cib_command);
rc = cib->cmds->delete(cib, XML_CIB_TAG_NODES, node, cib_sync_call);
if (rc != pcmk_ok) {
- printf("Could not remove %s from " XML_CIB_TAG_NODES ": %s", name, pcmk_strerror(rc));
+ printf("Could not remove %s/%u from " XML_CIB_TAG_NODES ": %s", name, id, pcmk_strerror(rc));
}
rc = cib->cmds->delete(cib, XML_CIB_TAG_STATUS, node_state, cib_sync_call);
if (rc != pcmk_ok) {
- printf("Could not remove %s from " XML_CIB_TAG_STATUS ": %s", name, pcmk_strerror(rc));
+ printf("Could not remove %s/%u from " XML_CIB_TAG_STATUS ": %s", name, id, pcmk_strerror(rc));
}
cib->cmds->signoff(cib);
@@ -156,6 +164,7 @@ int tools_remove_node_cache(const char *node, const char *target)
}
}
+
errno = 0;
n = strtol(node, &endptr, 10);
if (errno != 0 || endptr == node || *endptr != '\0') {
@@ -166,21 +175,39 @@ int tools_remove_node_cache(const char *node, const char *target)
name = get_node_name(n);
}
- crm_trace("Removing %s aka. %s from the membership cache", name, node);
+ crm_trace("Removing %s aka. %s (%u) from the membership cache", name, node, n);
- cmd = create_request(CRM_OP_RM_NODE_CACHE,
- NULL, NULL, target, "crm_node", admin_uuid);
+ if(safe_str_eq(target, T_ATTRD)) {
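+ /* attrd does not parse the standard create_request() envelope;
+ * build the flat message format it expects instead */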
+ cmd = create_xml_node(NULL, __FUNCTION__);
- if (n) {
- char buffer[64];
+ crm_xml_add(cmd, F_TYPE, T_ATTRD);
+ crm_xml_add(cmd, F_ORIG, crm_system_name);
- if(snprintf(buffer, 63, "%u", n) > 0) {
- crm_xml_add(cmd, XML_ATTR_ID, buffer);
+ crm_xml_add(cmd, F_ATTRD_TASK, "peer-remove");
+ crm_xml_add(cmd, F_ATTRD_HOST, name);
+
+ if (n) {
+ char buffer[64];
+ if(snprintf(buffer, 63, "%u", n) > 0) {
+ crm_xml_add(cmd, F_ATTRD_HOST_ID, buffer);
+ }
+ }
+
+ } else {
+ cmd = create_request(CRM_OP_RM_NODE_CACHE,
+ NULL, NULL, target, crm_system_name, admin_uuid);
+ if (n) {
+ char buffer[64];
+ if(snprintf(buffer, 63, "%u", n) > 0) {
+ crm_xml_add(cmd, XML_ATTR_ID, buffer);
+ }
}
+ crm_xml_add(cmd, XML_ATTR_UNAME, name);
}
- crm_xml_add(cmd, XML_ATTR_UNAME, name);
rc = crm_ipc_send(conn, cmd, 0, 0, NULL);
+ crm_debug("%s peer cache cleanup for %s (%u): %d", target, name, n, rc);
+
if (rc > 0) {
rc = cib_remove_node(n, name);
}
@@ -189,8 +216,8 @@ int tools_remove_node_cache(const char *node, const char *target)
crm_ipc_close(conn);
crm_ipc_destroy(conn);
}
- free_xml(cmd);
free(admin_uuid);
+ free_xml(cmd);
free(name);
return rc > 0 ? 0 : rc;
}
@@ -649,6 +676,12 @@ try_corosync(int command, enum cluster_type_e stack)
mainloop_io_t *ipc = NULL;
GMainLoop *amainloop = NULL;
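+ /* Daemons that keep their own peer cache and must be told to
+ * forget the node */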
+ const char *daemons[] = {
+ CRM_SYSTEM_CRMD,
+ "stonith-ng",
+ T_ATTRD,
+ CRM_SYSTEM_MCP,
+ };
struct ipc_client_callbacks node_callbacks = {
.dispatch = node_mcp_dispatch,
@@ -657,13 +690,11 @@ try_corosync(int command, enum cluster_type_e stack)
switch (command) {
case 'R':
- if (tools_remove_node_cache(target_uname, CRM_SYSTEM_CRMD)) {
- crm_err("Failed to connect to "CRM_SYSTEM_CRMD" to remove node '%s'", target_uname);
- crm_exit(pcmk_err_generic);
- }
- if (tools_remove_node_cache(target_uname, CRM_SYSTEM_MCP)) {
- crm_err("Failed to connect to "CRM_SYSTEM_MCP" to remove node '%s'", target_uname);
- crm_exit(pcmk_err_generic);
+ for(rc = 0; rc < DIMOF(daemons); rc++) {
+ if (tools_remove_node_cache(target_uname, daemons[rc])) {
+ crm_err("Failed to connect to %s to remove node '%s'", daemons[rc], target_uname);
+ crm_exit(pcmk_err_generic);
+ }
}
crm_exit(pcmk_ok);
break;
@@ -834,8 +865,8 @@ main(int argc, char **argv)
force_flag = TRUE;
break;
case 'R':
- dangerous_cmd = TRUE;
command = flag;
+ dangerous_cmd = TRUE;
target_uname = optarg;
break;
case 'N':