|
|
1f016a |
diff --git a/attrd/commands.c b/attrd/commands.c
|
|
|
1f016a |
index 12771ee..c5badc5 100644
|
|
|
1f016a |
--- a/attrd/commands.c
|
|
|
1f016a |
+++ b/attrd/commands.c
|
|
|
1f016a |
@@ -377,7 +377,17 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml)
|
|
|
1f016a |
attrd_peer_sync(peer, xml);
|
|
|
1f016a |
|
|
|
1f016a |
} else if(safe_str_eq(op, "peer-remove")) {
|
|
|
1f016a |
- attrd_peer_remove(0, host, TRUE, peer->uname);
|
|
|
1f016a |
+ int host_id = 0;
|
|
|
1f016a |
+ char *endptr = NULL;
|
|
|
1f016a |
+
|
|
|
1f016a |
+ host_id = strtol(host, &endptr, 10);
|
|
|
1f016a |
+ if (errno != 0 || endptr == host || *endptr != '\0') {
|
|
|
1f016a |
+ host_id = 0;
|
|
|
1f016a |
+ } else {
|
|
|
1f016a |
+ host = NULL;
|
|
|
1f016a |
+ }
|
|
|
1f016a |
+ attrd_peer_remove(host_id, host, TRUE, peer->uname);
|
|
|
1f016a |
+
|
|
|
1f016a |
|
|
|
1f016a |
} else if(safe_str_eq(op, "sync-response")
|
|
|
1f016a |
&& safe_str_neq(peer->uname, attrd_cluster->uname)) {
|
|
|
1f016a |
diff --git a/attrd/legacy.c b/attrd/legacy.c
|
|
|
1f016a |
index d4733ec..d7ed53e 100644
|
|
|
1f016a |
--- a/attrd/legacy.c
|
|
|
1f016a |
+++ b/attrd/legacy.c
|
|
|
1f016a |
@@ -768,6 +768,9 @@ attrd_local_callback(xmlNode * msg)
|
|
|
1f016a |
crm_notice("Sending full refresh (origin=%s)", from);
|
|
|
1f016a |
g_hash_table_foreach(attr_hash, update_for_hash_entry, NULL);
|
|
|
1f016a |
return;
|
|
|
1f016a |
+ } else if(safe_str_eq(op, "peer-remove")) {
|
|
|
1f016a |
+ /* The legacy code didn't understand this command - swallow silently */
|
|
|
1f016a |
+ return;
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
if (host != NULL && safe_str_neq(host, attrd_uname)) {
|
|
|
1f016a |
diff --git a/cib/main.c b/cib/main.c
|
|
|
1f016a |
index 00fca9b..69a957c 100644
|
|
|
1f016a |
--- a/cib/main.c
|
|
|
1f016a |
+++ b/cib/main.c
|
|
|
1f016a |
@@ -439,6 +439,11 @@ cib_peer_update_callback(enum crm_status_type type, crm_node_t * node, const voi
|
|
|
1f016a |
crm_info("No more peers");
|
|
|
1f016a |
terminate_cib(__FUNCTION__, FALSE);
|
|
|
1f016a |
}
|
|
|
1f016a |
+
|
|
|
1f016a |
+ if(type == crm_status_nstate && node->id && safe_str_eq(node->state, CRM_NODE_LOST)) {
|
|
|
1f016a |
+ /* Avoid conflicts, keep the membership list to active members */
|
|
|
1f016a |
+ reap_crm_member(node->id, NULL);
|
|
|
1f016a |
+ }
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
#if SUPPORT_HEARTBEAT
|
|
|
1f016a |
diff --git a/crmd/messages.c b/crmd/messages.c
|
|
|
1f016a |
index d38f2a3..eea4f7b 100644
|
|
|
1f016a |
--- a/crmd/messages.c
|
|
|
1f016a |
+++ b/crmd/messages.c
|
|
|
1f016a |
@@ -39,6 +39,7 @@
|
|
|
1f016a |
GListPtr fsa_message_queue = NULL;
|
|
|
1f016a |
extern void crm_shutdown(int nsig);
|
|
|
1f016a |
|
|
|
1f016a |
+extern crm_ipc_t *attrd_ipc;
|
|
|
1f016a |
void handle_response(xmlNode * stored_msg);
|
|
|
1f016a |
enum crmd_fsa_input handle_request(xmlNode * stored_msg, enum crmd_fsa_cause cause);
|
|
|
1f016a |
enum crmd_fsa_input handle_shutdown_request(xmlNode * stored_msg);
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
|
|
|
1f016a |
index 0cbfeec..5852e7e 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
|
|
|
1f016a |
@@ -19,7 +19,7 @@ ifdef::pcs[]
|
|
|
1f016a |
Started: [ pcmk-1 pcmk-2 ]
|
|
|
1f016a |
Clone Set: WebSite-clone [WebSite]
|
|
|
1f016a |
Started: [ pcmk-1 pcmk-2 ]
|
|
|
1f016a |
-# pcs resource defaults
|
|
|
1f016a |
+# pcs resource rsc defaults
|
|
|
1f016a |
resource-stickiness: 100
|
|
|
1f016a |
# pcs resource op defaults
|
|
|
1f016a |
timeout: 240s
|
|
|
1f016a |
@@ -303,7 +303,7 @@ ifdef::pcs[]
|
|
|
1f016a |
* resource-stickiness - Specify the aversion to moving resources to other machines
|
|
|
1f016a |
[source,C]
|
|
|
1f016a |
----
|
|
|
1f016a |
-# pcs resource defaults
|
|
|
1f016a |
+# pcs resource rsc defaults
|
|
|
1f016a |
resource-stickiness: 100
|
|
|
1f016a |
----
|
|
|
1f016a |
endif::[]
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
|
|
|
1f016a |
index 5943c19..714a0d3 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
|
|
|
1f016a |
@@ -222,7 +222,7 @@ ifdef::pcs[]
|
|
|
1f016a |
WebFS (ocf::heartbeat:Filesystem) Stopped
|
|
|
1f016a |
Clone Set: dlm-clone [dlm]
|
|
|
1f016a |
Stopped: [ dlm:0 dlm:1 ]
|
|
|
1f016a |
-# pcs cluster cib-push dlm_cfg
|
|
|
1f016a |
+# pcs cluster push cib dlm_cfg
|
|
|
1f016a |
CIB updated
|
|
|
1f016a |
# pcs status
|
|
|
1f016a |
|
|
|
1f016a |
@@ -695,7 +695,7 @@ shell and watching the cluster's response
|
|
|
1f016a |
ifdef::pcs[]
|
|
|
1f016a |
[source,C]
|
|
|
1f016a |
-----
|
|
|
1f016a |
-# pcs cluster cib-push active_cfg
|
|
|
1f016a |
+# pcs cluster push cib active_cfg
|
|
|
1f016a |
# pcs resource enable WebFS
|
|
|
1f016a |
-----
|
|
|
1f016a |
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
|
|
|
1f016a |
index c91647b..7da8fca 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
|
|
|
1f016a |
@@ -656,8 +656,8 @@ resource, but it is often sufficient to change the default.
|
|
|
1f016a |
ifdef::pcs[]
|
|
|
1f016a |
[source,C]
|
|
|
1f016a |
----
|
|
|
1f016a |
-# pcs resource defaults resource-stickiness=100
|
|
|
1f016a |
-# pcs resource defaults
|
|
|
1f016a |
+# pcs resource rsc defaults resource-stickiness=100
|
|
|
1f016a |
+# pcs resource rsc defaults
|
|
|
1f016a |
resource-stickiness: 100
|
|
|
1f016a |
----
|
|
|
1f016a |
endif::[]
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
|
|
|
1f016a |
index 71777db..236bb77 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
|
|
|
1f016a |
@@ -125,7 +125,7 @@ it fails, the resource agent used by Pacemaker assumes the server-status
|
|
|
1f016a |
URL is available. Look for the following in '/etc/httpd/conf/httpd.conf'
|
|
|
1f016a |
and make sure it is not disabled or commented out:
|
|
|
1f016a |
|
|
|
1f016a |
-[source,Apache Configuration]
|
|
|
1f016a |
+[source,C]
|
|
|
1f016a |
-----
|
|
|
1f016a |
<Location /server-status>
|
|
|
1f016a |
SetHandler server-status
|
|
|
1f016a |
@@ -601,7 +601,7 @@ WebSite will be forced to move to pcmk-1.
|
|
|
1f016a |
[source,C]
|
|
|
1f016a |
-----
|
|
|
1f016a |
# pcs constraint location WebSite prefers pcmk-1=INFINITY
|
|
|
1f016a |
-# pcs constraint --full
|
|
|
1f016a |
+# pcs constraint all
|
|
|
1f016a |
Location Constraints:
|
|
|
1f016a |
Resource: WebSite
|
|
|
1f016a |
Enabled on: pcmk-1 (score:INFINITY) (id:location-WebSite-pcmk-1-INFINITY)
|
|
|
1f016a |
@@ -708,7 +708,7 @@ Ordering Constraints:
|
|
|
1f016a |
start ClusterIP then start WebSite (Mandatory) (id:order-ClusterIP-WebSite-mandatory)
|
|
|
1f016a |
Colocation Constraints:
|
|
|
1f016a |
WebSite with ClusterIP (INFINITY) (id:colocation-WebSite-ClusterIP-INFINITY)
|
|
|
1f016a |
-# pcs constraint remove location-WebSite-pcmk-1-INFINITY
|
|
|
1f016a |
+# pcs constraint rm location-WebSite-pcmk-1-INFINITY
|
|
|
1f016a |
# pcs constraint
|
|
|
1f016a |
Location Constraints:
|
|
|
1f016a |
Ordering Constraints:
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
|
|
|
1f016a |
index cf47602..aa0b4b9 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
|
|
|
1f016a |
@@ -210,11 +210,23 @@ outside world.
|
|
|
1f016a |
----
|
|
|
1f016a |
# setenforce 0
|
|
|
1f016a |
# sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config
|
|
|
1f016a |
+# systemctl disable firewalld.service
|
|
|
1f016a |
+# systemctl stop firewalld.service
|
|
|
1f016a |
+----
|
|
|
1f016a |
+
|
|
|
1f016a |
+or (on older Fedora)
|
|
|
1f016a |
+
|
|
|
1f016a |
+[source,C]
|
|
|
1f016a |
+----
|
|
|
1f016a |
+# setenforce 0
|
|
|
1f016a |
+# sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config
|
|
|
1f016a |
# systemctl disable iptables.service
|
|
|
1f016a |
# rm '/etc/systemd/system/basic.target.wants/iptables.service'
|
|
|
1f016a |
# systemctl stop iptables.service
|
|
|
1f016a |
----
|
|
|
1f016a |
|
|
|
1f016a |
+
|
|
|
1f016a |
+
|
|
|
1f016a |
=== Short Node Names ===
|
|
|
1f016a |
|
|
|
1f016a |
During installation, we filled in the machine's fully qualified domain
|
|
|
1f016a |
@@ -538,7 +550,7 @@ Password:
|
|
|
1f016a |
pcmk-1: Authorized
|
|
|
1f016a |
pcmk-2: Authorized
|
|
|
1f016a |
|
|
|
1f016a |
-# pcs cluster setup --name mycluster pcmk-1 pcmk-2
|
|
|
1f016a |
+# pcs cluster setup mycluster pcmk-1 pcmk-2
|
|
|
1f016a |
pcmk-1: Succeeded
|
|
|
1f016a |
pcmk-2: Succeeded
|
|
|
1f016a |
----
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt b/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
|
|
|
1f016a |
index cc2cec6..f6b50d9 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
|
|
|
1f016a |
@@ -334,7 +334,7 @@ cib.
|
|
|
1f016a |
|
|
|
1f016a |
[source,C]
|
|
|
1f016a |
----
|
|
|
1f016a |
-# pcs cluster cib-push drbd_cfg
|
|
|
1f016a |
+# pcs cluster push cib drbd_cfg
|
|
|
1f016a |
CIB updated
|
|
|
1f016a |
|
|
|
1f016a |
# pcs status
|
|
|
1f016a |
@@ -594,7 +594,7 @@ cluster put it into effect.
|
|
|
1f016a |
ifdef::pcs[]
|
|
|
1f016a |
[source,C]
|
|
|
1f016a |
----
|
|
|
1f016a |
-# pcs cluster cib-push fs_cfg
|
|
|
1f016a |
+# pcs cluster push cib fs_cfg
|
|
|
1f016a |
CIB updated
|
|
|
1f016a |
# pcs status
|
|
|
1f016a |
Last updated: Fri Aug 10 12:47:01 2012
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt b/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
|
|
|
1f016a |
index 9518fc2..123bd4b 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
|
|
|
1f016a |
@@ -81,7 +81,7 @@ ifdef::crmsh[]
|
|
|
1f016a |
endif::[]
|
|
|
1f016a |
|
|
|
1f016a |
ifdef::pcs[]
|
|
|
1f016a |
-. Commit the new configuration. +pcs cluster cib-push stonith_cfg+
|
|
|
1f016a |
+. Commit the new configuration. +pcs cluster push cib stonith_cfg+
|
|
|
1f016a |
endif::[]
|
|
|
1f016a |
|
|
|
1f016a |
. Once the stonith resource is running, you can test it by executing:
|
|
|
1f016a |
@@ -261,7 +261,7 @@ Now push the configuration into the cluster.
|
|
|
1f016a |
ifdef::pcs[]
|
|
|
1f016a |
[source,C]
|
|
|
1f016a |
----
|
|
|
1f016a |
-# pcs cluster cib-push stonith_cfg
|
|
|
1f016a |
+# pcs cluster push cib stonith_cfg
|
|
|
1f016a |
----
|
|
|
1f016a |
endif::[]
|
|
|
1f016a |
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt b/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt
|
|
|
1f016a |
index 530e37b..c62cae4 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt
|
|
|
1f016a |
@@ -217,6 +217,13 @@ Next, check for any ERRORs during startup - there shouldn't be any.
|
|
|
1f016a |
# grep -i error /var/log/messages
|
|
|
1f016a |
----
|
|
|
1f016a |
|
|
|
1f016a |
+or (on Fedora 20)
|
|
|
1f016a |
+
|
|
|
1f016a |
+[source,C]
|
|
|
1f016a |
+----
|
|
|
1f016a |
+# journalctl | grep -i error
|
|
|
1f016a |
+----
|
|
|
1f016a |
+
|
|
|
1f016a |
Repeat these checks on the other node. The results should be the same.
|
|
|
1f016a |
|
|
|
1f016a |
endif::[]
|
|
|
1f016a |
diff --git a/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po b/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po
|
|
|
1f016a |
index f9cc723..daefc41 100644
|
|
|
1f016a |
--- a/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po
|
|
|
1f016a |
+++ b/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po
|
|
|
1f016a |
@@ -379,7 +379,7 @@ msgstr "当有半数以上的节点在线时,这个集群就认为自己拥有
|
|
|
1f016a |
#. Tag: literallayout
|
|
|
1f016a |
#, no-c-format
|
|
|
1f016a |
msgid "total_nodes < 2 * active_nodes"
|
|
|
1f016a |
-msgstr "总节点数 < 2 * 活跃节点数"
|
|
|
1f016a |
+msgstr ""
|
|
|
1f016a |
|
|
|
1f016a |
#. Tag: para
|
|
|
1f016a |
#, no-c-format
|
|
|
1f016a |
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt b/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt
|
|
|
1f016a |
index 8498ce0..b4eaf49 100644
|
|
|
1f016a |
--- a/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt
|
|
|
1f016a |
+++ b/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt
|
|
|
1f016a |
@@ -36,28 +36,33 @@ nothing able to run anywhere and selectively enable allowed nodes.
|
|
|
1f016a |
=== Options ===
|
|
|
1f016a |
|
|
|
1f016a |
.Options for Simple Location Constraints
|
|
|
1f016a |
-[width="95%",cols="2m,5<",options="header",align="center"]
|
|
|
1f016a |
+[width="95%",cols="2m,1,5<",options="header",align="center"]
|
|
|
1f016a |
|=========================================================
|
|
|
1f016a |
|
|
|
1f016a |
|Field
|
|
|
1f016a |
+|Default
|
|
|
1f016a |
|Description
|
|
|
1f016a |
|
|
|
1f016a |
|id
|
|
|
1f016a |
+|
|
|
|
1f016a |
|A unique name for the constraint
|
|
|
1f016a |
indexterm:[id,Location Constraints]
|
|
|
1f016a |
indexterm:[Constraints,Location,id]
|
|
|
1f016a |
|
|
|
1f016a |
|rsc
|
|
|
1f016a |
+|
|
|
|
1f016a |
|A resource name
|
|
|
1f016a |
indexterm:[rsc,Location Constraints]
|
|
|
1f016a |
indexterm:[Constraints,Location,rsc]
|
|
|
1f016a |
|
|
|
1f016a |
|node
|
|
|
1f016a |
+|
|
|
|
1f016a |
|A node's name
|
|
|
1f016a |
indexterm:[node,Location Constraints]
|
|
|
1f016a |
indexterm:[Constraints,Location,node]
|
|
|
1f016a |
|
|
|
1f016a |
|score
|
|
|
1f016a |
+|
|
|
|
1f016a |
|Positive values indicate the resource should run on this
|
|
|
1f016a |
node. Negative values indicate the resource should not run on this
|
|
|
1f016a |
node.
|
|
|
1f016a |
@@ -67,6 +72,30 @@ indexterm:[Constraints,Location,node]
|
|
|
1f016a |
indexterm:[score,Location Constraints]
|
|
|
1f016a |
indexterm:[Constraints,Location,score]
|
|
|
1f016a |
|
|
|
1f016a |
+|resource-discovery
|
|
|
1f016a |
+|+always+
|
|
|
1f016a |
+|Indicates whether or not Pacemaker should perform resource discovery
|
|
|
1f016a |
+on this node for the specified resource. Limiting resource discovery to
|
|
|
1f016a |
+a subset of nodes the resource is physically capable of running on
|
|
|
1f016a |
+can significantly boost performance when a large set of nodes are preset.
|
|
|
1f016a |
+When pacemaker_remote is in use to expand the node count into the 100s of
|
|
|
1f016a |
+nodes range, this option should be considered.
|
|
|
1f016a |
+
|
|
|
1f016a |
+* 'always' - Always perform resource discovery for the specified resource on this node.
|
|
|
1f016a |
+
|
|
|
1f016a |
+* 'never' - Never perform resource discovery for the specified resource on this node.
|
|
|
1f016a |
+ This option should generally be used with a -INFINITY score. Although that is not strictly
|
|
|
1f016a |
+ required.
|
|
|
1f016a |
+
|
|
|
1f016a |
+* 'exclusive' - Only perform resource discovery for the specified resource on this node. Multiple
|
|
|
1f016a |
+ location constraints using 'exclusive' discovery for the same resource across different nodes
|
|
|
1f016a |
+ creates a subset of nodes resource-discovery is exclusive to. If a resource is marked
|
|
|
1f016a |
+ for 'exclusive' discovery on one or more nodes, that resource is only allowed to be placed
|
|
|
1f016a |
+ within that subset of nodes.
|
|
|
1f016a |
+
|
|
|
1f016a |
+indexterm:[Resource Discovery,Location Constraints]
|
|
|
1f016a |
+indexterm:[Constraints,Location,Resource Discovery]
|
|
|
1f016a |
+
|
|
|
1f016a |
|=========================================================
|
|
|
1f016a |
|
|
|
1f016a |
=== Asymmetrical "Opt-In" Clusters ===
|
|
|
1f016a |
diff --git a/fencing/commands.c b/fencing/commands.c
|
|
|
1f016a |
index 577ea95..c193a9d 100644
|
|
|
1f016a |
--- a/fencing/commands.c
|
|
|
1f016a |
+++ b/fencing/commands.c
|
|
|
1f016a |
@@ -2109,6 +2109,14 @@ handle_request(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * req
|
|
|
1f016a |
free_async_command(cmd);
|
|
|
1f016a |
free_xml(reply);
|
|
|
1f016a |
|
|
|
1f016a |
+ } else if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
|
|
|
1f016a |
+ int id = 0;
|
|
|
1f016a |
+ const char *name = NULL;
|
|
|
1f016a |
+
|
|
|
1f016a |
+ crm_element_value_int(request, XML_ATTR_ID, &id);
|
|
|
1f016a |
+ name = crm_element_value(request, XML_ATTR_UNAME);
|
|
|
1f016a |
+ reap_crm_member(id, name);
|
|
|
1f016a |
+
|
|
|
1f016a |
} else {
|
|
|
1f016a |
crm_err("Unknown %s from %s", op, client ? client->name : remote_peer);
|
|
|
1f016a |
crm_log_xml_warn(request, "UnknownOp");
|
|
|
1f016a |
diff --git a/fencing/main.c b/fencing/main.c
|
|
|
1f016a |
index 2694452..70b5bde 100644
|
|
|
1f016a |
--- a/fencing/main.c
|
|
|
1f016a |
+++ b/fencing/main.c
|
|
|
1f016a |
@@ -97,6 +97,7 @@ st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
|
|
|
1f016a |
int call_options = 0;
|
|
|
1f016a |
xmlNode *request = NULL;
|
|
|
1f016a |
crm_client_t *c = crm_client_get(qbc);
|
|
|
1f016a |
+ const char *op = NULL;
|
|
|
1f016a |
|
|
|
1f016a |
if (c == NULL) {
|
|
|
1f016a |
crm_info("Invalid client: %p", qbc);
|
|
|
1f016a |
@@ -109,6 +110,20 @@ st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
|
|
|
1f016a |
return 0;
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
+
|
|
|
1f016a |
+ op = crm_element_value(request, F_CRM_TASK);
|
|
|
1f016a |
+ if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
|
|
|
1f016a |
+ crm_xml_add(request, F_TYPE, T_STONITH_NG);
|
|
|
1f016a |
+ crm_xml_add(request, F_STONITH_OPERATION, op);
|
|
|
1f016a |
+ crm_xml_add(request, F_STONITH_CLIENTID, c->id);
|
|
|
1f016a |
+ crm_xml_add(request, F_STONITH_CLIENTNAME, crm_client_name(c));
|
|
|
1f016a |
+ crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
|
|
|
1f016a |
+
|
|
|
1f016a |
+ send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
|
|
|
1f016a |
+ free_xml(request);
|
|
|
1f016a |
+ return 0;
|
|
|
1f016a |
+ }
|
|
|
1f016a |
+
|
|
|
1f016a |
if (c->name == NULL) {
|
|
|
1f016a |
const char *value = crm_element_value(request, F_STONITH_CLIENTNAME);
|
|
|
1f016a |
|
|
|
1f016a |
@@ -1099,6 +1114,7 @@ stonith_cleanup(void)
|
|
|
1f016a |
static struct crm_option long_options[] = {
|
|
|
1f016a |
{"stand-alone", 0, 0, 's'},
|
|
|
1f016a |
{"stand-alone-w-cpg", 0, 0, 'c'},
|
|
|
1f016a |
+ {"logfile", 1, 0, 'l'},
|
|
|
1f016a |
{"verbose", 0, 0, 'V'},
|
|
|
1f016a |
{"version", 0, 0, '$'},
|
|
|
1f016a |
{"help", 0, 0, '?'},
|
|
|
1f016a |
@@ -1200,6 +1216,9 @@ main(int argc, char **argv)
|
|
|
1f016a |
case 'V':
|
|
|
1f016a |
crm_bump_log_level(argc, argv);
|
|
|
1f016a |
break;
|
|
|
1f016a |
+ case 'l':
|
|
|
1f016a |
+ crm_add_logfile(optarg);
|
|
|
1f016a |
+ break;
|
|
|
1f016a |
case 's':
|
|
|
1f016a |
stand_alone = TRUE;
|
|
|
1f016a |
break;
|
|
|
1f016a |
diff --git a/fencing/regression.py.in b/fencing/regression.py.in
|
|
|
1f016a |
index c4cb2d8..fe6d418 100644
|
|
|
1f016a |
--- a/fencing/regression.py.in
|
|
|
1f016a |
+++ b/fencing/regression.py.in
|
|
|
1f016a |
@@ -82,24 +82,34 @@ class Test:
|
|
|
1f016a |
test.wait()
|
|
|
1f016a |
|
|
|
1f016a |
if self.verbose:
|
|
|
1f016a |
+ self.stonith_options = self.stonith_options + " -V"
|
|
|
1f016a |
print "Starting stonithd with %s" % self.stonith_options
|
|
|
1f016a |
|
|
|
1f016a |
+ if os.path.exists("/tmp/stonith-regression.log"):
|
|
|
1f016a |
+ os.remove('/tmp/stonith-regression.log')
|
|
|
1f016a |
+
|
|
|
1f016a |
self.stonith_process = subprocess.Popen(
|
|
|
1f016a |
- shlex.split("@CRM_DAEMON_DIR@/stonithd %s -V" % self.stonith_options),
|
|
|
1f016a |
- stdout=subprocess.PIPE,
|
|
|
1f016a |
- stderr=subprocess.PIPE)
|
|
|
1f016a |
+ shlex.split("@CRM_DAEMON_DIR@/stonithd %s -l /tmp/stonith-regression.log" % self.stonith_options))
|
|
|
1f016a |
|
|
|
1f016a |
time.sleep(1)
|
|
|
1f016a |
|
|
|
1f016a |
def clean_environment(self):
|
|
|
1f016a |
if self.stonith_process:
|
|
|
1f016a |
self.stonith_process.terminate()
|
|
|
1f016a |
+ self.stonith_process.wait()
|
|
|
1f016a |
|
|
|
1f016a |
- self.stonith_output = self.stonith_process.communicate()[1]
|
|
|
1f016a |
+ self.stonith_output = ""
|
|
|
1f016a |
self.stonith_process = None
|
|
|
1f016a |
|
|
|
1f016a |
+ f = open('/tmp/stonith-regression.log', 'r')
|
|
|
1f016a |
+ for line in f.readlines():
|
|
|
1f016a |
+ self.stonith_output = self.stonith_output + line
|
|
|
1f016a |
+
|
|
|
1f016a |
if self.verbose:
|
|
|
1f016a |
+ print "Daemon Output Start"
|
|
|
1f016a |
print self.stonith_output
|
|
|
1f016a |
+ print "Daemon Output End"
|
|
|
1f016a |
+ os.remove('/tmp/stonith-regression.log')
|
|
|
1f016a |
|
|
|
1f016a |
def add_stonith_log_pattern(self, pattern):
|
|
|
1f016a |
self.stonith_patterns.append(pattern)
|
|
|
1f016a |
@@ -953,7 +963,7 @@ if __name__ == "__main__":
|
|
|
1f016a |
self.stop_corosync()
|
|
|
1f016a |
|
|
|
1f016a |
if self.verbose and os.path.exists('/var/log/corosync.log'):
|
|
|
1f016a |
- print "Daemon output"
|
|
|
1f016a |
+ print "Corosync output"
|
|
|
1f016a |
f = open('/var/log/corosync.log', 'r')
|
|
|
1f016a |
for line in f.readlines():
|
|
|
1f016a |
print line.strip()
|
|
|
1f016a |
diff --git a/lib/cib/cib_remote.c b/lib/cib/cib_remote.c
|
|
|
1f016a |
index d9dde9b..9b98892 100644
|
|
|
1f016a |
--- a/lib/cib/cib_remote.c
|
|
|
1f016a |
+++ b/lib/cib/cib_remote.c
|
|
|
1f016a |
@@ -226,6 +226,8 @@ cib_tls_signon(cib_t * cib, crm_remote_t * connection, gboolean event_channel)
|
|
|
1f016a |
return -ENOTCONN;
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
+ connection->tcp_socket = sock;
|
|
|
1f016a |
+
|
|
|
1f016a |
if (private->encrypted) {
|
|
|
1f016a |
/* initialize GnuTls lib */
|
|
|
1f016a |
#ifdef HAVE_GNUTLS_GNUTLS_H
|
|
|
1f016a |
@@ -250,8 +252,6 @@ cib_tls_signon(cib_t * cib, crm_remote_t * connection, gboolean event_channel)
|
|
|
1f016a |
#else
|
|
|
1f016a |
return -EPROTONOSUPPORT;
|
|
|
1f016a |
#endif
|
|
|
1f016a |
- } else {
|
|
|
1f016a |
- connection->tcp_socket = sock;
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
/* login to server */
|
|
|
1f016a |
diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c
|
|
|
1f016a |
index 24700e5..70a0321 100644
|
|
|
1f016a |
--- a/lib/cluster/membership.c
|
|
|
1f016a |
+++ b/lib/cluster/membership.c
|
|
|
1f016a |
@@ -389,9 +389,15 @@ crm_find_peer(unsigned int id, const char *uname)
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
} else if(uname && by_id->uname) {
|
|
|
1f016a |
- crm_dump_peer_hash(LOG_INFO, __FUNCTION__);
|
|
|
1f016a |
- crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u %s", by_id->uname, by_name->uname, id, uname);
|
|
|
1f016a |
- crm_abort(__FILE__, __FUNCTION__, __LINE__, "member weirdness", TRUE, TRUE);
|
|
|
1f016a |
+ if(safe_str_eq(uname, by_id->uname)) {
|
|
|
1f016a |
+ crm_notice("Node '%s' has changed its ID from %u to %u", by_id->uname, by_name->id, by_id->id);
|
|
|
1f016a |
+ g_hash_table_foreach_remove(crm_peer_cache, crm_hash_find_by_data, by_name);
|
|
|
1f016a |
+
|
|
|
1f016a |
+ } else {
|
|
|
1f016a |
+ crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u %s", by_id->uname, by_name->uname, id, uname);
|
|
|
1f016a |
+ crm_dump_peer_hash(LOG_INFO, __FUNCTION__);
|
|
|
1f016a |
+ crm_abort(__FILE__, __FUNCTION__, __LINE__, "member weirdness", TRUE, TRUE);
|
|
|
1f016a |
+ }
|
|
|
1f016a |
|
|
|
1f016a |
} else if(id && by_name->id) {
|
|
|
1f016a |
crm_warn("Node %u and %u share the same name: '%s'", by_id->id, by_name->id, uname);
|
|
|
1f016a |
diff --git a/lib/common/remote.c b/lib/common/remote.c
|
|
|
1f016a |
index e2492b9..f11ebcd 100644
|
|
|
1f016a |
--- a/lib/common/remote.c
|
|
|
1f016a |
+++ b/lib/common/remote.c
|
|
|
1f016a |
@@ -308,13 +308,16 @@ crm_remote_sendv(crm_remote_t * remote, struct iovec * iov, int iovs)
|
|
|
1f016a |
int rc = -ESOCKTNOSUPPORT;
|
|
|
1f016a |
|
|
|
1f016a |
for(; lpc < iovs; lpc++) {
|
|
|
1f016a |
- if (remote->tcp_socket) {
|
|
|
1f016a |
- rc = crm_send_plaintext(remote->tcp_socket, iov[lpc].iov_base, iov[lpc].iov_len);
|
|
|
1f016a |
-#ifdef HAVE_GNUTLS_GNUTLS_H
|
|
|
1f016a |
|
|
|
1f016a |
- } else if (remote->tls_session) {
|
|
|
1f016a |
+#ifdef HAVE_GNUTLS_GNUTLS_H
|
|
|
1f016a |
+ if (remote->tls_session) {
|
|
|
1f016a |
rc = crm_send_tls(remote->tls_session, iov[lpc].iov_base, iov[lpc].iov_len);
|
|
|
1f016a |
+ } else if (remote->tcp_socket) {
|
|
|
1f016a |
+#else
|
|
|
1f016a |
+ if (remote->tcp_socket) {
|
|
|
1f016a |
#endif
|
|
|
1f016a |
+ rc = crm_send_plaintext(remote->tcp_socket, iov[lpc].iov_base, iov[lpc].iov_len);
|
|
|
1f016a |
+
|
|
|
1f016a |
} else {
|
|
|
1f016a |
crm_err("Unsupported connection type");
|
|
|
1f016a |
}
|
|
|
1f016a |
@@ -448,14 +451,16 @@ crm_remote_ready(crm_remote_t * remote, int timeout /* ms */ )
|
|
|
1f016a |
int rc = 0;
|
|
|
1f016a |
time_t start;
|
|
|
1f016a |
|
|
|
1f016a |
- if (remote->tcp_socket) {
|
|
|
1f016a |
- sock = remote->tcp_socket;
|
|
|
1f016a |
#ifdef HAVE_GNUTLS_GNUTLS_H
|
|
|
1f016a |
- } else if (remote->tls_session) {
|
|
|
1f016a |
+ if (remote->tls_session) {
|
|
|
1f016a |
void *sock_ptr = gnutls_transport_get_ptr(*remote->tls_session);
|
|
|
1f016a |
|
|
|
1f016a |
sock = GPOINTER_TO_INT(sock_ptr);
|
|
|
1f016a |
+ } else if (remote->tcp_socket) {
|
|
|
1f016a |
+#else
|
|
|
1f016a |
+ if (remote->tcp_socket) {
|
|
|
1f016a |
#endif
|
|
|
1f016a |
+ sock = remote->tcp_socket;
|
|
|
1f016a |
} else {
|
|
|
1f016a |
crm_err("Unsupported connection type");
|
|
|
1f016a |
}
|
|
|
1f016a |
@@ -519,17 +524,8 @@ crm_remote_recv_once(crm_remote_t * remote)
|
|
|
1f016a |
CRM_ASSERT(remote->buffer != NULL);
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
- if (remote->tcp_socket) {
|
|
|
1f016a |
- errno = 0;
|
|
|
1f016a |
- rc = read(remote->tcp_socket,
|
|
|
1f016a |
- remote->buffer + remote->buffer_offset,
|
|
|
1f016a |
- remote->buffer_size - remote->buffer_offset);
|
|
|
1f016a |
- if(rc < 0) {
|
|
|
1f016a |
- rc = -errno;
|
|
|
1f016a |
- }
|
|
|
1f016a |
-
|
|
|
1f016a |
#ifdef HAVE_GNUTLS_GNUTLS_H
|
|
|
1f016a |
- } else if (remote->tls_session) {
|
|
|
1f016a |
+ if (remote->tls_session) {
|
|
|
1f016a |
rc = gnutls_record_recv(*(remote->tls_session),
|
|
|
1f016a |
remote->buffer + remote->buffer_offset,
|
|
|
1f016a |
remote->buffer_size - remote->buffer_offset);
|
|
|
1f016a |
@@ -541,7 +537,18 @@ crm_remote_recv_once(crm_remote_t * remote)
|
|
|
1f016a |
crm_debug("TLS receive failed: %s (%d)", gnutls_strerror(rc), rc);
|
|
|
1f016a |
rc = -pcmk_err_generic;
|
|
|
1f016a |
}
|
|
|
1f016a |
+ } else if (remote->tcp_socket) {
|
|
|
1f016a |
+#else
|
|
|
1f016a |
+ if (remote->tcp_socket) {
|
|
|
1f016a |
#endif
|
|
|
1f016a |
+ errno = 0;
|
|
|
1f016a |
+ rc = read(remote->tcp_socket,
|
|
|
1f016a |
+ remote->buffer + remote->buffer_offset,
|
|
|
1f016a |
+ remote->buffer_size - remote->buffer_offset);
|
|
|
1f016a |
+ if(rc < 0) {
|
|
|
1f016a |
+ rc = -errno;
|
|
|
1f016a |
+ }
|
|
|
1f016a |
+
|
|
|
1f016a |
} else {
|
|
|
1f016a |
crm_err("Unsupported connection type");
|
|
|
1f016a |
return -ESOCKTNOSUPPORT;
|
|
|
1f016a |
diff --git a/lib/common/xml.c b/lib/common/xml.c
|
|
|
1f016a |
index 58d0a00..e63a582 100644
|
|
|
1f016a |
--- a/lib/common/xml.c
|
|
|
1f016a |
+++ b/lib/common/xml.c
|
|
|
1f016a |
@@ -1281,7 +1281,10 @@ __xml_build_changes(xmlNode * xml, xmlNode *patchset)
|
|
|
1f016a |
for (pIter = crm_first_attr(xml); pIter != NULL; pIter = pIter->next) {
|
|
|
1f016a |
const char *value = crm_element_value(xml, (const char *)pIter->name);
|
|
|
1f016a |
|
|
|
1f016a |
- crm_xml_add(result, (const char *)pIter->name, value);
|
|
|
1f016a |
+ p = pIter->_private;
|
|
|
1f016a |
+ if (is_not_set(p->flags, xpf_deleted)) {
|
|
|
1f016a |
+ crm_xml_add(result, (const char *)pIter->name, value);
|
|
|
1f016a |
+ }
|
|
|
1f016a |
}
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
@@ -5715,7 +5718,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
} else {
|
|
|
1f016a |
- crm_notice("Upgrading %s-style configuration to %s with %s",
|
|
|
1f016a |
+ crm_debug("Upgrading %s-style configuration to %s with %s",
|
|
|
1f016a |
known_schemas[lpc].name, known_schemas[next].name,
|
|
|
1f016a |
known_schemas[lpc].transform ? known_schemas[lpc].transform : "no-op");
|
|
|
1f016a |
|
|
|
1f016a |
@@ -5746,7 +5749,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
if (*best > match) {
|
|
|
1f016a |
- crm_notice("%s the configuration from %s to %s",
|
|
|
1f016a |
+ crm_info("%s the configuration from %s to %s",
|
|
|
1f016a |
transform?"Transformed":"Upgraded",
|
|
|
1f016a |
value ? value : "<none>", known_schemas[*best].name);
|
|
|
1f016a |
crm_xml_add(xml, XML_ATTR_VALIDATION, known_schemas[*best].name);
|
|
|
1f016a |
diff --git a/lib/services/services.c b/lib/services/services.c
|
|
|
1f016a |
index 582fbe1..c7b6c89 100644
|
|
|
1f016a |
--- a/lib/services/services.c
|
|
|
1f016a |
+++ b/lib/services/services.c
|
|
|
1f016a |
@@ -305,6 +305,7 @@ services_action_create_generic(const char *exec, const char *args[])
|
|
|
1f016a |
void
|
|
|
1f016a |
services_action_cleanup(svc_action_t * op)
|
|
|
1f016a |
{
|
|
|
1f016a |
+#if SUPPORT_DBUS
|
|
|
1f016a |
if(op->opaque->timerid != 0) {
|
|
|
1f016a |
crm_trace("Removing timer for call %s to %s", op->action, op->rsc);
|
|
|
1f016a |
g_source_remove(op->opaque->timerid);
|
|
|
1f016a |
@@ -330,6 +331,7 @@ services_action_cleanup(svc_action_t * op)
|
|
|
1f016a |
mainloop_del_fd(op->opaque->stdout_gsource);
|
|
|
1f016a |
op->opaque->stdout_gsource = NULL;
|
|
|
1f016a |
}
|
|
|
1f016a |
+#endif
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
void
|
|
|
1f016a |
diff --git a/lrmd/regression.py.in b/lrmd/regression.py.in
|
|
|
1f016a |
index 649c984..50e975e 100755
|
|
|
1f016a |
--- a/lrmd/regression.py.in
|
|
|
1f016a |
+++ b/lrmd/regression.py.in
|
|
|
1f016a |
@@ -435,14 +435,13 @@ if __name__ == "__main__":
|
|
|
1f016a |
os.system("cp %s/extra/resources/%s @OCF_RA_DIR@/pacemaker/%s" % (build_dir, ra, ra))
|
|
|
1f016a |
os.system("chmod a+x @OCF_RA_DIR@/pacemaker/%s" % (ra))
|
|
|
1f016a |
|
|
|
1f016a |
- else:
|
|
|
1f016a |
- # Assume it's installed
|
|
|
1f016a |
- print "Using @datadir@/@PACKAGE@/tests/cts/LSBDummy"
|
|
|
1f016a |
- os.system("cp @datadir@/@PACKAGE@/tests/cts/LSBDummy /etc/init.d/LSBDummy")
|
|
|
1f016a |
-
|
|
|
1f016a |
- os.system("chmod a+x /etc/init.d/LSBDummy")
|
|
|
1f016a |
- os.system("ls -al /etc/init.d/LSBDummy")
|
|
|
1f016a |
+ else:
|
|
|
1f016a |
+ # Assume it's installed
|
|
|
1f016a |
+ print "Using @datadir@/@PACKAGE@/tests/cts/LSBDummy"
|
|
|
1f016a |
+ os.system("cp @datadir@/@PACKAGE@/tests/cts/LSBDummy /etc/init.d/LSBDummy")
|
|
|
1f016a |
|
|
|
1f016a |
+ os.system("chmod a+x /etc/init.d/LSBDummy")
|
|
|
1f016a |
+ os.system("ls -al /etc/init.d/LSBDummy")
|
|
|
1f016a |
os.system("mkdir -p @CRM_CORE_DIR@/root")
|
|
|
1f016a |
|
|
|
1f016a |
if os.path.exists("/bin/systemctl"):
|
|
|
1f016a |
diff --git a/pengine/constraints.c b/pengine/constraints.c
|
|
|
1f016a |
index 88e382b..a2ce9c4 100644
|
|
|
1f016a |
--- a/pengine/constraints.c
|
|
|
1f016a |
+++ b/pengine/constraints.c
|
|
|
1f016a |
@@ -52,6 +52,8 @@ enum pe_order_kind {
|
|
|
1f016a |
enum pe_ordering get_flags(const char *id, enum pe_order_kind kind,
|
|
|
1f016a |
const char *action_first, const char *action_then, gboolean invert);
|
|
|
1f016a |
enum pe_ordering get_asymmetrical_flags(enum pe_order_kind kind);
|
|
|
1f016a |
+static rsc_to_node_t *generate_location_rule(resource_t * rsc, xmlNode * rule_xml,
|
|
|
1f016a |
+ const char *discovery, pe_working_set_t * data_set);
|
|
|
1f016a |
|
|
|
1f016a |
gboolean
|
|
|
1f016a |
unpack_constraints(xmlNode * xml_constraints, pe_working_set_t * data_set)
|
|
|
1f016a |
@@ -687,7 +689,7 @@ unpack_rsc_location(xmlNode * xml_obj, resource_t * rsc_lh, const char * role,
|
|
|
1f016a |
if (crm_str_eq((const char *)rule_xml->name, XML_TAG_RULE, TRUE)) {
|
|
|
1f016a |
empty = FALSE;
|
|
|
1f016a |
crm_trace("Unpacking %s/%s", id, ID(rule_xml));
|
|
|
1f016a |
- generate_location_rule(rsc_lh, rule_xml, data_set);
|
|
|
1f016a |
+ generate_location_rule(rsc_lh, rule_xml, discovery, data_set);
|
|
|
1f016a |
}
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
@@ -917,8 +919,8 @@ get_node_score(const char *rule, const char *score, gboolean raw, node_t * node)
|
|
|
1f016a |
return score_f;
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
-rsc_to_node_t *
|
|
|
1f016a |
-generate_location_rule(resource_t * rsc, xmlNode * rule_xml, pe_working_set_t * data_set)
|
|
|
1f016a |
+static rsc_to_node_t *
|
|
|
1f016a |
+generate_location_rule(resource_t * rsc, xmlNode * rule_xml, const char *discovery, pe_working_set_t * data_set)
|
|
|
1f016a |
{
|
|
|
1f016a |
const char *rule_id = NULL;
|
|
|
1f016a |
const char *score = NULL;
|
|
|
1f016a |
@@ -960,7 +962,7 @@ generate_location_rule(resource_t * rsc, xmlNode * rule_xml, pe_working_set_t *
|
|
|
1f016a |
do_and = FALSE;
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
- location_rule = rsc2node_new(rule_id, rsc, 0, NULL, NULL, data_set);
|
|
|
1f016a |
+ location_rule = rsc2node_new(rule_id, rsc, 0, discovery, NULL, data_set);
|
|
|
1f016a |
|
|
|
1f016a |
if (location_rule == NULL) {
|
|
|
1f016a |
return NULL;
|
|
|
1f016a |
diff --git a/pengine/test10/resource-discovery.xml b/pengine/test10/resource-discovery.xml
|
|
|
1f016a |
index 8b517df..1692cdb 100644
|
|
|
1f016a |
--- a/pengine/test10/resource-discovery.xml
|
|
|
1f016a |
+++ b/pengine/test10/resource-discovery.xml
|
|
|
1f016a |
@@ -97,7 +97,13 @@
|
|
|
1f016a |
<rsc_location id="location-FAKE1-remote1" node="remote1" resource-discovery="never" rsc="FAKE1" score="-INFINITY"/>
|
|
|
1f016a |
<rsc_location id="location-FAKE1-18node1" node="18node1" resource-discovery="never" rsc="FAKE1" score="-INFINITY"/>
|
|
|
1f016a |
<rsc_location id="location-FAKE1-18node2" node="18node2" resource-discovery="never" rsc="FAKE1" score="-INFINITY"/>
|
|
|
1f016a |
- <rsc_location id="location-FAKE1-18node3" node="18node3" resource-discovery="never" rsc="FAKE1" score="-INFINITY"/>
|
|
|
1f016a |
+
|
|
|
1f016a |
+ <rsc_location id="location-FAKE1-18node3" resource-discovery="never" rsc="FAKE1" >
|
|
|
1f016a |
+ <rule score="-INFINITY" id="vlan003-on-cluster-nodes-rule">
|
|
|
1f016a |
+ <expression attribute="#uname" operation="eq" value="18node3" id="vlan003-on-cluster-nodes-rule-expression"/>
|
|
|
1f016a |
+ </rule>
|
|
|
1f016a |
+ </rsc_location>
|
|
|
1f016a |
+
|
|
|
1f016a |
<rsc_location id="location-FAKE2-18node1" node="18node1" resource-discovery="exclusive" rsc="FAKE2" score="10"/>
|
|
|
1f016a |
<rsc_location id="location-FAKE2-18node2" node="18node2" resource-discovery="exclusive" rsc="FAKE2" score="100"/>
|
|
|
1f016a |
<rsc_location id="location-FAKE3-18node3--INFINITY" node="18node3" resource-discovery="exclusive" rsc="FAKE3" score="INFINITY"/>
|
|
|
1f016a |
diff --git a/pengine/utils.h b/pengine/utils.h
|
|
|
1f016a |
index 5142e68..270d32a 100644
|
|
|
1f016a |
--- a/pengine/utils.h
|
|
|
1f016a |
+++ b/pengine/utils.h
|
|
|
1f016a |
@@ -39,9 +39,6 @@ extern gboolean rsc_ticket_new(const char *id, resource_t * rsc_lh, ticket_t * t
|
|
|
1f016a |
const char *state_lh, const char *loss_policy,
|
|
|
1f016a |
pe_working_set_t * data_set);
|
|
|
1f016a |
|
|
|
1f016a |
-extern rsc_to_node_t *generate_location_rule(resource_t * rsc, xmlNode * location_rule,
|
|
|
1f016a |
- pe_working_set_t * data_set);
|
|
|
1f016a |
-
|
|
|
1f016a |
extern gint sort_node_weight(gconstpointer a, gconstpointer b, gpointer data_set);
|
|
|
1f016a |
|
|
|
1f016a |
extern gboolean can_run_resources(const node_t * node);
|
|
|
1f016a |
diff --git a/tools/crm_node.c b/tools/crm_node.c
|
|
|
1f016a |
index 98847f3..5932f98 100644
|
|
|
1f016a |
--- a/tools/crm_node.c
|
|
|
1f016a |
+++ b/tools/crm_node.c
|
|
|
1f016a |
@@ -34,6 +34,7 @@
|
|
|
1f016a |
#include <crm/common/mainloop.h>
|
|
|
1f016a |
#include <crm/msg_xml.h>
|
|
|
1f016a |
#include <crm/cib.h>
|
|
|
1f016a |
+#include <crm/attrd.h>
|
|
|
1f016a |
|
|
|
1f016a |
int command = 0;
|
|
|
1f016a |
int ccm_fd = 0;
|
|
|
1f016a |
@@ -92,7 +93,7 @@ cib_remove_node(uint32_t id, const char *name)
|
|
|
1f016a |
crm_trace("Removing %s from the CIB", name);
|
|
|
1f016a |
|
|
|
1f016a |
/* TODO: Use 'id' instead */
|
|
|
1f016a |
- if(name == NULL) {
|
|
|
1f016a |
+ if(name == NULL && id == 0) {
|
|
|
1f016a |
return -ENOTUNIQ;
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
@@ -101,17 +102,24 @@ cib_remove_node(uint32_t id, const char *name)
|
|
|
1f016a |
|
|
|
1f016a |
crm_xml_add(node, XML_ATTR_UNAME, name);
|
|
|
1f016a |
crm_xml_add(node_state, XML_ATTR_UNAME, name);
|
|
|
1f016a |
+ if(id) {
|
|
|
1f016a |
+ char buffer[64];
|
|
|
1f016a |
+ if(snprintf(buffer, 63, "%u", id) > 0) {
|
|
|
1f016a |
+ crm_xml_add(node, XML_ATTR_ID, buffer);
|
|
|
1f016a |
+ crm_xml_add(node_state, XML_ATTR_ID, buffer);
|
|
|
1f016a |
+ }
|
|
|
1f016a |
+ }
|
|
|
1f016a |
|
|
|
1f016a |
cib = cib_new();
|
|
|
1f016a |
cib->cmds->signon(cib, crm_system_name, cib_command);
|
|
|
1f016a |
|
|
|
1f016a |
rc = cib->cmds->delete(cib, XML_CIB_TAG_NODES, node, cib_sync_call);
|
|
|
1f016a |
if (rc != pcmk_ok) {
|
|
|
1f016a |
- printf("Could not remove %s from " XML_CIB_TAG_NODES ": %s", name, pcmk_strerror(rc));
|
|
|
1f016a |
+ printf("Could not remove %s/%u from " XML_CIB_TAG_NODES ": %s", name, id, pcmk_strerror(rc));
|
|
|
1f016a |
}
|
|
|
1f016a |
rc = cib->cmds->delete(cib, XML_CIB_TAG_STATUS, node_state, cib_sync_call);
|
|
|
1f016a |
if (rc != pcmk_ok) {
|
|
|
1f016a |
- printf("Could not remove %s from " XML_CIB_TAG_STATUS ": %s", name, pcmk_strerror(rc));
|
|
|
1f016a |
+ printf("Could not remove %s/%u from " XML_CIB_TAG_STATUS ": %s", name, id, pcmk_strerror(rc));
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
cib->cmds->signoff(cib);
|
|
|
1f016a |
@@ -156,6 +164,7 @@ int tools_remove_node_cache(const char *node, const char *target)
|
|
|
1f016a |
}
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
+
|
|
|
1f016a |
errno = 0;
|
|
|
1f016a |
n = strtol(node, &endptr, 10);
|
|
|
1f016a |
if (errno != 0 || endptr == node || *endptr != '\0') {
|
|
|
1f016a |
@@ -166,21 +175,39 @@ int tools_remove_node_cache(const char *node, const char *target)
|
|
|
1f016a |
name = get_node_name(n);
|
|
|
1f016a |
}
|
|
|
1f016a |
|
|
|
1f016a |
- crm_trace("Removing %s aka. %s from the membership cache", name, node);
|
|
|
1f016a |
+ crm_trace("Removing %s aka. %s (%u) from the membership cache", name, node, n);
|
|
|
1f016a |
|
|
|
1f016a |
- cmd = create_request(CRM_OP_RM_NODE_CACHE,
|
|
|
1f016a |
- NULL, NULL, target, "crm_node", admin_uuid);
|
|
|
1f016a |
+ if(safe_str_eq(target, T_ATTRD)) {
|
|
|
1f016a |
+ cmd = create_xml_node(NULL, __FUNCTION__);
|
|
|
1f016a |
|
|
|
1f016a |
- if (n) {
|
|
|
1f016a |
- char buffer[64];
|
|
|
1f016a |
+ crm_xml_add(cmd, F_TYPE, T_ATTRD);
|
|
|
1f016a |
+ crm_xml_add(cmd, F_ORIG, crm_system_name);
|
|
|
1f016a |
|
|
|
1f016a |
- if(snprintf(buffer, 63, "%u", n) > 0) {
|
|
|
1f016a |
- crm_xml_add(cmd, XML_ATTR_ID, buffer);
|
|
|
1f016a |
+ crm_xml_add(cmd, F_ATTRD_TASK, "peer-remove");
|
|
|
1f016a |
+ crm_xml_add(cmd, F_ATTRD_HOST, name);
|
|
|
1f016a |
+
|
|
|
1f016a |
+ if (n) {
|
|
|
1f016a |
+ char buffer[64];
|
|
|
1f016a |
+ if(snprintf(buffer, 63, "%u", n) > 0) {
|
|
|
1f016a |
+ crm_xml_add(cmd, F_ATTRD_HOST_ID, buffer);
|
|
|
1f016a |
+ }
|
|
|
1f016a |
+ }
|
|
|
1f016a |
+
|
|
|
1f016a |
+ } else {
|
|
|
1f016a |
+ cmd = create_request(CRM_OP_RM_NODE_CACHE,
|
|
|
1f016a |
+ NULL, NULL, target, crm_system_name, admin_uuid);
|
|
|
1f016a |
+ if (n) {
|
|
|
1f016a |
+ char buffer[64];
|
|
|
1f016a |
+ if(snprintf(buffer, 63, "%u", n) > 0) {
|
|
|
1f016a |
+ crm_xml_add(cmd, XML_ATTR_ID, buffer);
|
|
|
1f016a |
+ }
|
|
|
1f016a |
}
|
|
|
1f016a |
+ crm_xml_add(cmd, XML_ATTR_UNAME, name);
|
|
|
1f016a |
}
|
|
|
1f016a |
- crm_xml_add(cmd, XML_ATTR_UNAME, name);
|
|
|
1f016a |
|
|
|
1f016a |
rc = crm_ipc_send(conn, cmd, 0, 0, NULL);
|
|
|
1f016a |
+ crm_debug("%s peer cache cleanup for %s (%u): %d", target, name, n, rc);
|
|
|
1f016a |
+
|
|
|
1f016a |
if (rc > 0) {
|
|
|
1f016a |
rc = cib_remove_node(n, name);
|
|
|
1f016a |
}
|
|
|
1f016a |
@@ -189,8 +216,8 @@ int tools_remove_node_cache(const char *node, const char *target)
|
|
|
1f016a |
crm_ipc_close(conn);
|
|
|
1f016a |
crm_ipc_destroy(conn);
|
|
|
1f016a |
}
|
|
|
1f016a |
- free_xml(cmd);
|
|
|
1f016a |
free(admin_uuid);
|
|
|
1f016a |
+ free_xml(cmd);
|
|
|
1f016a |
free(name);
|
|
|
1f016a |
return rc > 0 ? 0 : rc;
|
|
|
1f016a |
}
|
|
|
1f016a |
@@ -649,6 +676,12 @@ try_corosync(int command, enum cluster_type_e stack)
|
|
|
1f016a |
|
|
|
1f016a |
mainloop_io_t *ipc = NULL;
|
|
|
1f016a |
GMainLoop *amainloop = NULL;
|
|
|
1f016a |
+ const char *daemons[] = {
|
|
|
1f016a |
+ CRM_SYSTEM_CRMD,
|
|
|
1f016a |
+ "stonith-ng",
|
|
|
1f016a |
+ T_ATTRD,
|
|
|
1f016a |
+ CRM_SYSTEM_MCP,
|
|
|
1f016a |
+ };
|
|
|
1f016a |
|
|
|
1f016a |
struct ipc_client_callbacks node_callbacks = {
|
|
|
1f016a |
.dispatch = node_mcp_dispatch,
|
|
|
1f016a |
@@ -657,13 +690,11 @@ try_corosync(int command, enum cluster_type_e stack)
|
|
|
1f016a |
|
|
|
1f016a |
switch (command) {
|
|
|
1f016a |
case 'R':
|
|
|
1f016a |
- if (tools_remove_node_cache(target_uname, CRM_SYSTEM_CRMD)) {
|
|
|
1f016a |
- crm_err("Failed to connect to "CRM_SYSTEM_CRMD" to remove node '%s'", target_uname);
|
|
|
1f016a |
- crm_exit(pcmk_err_generic);
|
|
|
1f016a |
- }
|
|
|
1f016a |
- if (tools_remove_node_cache(target_uname, CRM_SYSTEM_MCP)) {
|
|
|
1f016a |
- crm_err("Failed to connect to "CRM_SYSTEM_MCP" to remove node '%s'", target_uname);
|
|
|
1f016a |
- crm_exit(pcmk_err_generic);
|
|
|
1f016a |
+ for(rc = 0; rc < DIMOF(daemons); rc++) {
|
|
|
1f016a |
+ if (tools_remove_node_cache(target_uname, daemons[rc])) {
|
|
|
1f016a |
+ crm_err("Failed to connect to %s to remove node '%s'", daemons[rc], target_uname);
|
|
|
1f016a |
+ crm_exit(pcmk_err_generic);
|
|
|
1f016a |
+ }
|
|
|
1f016a |
}
|
|
|
1f016a |
crm_exit(pcmk_ok);
|
|
|
1f016a |
break;
|
|
|
1f016a |
@@ -834,8 +865,8 @@ main(int argc, char **argv)
|
|
|
1f016a |
force_flag = TRUE;
|
|
|
1f016a |
break;
|
|
|
1f016a |
case 'R':
|
|
|
1f016a |
- dangerous_cmd = TRUE;
|
|
|
1f016a |
command = flag;
|
|
|
1f016a |
+ dangerous_cmd = TRUE;
|
|
|
1f016a |
target_uname = optarg;
|
|
|
1f016a |
break;
|
|
|
1f016a |
case 'N':
|