From 0d440890ade31a2050ac861270a39be5c91d4bbb Mon Sep 17 00:00:00 2001
From: Ivan Devat <idevat@redhat.com>
Date: Wed, 14 Sep 2016 15:29:06 +0200
Subject: [PATCH] squash bz1231858 resource/fence agent options form
6007fba70212 web UI: treat resource as managed by default
f1b60c3a2bac WebUI: fix node standby for pcs 0.9.138 and older
73adbedf268e webUI: allow changing groups, cloning and uncloning of resources on clusters running older pcsd
1302b4e62e19 webUI: fix group list when managing cluster running older pcsd
f639c0dded12 webUI: don't show group selector in case cluster doesn't support it
584092ce7d04 webUI: consolidate backward compatibility code
---
pcsd/cluster_entity.rb | 2 +-
pcsd/pcs.rb | 20 ++++-
pcsd/pcsd.rb | 169 +++++++++++++++++++++++++++++++++++++-----
pcsd/public/js/nodes-ember.js | 11 ++-
pcsd/remote.rb | 6 +-
pcsd/views/main.erb | 20 ++---
6 files changed, 194 insertions(+), 34 deletions(-)
diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
index 4ffcd4b..b8f363a 100644
--- a/pcsd/cluster_entity.rb
+++ b/pcsd/cluster_entity.rb
@@ -120,7 +120,7 @@ module ClusterEntity
status = ClusterEntity::CRMResourceStatus.new
status.id = primitive.id
status.resource_agent = primitive.agentname
- status.managed = false
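+ # Treat a resource as managed by default.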
+ status.managed = true
status.failed = resource[:failed]
status.role = nil
status.active = resource[:active]
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 137bb3d..e05f3ef 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -1864,7 +1864,7 @@ end
def status_v1_to_v2(status)
new_status = status.select { |k,_|
[:cluster_name, :username, :is_cman_with_udpu_transport,
- :need_ring1_address, :cluster_settings, :constraints, :groups,
+ :need_ring1_address, :cluster_settings, :constraints,
:corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby,
:pacemaker_offline, :acls, :fence_levels
].include?(k)
@@ -1885,6 +1885,8 @@ def status_v1_to_v2(status)
].include?(k)
}
+ new_status[:groups] = get_group_list_from_tree_of_resources(resources)
+
new_status[:node].update(
{
:id => status[:node_id],
@@ -1901,6 +1903,22 @@ def status_v1_to_v2(status)
return new_status
end
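+# Walk the top level of the resource tree and collect the ids of all
+# groups, including groups wrapped in a clone or master/slave
+# (MultiInstance) resource.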
+def get_group_list_from_tree_of_resources(tree)
+ group_list = []
+ tree.each { |resource|
+ if resource.instance_of?(ClusterEntity::Group)
+ group_list << resource.id
+ end
+ if (
+ resource.kind_of?(ClusterEntity::MultiInstance) and
+ resource.member.instance_of?(ClusterEntity::Group)
+ )
+ group_list << resource.member.id
+ end
+ }
+ return group_list
+end
+
def allowed_for_local_cluster(auth_user, action)
pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
return pcs_config.permissions_local.allows?(
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index 287cf03..dcfd5a0 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -908,7 +908,7 @@ already been added to pcsd. You may not add two clusters with the same name int
'type' => 'boolean',
'shortdesc' => 'Should deleted actions be cancelled',
'longdesc' => 'Should deleted actions be cancelled',
- 'readable_name' => 'top Orphan Actions',
+ 'readable_name' => 'Stop Orphan Actions',
'advanced' => false
},
'start-failure-is-fatal' => {
@@ -1215,33 +1215,168 @@ already been added to pcsd. You may not add two clusters with the same name int
return [200, "Node added successfully."]
end
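+ # The following helpers emulate the newer standalone resource endpoints on
+ # clusters running pcsd 0.9.142 and older by translating them to the
+ # generic 'update_resource' request which those versions understand.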
+ def pcs_0_9_142_resource_change_group(auth_user, params)
+ parameters = {
+ :resource_id => params[:resource_id],
+ :resource_group => '',
+ :_orig_resource_group => '',
+ }
+ parameters[:resource_group] = params[:group_id] if params[:group_id]
+ if params[:old_group_id]
+ parameters[:_orig_resource_group] = params[:old_group_id]
+ end
+ return send_cluster_request_with_token(
+ auth_user, params[:cluster], 'update_resource', true, parameters
+ )
+ end
+
+ def pcs_0_9_142_resource_clone(auth_user, params)
+ parameters = {
+ :resource_id => params[:resource_id],
+ :resource_clone => true,
+ :_orig_resource_clone => 'false',
+ }
+ return send_cluster_request_with_token(
+ auth_user, params[:cluster], 'update_resource', true, parameters
+ )
+ end
+
+ def pcs_0_9_142_resource_unclone(auth_user, params)
+ parameters = {
+ :resource_id => params[:resource_id],
+ :resource_clone => nil,
+ :_orig_resource_clone => 'true',
+ }
+ return send_cluster_request_with_token(
+ auth_user, params[:cluster], 'update_resource', true, parameters
+ )
+ end
+
+ def pcs_0_9_142_resource_master(auth_user, params)
+ parameters = {
+ :resource_id => params[:resource_id],
+ :resource_ms => true,
+ :_orig_resource_ms => 'false',
+ }
+ return send_cluster_request_with_token(
+ auth_user, params[:cluster], 'update_resource', true, parameters
+ )
+ end
+
+ # There is a bug in pcs-0.9.138 and older in processing the standby and
+ # unstandby requests. The JS of that pcsd always sent the node name in the
+ # "node" parameter, which caused the pcsd daemon to run the standby
+ # command locally with param["node"] as the node name. This worked fine if
+ # the local cluster was managed from JS, as pacemaker simply put the
+ # requested node into standby. However, it didn't work for managing
+ # non-local clusters, as the command was run on the local cluster every
+ # time. The pcsd daemon would only send the request to a remote cluster if
+ # the param["name"] variable was set, and that never happened. That,
+ # however, wouldn't have worked either, as the required parameter "node"
+ # wasn't sent in the request, causing an exception on the receiving node.
+ # This is fixed in commit 053f63ca109d9ef9e7f0416e90aab8e140480f5b.
+ #
+ # In order to be able to put nodes running pcs-0.9.138 into standby, the
+ # node name must be sent in the "node" param, and "name" must not be sent.
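+ #
+ # For example (hypothetical node name), a request arriving with
+ # params[:name] == 'node-1' is forwarded as {'node' => 'node-1'},
+ # with no "name" key present.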
+ def pcs_0_9_138_node_standby(auth_user, params)
+ translated_params = {
+ 'node' => params[:name],
+ }
+ return send_cluster_request_with_token(
+ auth_user, params[:cluster], 'node_standby', true, translated_params
+ )
+ end
+
+ def pcs_0_9_138_node_unstandby(auth_user, params)
+ translated_params = {
+ 'node' => params[:name],
+ }
+ return send_cluster_request_with_token(
+ auth_user, params[:cluster], 'node_unstandby', true, translated_params
+ )
+ end
+
post '/managec/:cluster/?*' do
auth_user = PCSAuth.sessionToAuthUser(session)
raw_data = request.env["rack.input"].read
if params[:cluster]
request = "/" + params[:splat].join("/")
- code, out = send_cluster_request_with_token(
- auth_user, params[:cluster], request, true, params, true, raw_data
- )
# backward compatibility layer BEGIN
- # This code correctly remove constraints on pcs/pcsd version 0.9.137 and older
- redirection = {
- "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
- "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
+ translate_for_version = {
+ '/node_standby' => [
+ [[0, 9, 138], method(:pcs_0_9_138_node_standby)],
+ ],
+ '/node_unstandby' => [
+ [[0, 9, 138], method(:pcs_0_9_138_node_unstandby)],
+ ],
}
- if code == 404 and redirection.key?(request)
+ if translate_for_version.key?(request)
+ target_pcsd_version = [0, 0, 0]
+ version_code, version_out = send_cluster_request_with_token(
+ auth_user, params[:cluster], 'get_sw_versions'
+ )
+ if version_code == 200
+ begin
+ versions = JSON.parse(version_out)
+ target_pcsd_version = versions['pcs'] if versions['pcs']
+ rescue JSON::ParserError
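+ # On failure keep the default [0, 0, 0], so the request falls back
+ # to the backward compatible translation.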
+ end
+ end
+ translate_function = nil
+ translate_for_version[request].each { |pair|
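+ # Array#<=> compares the version arrays element-wise and returns -1,
+ # 0 or 1; anything but 1 means target_pcsd_version <= pair[0], i.e.
+ # the target pcsd is old enough to need the translated request.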
+ if (target_pcsd_version <=> pair[0]) != 1 # target <= pair[0]
+ translate_function = pair[1]
+ break
+ end
+ }
+ end
+ # backward compatibility layer END
+
+ if translate_function
+ code, out = translate_function.call(auth_user, params)
+ else
code, out = send_cluster_request_with_token(
- auth_user,
- params[:cluster],
- redirection[request],
- true,
- params,
- false,
- raw_data
+ auth_user, params[:cluster], request, true, params, true, raw_data
)
end
- # bcl END
+
+ # backward compatibility layer BEGIN
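+ # An older pcsd returns 404 for endpoints it does not know; fall back
+ # to equivalent requests that it does support.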
+ if code == 404
+ case request
+ # supported since pcs-0.9.143 (tree view of resources)
+ when '/resource_change_group'
+ code, out = pcs_0_9_142_resource_change_group(auth_user, params)
+ # supported since pcs-0.9.143 (tree view of resources)
+ when '/resource_clone'
+ code, out = pcs_0_9_142_resource_clone(auth_user, params)
+ # supported since pcs-0.9.143 (tree view of resources)
+ when '/resource_unclone'
+ code, out = pcs_0_9_142_resource_unclone(auth_user, params)
+ # supported since pcs-0.9.143 (tree view of resources)
+ when '/resource_master'
+ code, out = pcs_0_9_142_resource_master(auth_user, params)
+ else
+ redirection = {
+ # constraints removal for pcs-0.9.137 and older
+ "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
+ # constraints removal for pcs-0.9.137 and older
+ "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
+ }
+ if redirection.key?(request)
+ code, out = send_cluster_request_with_token(
+ auth_user,
+ params[:cluster],
+ redirection[request],
+ true,
+ params,
+ false,
+ raw_data
+ )
+ end
+ end
+ end
+ # backward compatibility layer END
+
return code, out
end
end
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index 19caf14..6ef49e2 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -922,6 +922,15 @@ Pcs.ResourceObj = Ember.Object.extend({
return "";
}
}.property("status_val"),
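+ // Older pcsd (version 1 status) cannot change the group of a resource
+ // whose group is itself wrapped in a clone or master/slave, so hide
+ // the group selector in that case.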
+ show_group_selector: function() {
+ var parent = this.get("parent");
+ return !(
+ parent &&
+ parent.is_group &&
+ parent.get("parent") &&
+ Pcs.resourcesContainer.get("is_version_1")
+ );
+ }.property(),
location_constraints: [],
ordering_constraints: [],
@@ -1012,7 +1021,7 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({
is_unmanaged: function() {
var instance_status_list = this.get("instance_status");
if (!instance_status_list) {
- return false;
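+ // With no instance status available the resource cannot be
+ // confirmed as managed, so report it as unmanaged.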
+ return true;
}
var is_managed = true;
$.each(instance_status_list, function(_, instance_status) {
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 7dc7951..97e63f1 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -334,9 +334,8 @@ end
def node_standby(params, request, auth_user)
if params[:name]
code, response = send_request_with_token(
- auth_user, params[:name], 'node_standby', true, {"node"=>params[:name]}
+ auth_user, params[:name], 'node_standby', true
)
- # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
else
if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
return 403, 'Permission denied'
@@ -350,9 +349,8 @@ end
def node_unstandby(params, request, auth_user)
if params[:name]
code, response = send_request_with_token(
- auth_user, params[:name], 'node_unstandby', true, {"node"=>params[:name]}
+ auth_user, params[:name], 'node_unstandby', true
)
- # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
else
if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
return 403, 'Permission denied'
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index 8de1c60..a138f68 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -246,7 +246,6 @@
<td class="bold" nowrap>Current Location:</td>
<td id="cur_res_loc" class="reg">{{resource.nodes_running_on_string}}</td>
</tr>
- {{#unless old_pcsd}}
{{#unless resource.parent}}
<tr>
<td class="bold" nowrap>Clone:</td>
@@ -268,6 +267,7 @@
</tr>
{{else}}
{{#if resource.parent.is_group}}
+ {{#if resource.show_group_selector}}
<tr>
<td class="bold" nowrap>Group:</td>
<td id="cur_res_loc" class="reg">
@@ -275,11 +275,10 @@
</td>
</tr>
{{/if}}
- {{/unless}}
+ {{/if}}
{{/unless}}
{{/if}}
{{/unless}}
- {{#unless old_pcsd}}
{{#if resource.is_group}}
{{#unless resource.parent}}
<tr>
@@ -294,12 +293,14 @@
<input type="button" onclick="resource_master(curResource());" value="Create master/slave">
</td>
</tr>
- <tr>
- <td class="bold" nowrap>Group:</td>
- <td id="cur_res_loc" class="reg">
- <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup">
- </td>
- </tr>
+ {{#unless old_pcsd}}
+ <tr>
+ <td class="bold" nowrap>Group:</td>
+ <td id="cur_res_loc" class="reg">
+ <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup">
+ </td>
+ </tr>
+ {{/unless}}
{{/unless}}
{{/if}}
{{#if resource.is_multi_instance}}
@@ -310,7 +311,6 @@
</td>
</tr>
{{/if}}
- {{/unless}}
</table>
{{#unless resource.stonith}}
{{location_constraints-table constraints=resource.location_constraints}}
--
1.8.3.1