diff --git a/.gitignore b/.gitignore
index 60be2e2..6e8512c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,17 +1,17 @@
 SOURCES/HAM-logo.png
-SOURCES/backports-3.11.3.gem
-SOURCES/ethon-0.11.0.gem
-SOURCES/ffi-1.9.25.gem
+SOURCES/backports-3.12.0.gem
+SOURCES/ethon-0.12.0.gem
+SOURCES/ffi-1.10.0.gem
 SOURCES/mock-1.0.1.tar.gz
 SOURCES/multi_json-1.13.1.gem
 SOURCES/open4-1.3.4.gem
 SOURCES/orderedhash-0.0.6.gem
-SOURCES/pcs-0.9.165.tar.gz
+SOURCES/pcs-0.9.167.tar.gz
 SOURCES/pyagentx-0.4.pcs.1.tar.gz
-SOURCES/rack-1.6.10.gem
+SOURCES/rack-1.6.11.gem
 SOURCES/rack-protection-1.5.5.gem
-SOURCES/rack-test-0.7.0.gem
+SOURCES/rack-test-0.8.3.gem
 SOURCES/rpam-ruby19-1.2.1.gem
 SOURCES/sinatra-1.4.8.gem
 SOURCES/sinatra-contrib-1.4.7.gem
-SOURCES/tilt-2.0.8.gem
+SOURCES/tilt-2.0.9.gem
diff --git a/.pcs.metadata b/.pcs.metadata
index 9e93a85..09d03b4 100644
--- a/.pcs.metadata
+++ b/.pcs.metadata
@@ -1,17 +1,17 @@
-80dc7788a3468fb7dd362a4b8bedd9efb373de89 SOURCES/HAM-logo.png
-b8887abb18c0435eb8d4c9535e5619f7f6a458ec SOURCES/backports-3.11.3.gem
-3c921ceeb2847be8cfa25704be74923e233786bd SOURCES/ethon-0.11.0.gem
-86fa011857f977254ccf39f507587310f9ade768 SOURCES/ffi-1.9.25.gem
+679a4ce22a33ffd4d704261a17c00cff98d9499a SOURCES/HAM-logo.png
+727cdc5c9138091d39e8dca877bad78238a4d539 SOURCES/backports-3.12.0.gem
+921ef1be44583a7644ee7f20fe5f26f21d018a04 SOURCES/ethon-0.12.0.gem
+15d8209c5c420a141e38763b153b41e5de2535ba SOURCES/ffi-1.10.0.gem
 baa3446eb63557a24c4522dc5a61cfad082fa395 SOURCES/mock-1.0.1.tar.gz
 ff6e0965061cb6f604ee4d87a2cf96a2917f9f88 SOURCES/multi_json-1.13.1.gem
 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4.gem
 709cc95025009e5d221e37cb0777e98582146809 SOURCES/orderedhash-0.0.6.gem
-3fde33fc0fceb5b251391011b3f2e059d64e0386 SOURCES/pcs-0.9.165.tar.gz
+99ab7aca9cb978c82da9727b4cfeae03c3cf1b28 SOURCES/pcs-0.9.167.tar.gz
 276a92c6d679a71bd0daaf12cb7b3616f1a89b72 SOURCES/pyagentx-0.4.pcs.1.tar.gz
-220afc472c53e0e0a0662c0dd6d4e74158f1845c SOURCES/rack-1.6.10.gem
+64a0cd32f46c0ff44ffda4055048fe6309903110 SOURCES/rack-1.6.11.gem
 f80ea6672253a90fa031db0c1e2e1fe056582118 SOURCES/rack-protection-1.5.5.gem
-3f41699c1c19ff2e2353583afa70799ced351a36 SOURCES/rack-test-0.7.0.gem
+908e2a877da8eb6745073c51709dc024c4457e44 SOURCES/rack-test-0.8.3.gem
 a90e5a60d99445404a3c29a66d953a5e9918976d SOURCES/rpam-ruby19-1.2.1.gem
 3377f6140321523d7751bed3b2cc8a5201d8ec9f SOURCES/sinatra-1.4.8.gem
 83742328f21b684d6ce6c4747710c6e975b608e7 SOURCES/sinatra-contrib-1.4.7.gem
-ac4b5bc216a961287b3c2ebde877c039d2f1a83d SOURCES/tilt-2.0.8.gem
+55a75a80e29731d072fe44dfaf865479b65c27fd SOURCES/tilt-2.0.9.gem
diff --git a/SOURCES/adapt-working-with-gems-to-rhel-7.patch b/SOURCES/adapt-working-with-gems-to-rhel-7.patch
deleted file mode 100644
index a75940b..0000000
--- a/SOURCES/adapt-working-with-gems-to-rhel-7.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From 8d958e9f63698bd05b19213ddbf71b57cb228135 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Tue, 24 May 2016 07:26:15 +0200
-Subject: [PATCH 3/5] adapt working with gems to rhel 7
-
----
- pcsd/Gemfile      | 1 -
- pcsd/Gemfile.lock | 2 --
- pcsd/Makefile     | 5 +----
- 3 files changed, 1 insertion(+), 7 deletions(-)
-
-diff --git a/pcsd/Gemfile b/pcsd/Gemfile
-index 6418fd41..3598f533 100644
---- a/pcsd/Gemfile
-+++ b/pcsd/Gemfile
-@@ -8,7 +8,6 @@ gem 'tilt'
- gem 'rack-test'
- gem 'backports'
- gem 'rpam-ruby19'
--gem 'json'
- gem 'multi_json'
- gem 'open4'
- gem 'ffi'
-diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock
-index 4b78bba6..137086ca 100644
---- a/pcsd/Gemfile.lock
-+++ b/pcsd/Gemfile.lock
-@@ -4,7 +4,6 @@ GEM
-     backports (3.11.3)
-     ethon (0.11.0)
-     ffi (1.9.25)
--    json (2.1.0)
-     multi_json (1.13.1)
-     open4 (1.3.4)
-     rack (1.6.10)
-@@ -33,7 +32,6 @@ DEPENDENCIES
-   backports
-   ethon
-   ffi
--  json
-   multi_json
-   open4
-   rack
-diff --git a/pcsd/Makefile b/pcsd/Makefile
-index 21550c5a..642d0c83 100644
---- a/pcsd/Makefile
-+++ b/pcsd/Makefile
-@@ -1,7 +1,7 @@
- FFI_VERSION="1.9.25"
- FFI_C_DIR=vendor/bundle/ruby/gems/ffi-${FFI_VERSION}/ext/ffi_c
- 
--build_gems: get_gems
-+build_gems:
- 	bundle install --local --deployment
- 	#ffi makes symlink with absolute path. Let's change it to relative path.
- 	for fname in `ls ${FFI_C_DIR}/libffi-*/include/ffitarget.h`; do \
-@@ -30,8 +30,5 @@ build_gems_without_bundler:
- 	vendor/cache/tilt-2.0.8.gem \
- 	-- '--with-ldflags="-Wl,-z,now -Wl,-z,relro"'
- 
--get_gems:
--	bundle package
--
- clean:
- 	rm -rfv vendor/
--- 
-2.13.6
-
diff --git a/SOURCES/bz1458153-01-give-back-orig.-master-behav.-resource-create.patch b/SOURCES/bz1458153-01-give-back-orig.-master-behav.-resource-create.patch
index fb97ff0..b3b3965 100644
--- a/SOURCES/bz1458153-01-give-back-orig.-master-behav.-resource-create.patch
+++ b/SOURCES/bz1458153-01-give-back-orig.-master-behav.-resource-create.patch
@@ -1,16 +1,16 @@
-From 4d6997edab3c3e478fb0d73e0dc6dc2a924ed664 Mon Sep 17 00:00:00 2001
+From 0b642f384198d9df09c33ba1473909b7aeb4a572 Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Mon, 5 Jun 2017 17:13:41 +0200
-Subject: [PATCH 1/5] give back orig. --master behav. (resource create)
+Subject: [PATCH] give back orig. --master behav. (resource create)
 
 ---
  pcs/cli/common/parse_args.py           |   8 +-
- pcs/cli/common/test/test_parse_args.py |  34 ++++++-
- pcs/resource.py                        |  19 ++++
- pcs/test/cib_resource/test_create.py   | 181 ++++++++++++++++++++++++++-------
- pcs/test/test_constraints.py           |  28 ++---
+ pcs/cli/common/test/test_parse_args.py |  34 ++++-
+ pcs/resource.py                        |  19 +++
+ pcs/test/cib_resource/test_create.py   | 181 ++++++++++++++++++++-----
+ pcs/test/test_constraints.py           |  28 ++--
  pcs/test/test_resource.py              |  10 +-
- pcs/utils.py                           |   7 ++
+ pcs/utils.py                           |   7 +
  7 files changed, 228 insertions(+), 59 deletions(-)
 
 diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
@@ -87,10 +87,10 @@ index efe38d0e..900094c9 100644
              ],
              upgrade_args([
 diff --git a/pcs/resource.py b/pcs/resource.py
-index c605cc6a..cdba2bfd 100644
+index c6bb0aca..f615f682 100644
 --- a/pcs/resource.py
 +++ b/pcs/resource.py
-@@ -384,6 +384,25 @@ def resource_create(lib, argv, modifiers):
+@@ -391,6 +391,25 @@ def resource_create(lib, argv, modifiers):
      ra_type = argv[1]
  
      parts = parse_create_args(argv[2:])
@@ -117,10 +117,10 @@ index c605cc6a..cdba2bfd 100644
      defined_options = [opt for opt in parts_sections if opt in parts]
      if modifiers["group"]:
 diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
-index 36202e34..e5f9dd4d 100644
+index ecb16384..57d95350 100644
 --- a/pcs/test/cib_resource/test_create.py
 +++ b/pcs/test/cib_resource/test_create.py
-@@ -233,7 +233,7 @@ class Success(ResourceTest):
+@@ -238,7 +238,7 @@ class Success(ResourceTestLocal):
      def test_with_master(self):
          self.assert_effect(
              [
@@ -129,8 +129,8 @@ index 36202e34..e5f9dd4d 100644
                  "resource create R ocf:heartbeat:Dummy --no-default-ops master",
              ],
              """<resources>
-@@ -654,7 +654,7 @@ class SuccessGroup(ResourceTest):
- class SuccessMaster(ResourceTest):
+@@ -659,7 +659,7 @@ class SuccessGroup(ResourceTestLocal):
+ class SuccessMaster(ResourceTestLocal):
      def test_disable_is_on_master_element(self):
          self.assert_effect(
 -            "resource create R ocf:heartbeat:Dummy --no-default-ops --disabled --master",
@@ -138,7 +138,7 @@ index 36202e34..e5f9dd4d 100644
              """<resources>
                  <master id="R-master">
                      <meta_attributes id="R-master-meta_attributes">
-@@ -675,13 +675,55 @@ class SuccessMaster(ResourceTest):
+@@ -680,13 +680,55 @@ class SuccessMaster(ResourceTestLocal):
              </resources>"""
          )
  
@@ -197,7 +197,7 @@ index 36202e34..e5f9dd4d 100644
              ,
              """<resources>
                  <master id="R-master">
-@@ -689,6 +731,9 @@ class SuccessMaster(ResourceTest):
+@@ -694,6 +736,9 @@ class SuccessMaster(ResourceTestLocal):
                          type="Dummy"
                      >
                          <instance_attributes id="R-instance_attributes">
@@ -207,7 +207,7 @@ index 36202e34..e5f9dd4d 100644
                              <nvpair id="R-instance_attributes-state"
                                  name="state" value="a"
                              />
-@@ -714,22 +759,58 @@ class SuccessMaster(ResourceTest):
+@@ -719,22 +764,58 @@ class SuccessMaster(ResourceTestLocal):
                              />
                          </operations>
                      </primitive>
@@ -276,7 +276,7 @@ index 36202e34..e5f9dd4d 100644
                  " --no-default-ops"
              ,
              """<resources>
-@@ -744,22 +825,53 @@ class SuccessMaster(ResourceTest):
+@@ -749,22 +830,53 @@ class SuccessMaster(ResourceTestLocal):
                          </instance_attributes>
                          <operations>
                              <op id="R-monitor-interval-10s" interval="10s"
@@ -340,7 +340,7 @@ index 36202e34..e5f9dd4d 100644
          """
          self.assert_effect(
              "resource create R ocf:heartbeat:Dummy meta a=b --master b=c"
-@@ -770,11 +882,6 @@ class SuccessMaster(ResourceTest):
+@@ -775,11 +887,6 @@ class SuccessMaster(ResourceTestLocal):
                      <primitive class="ocf" id="R" provider="heartbeat"
                          type="Dummy"
                      >
@@ -350,9 +350,9 @@ index 36202e34..e5f9dd4d 100644
 -                            />
 -                        </meta_attributes>
                          <operations>
-                             <op id="R-monitor-interval-10" interval="10"
-                                 name="monitor" timeout="20"
-@@ -782,18 +889,24 @@ class SuccessMaster(ResourceTest):
+                             <op id="R-monitor-interval-10s" interval="10s"
+                                 name="monitor" timeout="20s"
+@@ -787,18 +894,24 @@ class SuccessMaster(ResourceTestLocal):
                          </operations>
                      </primitive>
                      <meta_attributes id="R-master-meta_attributes">
@@ -378,7 +378,7 @@ index 36202e34..e5f9dd4d 100644
              ,
              """<resources>
                  <master id="R-master">
-@@ -1043,7 +1156,7 @@ class FailOrWarn(ResourceTest):
+@@ -1048,7 +1161,7 @@ class FailOrWarn(ResourceTestLocal):
      def test_error_master_clone_combination(self):
          self.assert_pcs_fail(
              "resource create R ocf:heartbeat:Dummy --no-default-ops --clone"
@@ -387,7 +387,7 @@ index 36202e34..e5f9dd4d 100644
              ,
              "Error: you can specify only one of clone, master, bundle or"
                  " --group\n"
-@@ -1051,7 +1164,7 @@ class FailOrWarn(ResourceTest):
+@@ -1056,7 +1169,7 @@ class FailOrWarn(ResourceTestLocal):
  
      def test_error_master_group_combination(self):
          self.assert_pcs_fail(
@@ -396,7 +396,7 @@ index 36202e34..e5f9dd4d 100644
                  " --group G"
              ,
              "Error: you can specify only one of clone, master, bundle or"
-@@ -1069,7 +1182,7 @@ class FailOrWarn(ResourceTest):
+@@ -1074,7 +1187,7 @@ class FailOrWarn(ResourceTestLocal):
  
      def test_error_bundle_master_combination(self):
          self.assert_pcs_fail(
@@ -406,10 +406,10 @@ index 36202e34..e5f9dd4d 100644
              ,
              "Error: you can specify only one of clone, master, bundle or"
 diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
-index 07226acf..d17230ac 100644
+index d2278e08..142fb6f7 100644
 --- a/pcs/test/test_constraints.py
 +++ b/pcs/test/test_constraints.py
-@@ -346,43 +346,43 @@ Ticket Constraints:
+@@ -349,43 +349,43 @@ Ticket Constraints:
  
      def testColocationConstraints(self):
          # see also BundleColocation
@@ -463,7 +463,7 @@ index 07226acf..d17230ac 100644
          output, returnVal = pcs(temp_cib, line)
          assert returnVal == 0 and output == ""
  
-@@ -929,7 +929,7 @@ Ticket Constraints:
+@@ -939,7 +939,7 @@ Ticket Constraints:
          assert returnVal == 1
  
      def testLocationBadRules(self):
@@ -472,7 +472,7 @@ index 07226acf..d17230ac 100644
          ac(o,"")
          assert r == 0
  
-@@ -950,7 +950,7 @@ Ticket Constraints:
+@@ -960,7 +960,7 @@ Ticket Constraints:
  """)
          assert r == 0
  
@@ -481,7 +481,7 @@ index 07226acf..d17230ac 100644
          ac(o,"")
          assert r == 0
  
-@@ -989,7 +989,7 @@ Ticket Constraints:
+@@ -999,7 +999,7 @@ Ticket Constraints:
          ac(o,"")
          assert r == 0
  
@@ -490,7 +490,7 @@ index 07226acf..d17230ac 100644
          ac(o, """\
  Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
  """)
-@@ -1110,7 +1110,7 @@ Ticket Constraints:
+@@ -1120,7 +1120,7 @@ Ticket Constraints:
          self.assertEqual(0, returnVal)
  
          output, returnVal = pcs(
@@ -500,10 +500,10 @@ index 07226acf..d17230ac 100644
          ac(output, """\
  Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
 diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
-index 59432999..7828efb4 100644
+index 3091517c..f514f9ed 100644
 --- a/pcs/test/test_resource.py
 +++ b/pcs/test/test_resource.py
-@@ -2840,7 +2840,7 @@ Ticket Constraints:
+@@ -2738,7 +2738,7 @@ Ticket Constraints:
  
          output, returnVal  = pcs(
              temp_cib,
@@ -512,7 +512,7 @@ index 59432999..7828efb4 100644
          )
          assert returnVal == 0
          assert output == "", [output]
-@@ -2933,7 +2933,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
+@@ -2851,7 +2851,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
          ac(o,"")
          assert r == 0
  
@@ -521,7 +521,7 @@ index 59432999..7828efb4 100644
          ac(o,"")
          assert r == 0
  
-@@ -3147,7 +3147,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
+@@ -3077,7 +3077,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
  
          output, returnVal = pcs(
              temp_cib,
@@ -530,7 +530,7 @@ index 59432999..7828efb4 100644
          )
          ac(output, "")
          self.assertEqual(0, returnVal)
-@@ -3741,7 +3741,7 @@ Error: Cannot remove more than one resource from cloned group
+@@ -3742,7 +3742,7 @@ Error: Cannot remove more than one resource from cloned group
          # However those test the pcs library. I'm leaving these tests here to
          # test the cli part for now.
          self.assert_pcs_success(
@@ -539,20 +539,20 @@ index 59432999..7828efb4 100644
              "Warning: changing a monitor operation interval from 10 to 11 to make the operation unique\n"
          )
  
-@@ -4775,7 +4775,7 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
+@@ -5355,7 +5355,7 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
  
      def test_no_op_allowed_in_master_update(self):
          self.assert_pcs_success(
 -            "resource create dummy ocf:heartbeat:Dummy --master"
 +            "resource create dummy ocf:heartbeat:Dummy master"
          )
-         self.assert_pcs_success("resource show dummy-master", outdent(
-             """\
+         self.assert_pcs_success(
+             "resource show dummy-master",
 diff --git a/pcs/utils.py b/pcs/utils.py
-index 8a989f52..343a611b 100644
+index 8515aae9..c5e3c171 100644
 --- a/pcs/utils.py
 +++ b/pcs/utils.py
-@@ -2904,6 +2904,13 @@ def get_modifiers():
+@@ -2962,6 +2962,13 @@ def get_modifiers():
          "wait": pcs_options.get("--wait", False),
          "watchdog": pcs_options.get("--watchdog", []),
          "no_watchdog_validation": "--no-watchdog-validation" in pcs_options,
@@ -567,5 +567,5 @@ index 8a989f52..343a611b 100644
  
  def exit_on_cmdline_input_errror(error, main_name, usage_name):
 -- 
-2.13.6
+2.17.0
 
diff --git a/SOURCES/bz1459503-01-OSP-workarounds-not-compatible-wi.patch b/SOURCES/bz1459503-01-OSP-workarounds-not-compatible-wi.patch
index b3e4b3f..f822579 100644
--- a/SOURCES/bz1459503-01-OSP-workarounds-not-compatible-wi.patch
+++ b/SOURCES/bz1459503-01-OSP-workarounds-not-compatible-wi.patch
@@ -1,7 +1,7 @@
-From bbb137919a73cffdb635ed77d7f2b2e35f2cd056 Mon Sep 17 00:00:00 2001
+From 2eb9635de627abfaa14ee83ba4c022f7ecd9d74b Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Wed, 7 Jun 2017 14:36:05 +0200
-Subject: [PATCH 2/5] squash bz1459503 OSP workarounds not compatible wi
+Subject: [PATCH] squash bz1459503 OSP workarounds not compatible wi
 
 reuse existing pcmk authkey during setup
 
@@ -14,10 +14,10 @@ show only warn if `resource create` creates remote
  4 files changed, 31 insertions(+), 22 deletions(-)
 
 diff --git a/pcs/cluster.py b/pcs/cluster.py
-index e8c94ab8..c2af8a8f 100644
+index b1f63d45..42e94a94 100644
 --- a/pcs/cluster.py
 +++ b/pcs/cluster.py
-@@ -517,13 +517,21 @@ def cluster_setup(argv):
+@@ -519,13 +519,21 @@ def cluster_setup(argv):
          print("Destroying cluster on nodes: {0}...".format(
              ", ".join(primary_addr_list)
          ))
@@ -41,7 +41,7 @@ index e8c94ab8..c2af8a8f 100644
              if modifiers["encryption"] == "1":
                  file_definitions.update(
 diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
-index 5637113c..6d59e8f9 100644
+index 90dab8f4..de5cfb4e 100644
 --- a/pcs/lib/commands/resource.py
 +++ b/pcs/lib/commands/resource.py
 @@ -76,7 +76,8 @@ def _validate_remote_connection(
@@ -65,10 +65,10 @@ index 5637113c..6d59e8f9 100644
      )
  
 diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
-index e5f9dd4d..8a445f09 100644
+index 57d95350..3cd49e74 100644
 --- a/pcs/test/cib_resource/test_create.py
 +++ b/pcs/test/cib_resource/test_create.py
-@@ -1583,11 +1583,10 @@ class FailOrWarnGroup(ResourceTest):
+@@ -1627,11 +1627,10 @@ class FailOrWarnGroup(ResourceTestLocal):
          )
  
      def test_fail_when_on_pacemaker_remote_attempt(self):
@@ -83,7 +83,7 @@ index e5f9dd4d..8a445f09 100644
          )
  
      def test_warn_when_on_pacemaker_remote_attempt(self):
-@@ -1687,10 +1686,10 @@ class FailOrWarnGroup(ResourceTest):
+@@ -1731,10 +1730,10 @@ class FailOrWarnGroup(ResourceTestLocal):
          )
  
      def test_fail_when_on_pacemaker_remote_guest_attempt(self):
@@ -98,10 +98,10 @@ index e5f9dd4d..8a445f09 100644
  
      def test_warn_when_on_pacemaker_remote_guest_attempt(self):
 diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
-index 7828efb4..8397df76 100644
+index f514f9ed..773b37cf 100644
 --- a/pcs/test/test_resource.py
 +++ b/pcs/test/test_resource.py
-@@ -5126,10 +5126,10 @@ class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+@@ -5712,10 +5712,10 @@ class ResourceUpdateRemoteAndGuestChecks(unittest.TestCase, AssertPcsMixin):
          self.assert_pcs_success(
              "resource create R ocf:heartbeat:Dummy",
          )
@@ -115,7 +115,7 @@ index 7828efb4..8397df76 100644
          )
      def test_update_warn_on_pacemaker_guest_attempt(self):
          self.assert_pcs_success(
-@@ -5148,10 +5148,10 @@ class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+@@ -5734,10 +5734,10 @@ class ResourceUpdateRemoteAndGuestChecks(unittest.TestCase, AssertPcsMixin):
              "Warning: this command is not sufficient for creating a guest node,"
              " use 'pcs cluster node add-guest'\n"
          )
@@ -129,7 +129,7 @@ index 7828efb4..8397df76 100644
          )
  
      def test_update_warn_on_pacemaker_guest_attempt_remove(self):
-@@ -5172,10 +5172,10 @@ class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+@@ -5758,10 +5758,10 @@ class ResourceUpdateRemoteAndGuestChecks(unittest.TestCase, AssertPcsMixin):
          self.assert_pcs_success(
              "resource create R ocf:heartbeat:Dummy",
          )
@@ -143,7 +143,7 @@ index 7828efb4..8397df76 100644
          )
  
      def test_meta_warn_on_pacemaker_guest_attempt(self):
-@@ -5196,10 +5196,10 @@ class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+@@ -5782,10 +5782,10 @@ class ResourceUpdateRemoteAndGuestChecks(unittest.TestCase, AssertPcsMixin):
              "Warning: this command is not sufficient for creating a guest node,"
              " use 'pcs cluster node add-guest'\n"
          )
@@ -158,5 +158,5 @@ index 7828efb4..8397df76 100644
  
      def test_meta_warn_on_pacemaker_guest_attempt_remove(self):
 -- 
-2.13.6
+2.17.0
 
diff --git a/SOURCES/bz1462248-01-fix-error-for-an-inaccessible-resource-in-a-bundle.patch b/SOURCES/bz1462248-01-fix-error-for-an-inaccessible-resource-in-a-bundle.patch
deleted file mode 100644
index d90d09b..0000000
--- a/SOURCES/bz1462248-01-fix-error-for-an-inaccessible-resource-in-a-bundle.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-From 8811ee0493b956207f3336e9e0eb1395a530af8f Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Mon, 6 Aug 2018 08:43:47 +0200
-Subject: [PATCH] fix error for an inaccessible resource in a bundle
-
----
- pcs/cli/common/console_report.py           | 6 +++---
- pcs/cli/common/test/test_console_report.py | 6 +++---
- pcs/resource.py                            | 1 +
- 3 files changed, 7 insertions(+), 6 deletions(-)
-
-diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
-index f2cee05e..06ea8f72 100644
---- a/pcs/cli/common/console_report.py
-+++ b/pcs/cli/common/console_report.py
-@@ -1434,8 +1434,8 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
-     ,
-     codes.RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE: lambda info:
-         (
--            "Resource '{resource_id}' will not be accessible by the cluster "
--            "inside bundle '{bundle_id}'. At least one of bundle options "
--            "'control-port' or 'ip-range-start' has to be specified."
-+            "Resource '{inner_resource_id}' will not be accessible by the "
-+            "cluster inside bundle '{bundle_id}', at least one of bundle "
-+            "options 'control-port' or 'ip-range-start' has to be specified"
-         ).format(**info)
- }
-diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
-index dee633ad..5fe49466 100644
---- a/pcs/cli/common/test/test_console_report.py
-+++ b/pcs/cli/common/test/test_console_report.py
-@@ -2126,12 +2126,12 @@ class ResourceInBundleNotAccessible(NameBuildTest):
-         self.assert_message_from_info(
-             (
-                 "Resource 'resourceA' will not be accessible by the cluster "
--                "inside bundle 'bundleA'. At least one of bundle options "
--                "'control-port' or 'ip-range-start' has to be specified."
-+                "inside bundle 'bundleA', at least one of bundle options "
-+                "'control-port' or 'ip-range-start' has to be specified"
-             ),
-             dict(
-                 bundle_id="bundleA",
--                resource_id="resourceA",
-+                inner_resource_id="resourceA",
-             )
-         )
- 
-diff --git a/pcs/resource.py b/pcs/resource.py
-index 001bad50..c605cc6a 100644
---- a/pcs/resource.py
-+++ b/pcs/resource.py
-@@ -439,6 +439,7 @@ def resource_create(lib, argv, modifiers):
-             **settings
-         )
-     elif "bundle" in parts:
-+        settings["allow_not_accessible_resource"] = modifiers["force"]
-         lib.resource.create_into_bundle(
-             ra_id, ra_type, parts["op"],
-             parts["meta"],
--- 
-2.13.6
-
diff --git a/SOURCES/bz1475318-01-rfe-validate-nodes-watchdog-device-by-using-sbd.patch b/SOURCES/bz1475318-01-rfe-validate-nodes-watchdog-device-by-using-sbd.patch
deleted file mode 100644
index 0c73465..0000000
--- a/SOURCES/bz1475318-01-rfe-validate-nodes-watchdog-device-by-using-sbd.patch
+++ /dev/null
@@ -1,111 +0,0 @@
-From 2e5005b822b1dab3d074361f46607af3bd696b71 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Mon, 6 Aug 2018 08:43:47 +0200
-Subject: [PATCH] squash bz1475318 RFE: Validate node's watchdog dev
-
-9d95b73a1b22 do not connect stdin of subprocess to pcs's stdin
-
-7607976d478e fix tests
----
- pcs/lib/external.py           | 10 +++++++++-
- pcs/test/test_lib_external.py |  8 ++++----
- pcs/utils.py                  |  8 +++++++-
- 3 files changed, 20 insertions(+), 6 deletions(-)
-
-diff --git a/pcs/lib/external.py b/pcs/lib/external.py
-index 5507543f..fe17a864 100644
---- a/pcs/lib/external.py
-+++ b/pcs/lib/external.py
-@@ -25,6 +25,12 @@ try:
- except ImportError:
-     # python3
-     from urllib.parse import urlencode as urllib_urlencode
-+try:
-+    # python 3
-+    from subprocess import DEVNULL
-+except ImportError:
-+    # python 2
-+    DEVNULL = open(os.devnull, "r")
- 
- from pcs import settings
- from pcs.common import pcs_pycurl as pycurl
-@@ -401,7 +407,9 @@ class CommandRunner(object):
-             process = subprocess.Popen(
-                 args,
-                 # Some commands react differently if they get anything via stdin
--                stdin=(subprocess.PIPE if stdin_string is not None else None),
-+                stdin=(
-+                    subprocess.PIPE if stdin_string is not None else DEVNULL
-+                ),
-                 stdout=subprocess.PIPE,
-                 stderr=subprocess.PIPE,
-                 preexec_fn=(
-diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
-index b249c47a..85c52a18 100644
---- a/pcs/test/test_lib_external.py
-+++ b/pcs/test/test_lib_external.py
-@@ -74,7 +74,7 @@ class CommandRunnerTest(TestCase):
-         self.assert_popen_called_with(
-             mock_popen,
-             command,
--            {"env": {}, "stdin": None,}
-+            {"env": {}, "stdin": lib.DEVNULL,}
-         )
-         logger_calls = [
-             mock.call("Running: {0}\nEnvironment:".format(command_str)),
-@@ -158,7 +158,7 @@ class CommandRunnerTest(TestCase):
-         self.assert_popen_called_with(
-             mock_popen,
-             command,
--            {"env": {"a": "a", "b": "B", "c": "{C}"}, "stdin": None,}
-+            {"env": {"a": "a", "b": "B", "c": "{C}"}, "stdin": lib.DEVNULL,}
-         )
-         logger_calls = [
-             mock.call(
-@@ -327,7 +327,7 @@ class CommandRunnerTest(TestCase):
-         self.assert_popen_called_with(
-             mock_popen,
-             command,
--            {"env": {}, "stdin": None,}
-+            {"env": {}, "stdin": lib.DEVNULL,}
-         )
-         logger_calls = [
-             mock.call("Running: {0}\nEnvironment:".format(command_str)),
-@@ -376,7 +376,7 @@ class CommandRunnerTest(TestCase):
-         self.assert_popen_called_with(
-             mock_popen,
-             command,
--            {"env": {}, "stdin": None,}
-+            {"env": {}, "stdin": lib.DEVNULL,}
-         )
-         logger_calls = [
-             mock.call("Running: {0}\nEnvironment:".format(command_str)),
-diff --git a/pcs/utils.py b/pcs/utils.py
-index eb02ca34..347ad73e 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -86,6 +86,12 @@ try:
- except ImportError:
-     # python3
-     from urllib.parse import urlencode as urllib_urlencode
-+try:
-+    # python 3
-+    from subprocess import DEVNULL
-+except ImportError:
-+    # python 2
-+    DEVNULL = open(os.devnull, "r")
- 
- 
- PYTHON2 = (sys.version_info.major == 2)
-@@ -1035,7 +1041,7 @@ def run(
-         if string_for_stdin != None:
-             stdin_pipe = subprocess.PIPE
-         else:
--            stdin_pipe = None
-+            stdin_pipe = DEVNULL
- 
-         p = subprocess.Popen(
-             args,
--- 
-2.13.6
-
diff --git a/SOURCES/bz1475318-02-rfe-validate-nodes-watchdog-device-by-using-sbd.patch b/SOURCES/bz1475318-02-rfe-validate-nodes-watchdog-device-by-using-sbd.patch
deleted file mode 100644
index 6aaf42d..0000000
--- a/SOURCES/bz1475318-02-rfe-validate-nodes-watchdog-device-by-using-sbd.patch
+++ /dev/null
@@ -1,86 +0,0 @@
-From e66477b89b6a0ffbb9220c1a384c2a283dddcf17 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Thu, 23 Aug 2018 14:49:05 +0200
-Subject: [PATCH] squash bz1475318 RFE: Validate node's watchdog dev
-
-6ae0b56ea1d9 fix watchdog device test error message
-
-3685516072c8 Mark all watchdogs listed by SBD as supported
----
- pcs/lib/sbd.py |  6 +++---
- pcs/stonith.py | 22 +++++-----------------
- pcsd/remote.rb |  5 ++++-
- 3 files changed, 12 insertions(+), 21 deletions(-)
-
-diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
-index caf86a18..0e7f5b92 100644
---- a/pcs/lib/sbd.py
-+++ b/pcs/lib/sbd.py
-@@ -302,10 +302,10 @@ def test_watchdog(cmd_runner, watchdog=None):
-     cmd = [settings.sbd_binary, "test-watchdog"]
-     if watchdog:
-         cmd.extend(["-w", watchdog])
--    dummy_std_out, std_err, ret_val = cmd_runner.run(cmd)
-+    std_out, dummy_std_err, ret_val = cmd_runner.run(cmd)
-     if ret_val:
--        if "Multiple watchdog devices discovered" in std_err:
-+        if "Multiple watchdog devices discovered" in std_out:
-             raise LibraryError(reports.sbd_watchdog_test_multiple_devices())
--        raise LibraryError(reports.sbd_watchdog_test_error(std_err))
-+        raise LibraryError(reports.sbd_watchdog_test_error(std_out))
-     else:
-         raise LibraryError(reports.sbd_watchdog_test_failed())
-diff --git a/pcs/stonith.py b/pcs/stonith.py
-index cc805da8..707321ca 100644
---- a/pcs/stonith.py
-+++ b/pcs/stonith.py
-@@ -503,25 +503,13 @@ def sbd_watchdog_list(lib, argv, modifiers):
-         raise CmdLineInputError()
- 
-     available_watchdogs = lib.sbd.get_local_available_watchdogs()
--    supported_watchdog_list = [
--        wd for wd, wd_info in available_watchdogs.items()
--        if wd_info["caution"] is None
--    ]
--    unsupported_watchdog_list = [
--        wd for wd in available_watchdogs
--        if wd not in supported_watchdog_list
--    ]
--
--    if supported_watchdog_list:
--        print("Supported watchdog(s):")
--        for watchdog in supported_watchdog_list:
--            print("  {}".format(watchdog))
- 
--    if unsupported_watchdog_list:
--        print("Unsupported watchdog(s):")
--        for watchdog in unsupported_watchdog_list:
-+    if available_watchdogs:
-+        print("Available watchdog(s):")
-+        for watchdog in sorted(available_watchdogs.keys()):
-             print("  {}".format(watchdog))
--
-+    else:
-+        print("No available watchdog")
- 
- def sbd_watchdog_list_json(lib, argv, modifiers):
-     if argv:
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 27af41b2..a74f28f5 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -2408,7 +2408,10 @@ def check_sbd(param, request, auth_user)
-         :path => watchdog,
-         :exist => exists,
-         :is_supported => (
--          exists and available_watchdogs[watchdog]['caution'] == nil
-+          # this method is not reliable so all watchdog devices listed by SBD
-+          # will be listed as supported for now
-+          # exists and available_watchdogs[watchdog]['caution'] == nil
-+          exists
-         ),
-       }
-     rescue JSON::ParserError
--- 
-2.13.6
-
diff --git a/SOURCES/bz1488044-01-fix-pcs-cluster-cib-push-for-old-feature-set.patch b/SOURCES/bz1488044-01-fix-pcs-cluster-cib-push-for-old-feature-set.patch
deleted file mode 100644
index f4ae99e..0000000
--- a/SOURCES/bz1488044-01-fix-pcs-cluster-cib-push-for-old-feature-set.patch
+++ /dev/null
@@ -1,229 +0,0 @@
-From e8d95b4ba03e62658ece669d6389b71b8553df1f Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Wed, 8 Aug 2018 13:48:24 +0200
-Subject: [PATCH] fix pcs cluster cib-push for old feature set
-
----
- pcs/cluster.py                 | 56 +++++++++++++++++++++++++++++++++++++-----
- pcs/lib/cib/test/test_tools.py | 29 ++++++++++++++++++++++
- pcs/lib/cib/tools.py           |  6 ++---
- pcs/lib/env.py                 | 10 ++++++--
- 4 files changed, 90 insertions(+), 11 deletions(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index b4d49d27..e8c94ab8 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -35,6 +35,7 @@ from pcs import (
- )
- from pcs.utils import parallel_for_nodes
- from pcs.common import report_codes
-+from pcs.common.tools import Version
- from pcs.cli.common.errors import (
-     CmdLineInputError,
-     ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE,
-@@ -46,6 +47,7 @@ from pcs.lib import (
-     reports as lib_reports,
- )
- from pcs.lib.booth import sync as booth_sync
-+from pcs.lib.cib.tools import VERSION_FORMAT
- from pcs.lib.commands.remote_node import _share_authkey, _destroy_pcmk_remote_env
- from pcs.lib.commands.quorum import _add_device_model_net
- from pcs.lib.communication.corosync import CheckCorosyncOffline
-@@ -74,6 +76,7 @@ from pcs.lib.external import (
-     NodeCommunicationException,
-     node_communicator_exception_to_report_item,
- )
-+from pcs.lib.env import  MIN_FEATURE_SET_VERSION_FOR_DIFF
- from pcs.lib.env_tools import get_nodes
- from pcs.lib.node import NodeAddresses
- from pcs.lib import node_communication_format
-@@ -1566,21 +1569,62 @@ def cluster_push(argv):
- 
-     if diff_against:
-         try:
--            xml.dom.minidom.parse(diff_against)
-+            original_cib = xml.dom.minidom.parse(diff_against)
-         except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
-             utils.err("unable to parse original cib: %s" % e)
-+
-+        def unable_to_diff(reason):
-+            return error(
-+                "unable to diff against original cib '{0}': {1}"
-+                .format(diff_against, reason)
-+            )
-+
-+        cib_element_list = original_cib.getElementsByTagName("cib")
-+
-+        if len(cib_element_list) != 1:
-+            raise unable_to_diff("there is not exactly one 'cib' element")
-+
-+        crm_feature_set = cib_element_list[0].getAttribute("crm_feature_set")
-+        if not crm_feature_set:
-+            raise unable_to_diff(
-+                "the 'cib' element is missing 'crm_feature_set' value"
-+            )
-+
-+        match = re.match(VERSION_FORMAT, crm_feature_set)
-+        if not match:
-+            raise unable_to_diff(
-+                "the attribute 'crm_feature_set' of the element 'cib' has an"
-+                " invalid value: '{0}'".format(crm_feature_set)
-+            )
-+        crm_feature_set_version = Version(
-+            int(match.group("major")),
-+            int(match.group("minor")),
-+            int(match.group("rev")) if match.group("rev") else None
-+        )
-+
-+        if crm_feature_set_version < MIN_FEATURE_SET_VERSION_FOR_DIFF:
-+            raise unable_to_diff(
-+                (
-+                    "the 'crm_feature_set' version is '{0}'"
-+                    " but at least version '{1}' is required"
-+                ).format(
-+                    crm_feature_set_version,
-+                    MIN_FEATURE_SET_VERSION_FOR_DIFF,
-+                )
-+            )
-+
-         runner = utils.cmd_runner()
-         command = [
-             "crm_diff", "--original", diff_against, "--new", filename,
-             "--no-version"
-         ]
--        patch, error, dummy_retval = runner.run(command)
-+        patch, stderr, dummy_retval = runner.run(command)
-         # dummy_retval == 1 means one of two things:
-         # a) an error has occured
-         # b) --original and --new differ
-         # therefore it's of no use to see if an error occurred
--        if error.strip():
--            utils.err("unable to diff the CIBs:\n" + error)
-+        if stderr.strip():
-+            utils.err("unable to diff the CIBs:\n" + stderr)
-         if not patch.strip():
-             print(
-                 "The new CIB is the same as the original CIB, nothing to push."
-@@ -1588,9 +1632,9 @@ def cluster_push(argv):
-             sys.exit(0)
- 
-         command = ["cibadmin", "--patch", "--xml-pipe"]
--        output, error, retval = runner.run(command, patch)
-+        output, stderr, retval = runner.run(command, patch)
-         if retval != 0:
--            utils.err("unable to push cib\n" + error + output)
-+            utils.err("unable to push cib\n" + stderr + output)
- 
-     else:
-         command = ["cibadmin", "--replace", "--xml-file", filename]
-diff --git a/pcs/lib/cib/test/test_tools.py b/pcs/lib/cib/test/test_tools.py
-index fab39ce7..2bdc7695 100644
---- a/pcs/lib/cib/test/test_tools.py
-+++ b/pcs/lib/cib/test/test_tools.py
-@@ -436,6 +436,21 @@ class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
-             )
-         )
- 
-+    def test_invalid_version_at_end(self):
-+        assert_raise_library_error(
-+            lambda: lib.get_pacemaker_version_by_which_cib_was_validated(
-+                etree.XML('<cib validate-with="pacemaker-1.2.3x"/>')
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
-+                {
-+                    "reason": "the attribute 'validate-with' of the element"
-+                        " 'cib' has an invalid value: 'pacemaker-1.2.3x'"
-+                }
-+            )
-+        )
-+
-     def test_no_revision(self):
-         self.assertEqual(
-             Version(1, 2),
-@@ -507,6 +522,20 @@ class getCibCrmFeatureSet(TestCase):
-             )
-         )
- 
-+    def test_invalid_version_at_end(self):
-+        assert_raise_library_error(
-+            lambda: lib.get_cib_crm_feature_set(
-+                etree.XML('<cib crm_feature_set="3.0.9x" />')
-+            ),
-+            fixture.error(
-+                report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
-+                reason=(
-+                    "the attribute 'crm_feature_set' of the element 'cib' has "
-+                    "an invalid value: '3.0.9x'"
-+                )
-+            )
-+        )
-+
- 
- find_group = partial(lib.find_element_by_tag_and_id, "group")
- class FindTagWithId(TestCase):
-diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
-index 2cff96f3..ab2a9df5 100644
---- a/pcs/lib/cib/tools.py
-+++ b/pcs/lib/cib/tools.py
-@@ -16,7 +16,7 @@ from pcs.lib.pacemaker.values import (
- )
- from pcs.lib.xml_tools import get_root, get_sub_element
- 
--_VERSION_FORMAT = r"(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"
-+VERSION_FORMAT = r"(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?$"
- 
- class IdProvider(object):
-     """
-@@ -289,7 +289,7 @@ def get_pacemaker_version_by_which_cib_was_validated(cib):
-     return _get_cib_version(
-         cib,
-         "validate-with",
--        re.compile(r"pacemaker-{0}".format(_VERSION_FORMAT))
-+        re.compile(r"pacemaker-{0}".format(VERSION_FORMAT))
-     )
- 
- def get_cib_crm_feature_set(cib, none_if_missing=False):
-@@ -303,6 +303,6 @@ def get_cib_crm_feature_set(cib, none_if_missing=False):
-     return _get_cib_version(
-         cib,
-         "crm_feature_set",
--        re.compile(_VERSION_FORMAT),
-+        re.compile(r"^{0}".format(VERSION_FORMAT)),
-         none_if_missing=none_if_missing
-     )
-diff --git a/pcs/lib/env.py b/pcs/lib/env.py
-index 86f67b64..3b2c06b6 100644
---- a/pcs/lib/env.py
-+++ b/pcs/lib/env.py
-@@ -57,6 +57,8 @@ from pcs.lib.pacemaker.values import get_valid_timeout_seconds
- from pcs.lib.tools import write_tmpfile
- from pcs.lib.xml_tools import etree_to_str
- 
-+MIN_FEATURE_SET_VERSION_FOR_DIFF = Version(3, 0, 9)
-+
- class LibraryEnvironment(object):
-     # pylint: disable=too-many-instance-attributes
- 
-@@ -211,10 +213,14 @@ class LibraryEnvironment(object):
-         # only check the version if a CIB has been loaded, otherwise the push
-         # fails anyway. By my testing it seems that only the source CIB's
-         # version matters.
--        if self.__loaded_cib_diff_source_feature_set < Version(3, 0, 9):
-+        if(
-+            self.__loaded_cib_diff_source_feature_set
-+            <
-+            MIN_FEATURE_SET_VERSION_FOR_DIFF
-+        ):
-             self.report_processor.process(
-                 reports.cib_push_forced_full_due_to_crm_feature_set(
--                    Version(3, 0, 9),
-+                    MIN_FEATURE_SET_VERSION_FOR_DIFF,
-                     self.__loaded_cib_diff_source_feature_set
-                 )
-             )
--- 
-2.13.6
-
diff --git a/SOURCES/bz1572886-01-fix-syntax-multiple-except.-as-parenthes.-tuple.patch b/SOURCES/bz1572886-01-fix-syntax-multiple-except.-as-parenthes.-tuple.patch
deleted file mode 100644
index 0d51b6c..0000000
--- a/SOURCES/bz1572886-01-fix-syntax-multiple-except.-as-parenthes.-tuple.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 19ad28a9be0344b8c0bcde7c711ecbf5062a95ea Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Mon, 6 Aug 2018 08:43:47 +0200
-Subject: [PATCH] fix syntax (multiple except. as parenthes. tuple)
-
-See https://docs.python.org/2/tutorial/errors.html#handling-exceptions
-
-...the parentheses around this tuple are required, because except
-ValueError, e: was the syntax used for what is normally written as
-except ValueError as e: in modern Python...
----
- pcs/cluster.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index a64fd5fd..b4d49d27 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1138,7 +1138,7 @@ class IsComponentStartSupported(object):
-                     data["pcsd_capabilities"]
-                 ):
-                     self.supported = True
--            except KeyError, ValueError:
-+            except (KeyError, ValueError):
-                 # not a valid json or 404 => not supported
-                 pass
- 
--- 
-2.13.6
-
diff --git a/SOURCES/bz1599758-01-fix-node-communicator-getter.patch b/SOURCES/bz1599758-01-fix-node-communicator-getter.patch
deleted file mode 100644
index e10c4b6..0000000
--- a/SOURCES/bz1599758-01-fix-node-communicator-getter.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From 47400e6389452291ac44294181789665f90061d1 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Mon, 6 Aug 2018 08:43:47 +0200
-Subject: [PATCH] fix node communicator getter
-
----
- pcs/lib/env.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/pcs/lib/env.py b/pcs/lib/env.py
-index 46eb9467..86f67b64 100644
---- a/pcs/lib/env.py
-+++ b/pcs/lib/env.py
-@@ -417,7 +417,7 @@ class LibraryEnvironment(object):
-         return NodeCommunicator(
-             self.logger,
-             self.report_processor,
--            self.__get_auth_tokens(),
-+            self.__get_token_file()["tokens"],
-             self.user_login,
-             self.user_groups,
-             self._request_timeout
--- 
-2.13.6
-
diff --git a/SOURCES/bz1600169-01-disable-usage-of-Expect-HTTP-header.patch b/SOURCES/bz1600169-01-disable-usage-of-Expect-HTTP-header.patch
deleted file mode 100644
index 23a0085..0000000
--- a/SOURCES/bz1600169-01-disable-usage-of-Expect-HTTP-header.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From 5df7d647e3cdb4b73e0bfd0a4fa83b78efd861f5 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Mon, 6 Aug 2018 08:43:47 +0200
-Subject: [PATCH] disable usage of 'Expect' HTTP header
-
----
- pcs/common/node_communicator.py | 1 +
- pcs/lib/external.py             | 1 +
- pcs/test/tools/custom_mock.py   | 4 ++++
- pcs/utils.py                    | 1 +
- pcsd/pcs.rb                     | 1 +
- 5 files changed, 8 insertions(+)
-
-diff --git a/pcs/common/node_communicator.py b/pcs/common/node_communicator.py
-index f7fe2419..d54e8566 100644
---- a/pcs/common/node_communicator.py
-+++ b/pcs/common/node_communicator.py
-@@ -532,6 +532,7 @@ def _create_request_handle(request, cookies, timeout):
-     handle.setopt(pycurl.SSL_VERIFYHOST, 0)
-     handle.setopt(pycurl.SSL_VERIFYPEER, 0)
-     handle.setopt(pycurl.NOSIGNAL, 1) # required for multi-threading
-+    handle.setopt(pycurl.HTTPHEADER, ["Expect: "])
-     if cookies:
-         handle.setopt(
-             pycurl.COOKIE, _dict_to_cookies(cookies).encode("utf-8")
-diff --git a/pcs/lib/external.py b/pcs/lib/external.py
-index fe17a864..e53a54ee 100644
---- a/pcs/lib/external.py
-+++ b/pcs/lib/external.py
-@@ -620,6 +620,7 @@ class NodeCommunicator(object):
-         handler.setopt(pycurl.SSL_VERIFYHOST, 0)
-         handler.setopt(pycurl.SSL_VERIFYPEER, 0)
-         handler.setopt(pycurl.NOSIGNAL, 1) # required for multi-threading
-+        handler.setopt(pycurl.HTTPHEADER, ["Expect: "])
-         if cookies:
-             handler.setopt(pycurl.COOKIE, ";".join(cookies).encode("utf-8"))
-         if data:
-diff --git a/pcs/test/tools/custom_mock.py b/pcs/test/tools/custom_mock.py
-index c05a5a45..849f83fb 100644
---- a/pcs/test/tools/custom_mock.py
-+++ b/pcs/test/tools/custom_mock.py
-@@ -75,6 +75,10 @@ class MockCurl(object):
-         self._opts = {}
- 
-     def setopt(self, opt, val):
-+        if isinstance(val, list):
-+           # in tests we use set operations (e.g. assertLessEqual) which
-+           # require hashable values
-+           val = tuple(val)
-         if val is None:
-             self.unsetopt(opt)
-         else:
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 347ad73e..8a989f52 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -498,6 +498,7 @@ def sendHTTPRequest(
-     handler.setopt(pycurl.TIMEOUT_MS, int(timeout * 1000))
-     handler.setopt(pycurl.SSL_VERIFYHOST, 0)
-     handler.setopt(pycurl.SSL_VERIFYPEER, 0)
-+    handler.setopt(pycurl.HTTPHEADER, ["Expect: "])
-     if cookies:
-         handler.setopt(pycurl.COOKIE, ";".join(cookies).encode("utf-8"))
-     if data:
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index 66919c0c..ad153f62 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -513,6 +513,7 @@ def send_request(
-     :httpget => (post ? 0 : 1),
-     :nosignal => 1, # required for multi-threading
-   })
-+  req.compose_header('Expect', '')
-   return_code = req.perform
-   if return_code == :ok
-     return req.response_code, req.response_body
--- 
-2.13.6
-
diff --git a/SOURCES/bz1623181-01-fix-instance-attr-setting-for-OSP-agents.patch b/SOURCES/bz1623181-01-fix-instance-attr-setting-for-OSP-agents.patch
deleted file mode 100644
index dfe488e..0000000
--- a/SOURCES/bz1623181-01-fix-instance-attr-setting-for-OSP-agents.patch
+++ /dev/null
@@ -1,207 +0,0 @@
-From b52c3ed9ec342b021357b915eaf6581f9f6a57d2 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Fri, 31 Aug 2018 10:12:18 +0200
-Subject: [PATCH] fix allowed instance attrs for some fence agents
-
-Fix is effective only for agents `fence_compute` and `fence_evacuate`
----
- pcs/lib/resource_agent.py           | 22 ++++++++++++-
- pcs/lib/test/test_resource_agent.py | 50 +++++++++++++++++++++++++++++
- pcs/test/test_stonith.py            | 63 +++++++++++++++++++++++++++++++++++++
- 3 files changed, 134 insertions(+), 1 deletion(-)
-
-diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
-index 34b18b9b..447cf1fe 100644
---- a/pcs/lib/resource_agent.py
-+++ b/pcs/lib/resource_agent.py
-@@ -469,6 +469,14 @@ class Agent(object):
-             "obsoletes": parameter_element.get("obsoletes", None),
-         })
- 
-+    def _get_always_allowed_parameters(self):
-+        """
-+        This method should be overriden in descendants.
-+
-+        Returns set of always allowed parameters of a agent.
-+        """
-+        return set()
-+
-     def validate_parameters(
-         self, parameters,
-         parameters_type="resource",
-@@ -518,13 +526,17 @@ class Agent(object):
-         agent_params = self.get_parameters()
- 
-         required_missing = []
-+        always_allowed = self._get_always_allowed_parameters()
-         for attr in agent_params:
-             if attr["required"] and attr["name"] not in parameters_values:
-                 required_missing.append(attr["name"])
- 
-         valid_attrs = [attr["name"] for attr in agent_params]
-         return (
--            [attr for attr in parameters_values if attr not in valid_attrs],
-+            [
-+                attr for attr in parameters_values
-+                if attr not in valid_attrs and attr not in always_allowed
-+            ],
-             required_missing
-         )
- 
-@@ -858,6 +870,14 @@ class StonithAgent(CrmAgent):
-             self._get_stonithd_metadata().get_parameters()
-         )
- 
-+    def _get_always_allowed_parameters(self):
-+        if self.get_name() in ("fence_compute", "fence_evacuate"):
-+            return set([
-+                "project-domain", "project_domain", "user-domain",
-+                "user_domain", "compute-domain", "compute_domain",
-+            ])
-+        return set()
-+
-     def validate_parameters(
-         self, parameters,
-         parameters_type="stonith",
-diff --git a/pcs/lib/test/test_resource_agent.py b/pcs/lib/test/test_resource_agent.py
-index 4ec94e26..2396bf30 100644
---- a/pcs/lib/test/test_resource_agent.py
-+++ b/pcs/lib/test/test_resource_agent.py
-@@ -1275,6 +1275,23 @@ class AgentMetadataValidateParametersValuesTest(TestCase):
-             (["obsoletes"], ["deprecated"])
-         )
- 
-+    @patch_agent_object(
-+        "_get_always_allowed_parameters",
-+        lambda self: set(["always_allowed", "another-one", "last_one"])
-+    )
-+    def test_always_allowed(self, mock_metadata):
-+        mock_metadata.return_value = self.metadata
-+        self.assertEqual(
-+            self.agent.validate_parameters_values({
-+                "another_required_param": "value1",
-+                "required_param": "value2",
-+                "test_param": "value3",
-+                "always_allowed": "value4",
-+                "another-one": "value5",
-+            }),
-+            ([], [])
-+        )
-+
- 
- class AgentMetadataValidateParameters(TestCase):
-     def setUp(self):
-@@ -2175,3 +2192,36 @@ class AbsentResourceAgentTest(TestCase):
-         self.assertEqual(([], []), absent.validate_parameters_values({
-             "whatever": "anything"
-         }))
-+
-+
-+class StonithAgentAlwaysAllowedParametersTest(TestCase):
-+    def setUp(self):
-+        self.runner = mock.MagicMock(spec_set=CommandRunner)
-+        self.always_allowed = set([
-+            "project-domain", "project_domain", "user-domain", "user_domain",
-+            "compute-domain", "compute_domain",
-+        ])
-+
-+    def test_fence_compute(self):
-+        self.assertEquals(
-+            self.always_allowed,
-+            lib_ra.StonithAgent(
-+                self.runner, "fence_compute"
-+            )._get_always_allowed_parameters()
-+        )
-+
-+    def test_fence_evacuate(self):
-+        self.assertEquals(
-+            self.always_allowed,
-+            lib_ra.StonithAgent(
-+                self.runner, "fence_evacuate"
-+            )._get_always_allowed_parameters()
-+        )
-+
-+    def test_some_other_agent(self):
-+        self.assertEquals(
-+            set(),
-+            lib_ra.StonithAgent(
-+                self.runner, "fence_dummy"
-+            )._get_always_allowed_parameters()
-+        )
-diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
-index cdb2ecee..ff981acc 100644
---- a/pcs/test/test_stonith.py
-+++ b/pcs/test/test_stonith.py
-@@ -469,6 +469,69 @@ class StonithTest(TestCase, AssertPcsMixin):
-             )
-         )
- 
-+    def test_stonith_compute_evacuate_always_allowed_parameters(self):
-+        self.assert_pcs_success(
-+            "stonith create test1 fence_compute auth_url=test1 project_domain=val1 project-domain=val2 user_domain=val3 user-domain=val4 compute_domain=val5 compute-domain=val6",
-+        )
-+        self.assert_pcs_success(
-+            "stonith show --full",
-+            outdent(
-+                """\
-+                 Resource: test1 (class=stonith type=fence_compute)
-+                  Attributes: auth_url=test1 compute-domain=val6 compute_domain=val5 project-domain=val2 project_domain=val1 user-domain=val4 user_domain=val3
-+                  Operations: monitor interval=60s (test1-monitor-interval-60s)
-+                """
-+            )
-+        )
-+        self.assert_pcs_success(
-+            "stonith create test2 fence_evacuate auth_url=test2 project_domain=val0 project-domain=val1 user_domain=val2 user-domain=val3 compute_domain=val4 compute-domain=val5",
-+        )
-+        self.assert_pcs_success(
-+            "stonith show --full",
-+            outdent(
-+                """\
-+                 Resource: test1 (class=stonith type=fence_compute)
-+                  Attributes: auth_url=test1 compute-domain=val6 compute_domain=val5 project-domain=val2 project_domain=val1 user-domain=val4 user_domain=val3
-+                  Operations: monitor interval=60s (test1-monitor-interval-60s)
-+                 Resource: test2 (class=stonith type=fence_evacuate)
-+                  Attributes: auth_url=test2 compute-domain=val5 compute_domain=val4 project-domain=val1 project_domain=val0 user-domain=val3 user_domain=val2
-+                  Operations: monitor interval=60s (test2-monitor-interval-60s)
-+                """
-+            )
-+        )
-+        self.assert_pcs_success(
-+            "stonith update test1 auth_url=new0 project_domain=new1 project-domain=new2 user_domain=new3 user-domain=new4 compute_domain=new5 compute-domain=new6",
-+        )
-+        self.assert_pcs_success(
-+            "stonith show --full",
-+            outdent(
-+                """\
-+                 Resource: test1 (class=stonith type=fence_compute)
-+                  Attributes: auth_url=new0 compute-domain=new6 compute_domain=new5 project-domain=new2 project_domain=new1 user-domain=new4 user_domain=new3
-+                  Operations: monitor interval=60s (test1-monitor-interval-60s)
-+                 Resource: test2 (class=stonith type=fence_evacuate)
-+                  Attributes: auth_url=test2 compute-domain=val5 compute_domain=val4 project-domain=val1 project_domain=val0 user-domain=val3 user_domain=val2
-+                  Operations: monitor interval=60s (test2-monitor-interval-60s)
-+                """
-+            )
-+        )
-+        self.assert_pcs_success(
-+            "stonith update test2 auth_url=new1 project_domain=new2 project-domain=new3 user_domain=new4 user-domain=new5 compute_domain=new6 compute-domain=new7",
-+        )
-+        self.assert_pcs_success(
-+            "stonith show --full",
-+            outdent(
-+                """\
-+                 Resource: test1 (class=stonith type=fence_compute)
-+                  Attributes: auth_url=new0 compute-domain=new6 compute_domain=new5 project-domain=new2 project_domain=new1 user-domain=new4 user_domain=new3
-+                  Operations: monitor interval=60s (test1-monitor-interval-60s)
-+                 Resource: test2 (class=stonith type=fence_evacuate)
-+                  Attributes: auth_url=new1 compute-domain=new7 compute_domain=new6 project-domain=new3 project_domain=new2 user-domain=new5 user_domain=new4
-+                  Operations: monitor interval=60s (test2-monitor-interval-60s)
-+                """
-+            )
-+        )
-+
-     def testStonithFenceConfirm(self):
-         output, returnVal = pcs(temp_cib, "stonith fence blah blah")
-         assert returnVal == 1
--- 
-2.13.6
-
diff --git a/SOURCES/cache-tokens-read-from-pcsd.patch b/SOURCES/cache-tokens-read-from-pcsd.patch
deleted file mode 100644
index 3290bca..0000000
--- a/SOURCES/cache-tokens-read-from-pcsd.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From 7bfa0658bd7e6ffa526ed965fb4a9680414320e5 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 19 Feb 2019 15:40:05 +0100
-Subject: [PATCH 1/3] cache tokens read from pcsd
-
----
- pcs/cluster.py | 4 ++++
- pcs/status.py  | 1 +
- pcs/utils.py   | 1 +
- 3 files changed, 6 insertions(+)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 7d13e11e..8cb27c82 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1167,6 +1167,7 @@ def start_cluster_nodes(nodes):
-     )
-     was_error = False
- 
-+    utils.read_token_file() # cache node tokens
-     task_list = [
-         IsComponentStartSupported(node) for node in nodes
-     ]
-@@ -1278,6 +1279,7 @@ def wait_for_nodes_started(node_list, timeout=None):
-         else:
-             print(output)
-     else:
-+        utils.read_token_file() # cache node tokens
-         node_errors = parallel_for_nodes(
-             wait_for_remote_node_started, node_list, stop_at, interval
-         )
-@@ -1296,6 +1298,7 @@ def stop_cluster_nodes(nodes):
-             % "', '".join(unknown_nodes)
-         )
- 
-+    utils.read_token_file() # cache node tokens
-     stopping_all = set(nodes) >= set(all_nodes)
-     if "--force" not in utils.pcs_options and not stopping_all:
-         error_list = []
-@@ -1406,6 +1409,7 @@ def destroy_cluster(argv, keep_going=False):
-     if len(argv) > 0:
-         # stop pacemaker and resources while cluster is still quorate
-         nodes = argv
-+        utils.read_token_file() # cache node tokens
-         node_errors = parallel_for_nodes(
-             utils.repeat_if_timeout(utils.stopPacemaker),
-             nodes,
-diff --git a/pcs/status.py b/pcs/status.py
-index 69cea716..647faed8 100644
---- a/pcs/status.py
-+++ b/pcs/status.py
-@@ -401,6 +401,7 @@ def check_nodes(node_list, prefix=""):
-         ))
-         status_list.append(returncode)
- 
-+    utils.read_token_file() # cache node tokens
-     utils.run_parallel(
-         utils.create_task_list(report, utils.checkAuthorization, node_list)
-     )
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 343a611b..839e931f 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -245,6 +245,7 @@ def remove_uid_gid_file(uid,gid):
- def readTokens():
-     return read_token_file()["tokens"]
- 
-+@simple_cache
- def read_token_file():
-     data = {
-         "tokens": {},
--- 
-2.17.0
-
diff --git a/SOURCES/change-cman-to-rhel6-in-messages.patch b/SOURCES/change-cman-to-rhel6-in-messages.patch
index 928ca1b..3d6024c 100644
--- a/SOURCES/change-cman-to-rhel6-in-messages.patch
+++ b/SOURCES/change-cman-to-rhel6-in-messages.patch
@@ -1,7 +1,7 @@
-From 584e0e4d16e7e811aaac24f0db11bf59c909b895 Mon Sep 17 00:00:00 2001
+From 2f1dd5e33e00cd36f47ca91ed21a3071f0ef0c6e Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Mon, 23 May 2016 17:00:13 +0200
-Subject: [PATCH 4/5] change cman to rhel6 in messages
+Subject: [PATCH] change cman to rhel6 in messages
 
 ---
  pcs/cli/common/console_report.py |  8 ++++----
@@ -17,10 +17,10 @@ Subject: [PATCH 4/5] change cman to rhel6 in messages
  10 files changed, 48 insertions(+), 48 deletions(-)
 
 diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
-index 06ea8f72..973b2cbb 100644
+index 4e0ae436..945b83f6 100644
 --- a/pcs/cli/common/console_report.py
 +++ b/pcs/cli/common/console_report.py
-@@ -676,7 +676,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
+@@ -698,7 +698,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
      ,
  
      codes.CMAN_UNSUPPORTED_COMMAND:
@@ -29,7 +29,7 @@ index 06ea8f72..973b2cbb 100644
      ,
  
      codes.ID_ALREADY_EXISTS: lambda info:
-@@ -932,7 +932,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
+@@ -958,7 +958,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
      ,
  
      codes.IGNORED_CMAN_UNSUPPORTED_OPTION: lambda info:
@@ -38,7 +38,7 @@ index 06ea8f72..973b2cbb 100644
          .format(**info)
      ,
  
-@@ -941,12 +941,12 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
+@@ -967,12 +967,12 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
      ,
  
      codes.CMAN_UDPU_RESTART_REQUIRED: (
@@ -54,10 +54,10 @@ index 06ea8f72..973b2cbb 100644
      ),
  
 diff --git a/pcs/cluster.py b/pcs/cluster.py
-index c2af8a8f..7d13e11e 100644
+index 42e94a94..d54d8fb9 100644
 --- a/pcs/cluster.py
 +++ b/pcs/cluster.py
-@@ -2105,7 +2105,7 @@ def node_add(lib_env, node0, node1, modifiers):
+@@ -2111,7 +2111,7 @@ def node_add(lib_env, node0, node1, modifiers):
      else:
          utils.err("Unable to update any nodes")
      if utils.is_cman_with_udpu_transport():
@@ -66,7 +66,7 @@ index c2af8a8f..7d13e11e 100644
              + "cluster restart is required to apply node addition")
      if wait:
          print()
-@@ -2181,7 +2181,7 @@ def node_remove(lib_env, node0, modifiers):
+@@ -2187,7 +2187,7 @@ def node_remove(lib_env, node0, modifiers):
      output, retval = utils.reloadCorosync()
      output, retval = utils.run(["crm_node", "--force", "-R", node0])
      if utils.is_cman_with_udpu_transport():
@@ -75,7 +75,7 @@ index c2af8a8f..7d13e11e 100644
              + "cluster restart is required to apply node removal")
  
  def cluster_localnode(argv):
-@@ -2349,7 +2349,7 @@ def cluster_uidgid(argv, silent_list = False):
+@@ -2355,7 +2355,7 @@ def cluster_uidgid(argv, silent_list = False):
  
  def cluster_get_corosync_conf(argv):
      if utils.is_rhel6():
@@ -85,10 +85,10 @@ index c2af8a8f..7d13e11e 100644
      if len(argv) > 1:
          usage.cluster()
 diff --git a/pcs/config.py b/pcs/config.py
-index 11ddb901..747d1b24 100644
+index 0afcd85d..8f31e79a 100644
 --- a/pcs/config.py
 +++ b/pcs/config.py
-@@ -613,7 +613,7 @@ def config_checkpoint_restore(argv):
+@@ -740,7 +740,7 @@ def config_checkpoint_restore(argv):
  
  def config_import_cman(argv):
      if no_clufter:
@@ -98,10 +98,10 @@ index 11ddb901..747d1b24 100644
      cluster_conf = settings.cluster_conf_file
      dry_run_output = None
 diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 8b1ed840..3bf6d553 100644
+index 6440bb70..0ec4359a 100644
 --- a/pcs/pcs.8
 +++ b/pcs/pcs.8
-@@ -209,13 +209,13 @@ auth [<node>[:<port>]] [...] [\fB\-u\fR <username>] [\fB\-p\fR <password>] [\fB\
+@@ -215,13 +215,13 @@ auth [<node>[:<port>]] [...] [\fB\-u\fR <username>] [\fB\-p\fR <password>] [\fB\
  Authenticate pcs to pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re\-authentication to occur.
  .TP
  setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]] [\fB\-\-wait_for_all\fR=<0|1>] [\fB\-\-auto_tie_breaker\fR=<0|1>] [\fB\-\-last_man_standing\fR=<0|1> [\fB\-\-last_man_standing_window\fR=<time in ms>]] [\fB\-\-ipv6\fR] [\fB\-\-token\fR <timeout>] [\fB\-\-token_coefficient\fR <timeout>] [\fB\-\-join\fR <timeout>] [\fB\-\-consensus\fR <timeout>] [\fB\-\-netmtu\fR <size>] [\fB\-\-miss_count_const\fR <count>] [\fB\-\-fail_recv_const\fR <failures>] [\fB\-\-encryption\fR 0|1]
@@ -119,7 +119,7 @@ index 8b1ed840..3bf6d553 100644
  
  \fB\-\-join\fR <timeout> sets time in milliseconds to wait for join messages (default 50 ms)
  
-@@ -762,10 +762,10 @@ checkpoint restore <checkpoint_number>
+@@ -778,10 +778,10 @@ checkpoint restore <checkpoint_number>
  Restore cluster configuration to specified checkpoint.
  .TP
  import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>]
@@ -146,10 +146,10 @@ index 51f1b545..d384e9de 100644
      output, retval = utils.run(
          ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
 diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
-index 1dc055b9..4d9a028b 100644
+index a65da7f9..98476190 100644
 --- a/pcs/test/test_cluster.py
 +++ b/pcs/test/test_cluster.py
-@@ -1335,7 +1335,7 @@ logging {
+@@ -1352,7 +1352,7 @@ logging {
              .format(cluster_conf_tmp)
          )
          ac(output, """\
@@ -158,7 +158,7 @@ index 1dc055b9..4d9a028b 100644
  """)
          self.assertEqual(returnVal, 0)
          with open(cluster_conf_tmp) as f:
-@@ -1427,7 +1427,7 @@ logging {
+@@ -1444,7 +1444,7 @@ logging {
              .format(cluster_conf_tmp)
          )
          ac(output, """\
@@ -167,7 +167,7 @@ index 1dc055b9..4d9a028b 100644
  """)
          self.assertEqual(returnVal, 0)
          with open(cluster_conf_tmp) as f:
-@@ -2021,7 +2021,7 @@ logging {
+@@ -2038,7 +2038,7 @@ logging {
          )
          ac(output, """\
  Error: 'blah' is not a valid RRP mode value, use active, passive, use --force to override
@@ -176,7 +176,7 @@ index 1dc055b9..4d9a028b 100644
  """)
          self.assertEqual(returnVal, 1)
  
-@@ -2300,7 +2300,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
+@@ -2317,7 +2317,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
          )
          ac(output, """\
  Error: using a RRP mode of 'active' is not supported or tested, use --force to override
@@ -185,7 +185,7 @@ index 1dc055b9..4d9a028b 100644
  """)
          self.assertEqual(returnVal, 1)
  
-@@ -2310,7 +2310,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
+@@ -2327,7 +2327,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
              .format(cluster_conf_tmp)
          )
          ac(output, """\
@@ -194,7 +194,7 @@ index 1dc055b9..4d9a028b 100644
  Warning: using a RRP mode of 'active' is not supported or tested
  """)
          self.assertEqual(returnVal, 0)
-@@ -2379,7 +2379,7 @@ Error: if one node is configured for RRP, all nodes must be configured for RRP
+@@ -2396,7 +2396,7 @@ Error: if one node is configured for RRP, all nodes must be configured for RRP
          )
          ac(output, """\
  Error: --addr0 and --addr1 can only be used with --transport=udp
@@ -203,7 +203,7 @@ index 1dc055b9..4d9a028b 100644
  """)
          self.assertEqual(returnVal, 1)
  
-@@ -2469,7 +2469,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
+@@ -2486,7 +2486,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
              .format(cluster_conf_tmp)
          )
          ac(output, """\
@@ -212,7 +212,7 @@ index 1dc055b9..4d9a028b 100644
  """)
          self.assertEqual(returnVal, 0)
          with open(cluster_conf_tmp) as f:
-@@ -2484,7 +2484,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
+@@ -2501,7 +2501,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
              .format(cluster_conf_tmp)
          )
          ac(output, """\
@@ -221,7 +221,7 @@ index 1dc055b9..4d9a028b 100644
  """)
          self.assertEqual(returnVal, 0)
          with open(cluster_conf_tmp) as f:
-@@ -2502,10 +2502,10 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
+@@ -2519,10 +2519,10 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
              .format(cluster_conf_tmp)
          )
          ac(output, """\
@@ -236,7 +236,7 @@ index 1dc055b9..4d9a028b 100644
  """)
          self.assertEqual(returnVal, 0)
          with open(cluster_conf_tmp) as f:
-@@ -2602,7 +2602,7 @@ logging {
+@@ -2619,7 +2619,7 @@ logging {
              .format(cluster_conf_tmp)
          )
          ac(output, """\
@@ -246,10 +246,10 @@ index 1dc055b9..4d9a028b 100644
          self.assertEqual(returnVal, 0)
          with open(cluster_conf_tmp) as f:
 diff --git a/pcs/usage.py b/pcs/usage.py
-index 1f93213d..fecb3b4d 100644
+index 7614a85a..80ba9168 100644
 --- a/pcs/usage.py
 +++ b/pcs/usage.py
-@@ -606,16 +606,16 @@ Commands:
+@@ -623,16 +623,16 @@ Commands:
          --wait will wait up to 'n' seconds for the nodes to start,
          --enable will enable corosync and pacemaker on node startup,
          --transport allows specification of corosync transport (default: udpu;
@@ -269,7 +269,7 @@ index 1f93213d..fecb3b4d 100644
          --token <timeout> sets time in milliseconds until a token loss is
              declared after not receiving a token (default 1000 ms;
              10000 ms for CMAN clusters)
-@@ -623,7 +623,7 @@ Commands:
+@@ -640,7 +640,7 @@ Commands:
              clusters with at least 3 nodes as a coefficient for real token
              timeout calculation
              (token + (number_of_nodes - 2) * token_coefficient) (default 650 ms)
@@ -278,7 +278,7 @@ index 1f93213d..fecb3b4d 100644
          --join <timeout> sets time in milliseconds to wait for join messages
              (default 50 ms)
          --consensus <timeout> sets time in milliseconds to wait for consensus
-@@ -1394,9 +1394,9 @@ Commands:
+@@ -1416,9 +1416,9 @@ Commands:
  
      import-cman output=<filename> [input=<filename>] [--interactive]
              [output-format=corosync.conf|cluster.conf] [dist=<dist>]
@@ -291,7 +291,7 @@ index 1f93213d..fecb3b4d 100644
          command can be used.  If --interactive is specified you will be
          prompted to solve incompatibilities manually.  If no input is specified
          /etc/cluster/cluster.conf will be used.  You can force to create output
-@@ -1410,9 +1410,9 @@ Commands:
+@@ -1432,9 +1432,9 @@ Commands:
  
      import-cman output=<filename> [input=<filename>] [--interactive]
              output-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
@@ -396,5 +396,5 @@ index caf58842..0476d9c5 100644
        {{/if}}
      </table>
 -- 
-2.13.6
+2.17.0
 
diff --git a/SOURCES/fix-crashes-in-pcs-cluster-auth.patch b/SOURCES/fix-crashes-in-pcs-cluster-auth.patch
index 7e4d47c..8bc3c7f 100644
--- a/SOURCES/fix-crashes-in-pcs-cluster-auth.patch
+++ b/SOURCES/fix-crashes-in-pcs-cluster-auth.patch
@@ -1,6 +1,6 @@
-From 949efde392e33171e411e86799e530f330b5a791 Mon Sep 17 00:00:00 2001
+From 5bf0361c6452d2e2171d94363a5a6084268b7335 Mon Sep 17 00:00:00 2001
 From: Tomas Jelinek <tojeline@redhat.com>
-Date: Fri, 21 Jun 2019 12:38:07 +0200
+Date: Mon, 17 Jun 2019 13:49:30 +0200
 Subject: [PATCH] fix crashes in 'pcs cluster auth'
 
 ---
diff --git a/SOURCES/fix-logging-in-pcsd.patch b/SOURCES/fix-logging-in-pcsd.patch
deleted file mode 100644
index c4b76a3..0000000
--- a/SOURCES/fix-logging-in-pcsd.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 0673479bb25bc000808465eae23d049804dc1415 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Thu, 21 Feb 2019 12:26:33 +0100
-Subject: [PATCH 2/3] fix logging in pcsd
-
-Fix a bug causing most of the messages not being logged. Introduced
-in pcs-0.9.165 in commit 04d7e6a99beca700a2072406db671ef33d85c180.
----
- pcsd/bootstrap.rb | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
-index 0246c48b..035574b5 100644
---- a/pcsd/bootstrap.rb
-+++ b/pcsd/bootstrap.rb
-@@ -67,11 +67,15 @@ if not defined? $cur_node_name
- end
- 
- def configure_logger(log_device)
-+  # Open the file ourselves so we can set its permissions for the case the file
-+  # does not exist. Logger is able to create and open the file for us but it
-+  # does not allow specifying file permissions.
-   if log_device.is_a?(String)
-     # File.open(path, mode, options)
-     # File.open(path, mode, perm, options)
-     # In order to set permissions, the method must be called with 4 arguments.
-     log_device = File.open(log_device, "a+", 0600, {})
-+    log_device.sync = true
-   end
-   logger = Logger.new(log_device)
-   if ENV['PCSD_DEBUG'] and ENV['PCSD_DEBUG'].downcase == "true" then
--- 
-2.17.0
-
diff --git a/SOURCES/lower-load-created-by-config-files-syncing-in-pcsd.patch b/SOURCES/lower-load-created-by-config-files-syncing-in-pcsd.patch
deleted file mode 100644
index 02d7a75..0000000
--- a/SOURCES/lower-load-created-by-config-files-syncing-in-pcsd.patch
+++ /dev/null
@@ -1,490 +0,0 @@
-From 016aa2bb9553a9a64ec6645db40ef95dd8de7041 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 19 Feb 2019 17:53:17 +0100
-Subject: [PATCH 3/3] lower load created by config files syncing in pcsd
-
-* make the sync less frequent (10 minutes instead of 1 minute) by
-  default
-* if previous attempt for syncing was unable to connect to other nodes,
-  try again sooner (in 1 minute by default)
----
- pcsd/cfgsync.rb           |  60 ++++++++++++++++----
- pcsd/pcsd.8               |   9 ++-
- pcsd/pcsd.rb              |  24 ++++++--
- pcsd/test/test_cfgsync.rb | 114 ++++++++++++++++++++++++++++++--------
- 4 files changed, 167 insertions(+), 40 deletions(-)
-
-diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
-index 9acd8d0f..44e6d853 100644
---- a/pcsd/cfgsync.rb
-+++ b/pcsd/cfgsync.rb
-@@ -313,8 +313,11 @@ module Cfgsync
- 
- 
-   class ConfigSyncControl
--    @thread_interval_default = 60
--    @thread_interval_minimum = 20
-+    # intervals in seconds
-+    @thread_interval_default = 600
-+    @thread_interval_minimum = 60
-+    @thread_interval_previous_not_connected_default = 60
-+    @thread_interval_previous_not_connected_minimum = 20
-     @file_backup_count_default = 50
-     @file_backup_count_minimum = 0
- 
-@@ -349,6 +352,20 @@ module Cfgsync
-       return self.save(data)
-     end
- 
-+    def self.sync_thread_interval_previous_not_connected()
-+      return self.get_integer_value(
-+        self.load()['thread_interval_previous_not_connected'],
-+        @thread_interval_previous_not_connected_default,
-+        @thread_interval_previous_not_connected_minimum
-+      )
-+    end
-+
-+    def self.sync_thread_interval_previous_not_connected=(seconds)
-+      data = self.load()
-+      data['thread_interval_previous_not_connected'] = seconds
-+      return self.save(data)
-+    end
-+
-     def self.sync_thread_pause(semaphore_cfgsync, seconds=300)
-       # wait for the thread to finish current run and disable it
-       semaphore_cfgsync.synchronize {
-@@ -585,14 +602,17 @@ module Cfgsync
-     end
- 
-     def fetch_all()
--      return self.filter_configs_cluster(
--        self.get_configs_cluster(@nodes, @cluster_name),
--        @config_classes
-+      node_configs, node_connected = self.get_configs_cluster(
-+        @nodes, @cluster_name
-       )
-+      filtered_configs = self.filter_configs_cluster(
-+        node_configs, @config_classes
-+      )
-+      return filtered_configs, node_connected
-     end
- 
-     def fetch()
--      configs_cluster = self.fetch_all()
-+      configs_cluster, node_connected = self.fetch_all()
- 
-       newest_configs_cluster = {}
-       configs_cluster.each { |name, cfgs|
-@@ -613,7 +633,7 @@ module Cfgsync
-           end
-         end
-       }
--      return to_update_locally, to_update_in_cluster
-+      return to_update_locally, to_update_in_cluster, node_connected
-     end
- 
-     protected
-@@ -630,12 +650,15 @@ module Cfgsync
-       $logger.debug 'Fetching configs from the cluster'
-       threads = []
-       node_configs = {}
-+      connected_to = {}
-       nodes.each { |node|
-         threads << Thread.new {
-           code, out = send_request_with_token(
-             @auth_user, node, 'get_configs', false, data
-           )
-+          connected_to[node] = false
-           if 200 == code
-+            connected_to[node] = true
-             begin
-               parsed = JSON::parse(out)
-               if 'ok' == parsed['status'] and cluster_name == parsed['cluster_name']
-@@ -647,7 +670,24 @@ module Cfgsync
-         }
-       }
-       threads.each { |t| t.join }
--      return node_configs
-+
-+      node_connected = false
-+      if connected_to.empty?()
-+        node_connected = true # no nodes to connect to => no connection errors
-+      else
-+        connected_count = 0
-+        connected_to.each { |node, connected|
-+          if connected
-+            connected_count += 1
-+          end
-+        }
-+        # If we only connected to one node, consider it a fail and continue as
-+        # if we could not connect anywhere. The one node is probably the local
-+        # node.
-+        node_connected = connected_count > 1
-+      end
-+
-+      return node_configs, node_connected
-     end
- 
-     def filter_configs_cluster(node_configs, wanted_configs_classes)
-@@ -752,7 +792,7 @@ module Cfgsync
-           fetcher = ConfigFetcher.new(
-             PCSAuth.getSuperuserAuth(), [config.class], nodes, cluster_name
-           )
--          cfgs_to_save, _ = fetcher.fetch()
-+          cfgs_to_save, _, _ = fetcher.fetch()
-           cfgs_to_save.each { |cfg_to_save|
-             cfg_to_save.save() if cfg_to_save.class == config.class
-           }
-@@ -812,7 +852,7 @@ module Cfgsync
-     fetcher = ConfigFetcher.new(
-       PCSAuth.getSuperuserAuth(), [config_new.class], nodes, cluster_name
-     )
--    fetched_tokens = fetcher.fetch_all()[config_new.class.name]
-+    fetched_tokens, _ = fetcher.fetch_all()[config_new.class.name]
-     config_new = Cfgsync::merge_tokens_files(
-       config, fetched_tokens, new_tokens, new_ports
-     )
-diff --git a/pcsd/pcsd.8 b/pcsd/pcsd.8
-index e58b7ff6..bd405043 100644
---- a/pcsd/pcsd.8
-+++ b/pcsd/pcsd.8
-@@ -63,9 +63,11 @@ Example:
- .br
-   "thread_disabled": false,
- .br
--  "thread_interval": 60,
-+  "thread_interval": 600,
- .br
--  "thread_paused_until": 1487780453,
-+  "thread_interval_previous_not_connected": 60,
-+.br
-+  "thread_paused_until": 1487780453
- .br
- }
- 
-@@ -79,6 +81,9 @@ Set this to \fBtrue\fR to completely disable the synchronization.
- .B thread_interval
- How often in seconds should pcsd ask other nodes if the synchronized files have changed.
- .TP
-+.B thread_interval_previous_not_connected
-+How often in seconds should pcsd ask other nodes if the synchronized files have changed if during the previous attempt pcsd was unable to connect to at least two nodes.
-+.TP
- .B thread_paused_until
- Disable the synchronization until the set unix timestamp.
- 
-diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
-index 9f9bd091..6e5e27e0 100644
---- a/pcsd/pcsd.rb
-+++ b/pcsd/pcsd.rb
-@@ -132,14 +132,15 @@ set :run, false
- 
- $thread_cfgsync = Thread.new {
-   while true
-+    node_connected = true
-     $semaphore_cfgsync.synchronize {
--      $logger.debug('Config files sync thread started')
-       if Cfgsync::ConfigSyncControl.sync_thread_allowed?()
-+        $logger.info('Config files sync thread started')
-         begin
-           # do not sync if this host is not in a cluster
-           cluster_name = get_cluster_name()
-           cluster_nodes = get_corosync_nodes()
--          if cluster_name and !cluster_name.empty?() and cluster_nodes and !cluster_nodes.empty?
-+          if cluster_name and !cluster_name.empty?() and cluster_nodes and cluster_nodes.count > 1
-             $logger.debug('Config files sync thread fetching')
-             fetcher = Cfgsync::ConfigFetcher.new(
-               PCSAuth.getSuperuserAuth(),
-@@ -147,18 +148,31 @@ $thread_cfgsync = Thread.new {
-               cluster_nodes,
-               cluster_name
-             )
--            cfgs_to_save, _ = fetcher.fetch()
-+            cfgs_to_save, _, node_connected = fetcher.fetch()
-             cfgs_to_save.each { |cfg_to_save|
-               cfg_to_save.save()
-             }
-+            $logger.info('Config files sync thread finished')
-+          else
-+            $logger.info(
-+              'Config files sync skipped, this host does not seem to be in ' +
-+              'a cluster of at least 2 nodes'
-+            )
-           end
-         rescue => e
-           $logger.warn("Config files sync thread exception: #{e}")
-         end
-+      else
-+        $logger.info('Config files sync is disabled or paused, skipping')
-       end
--      $logger.debug('Config files sync thread finished')
-     }
--    sleep(Cfgsync::ConfigSyncControl.sync_thread_interval())
-+    if node_connected
-+      sleep(Cfgsync::ConfigSyncControl.sync_thread_interval())
-+    else
-+      sleep(
-+        Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
-+      )
-+    end
-   end
- }
- 
-diff --git a/pcsd/test/test_cfgsync.rb b/pcsd/test/test_cfgsync.rb
-index 9b0317ce..b49c44d2 100644
---- a/pcsd/test/test_cfgsync.rb
-+++ b/pcsd/test/test_cfgsync.rb
-@@ -287,8 +287,10 @@ class TestConfigSyncControll < Test::Unit::TestCase
-     file = File.open(CFG_SYNC_CONTROL, 'w')
-     file.write(JSON.pretty_generate({}))
-     file.close()
--    @thread_interval_default = 60
--    @thread_interval_minimum = 20
-+    @thread_interval_default = 600
-+    @thread_interval_minimum = 60
-+    @thread_interval_previous_not_connected_default = 60
-+    @thread_interval_previous_not_connected_minimum = 20
-     @file_backup_count_default = 50
-     @file_backup_count_minimum = 0
-   end
-@@ -441,6 +443,65 @@ class TestConfigSyncControll < Test::Unit::TestCase
-     )
-   end
- 
-+  def test_interval_previous_not_connected()
-+    assert_equal(
-+      @thread_interval_previous_not_connected_default,
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
-+    )
-+
-+    interval = (
-+      @thread_interval_previous_not_connected_default +
-+      @thread_interval_previous_not_connected_minimum
-+    )
-+    assert(
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(
-+        interval
-+      )
-+    )
-+    assert_equal(
-+      interval,
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
-+    )
-+
-+    assert(
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(
-+        @thread_interval_previous_not_connected_minimum / 2
-+      )
-+    )
-+    assert_equal(
-+      @thread_interval_previous_not_connected_minimum,
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
-+    )
-+
-+    assert(
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(0)
-+    )
-+    assert_equal(
-+      @thread_interval_previous_not_connected_minimum,
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
-+    )
-+
-+    assert(
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(
-+        -100
-+      )
-+    )
-+    assert_equal(
-+      @thread_interval_previous_not_connected_minimum,
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
-+    )
-+
-+    assert(
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(
-+        'abcd'
-+      )
-+    )
-+    assert_equal(
-+      @thread_interval_previous_not_connected_default,
-+      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
-+    )
-+  end
-+
-   def test_file_backup_count()
-     assert_equal(
-       @file_backup_count_default,
-@@ -495,11 +556,12 @@ class TestConfigFetcher < Test::Unit::TestCase
-     end
- 
-     def get_configs_cluster(nodes, cluster_name)
--      return @configs_cluster
-+      return @configs_cluster, @node_connected
-     end
- 
--    def set_configs_cluster(configs)
-+    def set_configs_cluster(configs, node_connected=true)
-       @configs_cluster = configs
-+      @node_connected = node_connected
-       return self
-     end
- 
-@@ -569,31 +631,37 @@ class TestConfigFetcher < Test::Unit::TestCase
-     cfg_name = Cfgsync::ClusterConf.name
-     fetcher = ConfigFetcherMock.new({}, [Cfgsync::ClusterConf], nil, nil)
- 
-+    # unable to connect to any nodes
-+    fetcher.set_configs_local({cfg_name => cfg1})
-+
-+    fetcher.set_configs_cluster({}, false)
-+    assert_equal([[], [], false], fetcher.fetch())
-+
-     # local config is synced
-     fetcher.set_configs_local({cfg_name => cfg1})
- 
-     fetcher.set_configs_cluster({
-       'node1' => {'configs' => {cfg_name => cfg1}},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {'configs' => {cfg_name => cfg2}},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {'configs' => {cfg_name => cfg1}},
-       'node2' => {'configs' => {cfg_name => cfg2}},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {'configs' => {cfg_name => cfg1}},
-       'node2' => {'configs' => {cfg_name => cfg2}},
-       'node3' => {'configs' => {cfg_name => cfg2}},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
- 
-     # local config is older
-     fetcher.set_configs_local({cfg_name => cfg1})
-@@ -601,20 +669,20 @@ class TestConfigFetcher < Test::Unit::TestCase
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-     })
--    assert_equal([[cfg3], []], fetcher.fetch())
-+    assert_equal([[cfg3], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-       'node2' => {cfg_name => cfg4},
-     })
--    assert_equal([[cfg4], []], fetcher.fetch())
-+    assert_equal([[cfg4], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-       'node2' => {cfg_name => cfg4},
-       'node3' => {cfg_name => cfg3},
-     })
--    assert_equal([[cfg3], []], fetcher.fetch())
-+    assert_equal([[cfg3], [], true], fetcher.fetch())
- 
-     # local config is newer
-     fetcher.set_configs_local({cfg_name => cfg3})
-@@ -622,13 +690,13 @@ class TestConfigFetcher < Test::Unit::TestCase
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg1},
-     })
--    assert_equal([[], [cfg3]], fetcher.fetch())
-+    assert_equal([[], [cfg3], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg1},
-       'node2' => {cfg_name => cfg1},
-     })
--    assert_equal([[], [cfg3]], fetcher.fetch())
-+    assert_equal([[], [cfg3], true], fetcher.fetch())
- 
-     # local config is the same version
-     fetcher.set_configs_local({cfg_name => cfg3})
-@@ -636,32 +704,32 @@ class TestConfigFetcher < Test::Unit::TestCase
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg4},
-     })
--    assert_equal([[cfg4], []], fetcher.fetch())
-+    assert_equal([[cfg4], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-       'node2' => {cfg_name => cfg4},
-     })
--    assert_equal([[cfg4], []], fetcher.fetch())
-+    assert_equal([[cfg4], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-       'node2' => {cfg_name => cfg4},
-       'node3' => {cfg_name => cfg3},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-       'node2' => {cfg_name => cfg4},
-       'node3' => {cfg_name => cfg4},
-     })
--    assert_equal([[cfg4], []], fetcher.fetch())
-+    assert_equal([[cfg4], [], true], fetcher.fetch())
- 
-     # local config is the same version
-     fetcher.set_configs_local({cfg_name => cfg4})
-@@ -669,32 +737,32 @@ class TestConfigFetcher < Test::Unit::TestCase
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-     })
--    assert_equal([[cfg3], []], fetcher.fetch())
-+    assert_equal([[cfg3], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg4},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-       'node2' => {cfg_name => cfg4},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-       'node2' => {cfg_name => cfg4},
-       'node3' => {cfg_name => cfg3},
-     })
--    assert_equal([[cfg3], []], fetcher.fetch())
-+    assert_equal([[cfg3], [], true], fetcher.fetch())
- 
-     fetcher.set_configs_cluster({
-       'node1' => {cfg_name => cfg3},
-       'node2' => {cfg_name => cfg4},
-       'node3' => {cfg_name => cfg4},
-     })
--    assert_equal([[], []], fetcher.fetch())
-+    assert_equal([[], [], true], fetcher.fetch())
-   end
- end
- 
--- 
-2.17.0
-
diff --git a/SOURCES/replace-favicon.patch b/SOURCES/replace-favicon.patch
new file mode 100644
index 0000000..936615b
--- /dev/null
+++ b/SOURCES/replace-favicon.patch
@@ -0,0 +1,54 @@
+From 0a07e4408586a6760528b5be31e4c850f3882dd0 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Mon, 29 Apr 2019 13:38:40 +0200
+Subject: [PATCH] replace favicon
+
+---
+ pcsd/public/favicon.ico        | Bin 318 -> 0 bytes
+ pcsd/public/images/favicon.png | Bin 0 -> 134 bytes
+ pcsd/views/main.erb            |   1 +
+ 3 files changed, 1 insertion(+)
+ delete mode 100644 pcsd/public/favicon.ico
+ create mode 100644 pcsd/public/images/favicon.png
+
+diff --git a/pcsd/public/favicon.ico b/pcsd/public/favicon.ico
+deleted file mode 100644
+index cbb61249380dc73f5d98ebf47ad622f844a33bee..0000000000000000000000000000000000000000
+GIT binary patch
+literal 0
+HcmV?d00001
+
+literal 318
+zcmZQzU<5(|0RaXO&|qX>5ChRb3=&ZQVnzlQAc+bfsu>sx3tJd|{`|@C`t@sug$oxm
+z`1*!0oIih-fswHfh}Q!(-2l=_3|w5AK=vshJ_9!B|NsAB`W*<!fysC8Orf-{9f<Mp
+z!s8__@08xX3w^rwZt%sYE(|Pd@2(9rnO<IAw)U1m^^P-VPE_5>Inlj)_wJL+Pqb(5
+z*?p#SPy4K!dv<Tn+OczG#_1hdcUR8b(X;!^+`D&g@0xa|eeK=5H@8)9+jjTX-CGr>
+NXU)317VHQL2mq$QZ5jXo
+
+diff --git a/pcsd/public/images/favicon.png b/pcsd/public/images/favicon.png
+new file mode 100644
+index 0000000000000000000000000000000000000000..d335590f985d31491ad4dc713cb47907b1403e88
+GIT binary patch
+literal 134
+zcmeAS@N?(olHy`uVBq!ia0vp^0wB!93?!50ihlx9oB=)|u0R?HTvwivP}RQxWHFWm
+z`2{mLJiCzw<Y;@kIEHXUCnqFaNML9R<>66^ky7H(iQ!@5(cw`@x~e77IG<ZQnU|NB
+Zfnojwo}yRn8MPqeJYD@<);T3K0RZE%9!CHG
+
+literal 0
+HcmV?d00001
+
+diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
+index 000401c6..6476e0a4 100644
+--- a/pcsd/views/main.erb
++++ b/pcsd/views/main.erb
+@@ -1,6 +1,7 @@
+ <!DOCTYPE html>
+ <head>
+   <title>Pacemaker/Corosync Configuration</title>
++  <link rel="shortcut icon" type="image/png" href="/images/favicon.png">
+   <link rel="stylesheet" type="text/css" href="/css/style.css" />
+   <link rel="stylesheet" type="text/css" href="/css/overpass.css" />
+   <link rel="stylesheet" type="text/css" href="/css/liberation.css" />
+-- 
+2.20.1
+
diff --git a/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch b/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch
index 1fba6bb..1457776 100644
--- a/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch
+++ b/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch
@@ -1,7 +1,7 @@
-From 3be6c98efe92a057c7e2f93ca7d85beeda36dfaa Mon Sep 17 00:00:00 2001
+From 8ddcfc16477068c8c123f12cbe5444c52cc959ba Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Mon, 29 Aug 2016 18:16:41 +0200
-Subject: [PATCH 5/5] show only warning when crm_mon xml is invalid
+Subject: [PATCH] show only warning when crm_mon xml is invalid
 
 ---
  pcs/lib/pacemaker/state.py           | 13 ++++++++++---
@@ -82,5 +82,5 @@ index 13628f44..5ea20d98 100644
  class WorkWithClusterStatusNodesTest(TestBase):
      def fixture_node_string(self, **kwargs):
 -- 
-2.13.6
+2.17.0
 
diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec
index 0310524..bf69bd1 100644
--- a/SPECS/pcs.spec
+++ b/SPECS/pcs.spec
@@ -1,62 +1,67 @@
 Name: pcs
-Version: 0.9.165
-Release: 6%{?dist}.2
+Version: 0.9.167
+Release: 3%{?dist}
 License: GPLv2
 URL: https://github.com/ClusterLabs/pcs
 Group: System Environment/Base
 Summary: Pacemaker Configuration System
 #building only for architectures with pacemaker and corosync available
-ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %{arm}
+ExclusiveArch: i686 x86_64 s390x ppc64le aarch64
 
 %global pcs_snmp_pkg_name  pcs-snmp
 %global pyagentx_version   0.4.pcs.1
 %global bundled_lib_dir    pcs/bundled
 %global pyagentx_dir       %{bundled_lib_dir}/pyagentx
 
+%global version_rubygem_backports 3.12.0
+%global version_rubygem_multi_json 1.13.1
+%global version_rubygem_open4 1.3.4
+%global version_rubygem_orderedhash 0.0.6
+%global version_rubygem_rack_protection 1.5.5
+%global version_rubygem_rack_test 0.8.3
+%global version_rubygem_rack 1.6.11
+%global version_rubygem_rpam_ruby19 1.2.1
+%global version_rubygem_sinatra_contrib  1.4.7
+%global version_rubygem_sinatra  1.4.8
+%global version_rubygem_tilt  2.0.9
+%global version_rubygem_ethon  0.12.0
+%global version_rubygem_ffi  1.10.0
+
+%global pcs_libdir %{_prefix}/lib
+%global rubygem_bundle_dir pcsd/vendor/bundle/ruby
+%global rubygem_cache_dir pcsd/vendor/cache
 #part after last slash is recognized as filename in look-aside repository
 #desired name is achieved by trick with hash anchor
 Source0: %{url}/archive/%{version}.tar.gz#/%{name}-%{version}.tar.gz
 Source1: HAM-logo.png
 Source2: pcsd-bundle-config-1
 
-Source11: https://rubygems.org/downloads/backports-3.11.3.gem
-Source12: https://rubygems.org/downloads/multi_json-1.13.1.gem
-Source13: https://rubygems.org/downloads/open4-1.3.4.gem
-Source14: https://rubygems.org/downloads/orderedhash-0.0.6.gem
-Source15: https://rubygems.org/downloads/rack-protection-1.5.5.gem
-Source16: https://rubygems.org/downloads/rack-test-0.7.0.gem
-Source17: https://rubygems.org/downloads/rack-1.6.10.gem
-Source18: https://rubygems.org/downloads/rpam-ruby19-1.2.1.gem
-Source19: https://rubygems.org/downloads/sinatra-contrib-1.4.7.gem
-Source20: https://rubygems.org/downloads/sinatra-1.4.8.gem
-Source21: https://rubygems.org/downloads/tilt-2.0.8.gem
-Source22: https://rubygems.org/downloads/ethon-0.11.0.gem
-Source23: https://rubygems.org/downloads/ffi-1.9.25.gem
+Source11: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem
+Source12: https://rubygems.org/downloads/multi_json-%{version_rubygem_multi_json}.gem
+Source13: https://rubygems.org/downloads/open4-%{version_rubygem_open4}.gem
+Source14: https://rubygems.org/downloads/orderedhash-%{version_rubygem_orderedhash}.gem
+Source15: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_protection}.gem
+Source16: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem
+Source17: https://rubygems.org/downloads/rack-%{version_rubygem_rack}.gem
+Source18: https://rubygems.org/downloads/rpam-ruby19-%{version_rubygem_rpam_ruby19}.gem
+Source19: https://rubygems.org/downloads/sinatra-contrib-%{version_rubygem_sinatra_contrib}.gem
+Source20: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem
+Source21: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem
+Source22: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem
+Source23: https://rubygems.org/downloads/ffi-%{version_rubygem_ffi}.gem
 
 Source31: https://github.com/testing-cabal/mock/archive/1.0.1.tar.gz#/mock-1.0.1.tar.gz
 Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}.tar.gz#/pyagentx-%{pyagentx_version}.tar.gz
-Source99: favicon.ico
 
-Patch1: bz1462248-01-fix-error-for-an-inaccessible-resource-in-a-bundle.patch
-Patch2: bz1572886-01-fix-syntax-multiple-except.-as-parenthes.-tuple.patch
-Patch3: bz1475318-01-rfe-validate-nodes-watchdog-device-by-using-sbd.patch
-Patch4: bz1599758-01-fix-node-communicator-getter.patch
-Patch5: bz1600169-01-disable-usage-of-Expect-HTTP-header.patch
-Patch6: bz1488044-01-fix-pcs-cluster-cib-push-for-old-feature-set.patch
-Patch7: bz1475318-02-rfe-validate-nodes-watchdog-device-by-using-sbd.patch
-Patch8: bz1623181-01-fix-instance-attr-setting-for-OSP-agents.patch
+Patch1: fix-crashes-in-pcs-cluster-auth.patch
 
 Patch98: bz1458153-01-give-back-orig.-master-behav.-resource-create.patch
 Patch99: bz1459503-01-OSP-workarounds-not-compatible-wi.patch
-Patch100: adapt-working-with-gems-to-rhel-7.patch
 #next patch is needed for situation when the rhel6 cluster is controlled from
 #rhel7 gui
-Patch101: change-cman-to-rhel6-in-messages.patch
-Patch102: show-only-warning-when-crm_mon-xml-is-invalid.patch
-Patch103: cache-tokens-read-from-pcsd.patch
-Patch104: fix-logging-in-pcsd.patch
-Patch105: lower-load-created-by-config-files-syncing-in-pcsd.patch
-Patch106: fix-crashes-in-pcs-cluster-auth.patch
+Patch100: change-cman-to-rhel6-in-messages.patch
+Patch101: show-only-warning-when-crm_mon-xml-is-invalid.patch
+Patch102: replace-favicon.patch
 
 # git for patches
 BuildRequires: git
@@ -120,20 +125,22 @@ Requires(postun): systemd
 # pcsd fonts
 Requires: liberation-sans-fonts
 Requires: overpass-fonts
-
-Provides: bundled(rubygem-backports) = 3.11.3
-Provides: bundled(rubygem-multi_json) = 1.13.1
-Provides: bundled(rubygem-open4) = 1.3.4
-Provides: bundled(rubygem-orderedhash) = 0.0.6
-Provides: bundled(rubygem-rack) = 1.6.10
-Provides: bundled(rubygem-rack-protection) = 1.5.5
-Provides: bundled(rubygem-rack-test) = 0.6.3
-Provides: bundled(rubygem-rpam-ruby19) = 1.2.1
-Provides: bundled(rubygem-sinatra) = 1.4.8
-Provides: bundled(rubygem-sinatra-contrib) = 1.4.7
-Provides: bundled(rubygem-tilt) = 2.0.6
-Provides: bundled(rubygem-ethon) = 0.11.0
-Provides: bundled(rubygem-ffi) = 1.9.25
+# favicon
+Requires: redhat-logos
+
+Provides: bundled(rubygem-backports) = %{version_rubygem_backports}
+Provides: bundled(rubygem-multi_json) = %{version_rubygem_multi_json}
+Provides: bundled(rubygem-open4) = %{version_rubygem_open4}
+Provides: bundled(rubygem-orderedhash) = %{version_rubygem_orderedhash}
+Provides: bundled(rubygem-rack) = %{version_rubygem_rack}
+Provides: bundled(rubygem-rack-protection) = %{version_rubygem_rack_protection}
+Provides: bundled(rubygem-rack-test) = %{version_rubygem_rack_test}
+Provides: bundled(rubygem-rpam-ruby19) = %{version_rubygem_rpam_ruby19}
+Provides: bundled(rubygem-sinatra) = %{version_rubygem_sinatra}
+Provides: bundled(rubygem-sinatra-contrib) = %{version_rubygem_sinatra_contrib}
+Provides: bundled(rubygem-tilt) = %{version_rubygem_tilt}
+Provides: bundled(rubygem-ethon) = %{version_rubygem_ethon}
+Provides: bundled(rubygem-ffi) = %{version_rubygem_ffi}
 
 %description
 pcs is a corosync and pacemaker configuration tool.  It permits users to
@@ -158,7 +165,6 @@ Provides: bundled(pyagentx) = %{pyagentx_version}
 %description -n %{pcs_snmp_pkg_name}
 SNMP agent that provides information about pacemaker cluster to the master agent (snmpd)
 
-%define PCS_PREFIX /usr
 %prep
 %autosetup -p1 -S git
 
@@ -175,24 +181,20 @@ UpdateTimestamps() {
     touch -r $PatchFile $f
   done
 }
-UpdateTimestamps -p1 %{PATCH1}
-UpdateTimestamps -p1 %{PATCH2}
-UpdateTimestamps -p1 %{PATCH3}
-UpdateTimestamps -p1 %{PATCH4}
-UpdateTimestamps -p1 %{PATCH5}
-UpdateTimestamps -p1 %{PATCH6}
-UpdateTimestamps -p1 %{PATCH7}
-UpdateTimestamps -p1 %{PATCH8}
 
 UpdateTimestamps -p1 %{PATCH98}
 UpdateTimestamps -p1 %{PATCH99}
 UpdateTimestamps -p1 %{PATCH100}
 UpdateTimestamps -p1 %{PATCH101}
 UpdateTimestamps -p1 %{PATCH102}
-UpdateTimestamps -p1 %{PATCH106}
 
 cp -f %SOURCE1 pcsd/public/images
 
+# use redhat favicon
+rm pcsd/public/images/favicon.png
+ln -s /etc/favicon.png pcsd/public/images/favicon.png
+
+# prepare rubygem config
 mkdir -p pcsd/.bundle
 cp -f %SOURCE2 pcsd/.bundle/config
 
@@ -219,17 +221,40 @@ mv %{bundled_lib_dir}/pyagentx-%{pyagentx_version} %{pyagentx_dir}
 cp %{pyagentx_dir}/LICENSE.txt pyagentx_LICENSE.txt
 cp %{pyagentx_dir}/CONTRIBUTORS.txt pyagentx_CONTRIBUTORS.txt
 cp %{pyagentx_dir}/README.md pyagentx_README.md
-cp -f %{SOURCE99} pcsd/public
 
 %build
 
 %install
 rm -rf $RPM_BUILD_ROOT
 pwd
+
+# build bundled rubygems (in main install it is disabled by BUILD_GEMS=false)
+mkdir -p %{rubygem_bundle_dir}
+gem install \
+  --force --verbose --no-rdoc --no-ri -l --ignore-dependencies \
+  -i %{rubygem_bundle_dir} \
+  %{rubygem_cache_dir}/backports-%{version_rubygem_backports}.gem \
+  %{rubygem_cache_dir}/ethon-%{version_rubygem_ethon}.gem \
+  %{rubygem_cache_dir}/ffi-%{version_rubygem_ffi}.gem \
+  %{rubygem_cache_dir}/multi_json-%{version_rubygem_multi_json}.gem \
+  %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem \
+  %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
+  %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \
+  %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \
+  %{rubygem_cache_dir}/rpam-ruby19-%{version_rubygem_rpam_ruby19}.gem \
+  %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \
+  %{rubygem_cache_dir}/sinatra-contrib-%{version_rubygem_sinatra_contrib}.gem \
+  %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \
+  -- '--with-ldflags="-Wl,-z,now -Wl,-z,relro"'
+
+# We can remove files required for gem compilation
+rm -rf %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext
+rm -rf %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext
+
 make install \
   DESTDIR=$RPM_BUILD_ROOT \
   PYTHON_SITELIB=%{python_sitelib} \
-  PREFIX=%{PCS_PREFIX} \
+  PREFIX=%{_prefix} \
   BASH_COMPLETION_DIR=$RPM_BUILD_ROOT/usr/share/bash-completion/completions \
   PYAGENTX_DIR=`readlink -f %{pyagentx_dir}` \
   SYSTEMCTL_OVERRIDE=true
@@ -241,17 +266,17 @@ make install_pcsd \
   hdrdir="%{_includedir}" \
   rubyhdrdir="%{_includedir}" \
   includedir="%{_includedir}" \
-  PREFIX=%{PCS_PREFIX} \
+  PREFIX=%{_prefix} \
+  BUILD_GEMS=false \
   SYSTEMCTL_OVERRIDE=true
 
 #after the ruby gem compilation we do not need ruby gems in the cache
-rm -r -v $RPM_BUILD_ROOT%{PCS_PREFIX}/lib/pcsd/vendor/cache
+rm -r -v $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_cache_dir}
 
 %check
 run_all_tests(){
   #prepare environment for tests
   sitelib=$RPM_BUILD_ROOT%{python_sitelib}
-  pcsd_dir=$RPM_BUILD_ROOT%{PCS_PREFIX}/lib/pcsd
 
   #run pcs tests and remove them, we do not distribute them in rpm
   #python2-mock package is required but is only in epel so we will install it
@@ -348,12 +373,10 @@ run_all_tests(){
   # command: quorum device add model net host=127.0.0.1 algorithm=ffsplit heuristics mode=on
   # stdout:
   # ----------------------------------------------------------------------
-  #
-  # Tests after pcs.test.test_stonith.StonithDescribeTest.test_nonextisting_agent (included) are broken because it uses metadata from resource-agents that changed. There is no problem with code just with tests.
 
   export PYTHONPATH="${PYTHONPATH}:${sitelib}"
   easy_install -d ${sitelib} %SOURCE31
-  python ${sitelib}/pcs/test/suite.py -v --vanilla --all-but \
+  python pcs/test/suite.py -v --vanilla --all-but \
     pcs.test.test_cluster.ClusterTest.testUIDGID \
     pcs.test.test_stonith.StonithTest.test_stonith_create_provides_unfencing \
     pcs.test.cib_resource.test_create.Success.test_base_create_with_agent_name_including_systemd_instance \
@@ -363,88 +386,10 @@ run_all_tests(){
     pcs.lib.commands.test.test_stonith.CreateInGroup.test_minimal_wait_ok_run_ok \
     pcs.test.test_quorum.DeviceAddTest.test_succes_model_options_and_heuristics \
     pcs.test.test_quorum.DeviceAddTest.test_succes_model_options_and_heuristics_no_exec \
-    pcs.test.test_cluster.ClusterTest.testRemoteNode \
-    pcs.test.test_stonith.StonithDescribeTest.test_nonextisting_agent \
-    pcs.test.test_stonith.StonithTest.testStonithCreation \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_disallowed_option_appear \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_guest_node_conflicts_with_existing_guest \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_guest_node_conflicts_with_existing_id \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_guest_node_conflicts_with_existing_remote \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_guest_node_name_conflicts_with_existing_remote \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_invalid_interval_appear \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_invalid_port_appear \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_option_remote_node_specified \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_success \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_success_when_guest_node_matches_with_existing_guest \
-    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_success_with_options \
-    pcs.test.test_cluster_pcmk_remote.NodeRemoveGuest.test_success_remove_by_node_name \
-    pcs.test.test_cluster_pcmk_remote.NodeRemoveGuest.test_success_remove_by_resource_host \
-    pcs.test.test_cluster_pcmk_remote.NodeRemoveGuest.test_success_remove_by_resource_id \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_monitor \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_monitor_disabled \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_more \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_nonexistent \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_one \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_monitor \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_monitor_enabled \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_more \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_nonexistent \
-    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_one \
-    pcs.test.cib_resource.test_stonith_create.PlainStonith.test_error_when_not_valid_agent \
-    pcs.test.cib_resource.test_stonith_create.PlainStonith.test_warning_when_not_valid_agent \
-    pcs.test.cib_resource.test_operation_add.OperationAdd.test_add_with_OCF_CHECK_LEVEL \
-    pcs.test.cib_resource.test_operation_add.OperationAdd.test_base_add \
-    pcs.test.cib_resource.test_operation_add.OperationAdd.test_can_multiple_operation_add \
-    pcs.test.cib_resource.test_operation_add.OperationAdd.test_id_specified \
-    pcs.test.cib_resource.test_create.Bundle.test_success \
-    pcs.test.cib_resource.test_create.FailOrWarnGroup.test_fail_when_try_use_id_of_another_element \
-    pcs.test.cib_resource.test_create.Success.test_base_create \
-    pcs.test.cib_resource.test_create.Success.test_base_create_with_default_ops \
-    pcs.test.cib_resource.test_create.Success.test_create_disabled \
-    pcs.test.cib_resource.test_create.Success.test_create_with_trace_options \
-    pcs.test.cib_resource.test_create.Success.test_with_clone \
-    pcs.test.cib_resource.test_create.Success.test_with_clone_options \
-    pcs.test.cib_resource.test_create.Success.test_with_master \
-    pcs.test.cib_resource.test_create.SuccessClone.test_clone_does_not_overshadow_meta_options \
-    pcs.test.cib_resource.test_create.SuccessClone.test_clone_does_not_overshadow_operations \
-    pcs.test.cib_resource.test_create.SuccessClone.test_clone_places_disabled_correctly \
-    pcs.test.cib_resource.test_create.SuccessGroup.test_with_existing_group \
-    pcs.test.cib_resource.test_create.SuccessGroup.test_with_group \
-    pcs.test.cib_resource.test_create.SuccessGroup.test_with_group_with_after \
-    pcs.test.cib_resource.test_create.SuccessGroup.test_with_group_with_before \
-    pcs.test.cib_resource.test_create.SuccessMaster.test_disable_is_on_master_element \
-    pcs.test.cib_resource.test_create.SuccessMaster.test_put_options_after_master_as_primitive_options__original_behaviour \
-    pcs.test.cib_resource.test_create.SuccessMaster.test_steals_primitive_meta_options__original_behaviour \
-    pcs.test.cib_resource.test_create.SuccessOperations.test_completing_monitor_operation \
-    pcs.test.cib_resource.test_create.SuccessOperations.test_default_ops_only \
-    pcs.test.cib_resource.test_create.SuccessOperations.test_merging_default_ops_explictly_specified \
-    pcs.test.cib_resource.test_create.SuccessOperations.test_warn_on_forced_unknown_operation \
-    pcs.test.test_resource.CloneMasterUpdate.test_no_op_allowed_in_clone_update \
-    pcs.test.test_resource.CloneMasterUpdate.test_no_op_allowed_in_master_update \
-    pcs.test.test_resource.ResourceTest.testAddOperation \
-    pcs.test.test_resource.ResourceTest.testAddResourcesLargeCib \
-    pcs.test.test_resource.ResourceTest.testCloneMaster \
-    pcs.test.test_resource.ResourceTest.testCloneRemove \
-    pcs.test.test_resource.ResourceTest.testClonedGroup \
-    pcs.test.test_resource.ResourceTest.testClonedMasteredGroup \
-    pcs.test.test_resource.ResourceTest.testGroupRemoveTest \
-    pcs.test.test_resource.ResourceTest.testGroupRemoveWithConstraints2 \
-    pcs.test.test_resource.ResourceTest.testMSGroup \
-    pcs.test.test_resource.ResourceTest.testMasteredGroup \
-    pcs.test.test_resource.ResourceTest.testNoMoveMSClone \
-    pcs.test.test_resource.ResourceTest.testOPOption \
-    pcs.test.test_resource.ResourceTest.testResourceCloneId \
-    pcs.test.test_resource.ResourceTest.testResourceCloneUpdate \
-    pcs.test.test_resource.ResourceTest.testResourceEnable \
-    pcs.test.test_resource.ResourceTest.testResourceEnableClone \
-    pcs.test.test_resource.ResourceTest.testResourceMasterId \
-    pcs.test.test_resource.ResourceTest.testResourceMissingValues \
-    pcs.test.test_resource.ResourceTest.testUnclone \
-    pcs.test.test_resource.ResourceTest.testUpdateOperation \
 
   test_result_python=$?
 
-  find ${sitelib}/pcs -name test -type d -print0|xargs -0 rm -r -v --
+  # find ${sitelib}/pcs -name test -type d -print0|xargs -0 rm -r -v --
   #we installed python2-mock inside $RPM_BUILD_ROOT and now we need to remove
   #it because it does not belong into pcs package
   #easy_install does not provide uninstall and pip is not an option (is in
@@ -456,7 +401,8 @@ run_all_tests(){
 
 
   #run pcsd tests and remove them
-  GEM_HOME=${pcsd_dir}/vendor/bundle/ruby ruby \
+  pcsd_dir=$RPM_BUILD_ROOT%{pcs_libdir}/pcsd
+  GEM_HOME=$RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir} ruby \
     -I${pcsd_dir} \
     -I${pcsd_dir}/test \
     ${pcsd_dir}/test/test_all_suite.rb
@@ -537,16 +483,17 @@ run_all_tests
 %doc pyagentx_README.md
 
 %changelog
-* Tue Jun 25 2019 Ivan Devat <idevat@redhat.com> - 0.9.165-6.el7_6.2
+* Fri Jun 28 2019 Ivan Devat <idevat@redhat.com> - 0.9.167-3
 - Fixed crashes in the `pcs cluster auth` command
-- Resolves: rhbz#1721235
-
-* Thu Feb 28 2019 Ivan Devat <idevat@redhat.com> - 0.9.165-6.el7_6.1
-- `pcs` no longer spawns unnecessary processes for reading node tokens
-- Fixed a bug causing most of the messages not being logged into pcsd.log
-- Lower load caused by periodical config files syncing in pcsd by making it sync less frequently
-- Improve logging of periodical config files syncing in pcsd
-- Resolves: rhbz#1683959 rhbz#1683957 rhbz#1683958
+- Resolves: rhbz#1676956
+
+* Tue Apr 30 2019 Ivan Devat <idevat@redhat.com> - 0.9.167-2
+- Updated logo and favicon in web UI
+- Resolves: rhbz#1700542
+
+* Wed Mar 20 2019 Ivan Devat <idevat@redhat.com> - 0.9.167-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1649732 rhbz#1554302 rhbz#1447349 rhbz#1647640 rhbz#1594738 rhbz#1676956 rhbz#1474747 rhbz#1679534 rhbz#1642514 rhbz#1389140 rhbz#1522858 rhbz#1676944 rhbz#1638376 rhbz#1515021 rhbz#1598197
 
 * Fri Aug 31 2018 Ondrej Mular <omular@redhat.com> - 0.9.165-6
 - Fix instance attributes setting for fence agents `fence_compute` and