diff --git a/SOURCES/cache-tokens-read-from-pcsd.patch b/SOURCES/cache-tokens-read-from-pcsd.patch
new file mode 100644
index 0000000..3290bca
--- /dev/null
+++ b/SOURCES/cache-tokens-read-from-pcsd.patch
@@ -0,0 +1,74 @@
+From 7bfa0658bd7e6ffa526ed965fb4a9680414320e5 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Tue, 19 Feb 2019 15:40:05 +0100
+Subject: [PATCH 1/3] cache tokens read from pcsd
+
+---
+ pcs/cluster.py | 4 ++++
+ pcs/status.py  | 1 +
+ pcs/utils.py   | 1 +
+ 3 files changed, 6 insertions(+)
+
+diff --git a/pcs/cluster.py b/pcs/cluster.py
+index 7d13e11e..8cb27c82 100644
+--- a/pcs/cluster.py
++++ b/pcs/cluster.py
+@@ -1167,6 +1167,7 @@ def start_cluster_nodes(nodes):
+     )
+     was_error = False
+ 
++    utils.read_token_file() # cache node tokens
+     task_list = [
+         IsComponentStartSupported(node) for node in nodes
+     ]
+@@ -1278,6 +1279,7 @@ def wait_for_nodes_started(node_list, timeout=None):
+         else:
+             print(output)
+     else:
++        utils.read_token_file() # cache node tokens
+         node_errors = parallel_for_nodes(
+             wait_for_remote_node_started, node_list, stop_at, interval
+         )
+@@ -1296,6 +1298,7 @@ def stop_cluster_nodes(nodes):
+             % "', '".join(unknown_nodes)
+         )
+ 
++    utils.read_token_file() # cache node tokens
+     stopping_all = set(nodes) >= set(all_nodes)
+     if "--force" not in utils.pcs_options and not stopping_all:
+         error_list = []
+@@ -1406,6 +1409,7 @@ def destroy_cluster(argv, keep_going=False):
+     if len(argv) > 0:
+         # stop pacemaker and resources while cluster is still quorate
+         nodes = argv
++        utils.read_token_file() # cache node tokens
+         node_errors = parallel_for_nodes(
+             utils.repeat_if_timeout(utils.stopPacemaker),
+             nodes,
+diff --git a/pcs/status.py b/pcs/status.py
+index 69cea716..647faed8 100644
+--- a/pcs/status.py
++++ b/pcs/status.py
+@@ -401,6 +401,7 @@ def check_nodes(node_list, prefix=""):
+         ))
+         status_list.append(returncode)
+ 
++    utils.read_token_file() # cache node tokens
+     utils.run_parallel(
+         utils.create_task_list(report, utils.checkAuthorization, node_list)
+     )
+diff --git a/pcs/utils.py b/pcs/utils.py
+index 343a611b..839e931f 100644
+--- a/pcs/utils.py
++++ b/pcs/utils.py
+@@ -245,6 +245,7 @@ def remove_uid_gid_file(uid,gid):
+ def readTokens():
+     return read_token_file()["tokens"]
+ 
++@simple_cache
+ def read_token_file():
+     data = {
+         "tokens": {},
+-- 
+2.17.0
+
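The hunks above work together: read_token_file() in pcs/utils.py becomes memoized via simple_cache, and the explicit utils.read_token_file() calls warm that cache before parallel_for_nodes() / run_parallel() fan out, so the parallel workers reuse one result instead of each re-reading the token file (per the spec changelog, this avoided spawning unnecessary processes). The simple_cache decorator itself is not part of this diff; a minimal sketch of such a zero-argument memoizer (an illustration only, not the actual pcs implementation) could look like:

    import functools

    def simple_cache(func):
        """Cache the result of a no-argument function after the first call."""
        cache = {}

        @functools.wraps(func)
        def wrapper():
            if "result" not in cache:
                cache["result"] = func()  # computed once, reused by later calls
            return cache["result"]

        return wrapper

    @simple_cache
    def read_token_file():
        # stand-in for the real reader in pcs/utils.py
        return {"tokens": {}}
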
diff --git a/SOURCES/fix-logging-in-pcsd.patch b/SOURCES/fix-logging-in-pcsd.patch
new file mode 100644
index 0000000..c4b76a3
--- /dev/null
+++ b/SOURCES/fix-logging-in-pcsd.patch
@@ -0,0 +1,34 @@
+From 0673479bb25bc000808465eae23d049804dc1415 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Thu, 21 Feb 2019 12:26:33 +0100
+Subject: [PATCH 2/3] fix logging in pcsd
+
+Fix a bug that caused most messages not to be logged. Introduced
+in pcs-0.9.165 by commit 04d7e6a99beca700a2072406db671ef33d85c180.
+---
+ pcsd/bootstrap.rb | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
+index 0246c48b..035574b5 100644
+--- a/pcsd/bootstrap.rb
++++ b/pcsd/bootstrap.rb
+@@ -67,11 +67,15 @@ if not defined? $cur_node_name
+ end
+ 
+ def configure_logger(log_device)
++  # Open the file ourselves so we can set its permissions in case the file
++  # does not exist. Logger is able to create and open the file for us, but it
++  # does not allow specifying file permissions.
+   if log_device.is_a?(String)
+     # File.open(path, mode, options)
+     # File.open(path, mode, perm, options)
+     # In order to set permissions, the method must be called with 4 arguments.
+     log_device = File.open(log_device, "a+", 0600, {})
++    log_device.sync = true
+   end
+   logger = Logger.new(log_device)
+   if ENV['PCSD_DEBUG'] and ENV['PCSD_DEBUG'].downcase == "true" then
+-- 
+2.17.0
+
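This hunk contains two related pieces: the 4-argument File.open creates pcsd.log with mode 0600 when it does not exist, and log_device.sync = true is the actual bug fix. Ruby's Logger writes through when it opens a log file itself, but it uses a passed-in IO object as-is; without sync = true the manually opened file stayed buffered and most messages never reached the disk. A rough Python analogue of the same technique (hypothetical helper, not pcs code):

    import os

    def open_log(path):
        # Create the file with mode 0600 if it does not exist yet; a plain
        # open() would leave the permissions up to the process umask.
        fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o600)
        # Line buffering flushes after every record (the counterpart of
        # Ruby's log_device.sync = true), so messages are not lost in a buffer.
        return os.fdopen(fd, "a", buffering=1)
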
diff --git a/SOURCES/lower-load-created-by-config-files-syncing-in-pcsd.patch b/SOURCES/lower-load-created-by-config-files-syncing-in-pcsd.patch
new file mode 100644
index 0000000..02d7a75
--- /dev/null
+++ b/SOURCES/lower-load-created-by-config-files-syncing-in-pcsd.patch
@@ -0,0 +1,490 @@
+From 016aa2bb9553a9a64ec6645db40ef95dd8de7041 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Tue, 19 Feb 2019 17:53:17 +0100
+Subject: [PATCH 3/3] lower load created by config files syncing in pcsd
+
+* make the sync less frequent (10 minutes instead of 1 minute) by
+  default
+* if the previous sync attempt was unable to connect to other nodes,
+  try again sooner (in 1 minute by default)
+---
+ pcsd/cfgsync.rb           |  60 ++++++++++++++++----
+ pcsd/pcsd.8               |   9 ++-
+ pcsd/pcsd.rb              |  24 ++++++--
+ pcsd/test/test_cfgsync.rb | 114 ++++++++++++++++++++++++++++++--------
+ 4 files changed, 167 insertions(+), 40 deletions(-)
+
+diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
+index 9acd8d0f..44e6d853 100644
+--- a/pcsd/cfgsync.rb
++++ b/pcsd/cfgsync.rb
+@@ -313,8 +313,11 @@ module Cfgsync
+ 
+ 
+   class ConfigSyncControl
+-    @thread_interval_default = 60
+-    @thread_interval_minimum = 20
++    # intervals in seconds
++    @thread_interval_default = 600
++    @thread_interval_minimum = 60
++    @thread_interval_previous_not_connected_default = 60
++    @thread_interval_previous_not_connected_minimum = 20
+     @file_backup_count_default = 50
+     @file_backup_count_minimum = 0
+ 
+@@ -349,6 +352,20 @@ module Cfgsync
+       return self.save(data)
+     end
+ 
++    def self.sync_thread_interval_previous_not_connected()
++      return self.get_integer_value(
++        self.load()['thread_interval_previous_not_connected'],
++        @thread_interval_previous_not_connected_default,
++        @thread_interval_previous_not_connected_minimum
++      )
++    end
++
++    def self.sync_thread_interval_previous_not_connected=(seconds)
++      data = self.load()
++      data['thread_interval_previous_not_connected'] = seconds
++      return self.save(data)
++    end
++
+     def self.sync_thread_pause(semaphore_cfgsync, seconds=300)
+       # wait for the thread to finish current run and disable it
+       semaphore_cfgsync.synchronize {
+@@ -585,14 +602,17 @@ module Cfgsync
+     end
+ 
+     def fetch_all()
+-      return self.filter_configs_cluster(
+-        self.get_configs_cluster(@nodes, @cluster_name),
+-        @config_classes
++      node_configs, node_connected = self.get_configs_cluster(
++        @nodes, @cluster_name
+       )
++      filtered_configs = self.filter_configs_cluster(
++        node_configs, @config_classes
++      )
++      return filtered_configs, node_connected
+     end
+ 
+     def fetch()
+-      configs_cluster = self.fetch_all()
++      configs_cluster, node_connected = self.fetch_all()
+ 
+       newest_configs_cluster = {}
+       configs_cluster.each { |name, cfgs|
+@@ -613,7 +633,7 @@ module Cfgsync
+           end
+         end
+       }
+-      return to_update_locally, to_update_in_cluster
++      return to_update_locally, to_update_in_cluster, node_connected
+     end
+ 
+     protected
+@@ -630,12 +650,15 @@ module Cfgsync
+       $logger.debug 'Fetching configs from the cluster'
+       threads = []
+       node_configs = {}
++      connected_to = {}
+       nodes.each { |node|
+         threads << Thread.new {
+           code, out = send_request_with_token(
+             @auth_user, node, 'get_configs', false, data
+           )
++          connected_to[node] = false
+           if 200 == code
++            connected_to[node] = true
+             begin
+               parsed = JSON::parse(out)
+               if 'ok' == parsed['status'] and cluster_name == parsed['cluster_name']
+@@ -647,7 +670,24 @@ module Cfgsync
+         }
+       }
+       threads.each { |t| t.join }
+-      return node_configs
++
++      node_connected = false
++      if connected_to.empty?()
++        node_connected = true # no nodes to connect to => no connection errors
++      else
++        connected_count = 0
++        connected_to.each { |node, connected|
++          if connected
++            connected_count += 1
++          end
++        }
++        # If we only connected to one node, consider it a failure and continue
++        # as if we could not connect anywhere. The one node is probably the
++        # local node.
++        node_connected = connected_count > 1
++      end
++
++      return node_configs, node_connected
+     end
+ 
+     def filter_configs_cluster(node_configs, wanted_configs_classes)
+@@ -752,7 +792,7 @@ module Cfgsync
+           fetcher = ConfigFetcher.new(
+             PCSAuth.getSuperuserAuth(), [config.class], nodes, cluster_name
+           )
+-          cfgs_to_save, _ = fetcher.fetch()
++          cfgs_to_save, _, _ = fetcher.fetch()
+           cfgs_to_save.each { |cfg_to_save|
+             cfg_to_save.save() if cfg_to_save.class == config.class
+           }
+@@ -812,7 +852,7 @@ module Cfgsync
+     fetcher = ConfigFetcher.new(
+       PCSAuth.getSuperuserAuth(), [config_new.class], nodes, cluster_name
+     )
+-    fetched_tokens = fetcher.fetch_all()[config_new.class.name]
++    fetched_tokens = fetcher.fetch_all()[0][config_new.class.name]
+     config_new = Cfgsync::merge_tokens_files(
+       config, fetched_tokens, new_tokens, new_ports
+     )
+diff --git a/pcsd/pcsd.8 b/pcsd/pcsd.8
+index e58b7ff6..bd405043 100644
+--- a/pcsd/pcsd.8
++++ b/pcsd/pcsd.8
+@@ -63,9 +63,11 @@ Example:
+ .br
+   "thread_disabled": false,
+ .br
+-  "thread_interval": 60,
++  "thread_interval": 600,
+ .br
+-  "thread_paused_until": 1487780453,
++  "thread_interval_previous_not_connected": 60,
++.br
++  "thread_paused_until": 1487780453
+ .br
+ }
+ 
+@@ -79,6 +81,9 @@ Set this to \fBtrue\fR to completely disable the synchronization.
+ .B thread_interval
+ How often in seconds should pcsd ask other nodes if the synchronized files have changed.
+ .TP
++.B thread_interval_previous_not_connected
++How often in seconds should pcsd ask other nodes if the synchronized files have changed in case pcsd was unable to connect to at least two nodes during the previous attempt.
++.TP
+ .B thread_paused_until
+ Disable the synchronization until the set unix timestamp.
+ 
+diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
+index 9f9bd091..6e5e27e0 100644
+--- a/pcsd/pcsd.rb
++++ b/pcsd/pcsd.rb
+@@ -132,14 +132,15 @@ set :run, false
+ 
+ $thread_cfgsync = Thread.new {
+   while true
++    node_connected = true
+     $semaphore_cfgsync.synchronize {
+-      $logger.debug('Config files sync thread started')
+       if Cfgsync::ConfigSyncControl.sync_thread_allowed?()
++        $logger.info('Config files sync thread started')
+         begin
+           # do not sync if this host is not in a cluster
+           cluster_name = get_cluster_name()
+           cluster_nodes = get_corosync_nodes()
+-          if cluster_name and !cluster_name.empty?() and cluster_nodes and !cluster_nodes.empty?
++          if cluster_name and !cluster_name.empty?() and cluster_nodes and cluster_nodes.count > 1
+             $logger.debug('Config files sync thread fetching')
+             fetcher = Cfgsync::ConfigFetcher.new(
+               PCSAuth.getSuperuserAuth(),
+@@ -147,18 +148,31 @@ $thread_cfgsync = Thread.new {
+               cluster_nodes,
+               cluster_name
+             )
+-            cfgs_to_save, _ = fetcher.fetch()
++            cfgs_to_save, _, node_connected = fetcher.fetch()
+             cfgs_to_save.each { |cfg_to_save|
+               cfg_to_save.save()
+             }
++            $logger.info('Config files sync thread finished')
++          else
++            $logger.info(
++              'Config files sync skipped, this host does not seem to be in ' +
++              'a cluster of at least 2 nodes'
++            )
+           end
+         rescue => e
+           $logger.warn("Config files sync thread exception: #{e}")
+         end
++      else
++        $logger.info('Config files sync is disabled or paused, skipping')
+       end
+-      $logger.debug('Config files sync thread finished')
+     }
+-    sleep(Cfgsync::ConfigSyncControl.sync_thread_interval())
++    if node_connected
++      sleep(Cfgsync::ConfigSyncControl.sync_thread_interval())
++    else
++      sleep(
++        Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
++      )
++    end
+   end
+ }
+ 
+diff --git a/pcsd/test/test_cfgsync.rb b/pcsd/test/test_cfgsync.rb
+index 9b0317ce..b49c44d2 100644
+--- a/pcsd/test/test_cfgsync.rb
++++ b/pcsd/test/test_cfgsync.rb
+@@ -287,8 +287,10 @@ class TestConfigSyncControll < Test::Unit::TestCase
+     file = File.open(CFG_SYNC_CONTROL, 'w')
+     file.write(JSON.pretty_generate({}))
+     file.close()
+-    @thread_interval_default = 60
+-    @thread_interval_minimum = 20
++    @thread_interval_default = 600
++    @thread_interval_minimum = 60
++    @thread_interval_previous_not_connected_default = 60
++    @thread_interval_previous_not_connected_minimum = 20
+     @file_backup_count_default = 50
+     @file_backup_count_minimum = 0
+   end
+@@ -441,6 +443,65 @@ class TestConfigSyncControll < Test::Unit::TestCase
+     )
+   end
+ 
++  def test_interval_previous_not_connected()
++    assert_equal(
++      @thread_interval_previous_not_connected_default,
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
++    )
++
++    interval = (
++      @thread_interval_previous_not_connected_default +
++      @thread_interval_previous_not_connected_minimum
++    )
++    assert(
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(
++        interval
++      )
++    )
++    assert_equal(
++      interval,
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
++    )
++
++    assert(
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(
++        @thread_interval_previous_not_connected_minimum / 2
++      )
++    )
++    assert_equal(
++      @thread_interval_previous_not_connected_minimum,
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
++    )
++
++    assert(
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(0)
++    )
++    assert_equal(
++      @thread_interval_previous_not_connected_minimum,
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
++    )
++
++    assert(
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(
++        -100
++      )
++    )
++    assert_equal(
++      @thread_interval_previous_not_connected_minimum,
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
++    )
++
++    assert(
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected=(
++        'abcd'
++      )
++    )
++    assert_equal(
++      @thread_interval_previous_not_connected_default,
++      Cfgsync::ConfigSyncControl.sync_thread_interval_previous_not_connected()
++    )
++  end
++
+   def test_file_backup_count()
+     assert_equal(
+       @file_backup_count_default,
+@@ -495,11 +556,12 @@ class TestConfigFetcher < Test::Unit::TestCase
+     end
+ 
+     def get_configs_cluster(nodes, cluster_name)
+-      return @configs_cluster
++      return @configs_cluster, @node_connected
+     end
+ 
+-    def set_configs_cluster(configs)
++    def set_configs_cluster(configs, node_connected=true)
+       @configs_cluster = configs
++      @node_connected = node_connected
+       return self
+     end
+ 
+@@ -569,31 +631,37 @@ class TestConfigFetcher < Test::Unit::TestCase
+     cfg_name = Cfgsync::ClusterConf.name
+     fetcher = ConfigFetcherMock.new({}, [Cfgsync::ClusterConf], nil, nil)
+ 
++    # unable to connect to any nodes
++    fetcher.set_configs_local({cfg_name => cfg1})
++
++    fetcher.set_configs_cluster({}, false)
++    assert_equal([[], [], false], fetcher.fetch())
++
+     # local config is synced
+     fetcher.set_configs_local({cfg_name => cfg1})
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {'configs' => {cfg_name => cfg1}},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {'configs' => {cfg_name => cfg2}},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {'configs' => {cfg_name => cfg1}},
+       'node2' => {'configs' => {cfg_name => cfg2}},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {'configs' => {cfg_name => cfg1}},
+       'node2' => {'configs' => {cfg_name => cfg2}},
+       'node3' => {'configs' => {cfg_name => cfg2}},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+ 
+     # local config is older
+     fetcher.set_configs_local({cfg_name => cfg1})
+@@ -601,20 +669,20 @@ class TestConfigFetcher < Test::Unit::TestCase
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+     })
+-    assert_equal([[cfg3], []], fetcher.fetch())
++    assert_equal([[cfg3], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+       'node2' => {cfg_name => cfg4},
+     })
+-    assert_equal([[cfg4], []], fetcher.fetch())
++    assert_equal([[cfg4], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+       'node2' => {cfg_name => cfg4},
+       'node3' => {cfg_name => cfg3},
+     })
+-    assert_equal([[cfg3], []], fetcher.fetch())
++    assert_equal([[cfg3], [], true], fetcher.fetch())
+ 
+     # local config is newer
+     fetcher.set_configs_local({cfg_name => cfg3})
+@@ -622,13 +690,13 @@ class TestConfigFetcher < Test::Unit::TestCase
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg1},
+     })
+-    assert_equal([[], [cfg3]], fetcher.fetch())
++    assert_equal([[], [cfg3], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg1},
+       'node2' => {cfg_name => cfg1},
+     })
+-    assert_equal([[], [cfg3]], fetcher.fetch())
++    assert_equal([[], [cfg3], true], fetcher.fetch())
+ 
+     # local config is the same version
+     fetcher.set_configs_local({cfg_name => cfg3})
+@@ -636,32 +704,32 @@ class TestConfigFetcher < Test::Unit::TestCase
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg4},
+     })
+-    assert_equal([[cfg4], []], fetcher.fetch())
++    assert_equal([[cfg4], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+       'node2' => {cfg_name => cfg4},
+     })
+-    assert_equal([[cfg4], []], fetcher.fetch())
++    assert_equal([[cfg4], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+       'node2' => {cfg_name => cfg4},
+       'node3' => {cfg_name => cfg3},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+       'node2' => {cfg_name => cfg4},
+       'node3' => {cfg_name => cfg4},
+     })
+-    assert_equal([[cfg4], []], fetcher.fetch())
++    assert_equal([[cfg4], [], true], fetcher.fetch())
+ 
+     # local config is the same version
+     fetcher.set_configs_local({cfg_name => cfg4})
+@@ -669,32 +737,32 @@ class TestConfigFetcher < Test::Unit::TestCase
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+     })
+-    assert_equal([[cfg3], []], fetcher.fetch())
++    assert_equal([[cfg3], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg4},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+       'node2' => {cfg_name => cfg4},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+       'node2' => {cfg_name => cfg4},
+       'node3' => {cfg_name => cfg3},
+     })
+-    assert_equal([[cfg3], []], fetcher.fetch())
++    assert_equal([[cfg3], [], true], fetcher.fetch())
+ 
+     fetcher.set_configs_cluster({
+       'node1' => {cfg_name => cfg3},
+       'node2' => {cfg_name => cfg4},
+       'node3' => {cfg_name => cfg4},
+     })
+-    assert_equal([[], []], fetcher.fetch())
++    assert_equal([[], [], true], fetcher.fetch())
+   end
+ end
+ 
+-- 
+2.17.0
+
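Condensed, the behavior this patch adds is: a sync round counts as connected only when at least two nodes answered (a single responder is most likely the local node itself), and the thread sleeps for the normal interval (now 600 s) after a connected round but retries after only 60 s otherwise. A Python sketch of that control flow (illustrative names; the authoritative code is the Ruby above):

    import time

    THREAD_INTERVAL = 600                # normal sync period, seconds
    THREAD_INTERVAL_NOT_CONNECTED = 60   # retry sooner after a failed round

    def cluster_reached(connected_to):
        # connected_to maps node name -> bool; an empty map means there was
        # nothing to contact, hence no connection errors either.
        if not connected_to:
            return True
        # Exactly one reachable node is treated as a failure: it is
        # probably just the local node answering.
        return sum(connected_to.values()) > 1

    def sync_loop(fetch_and_save):
        while True:
            # fetch_and_save() stands in for fetcher.fetch() plus saving the
            # fetched configs; it returns the node_connected flag from above.
            node_connected = fetch_and_save()
            time.sleep(
                THREAD_INTERVAL if node_connected
                else THREAD_INTERVAL_NOT_CONNECTED
            )
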
diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec
index f25e058..2624d77 100644
--- a/SPECS/pcs.spec
+++ b/SPECS/pcs.spec
@@ -1,12 +1,12 @@
 Name: pcs
 Version: 0.9.165
-Release: 6%{?dist}
+Release: 6%{?dist}.1
 License: GPLv2
 URL: https://github.com/ClusterLabs/pcs
 Group: System Environment/Base
 Summary: Pacemaker Configuration System
 #building only for architectures with pacemaker and corosync available
-ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %{arm}
+ExclusiveArch: i686 x86_64 s390x ppc64le aarch64
 
 %global pcs_snmp_pkg_name  pcs-snmp
 %global pyagentx_version   0.4.pcs.1
@@ -35,7 +35,6 @@ Source23: https://rubygems.org/downloads/ffi-1.9.25.gem
 
 Source31: https://github.com/testing-cabal/mock/archive/1.0.1.tar.gz#/mock-1.0.1.tar.gz
 Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}.tar.gz#/pyagentx-%{pyagentx_version}.tar.gz
-Source99: favicon.ico
 
 Patch1: bz1462248-01-fix-error-for-an-inaccessible-resource-in-a-bundle.patch
 Patch2: bz1572886-01-fix-syntax-multiple-except.-as-parenthes.-tuple.patch
@@ -53,6 +52,9 @@ Patch100: adapt-working-with-gems-to-rhel-7.patch
 #rhel7 gui
 Patch101: change-cman-to-rhel6-in-messages.patch
 Patch102: show-only-warning-when-crm_mon-xml-is-invalid.patch
+Patch103: cache-tokens-read-from-pcsd.patch
+Patch104: fix-logging-in-pcsd.patch
+Patch105: lower-load-created-by-config-files-syncing-in-pcsd.patch
 
 # git for patches
 BuildRequires: git
@@ -214,7 +216,6 @@ mv %{bundled_lib_dir}/pyagentx-%{pyagentx_version} %{pyagentx_dir}
 cp %{pyagentx_dir}/LICENSE.txt pyagentx_LICENSE.txt
 cp %{pyagentx_dir}/CONTRIBUTORS.txt pyagentx_CONTRIBUTORS.txt
 cp %{pyagentx_dir}/README.md pyagentx_README.md
-cp -f %{SOURCE99} pcsd/public
 
 %build
 
@@ -343,6 +344,8 @@ run_all_tests(){
   # command: quorum device add model net host=127.0.0.1 algorithm=ffsplit heuristics mode=on
   # stdout:
   # ----------------------------------------------------------------------
+  #
+  # Tests from pcs.test.test_stonith.StonithDescribeTest.test_nonextisting_agent (inclusive) onwards are broken because they use metadata from resource-agents that has changed. The problem is in the tests, not in the code.
 
   export PYTHONPATH="${PYTHONPATH}:${sitelib}"
   easy_install -d ${sitelib} %SOURCE31
@@ -356,6 +359,84 @@ run_all_tests(){
     pcs.lib.commands.test.test_stonith.CreateInGroup.test_minimal_wait_ok_run_ok \
     pcs.test.test_quorum.DeviceAddTest.test_succes_model_options_and_heuristics \
     pcs.test.test_quorum.DeviceAddTest.test_succes_model_options_and_heuristics_no_exec \
+    pcs.test.test_cluster.ClusterTest.testRemoteNode \
+    pcs.test.test_stonith.StonithDescribeTest.test_nonextisting_agent \
+    pcs.test.test_stonith.StonithTest.testStonithCreation \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_disallowed_option_appear \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_guest_node_conflicts_with_existing_guest \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_guest_node_conflicts_with_existing_id \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_guest_node_conflicts_with_existing_remote \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_guest_node_name_conflicts_with_existing_remote \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_invalid_interval_appear \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_invalid_port_appear \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_fail_when_option_remote_node_specified \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_success \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_success_when_guest_node_matches_with_existing_guest \
+    pcs.test.test_cluster_pcmk_remote.NodeAddGuest.test_success_with_options \
+    pcs.test.test_cluster_pcmk_remote.NodeRemoveGuest.test_success_remove_by_node_name \
+    pcs.test.test_cluster_pcmk_remote.NodeRemoveGuest.test_success_remove_by_resource_host \
+    pcs.test.test_cluster_pcmk_remote.NodeRemoveGuest.test_success_remove_by_resource_id \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_monitor \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_monitor_disabled \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_more \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_nonexistent \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_manage_one \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_monitor \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_monitor_enabled \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_more \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_nonexistent \
+    pcs.test.cib_resource.test_manage_unmanage.ManageUnmanage.test_unmanage_one \
+    pcs.test.cib_resource.test_stonith_create.PlainStonith.test_error_when_not_valid_agent \
+    pcs.test.cib_resource.test_stonith_create.PlainStonith.test_warning_when_not_valid_agent \
+    pcs.test.cib_resource.test_operation_add.OperationAdd.test_add_with_OCF_CHECK_LEVEL \
+    pcs.test.cib_resource.test_operation_add.OperationAdd.test_base_add \
+    pcs.test.cib_resource.test_operation_add.OperationAdd.test_can_multiple_operation_add \
+    pcs.test.cib_resource.test_operation_add.OperationAdd.test_id_specified \
+    pcs.test.cib_resource.test_create.Bundle.test_success \
+    pcs.test.cib_resource.test_create.FailOrWarnGroup.test_fail_when_try_use_id_of_another_element \
+    pcs.test.cib_resource.test_create.Success.test_base_create \
+    pcs.test.cib_resource.test_create.Success.test_base_create_with_default_ops \
+    pcs.test.cib_resource.test_create.Success.test_create_disabled \
+    pcs.test.cib_resource.test_create.Success.test_create_with_trace_options \
+    pcs.test.cib_resource.test_create.Success.test_with_clone \
+    pcs.test.cib_resource.test_create.Success.test_with_clone_options \
+    pcs.test.cib_resource.test_create.Success.test_with_master \
+    pcs.test.cib_resource.test_create.SuccessClone.test_clone_does_not_overshadow_meta_options \
+    pcs.test.cib_resource.test_create.SuccessClone.test_clone_does_not_overshadow_operations \
+    pcs.test.cib_resource.test_create.SuccessClone.test_clone_places_disabled_correctly \
+    pcs.test.cib_resource.test_create.SuccessGroup.test_with_existing_group \
+    pcs.test.cib_resource.test_create.SuccessGroup.test_with_group \
+    pcs.test.cib_resource.test_create.SuccessGroup.test_with_group_with_after \
+    pcs.test.cib_resource.test_create.SuccessGroup.test_with_group_with_before \
+    pcs.test.cib_resource.test_create.SuccessMaster.test_disable_is_on_master_element \
+    pcs.test.cib_resource.test_create.SuccessMaster.test_put_options_after_master_as_primitive_options__original_behaviour \
+    pcs.test.cib_resource.test_create.SuccessMaster.test_steals_primitive_meta_options__original_behaviour \
+    pcs.test.cib_resource.test_create.SuccessOperations.test_completing_monitor_operation \
+    pcs.test.cib_resource.test_create.SuccessOperations.test_default_ops_only \
+    pcs.test.cib_resource.test_create.SuccessOperations.test_merging_default_ops_explictly_specified \
+    pcs.test.cib_resource.test_create.SuccessOperations.test_warn_on_forced_unknown_operation \
+    pcs.test.test_resource.CloneMasterUpdate.test_no_op_allowed_in_clone_update \
+    pcs.test.test_resource.CloneMasterUpdate.test_no_op_allowed_in_master_update \
+    pcs.test.test_resource.ResourceTest.testAddOperation \
+    pcs.test.test_resource.ResourceTest.testAddResourcesLargeCib \
+    pcs.test.test_resource.ResourceTest.testCloneMaster \
+    pcs.test.test_resource.ResourceTest.testCloneRemove \
+    pcs.test.test_resource.ResourceTest.testClonedGroup \
+    pcs.test.test_resource.ResourceTest.testClonedMasteredGroup \
+    pcs.test.test_resource.ResourceTest.testGroupRemoveTest \
+    pcs.test.test_resource.ResourceTest.testGroupRemoveWithConstraints2 \
+    pcs.test.test_resource.ResourceTest.testMSGroup \
+    pcs.test.test_resource.ResourceTest.testMasteredGroup \
+    pcs.test.test_resource.ResourceTest.testNoMoveMSClone \
+    pcs.test.test_resource.ResourceTest.testOPOption \
+    pcs.test.test_resource.ResourceTest.testResourceCloneId \
+    pcs.test.test_resource.ResourceTest.testResourceCloneUpdate \
+    pcs.test.test_resource.ResourceTest.testResourceEnable \
+    pcs.test.test_resource.ResourceTest.testResourceEnableClone \
+    pcs.test.test_resource.ResourceTest.testResourceMasterId \
+    pcs.test.test_resource.ResourceTest.testResourceMissingValues \
+    pcs.test.test_resource.ResourceTest.testUnclone \
+    pcs.test.test_resource.ResourceTest.testUpdateOperation \
 
   test_result_python=$?
 
@@ -452,10 +533,13 @@ run_all_tests
 %doc pyagentx_README.md
 
 %changelog
-* Tue Oct 30 2018 Johnny Hughes <johnny@centos.org> - 0.9.165-6
-- Manual CentOS Debrnading
+* Thu Feb 28 2019 Ivan Devat <idevat@redhat.com> - 0.9.165-6.el7_6.1
+- `pcs` no longer spawns unnecessary processes for reading node tokens
+- Fixed a bug that caused most messages not to be logged to pcsd.log
+- Lowered the load caused by periodic config file syncing in pcsd by making it sync less frequently
+- Improved logging of periodic config file syncing in pcsd
+- Resolves: rhbz#1683959 rhbz#1683957 rhbz#1683958
 
-- Modify to build on armhfp 
 * Fri Aug 31 2018 Ondrej Mular <omular@redhat.com> - 0.9.165-6
 - Fix instance attributes setting for fence agents `fence_compute` and
   `fence_evacuate`