diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..bcfcb86
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+SOURCES/nagios-agents-metadata-105ab8a.tar.gz
+SOURCES/pacemaker-f14e36f.tar.gz
diff --git a/.pacemaker.metadata b/.pacemaker.metadata
new file mode 100644
index 0000000..ec07f17
--- /dev/null
+++ b/.pacemaker.metadata
@@ -0,0 +1,2 @@
+ea6c0a27fd0ae8ce02f84a11f08a0d79377041c3 SOURCES/nagios-agents-metadata-105ab8a.tar.gz
+1affd72b4a9a8190e5e87761b16c32935120e65c SOURCES/pacemaker-f14e36f.tar.gz
diff --git a/SOURCES/01-rollup.patch b/SOURCES/01-rollup.patch
new file mode 100644
index 0000000..b06cebe
--- /dev/null
+++ b/SOURCES/01-rollup.patch
@@ -0,0 +1,11661 @@
+From c46477fe38bdede01a070183052e5fa76f3631ef Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Thu, 6 Jun 2019 14:47:40 +0200
+Subject: [PATCH 01/96] Maint: introduce mock/experimental skeletons, now for
+ cib (based) daemon
+
+[this is a rebase from 2.0, occurrences of "based" mean "cib", but they
+are not changed everywhere, especially not in the name of the mocked
+daemon, for easy comparability amongst the branches]
+
+This is meant for multiple purposes (non-exhaustive):
+
+- simulating scenarios that are rather hard to achieve in practice
+  (overwhelming number of nodes or artificially capped resources)
+
+- robustness testing (will other daemons survive intermittently, yet
+  frequently, missing responses to the based queries?  to what extent
+  do they rely on some causality relations like notify-after-write?
+  what are the chances of continuously changing CIB contents
+  causing trouble?)
+
+- anything not fitting the former two categories
+
+Another option would be to start from the full-fledged daemon, cutting it
+into self-contained, patchable pieces, but since in the case of based it
+has grown rather organically as a monolith, it would be substantially
+more hassle than grabbing the most vital pieces and starting with a clean
+room.  The aim is to keep additional functionality on top of said
+skeleton, and let the additional functionality stay apart in opt-in
+loosely coupled modules.  We shall see how this works out.
+---
+ maint/mocked/Makefile |  42 +++++++
+ maint/mocked/based.c  | 328 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ maint/mocked/based.h  |  47 ++++++++
 3 files changed, 417 insertions(+)
+ create mode 100644 maint/mocked/Makefile
+ create mode 100644 maint/mocked/based.c
+ create mode 100644 maint/mocked/based.h
+
+diff --git a/maint/mocked/Makefile b/maint/mocked/Makefile
+new file mode 100644
+index 0000000..05b3cb4
+--- /dev/null
++++ b/maint/mocked/Makefile
+@@ -0,0 +1,42 @@
++#
++# Copyright 2019 the Pacemaker project contributors
++#
++# The version control history for this file may have further details.
++#
++# Copying and distribution of this file, with or without modification,
++# are permitted in any medium without royalty provided the copyright
++# notice and this notice are preserved.  This file is offered as-is,
++# without any warranty.
++#
++
++#BASED_LDFLAGS = $$(pkgconf -libs glib-2.0) \
++#	$$(pkgconf -libs libxml-2.0) \
++#	$$(pkgconf -libs libqb) \
++#	$$(pkgconf -libs pacemaker)
++BASED_LDFLAGS = $$(pkgconf -libs glib-2.0) \
++	$$(pkgconf -libs libxml-2.0) \
++	$$(pkgconf -libs libqb) \
++	-Wl,-rpath=$(CURDIR)/../../lib/common/.libs \
++	  -L../../lib/common/.libs -lcrmcommon \
++	  -L../../lib/pacemaker/.libs -lpacemaker
++
++BASED_CPPFLAGS = $$(pkgconf -cflags glib-2.0) \
++	$$(pkgconf -cflags libxml-2.0) \
++	$$(pkgconf -cflags libqb) \
++	-DCS_USES_LIBQB \
++	-I ../.. -I ../../include -g
++
++PROGRAMS = based
++
++BASED_OBJECTS = based.o
++
++all: ${PROGRAMS}
++
++based: $(BASED_OBJECTS)
++	$(CC) $(BASED_LDFLAGS) $^ -o $@
++
++$(BASED_OBJECTS): %.o: %.c
++	$(CC) $(BASED_CPPFLAGS) $(BASED_LDFLAGS) -c $< -o $@
++
++clean:
++	rm ${PROGRAMS} $(BASED_OBJECTS)
+diff --git a/maint/mocked/based.c b/maint/mocked/based.c
+new file mode 100644
+index 0000000..451a384
+--- /dev/null
++++ b/maint/mocked/based.c
+@@ -0,0 +1,328 @@
++/*
++ * Copyright 2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
++ *
++ * Licensed under the GNU General Public License version 2 or later (GPLv2+).
++ */
++
++/*
++ * Clean room attempt (admittedly with a lot of code borrowed or inspired by
++ * the full-blown daemon), a minimalistic implementation of the based daemon,
++ * with only the important aspects being implemented at the moment.
++ *
++ * Hopefully easy to adapt for a variety of purposes.
++ *
++ * NOTE: currently, only cib_rw API end-point is opened, future refinements
++ *       as new modules are added should conditionalize per what the module
++ *       indicates in the context (which is intentionally very loose data glue
++ *       between the skeleton and modules themselves (like CGI variables so
++ *       to say, but more structurally predestined so as to avoid complexities
++ *       of hash table lookups etc.)
++ *       of hash table lookups, etc.))
++
++#include <crm_internal.h>
++#if 0
++#include "crm/common/ipcs.h"  /* crm_client_t */
++#include "crm/common/xml.h"  /* crm_xml_add */
++#endif
++#include "crm/cib/internal.h"  /* T_CIB */
++#include "crm/msg_xml.h"  /* F_SUBTYPE */
++#include "cib/callbacks.h"  /* cib_notify_diff */
++
++#include <qb/qbipcs.h>  /* qb_ipcs_connection_t */
++
++#include "based.h"
++
++
++/* direct global access violated in one case only
++   - mock_based_ipc_accept adds a reference to it to crm_client_t->userdata */
++mock_based_context_t mock_based_context;
++
++
++/* see based/based_callbacks.c:cib_ipc_accept */
++static int32_t
++mock_based_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
++{
++    int32_t ret = 0;
++    crm_client_t *cib_client;
++
++    crm_trace("Connection %p", c);
++    if ((cib_client = crm_client_new(c, uid, gid)) == NULL) {
++        return -EIO;  /* avoid dereferencing the NULL client below */
++    }
++
++    cib_client->userdata = &mock_based_context;
++
++    return ret;
++}
++
++/* see based/based_callbacks.c:cib_ipc_created */
++static void
++mock_based_ipc_created(qb_ipcs_connection_t *c)
++{
++    crm_trace("Connection %p", c);
++}
++
++/* see based/based_callbacks.c:cib_ipc_closed */
++static int32_t
++mock_based_ipc_closed(qb_ipcs_connection_t *c)
++{
++    crm_client_t *client = crm_client_get(c);
++
++    if (client != NULL) {
++        crm_trace("Connection %p", c);
++        crm_client_destroy(client);
++    }
++
++    return 0;
++}
++
++/* see based/based_callbacks.c:cib_ipc_destroy */
++static void
++mock_based_ipc_destroy(qb_ipcs_connection_t *c)
++{
++    crm_trace("Connection %p", c);
++    mock_based_ipc_closed(c);
++}
++
++/* see based/based_callbacks.c:cib_process_command (and more) */
++static void
++mock_based_handle_query(crm_client_t *cib_client, uint32_t flags,
++                        const xmlNode *op_request)
++{
++    xmlNode *reply, *cib;
++    const char cib_str[] =
++#if 0
++"<cib/>";
++#else
++"<cib validate-with='pacemaker-1.2' admin_epoch='0' epoch='0' num_updates='0'>"\
++"  <configuration>"\
++"    <crm_config/>"\
++"    <nodes/>"\
++"    <resources/>"\
++"    <constraints/>"\
++"  </configuration>"\
++"  <status/>"\
++"</cib>";
++#endif
++    cib = xmlReadMemory(cib_str, sizeof(cib_str), "file:///tmp/foo", NULL, 0)->children;
++
++    reply = create_xml_node(NULL, "cib-reply");
++    crm_xml_add(reply, F_TYPE, T_CIB);
++    crm_xml_add(reply, F_CIB_OPERATION,
++                crm_element_value(op_request, F_CIB_OPERATION));
++    crm_xml_add(reply, F_CIB_CALLID,
++                crm_element_value(op_request, F_CIB_CALLID));
++    crm_xml_add(reply, F_CIB_CLIENTID,
++                crm_element_value(op_request, F_CIB_CLIENTID));
++    crm_xml_add_int(reply, F_CIB_CALLOPTS, flags);
++    crm_xml_add_int(reply, F_CIB_RC, pcmk_ok);
++
++    if (cib != NULL) {
++        crm_trace("Attaching reply output");
++        add_message_xml(reply, F_CIB_CALLDATA, cib);
++    }
++
++    crm_ipcs_send(cib_client, cib_client->request_id, reply,
++                  (flags & cib_sync_call) ? crm_ipc_flags_none
++                                          : crm_ipc_server_event);
++
++    free_xml(reply);
++    free_xml(cib);
++}
++
++/* see based/based_callbacks.c:cib_common_callback_worker */
++static void
++mock_based_common_callback_worker(uint32_t id, uint32_t flags,
++                                  xmlNode *op_request, crm_client_t *cib_client)
++{
++    const char *op = crm_element_value(op_request, F_CIB_OPERATION);
++
++    if (!strcmp(op, CRM_OP_REGISTER)) {
++        if (flags & crm_ipc_client_response) {
++            xmlNode *ack = create_xml_node(NULL, __FUNCTION__);
++            crm_xml_add(ack, F_CIB_OPERATION, CRM_OP_REGISTER);
++            crm_xml_add(ack, F_CIB_CLIENTID, cib_client->id);
++            crm_ipcs_send(cib_client, id, ack, flags);
++            cib_client->request_id = 0;
++            free_xml(ack);
++        }
++
++    } else if (!strcmp(op, T_CIB_NOTIFY)) {
++        int on_off = 0;
++        const char *type = crm_element_value(op_request, F_CIB_NOTIFY_TYPE);
++        crm_element_value_int(op_request, F_CIB_NOTIFY_ACTIVATE, &on_off);
++
++        crm_debug("Setting %s callbacks for %s (%s): %s",
++                  type, cib_client->name, cib_client->id, on_off ? "on" : "off");
++
++        if (!strcmp(type, T_CIB_DIFF_NOTIFY) && on_off) {
++            cib_client->options |= cib_notify_diff;
++        }
++
++        if (flags & crm_ipc_client_response) {
++            crm_ipcs_send_ack(cib_client, id, flags, "ack", __FUNCTION__, __LINE__);
++        }
++
++    } else if (!strcmp(op, CIB_OP_QUERY)) {
++        mock_based_handle_query(cib_client, flags, op_request);
++
++    } else {
++        crm_notice("Discarded request %s", op);
++    }
++}
++
++/* see based/based_callbacks.c:cib_ipc_dispatch_rw */
++static int32_t
++mock_based_dispatch_command(qb_ipcs_connection_t *c, void *data, size_t size)
++{
++    uint32_t id = 0, flags = 0;
++    int call_options = 0;
++    crm_client_t *cib_client = crm_client_get(c);
++    xmlNode *op_request = crm_ipcs_recv(cib_client, data, size, &id, &flags);
++
++    crm_notice("Got connection %p", c);
++    assert(op_request != NULL);
++
++    if (cib_client == NULL || op_request == NULL) {
++        if (op_request == NULL) {
++            crm_trace("Invalid message from %p", c);
++            crm_ipcs_send_ack(cib_client, id, flags, "nack", __FUNCTION__, __LINE__);
++        }
++        return 0;
++    }
++
++    crm_element_value_int(op_request, F_CIB_CALLOPTS, &call_options);
++    if (call_options & cib_sync_call) {
++        assert(flags & crm_ipc_client_response);
++        cib_client->request_id = id;  /* reply only to last in-flight request */
++    }
++
++    assert(cib_client->name == NULL);
++    crm_element_value_int(op_request, F_CIB_CALLOPTS, &call_options);
++    crm_xml_add(op_request, F_CIB_CLIENTID, cib_client->id);
++    crm_xml_add(op_request, F_CIB_CLIENTNAME, cib_client->name);
++
++    mock_based_common_callback_worker(id, flags, op_request, cib_client);
++    free_xml(op_request);
++
++    return 0;
++}
++
++/* * */
++
++size_t mock_based_register_module(module_t mod) {
++    module_t *module;
++    size_t ret = mock_based_context.modules_cnt++;
++
++    mock_based_context.modules = realloc(mock_based_context.modules,
++                                         sizeof(*mock_based_context.modules)
++                                          * mock_based_context.modules_cnt);
++    if (mock_based_context.modules == NULL
++            || (module = malloc(sizeof(module_t))) == NULL) {
++        abort();
++    }
++
++    memcpy(module, &mod, sizeof(mod));
++    mock_based_context.modules[mock_based_context.modules_cnt - 1] = module;
++
++    return ret;
++}
++
++static int
++mock_based_options(mock_based_context_t *ctxt,
++                   bool usage, int argc, const char *argv[])
++{
++    const char **args2argv;
++    char *s;
++    int ret = 0;
++
++    if (argc <= 1) {
++        const char *help_argv[] = {argv[0], "-h"};
++        return mock_based_options(ctxt, false, 2, (const char **) &help_argv);
++    }
++
++    for (size_t i = 1; i < argc; i++) {
++        if (argv[i][0] == '-' && argv[i][1] != '-' && argv[i][1] != '\0') {
++            if (usage) {
++                printf("\t-%c\t", argv[i][1]);
++            }
++            switch(argv[i][1]) {
++            case 'h':
++                if (usage) {
++                    printf("show this help message\n");
++                    ret = 1;
++
++                } else {
++                    if ((args2argv
++                            = malloc((ctxt->modules_cnt + 2) * sizeof(*args2argv))) == NULL
++                        || (s
++                            = malloc((ctxt->modules_cnt * 2 + 2) * sizeof(*s))) == NULL) {
++                        return -1;
++                    }
++                    s[0] = 'h';
++                    args2argv[ctxt->modules_cnt + 1] = (char[]){'-', 'h', '\0'};
++                    for (size_t c = ctxt->modules_cnt; c > 0; c--) {
++                        args2argv[c] = (char[]){'-', ctxt->modules[c - 1]->shortopt, '\0'};
++                        s[2 * (ctxt->modules_cnt - c) + 1] = '|';
++                        s[2 * (ctxt->modules_cnt - c) + 2] = ctxt->modules[c - 1]->shortopt;
++                    }
++                    s[ctxt->modules_cnt * 2 + 1] = '\0';
++                    printf("Usage: %s [-{%s}]\n", argv[0], s);
++                    (void) mock_based_options(ctxt, true, 2 + ctxt->modules_cnt, args2argv);
++                    free(args2argv);
++                    free(s);
++                }
++                return ret;
++            default:
++                for (size_t c = ctxt->modules_cnt; c > 0; c--) {
++                    if (ctxt->modules[c - 1]->shortopt == argv[i][1]) {
++                        ret = ctxt->modules[c - 1]->hooks.argparse(ctxt, usage, argc - i, &argv[i]);
++                        if (ret < 0) {
++                            break;
++                        } else if (ret > 1) {
++                            i += (ret - 1);
++                        }
++                    }
++                }
++                if (ret == 0) {
++                    printf("unknown option \"%s\"\n", argv[i]);
++                }
++                break;
++            }
++        }
++    }
++    return ret;
++}
++
++int main(int argc, char *argv[])
++{
++    mock_based_context_t *ctxt = &mock_based_context;
++
++    if (mock_based_options(ctxt, false, argc, (const char **) argv) > 0) {
++        struct qb_ipcs_service_handlers cib_ipc_callbacks = {
++            .connection_accept = mock_based_ipc_accept,
++            .connection_created = mock_based_ipc_created,
++            .msg_process = mock_based_dispatch_command,
++            .connection_closed = mock_based_ipc_closed,
++            .connection_destroyed = mock_based_ipc_destroy,
++        };
++        crm_log_preinit(NULL, argc, argv);
++        crm_log_init(NULL, LOG_DEBUG, false, true, argc, argv, false);
++        qb_ipcs_service_t *ipcs_command =
++            mainloop_add_ipc_server(cib_channel_rw, QB_IPC_NATIVE,
++                                    &cib_ipc_callbacks);
++        g_main_loop_run(g_main_loop_new(NULL, false));
++        qb_ipcs_destroy(ipcs_command);
++    }
++
++    for (size_t c = ctxt->modules_cnt; c > 0; c--) {
++        if (ctxt->modules[c - 1]->hooks.destroy != NULL) {
++            ctxt->modules[c - 1]->hooks.destroy(ctxt->modules[c - 1]);
++        }
++        free(mock_based_context.modules[c - 1]);
++    }
++
++    free(mock_based_context.modules);
++}
+diff --git a/maint/mocked/based.h b/maint/mocked/based.h
+new file mode 100644
+index 0000000..04d8eed
+--- /dev/null
++++ b/maint/mocked/based.h
+@@ -0,0 +1,47 @@
++/*
++ * Copyright 2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
++ *
++ * Licensed under the GNU General Public License version 2 or later (GPLv2+).
++ */
++
++#pragma once
++
++#include <stdlib.h>  /* size_t */
++#include <stdbool.h>  /* bool */
++
++#include <crm/common/ipcs.h>  /* crm_client_t */
++
++
++struct module_s;
++
++typedef struct mock_based_context_s {
++    size_t modules_cnt;
++    struct module_s** modules;
++} mock_based_context_t;
++
++
++typedef int (*mock_based_argparse_hook)(mock_based_context_t *,
++                                        bool, int,
++                                        const char *[]);
++
++typedef void (*mock_based_destroy_hook)(struct module_s *);
++
++/* specialized callbacks... */
++
++typedef struct mock_based_hooks_s {
++    /* generic ones */
++    mock_based_argparse_hook argparse;
++    mock_based_destroy_hook destroy;
++
++    /* specialized callbacks... */
++} mock_based_hooks_t;
++
++typedef struct module_s {
++    char shortopt;
++    mock_based_hooks_t hooks;
++    void *priv;
++} module_t;
++
++size_t mock_based_register_module(module_t mod);
+-- 
+1.8.3.1
+
+
+From 1498a811283a00623c4410c7e9abbd8b9ff3fe53 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Wed, 15 May 2019 18:23:56 +0200
+Subject: [PATCH 02/96] Maint: mocked/based: add based-notifyfenced module
+
+[this is a rebase from 2.0, occurrences of "based" mean "cib", just as
+"fenced" means "stonithd" in this context , but these are not changed
+everywhere, especially not in the name of the mocked daemon + modules,
+for easy comparability amongst the branches]
+
+This is to demonstrate how the current arrangement at _other_
+daemons (fenced in particular, as the name may suggest) is sometimes
+insufficient (see the following fix for fenced daemon) regarding
+liveness requirements, since a high-rate stream of notifications from
+pacemaker-based can effectively block any progress regarding the
+native services they provide (starving their own clients out).
+
+It would be rather difficult to achieve the same triggering
+circumstances artificially in vanilla settings, especially when
+constrained in the number-of-nodes/resources dimension (bothersome
+artificial-messaging-load-through-configuration) -- leveraging the
+skeleton from the previous commit, we can now emulate the same just
+with a single node under test and with next to zero configuration
+-- just configure a single node cluster through corosync.conf, start
+corosync, run "./based -N" (preferably as hacluster:haclient), only
+then pacemaker-fenced and try to communicate with it (e.g. via
+stonith_admin) -- see the in-line comment wrt. how to use this module.
+
+Note that this first module has some parts ifdef'd out since it's
+intended also as a template for writing additional modules
+(a minimal sketch follows after the list below) -- you'll:
+
+- copy based-notifyfenced.c as based-mymodule.c, and edit it, so that...
+
+- OPTCHAR is a new, unique short option character
+  (preferably uppercase, leaving lower-cased letters reserved as
+  action modifiers, cf. xml/regression.sh)
+
+- drop everything unneeded except for mock_based_MOD_argparse_hook
+  and mock_based_MOD_init, configure the callbacks there respectively
+
+- should the new hook mounting place be needed, declare new hook
+  prototype in based.h, append such respective member
+  to the struct mock_based_hooks_s there, locate the corresponding
+  location for its application in based.c and apply it here
+  (follow the example of hooks.cib_notify)
+
+- add the respective "BASED_OBJECTS += based-MOD.o" to Makefile
+
+- test...
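+
+To tie the steps above together, here is a minimal sketch of what such a
+hypothetical based-mymodule.c could boil down to (illustrative only; the
+file name, option character and hook names are placeholders, and
+based-notifyfenced.c added below is the authoritative template):
+
+```
+/* based-mymodule.c -- hypothetical minimal module, for illustration only */
+#include <stdio.h>   /* printf */
+
+#include "based.h"
+
+#define OPTCHAR 'X'  /* new, unique short option character */
+static size_t module_handle;
+
+static int
+mock_based_mymodule_argparse_hook(struct mock_based_context_s *ctxt,
+                                  bool usage, int argc_to_go,
+                                  const char *argv_to_go[])
+{
+    const char *opt = *argv_to_go;
+
+    if (*opt == '-') {
+        opt++;  /* tolerate the leading dash passed along by the skeleton */
+    }
+    if (*opt == OPTCHAR) {
+        if (usage) {
+            printf("describe what the module does here\n");
+        }
+        return 1;  /* consumed this single argument */
+    }
+    return 0;
+}
+
+__attribute__((__constructor__))
+void
+mock_based_mymodule_init(void) {
+    module_handle = mock_based_register_module((module_t){
+        .shortopt = OPTCHAR,
+        .hooks = {
+            .argparse = mock_based_mymodule_argparse_hook,
+            /* wire up further (possibly newly added) specialized hooks here */
+        }
+    });
+}
+```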
+---
+ maint/mocked/Makefile             |   3 +
+ maint/mocked/based-notifyfenced.c | 247 ++++++++++++++++++++++++++++++++++++++
+ maint/mocked/based.c              |   8 ++
+ maint/mocked/based.h              |   2 +
+ 4 files changed, 260 insertions(+)
+ create mode 100644 maint/mocked/based-notifyfenced.c
+
+diff --git a/maint/mocked/Makefile b/maint/mocked/Makefile
+index 05b3cb4..5e3a1a8 100644
+--- a/maint/mocked/Makefile
++++ b/maint/mocked/Makefile
+@@ -30,6 +30,9 @@ PROGRAMS = based
+ 
+ BASED_OBJECTS = based.o
+ 
++# include or not the modules as you wish
++BASED_OBJECTS += based-notifyfenced.o
++
+ all: ${PROGRAMS}
+ 
+ based: $(BASED_OBJECTS)
+diff --git a/maint/mocked/based-notifyfenced.c b/maint/mocked/based-notifyfenced.c
+new file mode 100644
+index 0000000..90cad48
+--- /dev/null
++++ b/maint/mocked/based-notifyfenced.c
+@@ -0,0 +1,247 @@
++/*
++ * Copyright 2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
++ *
++ * Licensed under the GNU General Public License version 2 or later (GPLv2+).
++ */
++
++/*
++ * Intended demo use case:
++ *
++ * - as root, start corosync
++ * - start "./based -N"; hint:
++ *   su -s /bin/sh -c './based -N' hacluster
++ * - start stonithd; hint:
++ *   su -c 'env PCMK_logpriority=crit ../../fencing/stonithd'
++ * - wait a bit (5 < seconds < 20)
++ * - as haclient group (or root), run "stonith_admin --list-registered"
++ * - observe whether such invocation is blocked or not
++ */
++
++
++#include <stdio.h>  /* printf, perror */
++
++#include "crm/cib.h"  /* cib_zero_copy */
++#include "crm/cib/internal.h"  /* CIB_OP_CREATE */
++#include "crm/msg_xml.h"  /* F_SUBTYPE */
++#include "cib/callbacks.h"  /* cib_notify_diff */
++
++#include "based.h"
++
++
++#define OPTCHAR 'N'
++static size_t module_handle;
++
++
++struct cib_notification_s {
++    xmlNode *msg;
++    struct iovec *iov;
++    int32_t iov_size;
++};
++
++/* see based/based_notify.c:cib_notify_send_one */
++static bool
++mock_based_cib_notify_send_one(crm_client_t *client, xmlNode *xml)
++{
++    const char *type = NULL;
++    bool do_send = false;
++
++    struct iovec *iov;
++    ssize_t rc = crm_ipc_prepare(0, xml, &iov, 0);
++    struct cib_notification_s update = {
++        .msg = xml,
++        .iov = iov,
++        .iov_size = rc,
++    };
++
++    CRM_CHECK(client != NULL, return true);
++    if (client->ipcs == NULL && client->remote == NULL) {
++        crm_warn("Skipping client with NULL channel");
++        return FALSE;
++    }
++
++    type = crm_element_value(update.msg, F_SUBTYPE);
++    CRM_LOG_ASSERT(type != NULL);
++    if (is_set(client->options, cib_notify_diff)
++            && safe_str_eq(type, T_CIB_DIFF_NOTIFY)) {
++
++        if (crm_ipcs_sendv(client, update.iov, crm_ipc_server_event) < 0)
++            crm_warn("Notification of client %s/%s failed", client->name, client->id);
++
++    }
++    if (iov) {
++        free(iov[0].iov_base);
++        free(iov[1].iov_base);
++        free(iov);
++    }
++
++    return FALSE;
++}
++
++/* see based/based_notify.c:do_cib_notify + cib_notify_send */
++void
++do_cib_notify(crm_client_t *cib_client, int options, const char *op,
++              xmlNode *update, int result, xmlNode *result_data,
++              const char *msg_type)
++{
++    xmlNode *update_msg = NULL;
++    const char *id = NULL;
++
++    update_msg = create_xml_node(NULL, "notify");
++
++
++    crm_xml_add(update_msg, F_TYPE, T_CIB_NOTIFY);
++    crm_xml_add(update_msg, F_SUBTYPE, msg_type);
++    crm_xml_add(update_msg, F_CIB_OPERATION, op);
++    crm_xml_add_int(update_msg, F_CIB_RC, result);
++
++    if (result_data != NULL) {
++        id = crm_element_value(result_data, XML_ATTR_ID);
++        if (id != NULL)
++            crm_xml_add(update_msg, F_CIB_OBJID, id);
++    }
++
++    if (update != NULL) {
++        crm_trace("Setting type to update->name: %s", crm_element_name(update));
++        crm_xml_add(update_msg, F_CIB_OBJTYPE, crm_element_name(update));
++
++    } else if (result_data != NULL) {
++        crm_trace("Setting type to new_obj->name: %s", crm_element_name(result_data));
++        crm_xml_add(update_msg, F_CIB_OBJTYPE, crm_element_name(result_data));
++
++    } else {
++        crm_trace("Not Setting type");
++    }
++
++#if 0
++    attach_cib_generation(update_msg, "cib_generation", the_cib);
++#endif
++
++    if (update != NULL) {
++        add_message_xml(update_msg, F_CIB_UPDATE, update);
++    }
++    if (result_data != NULL) {
++        add_message_xml(update_msg, F_CIB_UPDATE_RESULT, result_data);
++    }
++
++    mock_based_cib_notify_send_one(cib_client, update_msg);
++    free_xml(update_msg);
++}
++
++static gboolean
++mock_based_notifyfencedmer_callback_worker(gpointer data)
++{
++    crm_client_t *cib_client = (crm_client_t *) data;
++
++    xmlNode *result_data;
++    xmlNode *input, *update;
++    int options = 0;
++    char update_str[4096];
++
++    options |= cib_zero_copy;
++
++
++    input = create_xml_node(NULL, "cib");
++
++    /* spam it */
++#if 0
++    for (size_t i = 0; i < SIZE_MAX - 1; i++) {
++#else
++    for (size_t i = 0; i < 10000; i++) {
++#endif
++        /* NOTE: we need to trigger fenced attention, add new fence device */
++        snprintf(update_str, sizeof(update_str),
++"<diff crm_feature_set='3.1.0' format='1'>\n"
++"  <diff-removed admin_epoch='%1$llu' epoch='%1$llu' num_updates='%1$llu'>\n"
++"    <cib admin_epoch='%1$llu' epoch='%1$llu' num_updates='%1$llu'/>\n"
++"  </diff-removed>\n"
++"  <diff-added admin_epoch='%2$llu' epoch='%2$llu' num_updates='%2$llu'>\n"
++"    <cib validate-with='pacemaker-1.2' admin_epoch='%2$llu' epoch='%2$llu' num_updates='%2$llu'>\n"
++"      <configuration>\n"
++"        <resources>\n"
++"          <primitive id='FENCEDEV-fence-dummy-%2$llu' class='stonith' type='__apparently_bogus__' __crm_diff_marker__='added:top'/>\n"
++"        </resources>\n"
++"      </configuration>\n"
++"    </cib>\n"
++"  </diff-added>\n"
++"</diff>\n", i, i+1);
++        update = xmlReadMemory(update_str, sizeof(update_str),
++                               "file:///tmp/update", NULL, 0)->children;
++        do_cib_notify(cib_client, options, CIB_OP_CREATE, input, pcmk_ok,
++                      update, T_CIB_DIFF_NOTIFY);
++        free_xml(update);
++    };
++    free_xml(input);
++    return FALSE;  /* one-shot timer callback; do not reschedule */
++}
++
++static void
++mock_based_notifyfenced_cib_notify_hook(crm_client_t *cib_client)
++{
++
++    /* MOCK: client asked for upcoming diff's, let's
++             spam it a bit after a while... */
++    crm_info("Going to spam %s (%s) in 5 seconds...",
++             cib_client->name, cib_client->id);
++    mainloop_timer_start(mainloop_timer_add("spammer", 5000, FALSE,
++                         mock_based_notifyfencedmer_callback_worker,
++                         cib_client));
++}
++
++/* * */
++
++static int
++mock_based_notifyfenced_argparse_hook(struct mock_based_context_s *ctxt,
++                                    bool usage, int argc_to_go,
++                                    const char *argv_to_go[])
++{
++    const char *opt = *argv_to_go;
++restart:
++    switch(*opt) {
++    case '-':
++        if (opt == *argv_to_go) {
++            opt++;
++            goto restart;
++        }
++        break;
++    case OPTCHAR:
++        if (usage) {
++            printf("spam the \"cib diff\" notification client"
++                   " (targeting pacemaker-fenced in particular)\n");
++
++        } else {
++#if 0
++            ctxt->modules[module_handle]->priv =
++                malloc(sizeof(mock_based_notifyfenced_priv_t));
++            if (ctxt->modules[module_handle]->priv == NULL) {
++                perror("malloc");
++                return -1;
++            }
++#endif
++        }
++        return 1;
++    }
++    return 0;
++}
++
++#if 0
++static void
++mock_based_notifyfenced_destroy_hook(module_t *mod) {
++    free(mod->priv);
++}
++#endif
++
++__attribute__((__constructor__))
++void
++mock_based_notifyfenced_init(void) {
++    module_handle = mock_based_register_module((module_t){
++        .shortopt = OPTCHAR,
++        .hooks = {
++            .argparse = mock_based_notifyfenced_argparse_hook,
++            //.destroy = mock_based_notifyfenced_destroy_hook,
++            /* specialized hooks */
++            .cib_notify = mock_based_notifyfenced_cib_notify_hook,
++        }
++    });
++}
+diff --git a/maint/mocked/based.c b/maint/mocked/based.c
+index 451a384..d340274 100644
+--- a/maint/mocked/based.c
++++ b/maint/mocked/based.c
+@@ -138,6 +138,7 @@ mock_based_common_callback_worker(uint32_t id, uint32_t flags,
+                                   xmlNode *op_request, crm_client_t *cib_client)
+ {
+     const char *op = crm_element_value(op_request, F_CIB_OPERATION);
++    mock_based_context_t *ctxt;
+ 
+     if (!strcmp(op, CRM_OP_REGISTER)) {
+         if (flags & crm_ipc_client_response) {
+@@ -161,6 +162,13 @@ mock_based_common_callback_worker(uint32_t id, uint32_t flags,
+             cib_client->options |= cib_notify_diff;
+         }
+ 
++        ctxt = (mock_based_context_t *) cib_client->userdata;
++        for (size_t c = ctxt->modules_cnt; c > 0; c--) {
++            if (ctxt->modules[c - 1]->hooks.cib_notify != NULL) {
++                ctxt->modules[c - 1]->hooks.cib_notify(cib_client);
++            }
++        }
++
+         if (flags & crm_ipc_client_response) {
+             crm_ipcs_send_ack(cib_client, id, flags, "ack", __FUNCTION__, __LINE__);
+         }
+diff --git a/maint/mocked/based.h b/maint/mocked/based.h
+index 04d8eed..dcebf0e 100644
+--- a/maint/mocked/based.h
++++ b/maint/mocked/based.h
+@@ -29,6 +29,7 @@ typedef int (*mock_based_argparse_hook)(mock_based_context_t *,
+ typedef void (*mock_based_destroy_hook)(struct module_s *);
+ 
+ /* specialized callbacks... */
++typedef void (*mock_based_cib_notify_hook)(crm_client_t *);
+ 
+ typedef struct mock_based_hooks_s {
+     /* generic ones */
+@@ -36,6 +37,7 @@ typedef struct mock_based_hooks_s {
+     mock_based_destroy_hook destroy;
+ 
+     /* specialized callbacks... */
++    mock_based_cib_notify_hook cib_notify;
+ } mock_based_hooks_t;
+ 
+ typedef struct module_s {
+-- 
+1.8.3.1
+
+
+From eee76118f2a557a79bda0214ea5c0974a7cd40c2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Wed, 29 Aug 2018 15:49:58 +0200
+Subject: [PATCH 03/96] Low: mainloop: make it possible to specify server's
+ priority in mainloop
+
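+A short usage sketch (illustrative only: the end-point name and the empty
+handlers struct are placeholders; the stonith-ng change later in this
+series is the real in-tree user):
+
+```
+#include <crm/common/mainloop.h>  /* mainloop_add_ipc_server_with_prio */
+#include <qb/qbipcs.h>            /* qb_ipcs_service_t, QB_IPC_NATIVE */
+#include <qb/qbloop.h>            /* QB_LOOP_HIGH */
+
+/* fill in .connection_accept, .msg_process, etc. exactly as you would
+ * for mainloop_add_ipc_server() */
+static struct qb_ipcs_service_handlers example_callbacks;
+
+static qb_ipcs_service_t *example_ipcs = NULL;
+
+static void
+example_ipc_server_init(void)
+{
+    /* same as mainloop_add_ipc_server(), but the end-point's events are
+     * served with elevated priority in the abstract handling loop */
+    example_ipcs = mainloop_add_ipc_server_with_prio("example-api",
+                                                     QB_IPC_NATIVE,
+                                                     &example_callbacks,
+                                                     QB_LOOP_HIGH);
+}
+```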
+---
+ include/crm/common/mainloop.h | 24 +++++++++++++
+ lib/common/mainloop.c         | 82 +++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 103 insertions(+), 3 deletions(-)
+
+diff --git a/include/crm/common/mainloop.h b/include/crm/common/mainloop.h
+index eab31ac..e00da48 100644
+--- a/include/crm/common/mainloop.h
++++ b/include/crm/common/mainloop.h
+@@ -67,6 +67,30 @@ struct ipc_client_callbacks {
+ qb_ipcs_service_t *mainloop_add_ipc_server(const char *name, enum qb_ipc_type type,
+                                            struct qb_ipcs_service_handlers *callbacks);
+ 
++/*!
++ * \brief Start server-side API end-point, hooked into the internal event loop
++ *
++ * \param[in] name    name of the IPC end-point ("address" for the client)
++ * \param[in] type    selects libqb's IPC back-end (or use #QB_IPC_NATIVE)
++ * \param[in] callbacks  defines libqb's IPC service-level handlers
++ * \param[in] prio    priority relative to other events handled in the
++ *                      abstract handling loop, use #QB_LOOP_MED when unsure
++ *
++ * \return libqb's opaque handle to the created service abstraction
++ *
++ * \note For portability reasons, do not use this function if you keep
++ *       \p prio as #QB_LOOP_MED; stick with #mainloop_add_ipc_server
++ *       (with exactly such semantics) instead (once you link with this new
++ *       symbol employed, you can't downgrade the library freely anymore).
++ *
++ * \note The intended effect will only get fully reflected when run-time
++ *       linked to patched libqb: https://github.com/ClusterLabs/libqb/pull/352
++ */
++qb_ipcs_service_t *mainloop_add_ipc_server_with_prio(const char *name,
++                                                    enum qb_ipc_type type,
++                                                    struct qb_ipcs_service_handlers *callbacks,
++                                                    enum qb_loop_priority prio);
++
+ void mainloop_del_ipc_server(qb_ipcs_service_t * server);
+ 
+ mainloop_io_t *mainloop_add_ipc_client(const char *name, int priority, size_t max_size,
+diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c
+index 60726cb..9bdd026 100644
+--- a/lib/common/mainloop.c
++++ b/lib/common/mainloop.c
+@@ -456,6 +456,65 @@ gio_poll_destroy(gpointer data)
+     }
+ }
+ 
++/*!
++ * \internal
++ * \brief Convert libqb's poll priority into GLib's one
++ *
++ * \param[in] prio  libqb's poll priority (#QB_LOOP_MED assumed as fallback)
++ *
++ * \return  best matching GLib's priority
++ */
++static gint
++conv_prio_libqb2glib(enum qb_loop_priority prio)
++{
++    gint ret = G_PRIORITY_DEFAULT;
++    switch (prio) {
++        case QB_LOOP_LOW:
++            ret = G_PRIORITY_LOW;
++            break;
++        case QB_LOOP_HIGH:
++            ret = G_PRIORITY_HIGH;
++            break;
++        default:
++            crm_trace("Invalid libqb's loop priority %d, assuming QB_LOOP_MED",
++                      prio);
++            /* fall-through */
++        case QB_LOOP_MED:
++            break;
++    }
++    return ret;
++}
++
++/*!
++ * \internal
++ * \brief Convert libqb's poll priority to rate limiting spec
++ *
++ * \param[in] prio  libqb's poll priority (#QB_LOOP_MED assumed as fallback)
++ *
++ * \return  best matching rate limiting spec
++ */
++static enum qb_ipcs_rate_limit
++conv_libqb_prio2ratelimit(enum qb_loop_priority prio)
++{
++    /* this is an inversion of what libqb's qb_ipcs_request_rate_limit does */
++    enum qb_ipcs_rate_limit ret = QB_IPCS_RATE_NORMAL;
++    switch (prio) {
++        case QB_LOOP_LOW:
++            ret = QB_IPCS_RATE_SLOW;
++            break;
++        case QB_LOOP_HIGH:
++            ret = QB_IPCS_RATE_FAST;
++            break;
++        default:
++            crm_trace("Invalid libqb's loop priority %d, assuming QB_LOOP_MED",
++                      prio);
++            /* fall-through */
++        case QB_LOOP_MED:
++            break;
++    }
++    return ret;
++}
++
+ static int32_t
+ gio_poll_dispatch_update(enum qb_loop_priority p, int32_t fd, int32_t evts,
+                          void *data, qb_ipcs_dispatch_fn_t fn, int32_t add)
+@@ -502,8 +561,8 @@ gio_poll_dispatch_update(enum qb_loop_priority p, int32_t fd, int32_t evts,
+     adaptor->p = p;
+     adaptor->is_used++;
+     adaptor->source =
+-        g_io_add_watch_full(channel, G_PRIORITY_DEFAULT, evts, gio_read_socket, adaptor,
+-                            gio_poll_destroy);
++        g_io_add_watch_full(channel, conv_prio_libqb2glib(p), evts,
++                            gio_read_socket, adaptor, gio_poll_destroy);
+ 
+     /* Now that mainloop now holds a reference to channel,
+      * thanks to g_io_add_watch_full(), drop ours from g_io_channel_unix_new().
+@@ -587,7 +646,15 @@ pick_ipc_type(enum qb_ipc_type requested)
+ 
+ qb_ipcs_service_t *
+ mainloop_add_ipc_server(const char *name, enum qb_ipc_type type,
+-                        struct qb_ipcs_service_handlers * callbacks)
++                        struct qb_ipcs_service_handlers *callbacks)
++{
++    return mainloop_add_ipc_server_with_prio(name, type, callbacks, QB_LOOP_MED);
++}
++
++qb_ipcs_service_t *
++mainloop_add_ipc_server_with_prio(const char *name, enum qb_ipc_type type,
++                                  struct qb_ipcs_service_handlers *callbacks,
++                                  enum qb_loop_priority prio)
+ {
+     int rc = 0;
+     qb_ipcs_service_t *server = NULL;
+@@ -599,6 +666,15 @@ mainloop_add_ipc_server(const char *name, enum qb_ipc_type type,
+     crm_client_init();
+     server = qb_ipcs_create(name, 0, pick_ipc_type(type), callbacks);
+ 
++    if (server == NULL) {
++        crm_err("Could not create %s IPC server: %s (%d)", name, pcmk_strerror(rc), rc);
++        return NULL;
++    }
++
++    if (prio != QB_LOOP_MED) {
++        qb_ipcs_request_rate_limit(server, conv_libqb_prio2ratelimit(prio));
++    }
++
+ #ifdef HAVE_IPCS_GET_BUFFER_SIZE
+     /* All clients should use at least ipc_buffer_max as their buffer size */
+     qb_ipcs_enforce_buffer_size(server, crm_ipc_default_buffer_size());
+-- 
+1.8.3.1
+
+
+From 428a9c873b661947af1e142ec8fa9fcf85328dcd Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Wed, 29 Aug 2018 15:50:57 +0200
+Subject: [PATCH 04/96] High: stonith-ng's function cannot be blocked with CIB
+ updates forever
+
+In the high-load (or high-rate-config-change) scenarios,
+pacemaker-fenced would be unable to provide service when basically DoS'd
+with CIB update notifications.  Try to reconcile that with an elevated
+priority of the server's proper listening interface in the mainloop; at
+worst, it will try to fence with a slightly outdated config, which appears
+to be less bad than not carrying out the execution at all.
+Other daemons might be considered as well.
+
+Prerequisites:
+- https://github.com/ClusterLabs/libqb/pull/352
+  (libqb used to contain a bug due to which one particular step in the
+  initial-client-connection-accepting-at-the-server procedure would be
+  carried out with a hard-coded (and hence possibly lower than competing
+  events') priority, which backfires exactly in this case once the
+  pacemaker part is fixed -- by means of elevating the priority of the
+  API end-point of fenced so that it won't get consistently overridden
+  by a non-socket-based event source/trigger)
+
+How to verify:
+- mocked/based -N (see commit adding that module to mocked based daemon)
+---
+ lib/common/utils.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/lib/common/utils.c b/lib/common/utils.c
+index f3f60ed..b87454e 100644
+--- a/lib/common/utils.c
++++ b/lib/common/utils.c
+@@ -1223,7 +1223,8 @@ attrd_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers
+ void
+ stonith_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb)
+ {
+-    *ipcs = mainloop_add_ipc_server("stonith-ng", QB_IPC_NATIVE, cb);
++    *ipcs = mainloop_add_ipc_server_with_prio("stonith-ng", QB_IPC_NATIVE, cb,
++                                              QB_LOOP_HIGH);
+ 
+     if (*ipcs == NULL) {
+         crm_err("Failed to create stonith-ng servers: exiting and inhibiting respawn.");
+-- 
+1.8.3.1
+
+
+From 75f507e6432e414c78938fc83d18493a998c98b6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Wed, 5 Jun 2019 15:12:23 +0200
+Subject: [PATCH 05/96] Doc: Pacemaker Development: intro of mocked daemons
+
+Partly as documentation for ourselves.
+---
+ doc/Pacemaker_Development/en-US/Ch-Hacking.txt     | 52 ++++++++++++++++++++++
+ .../en-US/Pacemaker_Development.xml                | 11 ++---
+ .../en-US/Revision_History.xml                     | 13 ++++++
+ 3 files changed, 71 insertions(+), 5 deletions(-)
+ create mode 100644 doc/Pacemaker_Development/en-US/Ch-Hacking.txt
+
+diff --git a/doc/Pacemaker_Development/en-US/Ch-Hacking.txt b/doc/Pacemaker_Development/en-US/Ch-Hacking.txt
+new file mode 100644
+index 0000000..d8d8173
+--- /dev/null
++++ b/doc/Pacemaker_Development/en-US/Ch-Hacking.txt
+@@ -0,0 +1,52 @@
++:compat-mode: legacy
++= Advanced Hacking on the Project =
++
++anchor:ch-hacking[Chapter 4. Hacking on Pacemaker]
++
++[id="hacking-foreword"]
++== Foreword ==
++
++This chapter aims to be a gentle introduction (or perhaps, rather
++a summarization of advanced techniques we developed for backreferences)
++to how to deal with the Pacemaker internals effectively,
++for instance, how to:
++
++* verify various interesting interaction-based properties
++
++or simply put, all that is in the interest of the core contributors
++on the project to know, master, and (preferably) also evolve
++-- way beyond what is in the presumed repertoire of a generic
++contributor role, which is detailed in other chapters of this guide.
++
++Therefore, if you think you will not benefit from any such details
++in the scope of this chapter, feel free to skip it.
++
++== Working with mocked daemons ==
++
++Since the Pacemaker run-time consists of multiple co-operating daemons
++as detailed elsewhere, tracking down the interaction details amongst
++them can be rather cumbersome.  Since rebuilding the existing daemons in
++a more modular way (as opposed to clusters of mutually dependent
++functions) would be a substantial undertaking, we elected to grow
++separate bare-bones counterparts built evolutionarily as skeletons just
++to get the basic (long-term stabilized) communication with typical
++daemon clients going, and to add new modules in their outer circles
++(plus minimalistic hook support at those cores) on a demand-driven basis.
++
++The code for these is located at `maint/mocked`; for instance, the
++`based-notifyfenced.c` module of the `based.c` skeleton mocking the
++`pacemaker-based` daemon was written exactly to fulfill such an
++investigation-helper role (the case at hand was also an impulse to kick
++off this very sort of maintenance support material, to begin with).
++
++Non-trivial knowledge of Pacemaker internals and other skills are
++needed to use such devised helpers, but looking at it the other way
++around, some sorts of investigation may be even heftier, so this can
++be the least-effort choice.  And when that's the case, advanced
++contributors are expected to contribute the extensions they used to
++validate the reproducibility/actual correctness of the fix along with
++the actual code modifications.  This way, the rest of the development
++team is not required to deal with elaborate preconditions, be left
++guessing, or even forced to take on blind faith the causes,
++consequences and validity of the raised issues/fixes, for the greater
++benefit of all.
+diff --git a/doc/Pacemaker_Development/en-US/Pacemaker_Development.xml b/doc/Pacemaker_Development/en-US/Pacemaker_Development.xml
+index 854d77a..6641d3b 100644
+--- a/doc/Pacemaker_Development/en-US/Pacemaker_Development.xml
++++ b/doc/Pacemaker_Development/en-US/Pacemaker_Development.xml
+@@ -4,10 +4,11 @@
+ %BOOK_ENTITIES;
+ ]>
+ <book>
+-    <xi:include href="Book_Info.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+-    <xi:include href="Ch-FAQ.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+-    <xi:include href="Ch-Coding.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+-    <xi:include href="Ch-Python.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
+-    <xi:include href="Revision_History.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
++    <xi:include href="Book_Info.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
++    <xi:include href="Ch-FAQ.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
++    <xi:include href="Ch-Coding.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
++    <xi:include href="Ch-Python.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
++    <xi:include href="Ch-Hacking.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
++    <xi:include href="Revision_History.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+     <index></index>
+ </book>
+diff --git a/doc/Pacemaker_Development/en-US/Revision_History.xml b/doc/Pacemaker_Development/en-US/Revision_History.xml
+index fd29d52..10ae102 100644
+--- a/doc/Pacemaker_Development/en-US/Revision_History.xml
++++ b/doc/Pacemaker_Development/en-US/Revision_History.xml
+@@ -35,6 +35,19 @@
+         </revdescription>
+       </revision>
+ 
++      <revision>
++        <revnumber>1-2</revnumber>
++        <date>Fri 17 May 2019</date>
++        <author>
++          <firstname>Jan</firstname><surname>Pokorný</surname>
++          <email>poki@redhat.com</email>
++        </author>
++        <revdescription>
++          <simplelist><member>Start capturing hacking howto
++                              for advanced contributors</member></simplelist>
++        </revdescription>
++      </revision>
++
+     </revhistory>
+   </simpara>
+ </appendix>
+-- 
+1.8.3.1
+
+
+From 96244e2c9bb4f32dc38bd4fa1c091c9f0d5bbf57 Mon Sep 17 00:00:00 2001
+From: "Gao,Yan" <ygao@suse.com>
+Date: Fri, 26 Apr 2019 11:52:59 +0200
+Subject: [PATCH 06/96] Fix: libcrmcommon: correctly apply XML diffs with
+ multiple move/create changes
+
+Given a resource group:
+```
+<group id="dummies">
+  <primitive id="dummy0"/>
+  <primitive id="dummy1"/>
+  <primitive id="dummy2"/>
+  <primitive id="dummy3"/>
+  <primitive id="dummy4"/>
+</group>
+```
+
+, if we'd like to change it to:
+```
+<group id="dummies">
+  <primitive id="dummy3"/>
+  <primitive id="dummy4"/>
+  <primitive id="dummy2"/>
+  <primitive id="dummy0"/>
+  <primitive id="dummy1"/>
+</group>
+```
+
+, the generated XML diff would be like:
+```
+<diff format="2">
+  <change operation="move" path="//primitive[@id=dummy3]" position="0"/>
+  <change operation="move" path="//primitive[@id=dummy4]" position="1"/>
+  <change operation="move" path="//primitive[@id=dummy0]" position="3"/>
+  <change operation="move" path="//primitive[@id=dummy1]" position="4"/>
+</diff>
+```
+
+Previously after applying the XML diff, the resulting XML would be a mess:
+```
+<group id="dummies">
+  <primitive id="dummy3"/>
+  <primitive id="dummy4"/>
+  <primitive id="dummy0"/>
+  <primitive id="dummy2"/>
+  <primitive id="dummy1"/>
+</group>
+```
+It's because the positions of the already moved XML objects could be
+affected by the later moved objects.
+
+This commit fixes it by temporarily putting "move" objects after the
+last sibling and also delaying the adding of any "create" objects, then
+placing them to the target positions in the right order.
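+
+The following stand-alone toy program (plain libxml2, not the Pacemaker
+implementation; the file and helper names are made up) illustrates why
+parking the moved nodes after the last sibling and then placing them in
+ascending order of their recorded positions yields the intended order
+for the example above:
+
+```
+/* xml_move_demo.c -- build: cc xml_move_demo.c $(xml2-config --cflags --libs) */
+#include <stdio.h>
+#include <string.h>
+#include <libxml/parser.h>
+#include <libxml/tree.h>
+
+static xmlNode *
+child_by_id(xmlNode *parent, const char *id)
+{
+    for (xmlNode *child = parent->children; child; child = child->next) {
+        xmlChar *prop = xmlGetProp(child, (const xmlChar *) "id");
+        int match = (prop != NULL) && (strcmp((const char *) prop, id) == 0);
+
+        if (prop != NULL) {
+            xmlFree(prop);
+        }
+        if (match) {
+            return child;
+        }
+    }
+    return NULL;
+}
+
+/* re-link node as the child of parent sitting at index position */
+static void
+insert_at(xmlNode *parent, xmlNode *node, int position)
+{
+    xmlNode *sibling;
+
+    xmlUnlinkNode(node);
+    sibling = parent->children;
+    for (int i = 0; (sibling != NULL) && (i < position); i++) {
+        sibling = sibling->next;
+    }
+    if (sibling != NULL) {
+        xmlAddPrevSibling(sibling, node);
+    } else {
+        xmlAddChild(parent, node);
+    }
+}
+
+int
+main(void)
+{
+    const char *orig = "<group>"
+                       "<primitive id='dummy0'/><primitive id='dummy1'/>"
+                       "<primitive id='dummy2'/><primitive id='dummy3'/>"
+                       "<primitive id='dummy4'/></group>";
+    /* the "move" changes from the diff above: (id, target position) */
+    struct { const char *id; int position; } moves[] = {
+        { "dummy3", 0 }, { "dummy4", 1 }, { "dummy0", 3 }, { "dummy1", 4 },
+    };
+    xmlDoc *doc = xmlReadMemory(orig, strlen(orig), NULL, NULL, 0);
+    xmlNode *group = xmlDocGetRootElement(doc);
+
+    /* pass 1: temporarily park each moved node after the last sibling so
+     * that later moves cannot shift positions that were already fixed up */
+    for (size_t i = 0; i < 4; i++) {
+        xmlNode *node = child_by_id(group, moves[i].id);
+
+        xmlUnlinkNode(node);
+        xmlAddChild(group, node);
+    }
+
+    /* pass 2: place the parked nodes, in ascending order of position */
+    for (size_t i = 0; i < 4; i++) {
+        insert_at(group, child_by_id(group, moves[i].id), moves[i].position);
+    }
+
+    /* prints: dummy3 dummy4 dummy2 dummy0 dummy1 */
+    for (xmlNode *child = group->children; child; child = child->next) {
+        xmlChar *prop = xmlGetProp(child, (const xmlChar *) "id");
+
+        printf("%s ", (const char *) prop);
+        xmlFree(prop);
+    }
+    printf("\n");
+
+    xmlFreeDoc(doc);
+    return 0;
+}
+```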
+---
+ lib/common/xml.c | 126 ++++++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 97 insertions(+), 29 deletions(-)
+
+diff --git a/lib/common/xml.c b/lib/common/xml.c
+index 6728247..f7d6c70 100644
+--- a/lib/common/xml.c
++++ b/lib/common/xml.c
+@@ -1467,11 +1467,40 @@ __xml_find_path(xmlNode *top, const char *key, int target_position)
+     return target;
+ }
+ 
++typedef struct xml_change_obj_s {
++    xmlNode *change;
++    xmlNode *match;
++} xml_change_obj_t;
++
++static gint
++sort_change_obj_by_position(gconstpointer a, gconstpointer b)
++{
++    const xml_change_obj_t *change_obj_a = a;
++    const xml_change_obj_t *change_obj_b = b;
++    int position_a = -1;
++    int position_b = -1;
++
++    crm_element_value_int(change_obj_a->change, XML_DIFF_POSITION, &position_a);
++    crm_element_value_int(change_obj_b->change, XML_DIFF_POSITION, &position_b);
++
++    if (position_a < position_b) {
++        return -1;
++
++    } else if (position_a > position_b) {
++        return 1;
++    }
++
++    return 0;
++}
++
+ static int
+ xml_apply_patchset_v2(xmlNode *xml, xmlNode *patchset)
+ {
+     int rc = pcmk_ok;
+     xmlNode *change = NULL;
++    GListPtr change_objs = NULL;
++    GListPtr gIter = NULL;
++
+     for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) {
+         xmlNode *match = NULL;
+         const char *op = crm_element_value(change, XML_DIFF_OP);
+@@ -1483,6 +1512,7 @@ xml_apply_patchset_v2(xmlNode *xml, xmlNode *patchset)
+             continue;
+         }
+ 
++        // "delete" changes for XML comments are generated with "position"
+         if(strcmp(op, "delete") == 0) {
+             crm_element_value_int(change, XML_DIFF_POSITION, &position);
+         }
+@@ -1502,7 +1532,71 @@ xml_apply_patchset_v2(xmlNode *xml, xmlNode *patchset)
+             rc = -pcmk_err_diff_failed;
+             continue;
+ 
+-        } else if(strcmp(op, "create") == 0) {
++        } else if (strcmp(op, "create") == 0 || strcmp(op, "move") == 0) {
++            // Delay the adding of a "create" object
++            xml_change_obj_t *change_obj = calloc(1, sizeof(xml_change_obj_t));
++
++            CRM_ASSERT(change_obj != NULL);
++
++            change_obj->change = change;
++            change_obj->match = match;
++
++            change_objs = g_list_append(change_objs, change_obj);
++
++            if (strcmp(op, "move") == 0) {
++                // Temporarily put the "move" object after the last sibling
++                if (match->parent != NULL && match->parent->last != NULL) {
++                    xmlAddNextSibling(match->parent->last, match);
++                }
++            }
++
++        } else if(strcmp(op, "delete") == 0) {
++            free_xml(match);
++
++        } else if(strcmp(op, "modify") == 0) {
++            xmlAttr *pIter = pcmk__first_xml_attr(match);
++            xmlNode *attrs = __xml_first_child(first_named_child(change, XML_DIFF_RESULT));
++
++            if(attrs == NULL) {
++                rc = -ENOMSG;
++                continue;
++            }
++            while(pIter != NULL) {
++                const char *name = (const char *)pIter->name;
++
++                pIter = pIter->next;
++                xml_remove_prop(match, name);
++            }
++
++            for (pIter = pcmk__first_xml_attr(attrs); pIter != NULL; pIter = pIter->next) {
++                const char *name = (const char *)pIter->name;
++                const char *value = crm_element_value(attrs, name);
++
++                crm_xml_add(match, name, value);
++            }
++
++        } else {
++            crm_err("Unknown operation: %s", op);
++        }
++    }
++
++    // Changes should be generated in the right order. Double checking.
++    change_objs = g_list_sort(change_objs, sort_change_obj_by_position);
++
++    for (gIter = change_objs; gIter; gIter = gIter->next) {
++        xml_change_obj_t *change_obj = gIter->data;
++        xmlNode *match = change_obj->match;
++        const char *op = NULL;
++        const char *xpath = NULL;
++
++        change = change_obj->change;
++
++        op = crm_element_value(change, XML_DIFF_OP);
++        xpath = crm_element_value(change, XML_DIFF_PATH);
++
++        crm_trace("Continue performing %s on %s with %p", op, xpath, match);
++
++        if(strcmp(op, "create") == 0) {
+             int position = 0;
+             xmlNode *child = NULL;
+             xmlNode *match_child = NULL;
+@@ -1570,36 +1664,10 @@ xml_apply_patchset_v2(xmlNode *xml, xmlNode *patchset)
+                         match->name, ID(match), __xml_offset(match), position, match->prev);
+                 rc = -pcmk_err_diff_failed;
+             }
+-
+-        } else if(strcmp(op, "delete") == 0) {
+-            free_xml(match);
+-
+-        } else if(strcmp(op, "modify") == 0) {
+-            xmlAttr *pIter = pcmk__first_xml_attr(match);
+-            xmlNode *attrs = __xml_first_child(first_named_child(change, XML_DIFF_RESULT));
+-
+-            if(attrs == NULL) {
+-                rc = -ENOMSG;
+-                continue;
+-            }
+-            while(pIter != NULL) {
+-                const char *name = (const char *)pIter->name;
+-
+-                pIter = pIter->next;
+-                xml_remove_prop(match, name);
+-            }
+-
+-            for (pIter = pcmk__first_xml_attr(attrs); pIter != NULL; pIter = pIter->next) {
+-                const char *name = (const char *)pIter->name;
+-                const char *value = crm_element_value(attrs, name);
+-
+-                crm_xml_add(match, name, value);
+-            }
+-
+-        } else {
+-            crm_err("Unknown operation: %s", op);
+         }
+     }
++
++    g_list_free_full(change_objs, free);
+     return rc;
+ }
+ 
+-- 
+1.8.3.1
+
+
+From a3de5c611febf265880c17a8b49267eaa968c741 Mon Sep 17 00:00:00 2001
+From: "Gao,Yan" <ygao@suse.com>
+Date: Tue, 30 Apr 2019 00:15:03 +0200
+Subject: [PATCH 07/96] Fix: libcrmcommon: avoid possible use-of-NULL when
+ applying XML diffs
+
+---
+ lib/common/xml.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/lib/common/xml.c b/lib/common/xml.c
+index f7d6c70..5f52600 100644
+--- a/lib/common/xml.c
++++ b/lib/common/xml.c
+@@ -1507,11 +1507,12 @@ xml_apply_patchset_v2(xmlNode *xml, xmlNode *patchset)
+         const char *xpath = crm_element_value(change, XML_DIFF_PATH);
+         int position = -1;
+ 
+-        crm_trace("Processing %s %s", change->name, op);
+         if(op == NULL) {
+             continue;
+         }
+ 
++        crm_trace("Processing %s %s", change->name, op);
++
+         // "delete" changes for XML comments are generated with "position"
+         if(strcmp(op, "delete") == 0) {
+             crm_element_value_int(change, XML_DIFF_POSITION, &position);
+-- 
+1.8.3.1
+
+
+From f40dad3645bfee99ad6b2bafbe69242171c44cf3 Mon Sep 17 00:00:00 2001
+From: "Gao,Yan" <ygao@suse.com>
+Date: Tue, 30 Apr 2019 00:19:46 +0200
+Subject: [PATCH 08/96] Fix: libcrmcommon: return error when applying XML diffs
+ containing unknown operations
+
+---
+ lib/common/xml.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/lib/common/xml.c b/lib/common/xml.c
+index 5f52600..91c0edb 100644
+--- a/lib/common/xml.c
++++ b/lib/common/xml.c
+@@ -1578,6 +1578,7 @@ xml_apply_patchset_v2(xmlNode *xml, xmlNode *patchset)
+ 
+         } else {
+             crm_err("Unknown operation: %s", op);
++            rc = -pcmk_err_diff_failed;
+         }
+     }
+ 
+-- 
+1.8.3.1
+
+
+From 3a8c61bc744a3f59d097403231a4f3f3c39a990c Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 22 Mar 2019 17:49:30 -0500
+Subject: [PATCH 09/96] Log: controller: improve failed recurring action
+ messages
+
+Recurring action status changes can be reported in any later transition, not
+just the one they were initially scheduled in. Previously, they would be logged
+as an "Old event". Now, distinguish this situation.
+---
+ crmd/te_events.c | 34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+diff --git a/crmd/te_events.c b/crmd/te_events.c
+index 1f7a34c..eb1a8ca 100644
+--- a/crmd/te_events.c
++++ b/crmd/te_events.c
+@@ -1,19 +1,10 @@
+ /*
+- * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
++ * Copyright 2004-2019 the Pacemaker project contributors
+  *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public
+- * License as published by the Free Software Foundation; either
+- * version 2 of the License, or (at your option) any later version.
++ * The version control history for this file may have further details.
+  *
+- * This software is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public
+- * License along with this library; if not, write to the Free Software
+- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ * This source code is licensed under the GNU General Public License version 2
++ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+  */
+ 
+ #include <crm_internal.h>
+@@ -495,8 +486,21 @@ process_graph_event(xmlNode *event, const char *event_node)
+         abort_transition(INFINITY, tg_restart, "Foreign event", event);
+ 
+     } else if (transition_graph->id != transition_num) {
+-        desc = "arrived really late";
+-        abort_transition(INFINITY, tg_restart, "Old event", event);
++        int interval_ms = 0;
++
++        if (parse_op_key(id, NULL, NULL, &interval_ms)
++            && (interval_ms != 0)) {
++            /* Recurring actions have the transition number they were first
++             * scheduled in.
++             */
++            desc = "arrived after initial scheduling";
++            abort_transition(INFINITY, tg_restart, "Change in recurring result",
++                             event);
++
++        } else {
++            desc = "arrived really late";
++            abort_transition(INFINITY, tg_restart, "Old event", event);
++        }
+ 
+     } else if (transition_graph->complete) {
+         desc = "arrived late";
+-- 
+1.8.3.1
+
+
+From dac669b390cfb1be265c71cb588fa0a67ecdd929 Mon Sep 17 00:00:00 2001
+From: "Gao,Yan" <ygao@suse.com>
+Date: Thu, 9 May 2019 13:24:35 +0200
+Subject: [PATCH 10/96] Fix: controller: confirm cancel of failed monitors
+
+Usually, after a monitor has been cancelled from the executor, the
+controller erases the corresponding lrm_rsc_op from the cib, and the DC
+will confirm the cancel action by process_op_deletion() according to the
+cib diff.
+
+But if a monitor has failed, the lrm_rsc_op will be recorded as
+"last_failure". When cancelling it, the lrm_rsc_op won't get erased from
+the cib given the logic on purpose in erase_lrm_history_by_op(). So that
+the cancel action won't have a chance to get confirmed by DC with
+process_op_deletion().
+
+Previously, the cluster transition would get stuck waiting for the remaining
+action timer to time out.
+
+This commit fixes the issue by directly acknowledging the cancel action
+in this case and enabling the DC to confirm it.
+
+This also moves get_node_id() function into controld_utils.c for common
+use.
+
+Producer:
+```
+ # Insert a 10s sleep in the monitor action of RA
+ # /usr/lib/ocf/resource.d/pacemaker/Stateful:
+
+  stateful_monitor() {
+ +    sleep 10
+      stateful_check_state "master"
+
+ # Add a promotable clone resource:
+
+ crm configure primitive stateful ocf:pacemaker:Stateful \
+         op monitor interval=5 role=Master \
+         op monitor interval=10 role=Slave
+ crm configure clone p-clone stateful \
+         meta promotable=true
+
+ # Wait for the resource instance to be started and promoted to master,
+ # and for the monitor for master role to complete.
+
+ # Set is-managed=false for the promotable clone:
+ crm_resource --meta -p is-managed -v false -r p-clone
+
+ # Change the status of the master instance to slave and immediately
+ # force a refresh of it:
+ echo slave > /var/run/Stateful-stateful.state; crm_resource --refresh -r stateful --force
+
+ # Wait for the probe to complete, and then for the monitor for slave
+ # role to be issued:
+ sleep 15
+
+ # While the monitor for slave role is still in progress, change the
+ # status to be master again:
+ echo master > /var/run/Stateful-stateful.state
+
+ # The monitor for slave role returns an error. The cluster issues a
+ # monitor for master role instead and tries to cancel the failed one
+ # for slave role. But the cluster transition gets stuck: only after
+ # the monitor timeout configured for the slave role plus cluster-delay
+ # has elapsed does the controller eventually say:
+
+ pacemaker-controld[21205] error: Node opensuse150 did not send cancel result (via controller) within 20000ms (action timeout plus cluster-delay)
+ pacemaker-controld[21205] error: [Action    1]: In-flight rsc op stateful_monitor_10000            on opensuse150 (priority: 0, waiting: none)
+ pacemaker-controld[21205] notice: Transition 6 aborted: Action lost
+
+```
+---
+ crmd/crmd_utils.h   |  2 ++
+ crmd/lrm.c          | 38 ++++++++++++++++++++++++++++++++++++++
+ crmd/te_callbacks.c | 21 ++-------------------
+ crmd/te_events.c    | 32 ++++++++++++++++++++++++++++++++
+ crmd/tengine.h      |  1 +
+ crmd/utils.c        | 13 +++++++++++++
+ 6 files changed, 88 insertions(+), 19 deletions(-)
+
+diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h
+index d49642f..a754487 100644
+--- a/crmd/crmd_utils.h
++++ b/crmd/crmd_utils.h
+@@ -112,6 +112,8 @@ void crmd_peer_down(crm_node_t *peer, bool full);
+ unsigned int cib_op_timeout(void);
+ bool controld_action_is_recordable(const char *action);
+ 
++const char *get_node_id(xmlNode *lrm_rsc_op);
++
+ /* Convenience macro for registering a CIB callback
+  * (assumes that data can be freed with free())
+  */
+diff --git a/crmd/lrm.c b/crmd/lrm.c
+index 1c9a276..776c02b 100644
+--- a/crmd/lrm.c
++++ b/crmd/lrm.c
+@@ -2487,6 +2487,30 @@ unescape_newlines(const char *string)
+     return ret;
+ }
+ 
++static bool
++did_lrm_rsc_op_fail(lrm_state_t *lrm_state, const char * rsc_id,
++                    const char * op_type, guint interval_ms)
++{
++    rsc_history_t *entry = NULL;
++
++    CRM_CHECK(lrm_state != NULL, return FALSE);
++    CRM_CHECK(rsc_id != NULL, return FALSE);
++    CRM_CHECK(op_type != NULL, return FALSE);
++
++    entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
++    if (entry == NULL || entry->failed == NULL) {
++        return FALSE;
++    }
++
++    if (crm_str_eq(entry->failed->rsc_id, rsc_id, TRUE)
++        && safe_str_eq(entry->failed->op_type, op_type)
++        && entry->failed->interval == interval_ms) {
++        return TRUE;
++    }
++
++    return FALSE;
++}
++
+ void
+ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op,
+                   struct recurring_op_s *pending, xmlNode *action_xml)
+@@ -2616,6 +2640,20 @@ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op,
+             erase_lrm_history_by_op(lrm_state, op);
+         }
+ 
++        /* If the recurring operation had failed, the lrm_rsc_op is recorded
++         * as "last_failure", which is deliberately not erased from the cib
++         * by erase_lrm_history_by_op(). Hence the cancel action would never
++         * get a chance to be confirmed by the DC via process_op_deletion(),
++         * and the cluster transition would get stuck waiting for the
++         * remaining action timer to time out.
++         *
++         * Directly acknowledge the cancel operation in this case.
++         */
++        if (did_lrm_rsc_op_fail(lrm_state, pending->rsc_id,
++                                pending->op_type, pending->interval)) {
++            need_direct_ack = TRUE;
++        }
++
+     } else if (op->rsc_deleted) {
+         /* This recurring operation was cancelled (but not by us, and the
+          * executor does not have resource information, likely due to resource
+diff --git a/crmd/te_callbacks.c b/crmd/te_callbacks.c
+index 087f3e1..9faf932 100644
+--- a/crmd/te_callbacks.c
++++ b/crmd/te_callbacks.c
+@@ -42,19 +42,6 @@ static unsigned long int stonith_max_attempts = 10;
+ /* #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_CIB_TAG_STATE"[@uname='%s']"//"XML_LRM_TAG_RSC_OP"[@id='%s]" */
+ #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_LRM_TAG_RSC_OP"[@id='%s']"
+ 
+-static const char *
+-get_node_id(xmlNode * rsc_op)
+-{
+-    xmlNode *node = rsc_op;
+-
+-    while (node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) {
+-        node = node->parent;
+-    }
+-
+-    CRM_CHECK(node != NULL, return NULL);
+-    return ID(node);
+-}
+-
+ void
+ update_stonith_max_attempts(const char* value)
+ {
+@@ -384,12 +371,8 @@ process_op_deletion(const char *xpath, xmlNode *change)
+     node_uuid = extract_node_uuid(xpath);
+     cancel = get_cancel_action(key, node_uuid);
+     if (cancel) {
+-        crm_info("Cancellation of %s on %s confirmed (%d)",
+-                 key, node_uuid, cancel->id);
+-        stop_te_timer(cancel->timer);
+-        te_action_confirmed(cancel);
+-        update_graph(transition_graph, cancel);
+-        trigger_graph();
++        confirm_cancel_action(cancel);
++
+     } else {
+         abort_transition(INFINITY, tg_restart, "Resource operation removal",
+                          change);
+diff --git a/crmd/te_events.c b/crmd/te_events.c
+index eb1a8ca..b398739 100644
+--- a/crmd/te_events.c
++++ b/crmd/te_events.c
+@@ -373,6 +373,27 @@ get_cancel_action(const char *id, const char *node)
+     return NULL;
+ }
+ 
++void
++confirm_cancel_action(crm_action_t *cancel)
++{
++    const char *op_key = NULL;
++    const char *node_name = NULL;
++
++    CRM_ASSERT(cancel != NULL);
++
++    op_key = crm_element_value(cancel->xml, XML_LRM_ATTR_TASK_KEY);
++    node_name = crm_element_value(cancel->xml, XML_LRM_ATTR_TARGET);
++
++    stop_te_timer(cancel->timer);
++    te_action_confirmed(cancel);
++    update_graph(transition_graph, cancel);
++
++    crm_info("Cancellation of %s on %s confirmed (action %d)",
++             op_key, node_name, cancel->id);
++
++    trigger_graph();
++}
++
+ /* downed nodes are listed like: <downed> <node id="UUID1" /> ... </downed> */
+ #define XPATH_DOWNED "//" XML_GRAPH_TAG_DOWNED \
+                      "/" XML_CIB_TAG_NODE "[@" XML_ATTR_UUID "='%s']"
+@@ -493,6 +514,17 @@ process_graph_event(xmlNode *event, const char *event_node)
+             /* Recurring actions have the transition number they were first
+              * scheduled in.
+              */
++
++            if (status == PCMK_LRM_OP_CANCELLED) {
++                const char *node_id = get_node_id(event);
++
++                action = get_cancel_action(id, node_id);
++                if (action) {
++                    confirm_cancel_action(action);
++                }
++                goto bail;
++            }
++
+             desc = "arrived after initial scheduling";
+             abort_transition(INFINITY, tg_restart, "Change in recurring result",
+                              event);
+diff --git a/crmd/tengine.h b/crmd/tengine.h
+index b5141a0..1a9b2d2 100644
+--- a/crmd/tengine.h
++++ b/crmd/tengine.h
+@@ -35,6 +35,7 @@ void execute_stonith_cleanup(void);
+ /* tengine */
+ extern crm_action_t *match_down_event(const char *target, bool quiet);
+ extern crm_action_t *get_cancel_action(const char *id, const char *node);
++void confirm_cancel_action(crm_action_t *cancel);
+ 
+ void controld_record_action_timeout(crm_action_t *action);
+ extern gboolean fail_incompletable_actions(crm_graph_t * graph, const char *down_node);
+diff --git a/crmd/utils.c b/crmd/utils.c
+index 08abc6e..761f5a7 100644
+--- a/crmd/utils.c
++++ b/crmd/utils.c
+@@ -1054,3 +1054,16 @@ cib_op_timeout()
+     }
+     return calculated_timeout;
+ }
++
++const char *
++get_node_id(xmlNode *lrm_rsc_op)
++{
++    xmlNode *node = lrm_rsc_op;
++
++    while (node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) {
++        node = node->parent;
++    }
++
++    CRM_CHECK(node != NULL, return NULL);
++    return ID(node);
++}
+-- 
+1.8.3.1
+
+
+From a81ca9625e8d1ccd7f79fbe464b9f4221e8671f2 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 9 May 2019 20:26:08 -0500
+Subject: [PATCH 11/96] Refactor: libpe_status: functionize unfencing digest
+ code more
+
+... for readability, reusability, and avoiding unnecessary function calls or
+memory allocation.
+---
+ lib/pengine/utils.c | 159 ++++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 118 insertions(+), 41 deletions(-)
+
+diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
+index d09b0d8..b6a31d1 100644
+--- a/lib/pengine/utils.c
++++ b/lib/pengine/utils.c
+@@ -2091,57 +2091,134 @@ rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
+     return data;
+ }
+ 
++/*!
++ * \internal
++ * \brief Create an unfencing summary for use in special node attribute
++ *
++ * Create a string combining a fence device's resource ID, agent type, and
++ * parameter digest (whether for all parameters or just non-private parameters).
++ * This can be stored in a special node attribute, allowing us to detect changes
++ * in either the agent type or parameters, to know whether unfencing must be
++ * redone or can be safely skipped when the device's history is cleaned.
++ *
++ * \param[in] rsc_id        Fence device resource ID
++ * \param[in] agent_type    Fence device agent
++ * \param[in] param_digest  Fence device parameter digest
++ *
++ * \return Newly allocated string with unfencing digest
++ * \note The caller is responsible for freeing the result.
++ */
++static inline char *
++create_unfencing_summary(const char *rsc_id, const char *agent_type,
++                         const char *param_digest)
++{
++    return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest);
++}
++
++/*!
++ * \internal
++ * \brief Check whether a node can skip unfencing
++ *
++ * Check whether a fence device's current definition matches a node's
++ * stored summary of when it was last unfenced by the device.
++ *
++ * \param[in] rsc_id        Fence device's resource ID
++ * \param[in] agent         Fence device's agent type
++ * \param[in] digest_calc   Fence device's current parameter digest
++ * \param[in] node_summary  Value of node's special unfencing node attribute
++ *                          (a comma-separated list of unfencing summaries for
++ *                          all devices that have unfenced this node)
++ *
++ * \return TRUE if digest matches, FALSE otherwise
++ */
++static bool
++unfencing_digest_matches(const char *rsc_id, const char *agent,
++                         const char *digest_calc, const char *node_summary)
++{
++    bool matches = FALSE;
++
++    if (rsc_id && agent && digest_calc && node_summary) {
++        char *search_secure = create_unfencing_summary(rsc_id, agent,
++                                                       digest_calc);
++
++        /* The digest was calculated including the device ID and agent,
++         * so there is no risk of collision using strstr().
++         */
++        matches = (strstr(node_summary, search_secure) != NULL);
++        crm_trace("Calculated unfencing digest '%s' %sfound in '%s'",
++                  search_secure, matches? "" : "not ", node_summary);
++        free(search_secure);
++    }
++    return matches;
++}
++
++/* Magic string to use as the action name for digest cache entries used for
++ * unfencing checks. This is not a real action name (i.e., not "on"), so
++ * check_action_definition() won't confuse these entries with real actions.
++ */
+ #define STONITH_DIGEST_TASK "stonith-on"
+ 
++/*!
++ * \internal
++ * \brief Calculate fence device digests and digest comparison result
++ *
++ * \param[in] rsc       Fence device resource
++ * \param[in] agent     Fence device's agent type
++ * \param[in] node      Node with digest cache to use
++ * \param[in] data_set  Cluster working set
++ *
++ * \return Node's digest cache entry
++ */
+ static op_digest_cache_t *
+-fencing_action_digest_cmp(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
++fencing_action_digest_cmp(pe_resource_t *rsc, const char *agent,
++                          pe_node_t *node, pe_working_set_t *data_set)
+ {
+-    char *key = generate_op_key(rsc->id, STONITH_DIGEST_TASK, 0);
+-    op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, node, NULL, data_set);
++    const char *node_summary = NULL;
+ 
+-    const char *digest_all = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
+-    const char *digest_secure = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
++    // Calculate device's current parameter digests
++    char *key = generate_op_key(rsc->id, STONITH_DIGEST_TASK, 0);
++    op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key,
++                                                node, NULL, data_set);
+ 
+-    /* No 'reloads' for fencing device changes
+-     *
+-     * We use the resource id + agent + digest so that we can detect
+-     * changes to the agent and/or the parameters used
+-     */
+-    char *search_all = crm_strdup_printf("%s:%s:%s", rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_all_calc);
+-    char *search_secure = crm_strdup_printf("%s:%s:%s", rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_secure_calc);
++    free(key);
+ 
+-    data->rc = RSC_DIGEST_ALL;
+-    if (digest_all == NULL) {
+-        /* it is unknown what the previous op digest was */
++    // Check whether node has special unfencing summary node attribute
++    node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
++    if (node_summary == NULL) {
+         data->rc = RSC_DIGEST_UNKNOWN;
++        return data;
++    }
+ 
+-    } else if (strstr(digest_all, search_all)) {
++    // Check whether full parameter digest matches
++    if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
++                                 node_summary)) {
+         data->rc = RSC_DIGEST_MATCH;
++        return data;
++    }
+ 
+-    } else if(digest_secure && data->digest_secure_calc) {
+-        if(strstr(digest_secure, search_secure)) {
+-            if (is_set(data_set->flags, pe_flag_stdout)) {
+-                printf("Only 'private' parameters to %s for unfencing %s changed\n",
+-                       rsc->id, node->details->uname);
+-            }
+-            data->rc = RSC_DIGEST_MATCH;
++    // Check whether secure parameter digest matches
++    node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
++    if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
++                                 node_summary)) {
++        data->rc = RSC_DIGEST_MATCH;
++        if (is_set(data_set->flags, pe_flag_stdout)) {
++            printf("Only 'private' parameters to %s for unfencing %s changed\n",
++                   rsc->id, node->details->uname);
+         }
++        return data;
+     }
+ 
+-    if (is_set(data_set->flags, pe_flag_sanitized)
+-        && is_set(data_set->flags, pe_flag_stdout)
+-        && (data->rc == RSC_DIGEST_ALL)
++    // Parameters don't match
++    data->rc = RSC_DIGEST_ALL;
++    if (is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout))
+         && data->digest_secure_calc) {
+-        printf("Parameters to %s for unfencing %s changed, try '%s:%s:%s'\n",
+-               rsc->id, node->details->uname, rsc->id,
+-               (const char *) g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE),
+-               data->digest_secure_calc);
+-    }
+-
+-    free(key);
+-    free(search_all);
+-    free(search_secure);
++        char *digest = create_unfencing_summary(rsc->id, agent,
++                                                data->digest_secure_calc);
+ 
++        printf("Parameters to %s for unfencing %s changed, try '%s'\n",
++               rsc->id, node->details->uname, digest);
++        free(digest);
++    }
+     return data;
+ }
+ 
+@@ -2228,9 +2305,6 @@ pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe
+              *
+              * We may do this for all nodes in the future, but for now
+              * the check_action_definition() based stuff works fine.
+-             *
+-             * Use "stonith-on" to avoid creating cache entries for
+-             * operations check_action_definition() would look for.
+              */
+             long max = 1024;
+             long digests_all_offset = 0;
+@@ -2242,8 +2316,11 @@ pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe
+ 
+             for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) {
+                 resource_t *match = gIter->data;
+-                op_digest_cache_t *data = fencing_action_digest_cmp(match, node, data_set);
++                const char *agent = g_hash_table_lookup(match->meta,
++                                                        XML_ATTR_TYPE);
++                op_digest_cache_t *data = NULL;
+ 
++                data = fencing_action_digest_cmp(match, agent, node, data_set);
+                 if(data->rc == RSC_DIGEST_ALL) {
+                     optional = FALSE;
+                     crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id);
+@@ -2254,11 +2331,11 @@ pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe
+ 
+                 digests_all_offset += snprintf(
+                     digests_all+digests_all_offset, max-digests_all_offset,
+-                    "%s:%s:%s,", match->id, (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE), data->digest_all_calc);
++                    "%s:%s:%s,", match->id, agent, data->digest_all_calc);
+ 
+                 digests_secure_offset += snprintf(
+                     digests_secure+digests_secure_offset, max-digests_secure_offset,
+-                    "%s:%s:%s,", match->id, (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE), data->digest_secure_calc);
++                    "%s:%s:%s,", match->id, agent, data->digest_secure_calc);
+             }
+             g_hash_table_insert(stonith_op->meta,
+                                 strdup(XML_OP_ATTR_DIGESTS_ALL),
+-- 
+1.8.3.1
+
+
+From be34a73f9cfb6abdb3e2799593cb0358c01c2521 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 10 May 2019 11:57:31 -0500
+Subject: [PATCH 12/96] Fix: libpe_status: calculate secure digests for
+ unfencing ops
+
+The calculation of digests for detecting when unfencing is needed reused
+rsc_action_digest(). However, that would only add secure digests when the
+pe_flag_sanitized flag was set, which is only set by crm_simulate, so secure
+digests would never be added in normal cluster operation. This led to node
+attributes like name="#digests-secure"
+value="stonith-fence_compute-fence-nova:fence_compute:(null),".
+
+Now, rsc_action_digest() takes a new argument to select whether secure digests
+are added, which is always set to TRUE when calculating unfencing digests.
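+
+(Illustrative sketch, reusing names introduced by the previous refactoring
+patch: each entry in the digests attributes is the rsc-id:agent:digest
+summary built by create_unfencing_summary(); with secure digests now always
+calculated for unfencing, digest_secure_calc is non-NULL and the "(null)"
+component shown above no longer appears.)
+```
+    /* e.g. builds "stonith-fence_compute-fence-nova:fence_compute:<digest>",
+     * the same rsc-id:agent:digest form that is stored (comma-separated) in
+     * the node's special digests attributes
+     */
+    char *summary = create_unfencing_summary(rsc->id, agent,
+                                             data->digest_secure_calc);
+
+    free(summary);
+```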
+---
+ lib/pengine/utils.c | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
+index b6a31d1..f52f1c7 100644
+--- a/lib/pengine/utils.c
++++ b/lib/pengine/utils.c
+@@ -1948,9 +1948,24 @@ append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNo
+ }
+ #endif
+ 
++/*!
++ * \internal
++ * \brief Calculate action digests and store in node's digest cache
++ *
++ * \param[in] rsc          Resource that action was for
++ * \param[in] task         Name of action performed
++ * \param[in] key          Action's task key
++ * \param[in] node         Node action was performed on
++ * \param[in] xml_op       XML of operation in CIB status (if available)
++ * \param[in] calc_secure  Whether to calculate secure digest
++ * \param[in] data_set     Cluster working set
++ *
++ * \return Pointer to node's digest cache entry
++ */
+ static op_digest_cache_t *
+-rsc_action_digest(resource_t * rsc, const char *task, const char *key,
+-                  node_t * node, xmlNode * xml_op, pe_working_set_t * data_set) 
++rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key,
++                  pe_node_t *node, xmlNode *xml_op, bool calc_secure,
++                  pe_working_set_t *data_set)
+ {
+     op_digest_cache_t *data = NULL;
+ 
+@@ -2018,7 +2033,7 @@ rsc_action_digest(resource_t * rsc, const char *task, const char *key,
+ 
+         data->digest_all_calc = calculate_operation_digest(data->params_all, op_version);
+ 
+-        if (is_set(data_set->flags, pe_flag_sanitized)) {
++        if (calc_secure) {
+             data->params_secure = copy_xml(data->params_all);
+             if(secure_list) {
+                 filter_parameters(data->params_secure, secure_list, FALSE);
+@@ -2064,7 +2079,9 @@ rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
+ 
+     interval = crm_parse_int(interval_s, "0");
+     key = generate_op_key(rsc->id, task, interval);
+-    data = rsc_action_digest(rsc, task, key, node, xml_op, data_set);
++    data = rsc_action_digest(rsc, task, key, node, xml_op,
++                             is_set(data_set->flags, pe_flag_sanitized),
++                             data_set);
+ 
+     data->rc = RSC_DIGEST_MATCH;
+     if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) {
+@@ -2178,7 +2195,7 @@ fencing_action_digest_cmp(pe_resource_t *rsc, const char *agent,
+     // Calculate device's current parameter digests
+     char *key = generate_op_key(rsc->id, STONITH_DIGEST_TASK, 0);
+     op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key,
+-                                                node, NULL, data_set);
++                                                node, NULL, TRUE, data_set);
+ 
+     free(key);
+ 
+-- 
+1.8.3.1
+
+
+From 8819c2f96f74ab4b4979df5ed04c16dd6bdad5f1 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Sat, 8 Jun 2019 16:25:04 -0500
+Subject: [PATCH 13/96] Refactor: libpe_status: add function for checking
+ shutdown attribute
+
+... to reduce code duplication and allow further reuse
+---
+ include/crm/pengine/internal.h |  2 ++
+ lib/pengine/unpack.c           |  8 ++------
+ lib/pengine/utils.c            | 20 ++++++++++++++++++++
+ 3 files changed, 24 insertions(+), 6 deletions(-)
+
+diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
+index c40b075..c3f9f70 100644
+--- a/include/crm/pengine/internal.h
++++ b/include/crm/pengine/internal.h
+@@ -362,4 +362,6 @@ void pe__foreach_param_check(pe_working_set_t *data_set,
+                                         enum pe_check_parameters,
+                                         pe_working_set_t*));
+ void pe__free_param_checks(pe_working_set_t *data_set);
++
++bool pe__shutdown_requested(pe_node_t *node);
+ #endif
+diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
+index 619ccbf..cf725a1 100644
+--- a/lib/pengine/unpack.c
++++ b/lib/pengine/unpack.c
+@@ -1013,7 +1013,6 @@ unpack_handle_remote_attrs(node_t *this_node, xmlNode *state, pe_working_set_t *
+     const char *resource_discovery_enabled = NULL;
+     xmlNode *attrs = NULL;
+     resource_t *rsc = NULL;
+-    const char *shutdown = NULL;
+ 
+     if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
+         return;
+@@ -1035,8 +1034,7 @@ unpack_handle_remote_attrs(node_t *this_node, xmlNode *state, pe_working_set_t *
+     attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
+     add_node_attrs(attrs, this_node, TRUE, data_set);
+ 
+-    shutdown = pe_node_attribute_raw(this_node, XML_CIB_ATTR_SHUTDOWN);
+-    if (shutdown != NULL && safe_str_neq("0", shutdown)) {
++    if (pe__shutdown_requested(this_node)) {
+         crm_info("Node %s is shutting down", this_node->details->uname);
+         this_node->details->shutdown = TRUE;
+         if (rsc) {
+@@ -1512,7 +1510,6 @@ gboolean
+ determine_online_status(xmlNode * node_state, node_t * this_node, pe_working_set_t * data_set)
+ {
+     gboolean online = FALSE;
+-    const char *shutdown = NULL;
+     const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+ 
+     if (this_node == NULL) {
+@@ -1522,9 +1519,8 @@ determine_online_status(xmlNode * node_state, node_t * this_node, pe_working_set
+ 
+     this_node->details->shutdown = FALSE;
+     this_node->details->expected_up = FALSE;
+-    shutdown = pe_node_attribute_raw(this_node, XML_CIB_ATTR_SHUTDOWN);
+ 
+-    if (shutdown != NULL && safe_str_neq("0", shutdown)) {
++    if (pe__shutdown_requested(this_node)) {
+         this_node->details->shutdown = TRUE;
+ 
+     } else if (safe_str_eq(exp_state, CRMD_JOINSTATE_MEMBER)) {
+diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
+index f52f1c7..8eac2ce 100644
+--- a/lib/pengine/utils.c
++++ b/lib/pengine/utils.c
+@@ -2522,3 +2522,23 @@ void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrit
+         }
+     }
+ }
++
++/*!
++ * \internal
++ * \brief Check whether shutdown has been requested for a node
++ *
++ * \param[in] node  Node to check
++ *
++ * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
++ * \note This differs from simply using node->details->shutdown in that it can
++ *       be used before that has been determined (and in fact to determine it),
++ *       and it can also be used to distinguish requested shutdown from implicit
++ *       shutdown of remote nodes by virtue of their connection stopping.
++ */
++bool
++pe__shutdown_requested(pe_node_t *node)
++{
++    const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
++
++    return shutdown && strcmp(shutdown, "0");
++}
+-- 
+1.8.3.1
+
+
+From 938e99f29ed5faaeb4015247e363ddc7e77208a3 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 5 Jun 2019 16:37:26 -0500
+Subject: [PATCH 14/96] Fix: scheduler: remote state is failed if node is
+ shutting down with connection failure
+
+When determining remote state, if the connection resource is failed and not
+being started again, we consider the state to be unknown if the connection has
+a reconnect interval, because we won't know whether the connection can be
+recovered until the interval expires and we re-attempt connection.
+
+However, if the node is shutting down at the time, we won't re-attempt
+connection, so consider the state failed in that case. (Note that we check the
+actual shutdown node attribute, rather than node->details->shutdown, since that
+is set for remote nodes whenever the connection is stopping.)
+
+This avoids a situation where actions that cannot succeed can be scheduled on a
+remote node that's shutting down.
+---
+ pengine/allocate.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/pengine/allocate.c b/pengine/allocate.c
+index 578db2f..c9877a4 100644
+--- a/pengine/allocate.c
++++ b/pengine/allocate.c
+@@ -1998,7 +1998,8 @@ get_remote_node_state(pe_node_t *node)
+ 
+         if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
+             && remote_rsc->remote_reconnect_interval
+-            && node->details->remote_was_fenced) {
++            && node->details->remote_was_fenced
++            && !pe__shutdown_requested(node)) {
+ 
+             /* We won't know whether the connection is recoverable until the
+              * reconnect interval expires and we reattempt connection.
+-- 
+1.8.3.1
+
+
+From c20f8920634f47bbdf699d80dafd50c6a72eac8b Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 5 Jun 2019 16:43:19 -0500
+Subject: [PATCH 15/96] Fix: libpe_status: don't order implied stops relative
+ to a remote connection
+
+Actions behind a remote connection are ordered relative to any start or stop of
+the remote connection. However, if the action is a stop implied due to fencing,
+it does not require the remote connection, and the ordering should not be done.
+
+This avoids a delay in remote connection recovery if the connection has
+failed; e.g. previously the ordering would look like:
+
+   fence remote node -> implied stop of resource on remote -> stop connection
+
+Now, the connection stop can proceed simultaneously with the remote node
+fencing.
+---
+ pengine/allocate.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/pengine/allocate.c b/pengine/allocate.c
+index c9877a4..c7c68f8 100644
+--- a/pengine/allocate.c
++++ b/pengine/allocate.c
+@@ -2091,14 +2091,13 @@ apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
+                                        pe_order_implies_first, data_set);
+ 
+             } else if(state == remote_state_failed) {
+-                /* We would only be here if the resource is
+-                 * running on the remote node.  Since we have no
+-                 * way to stop it, it is necessary to fence the
+-                 * node.
++                /* The resource is active on the node, but since we don't have a
++                 * valid connection, the only way to stop the resource is by
++                 * fencing the node. There is no need to order the stop relative
++                 * to the remote connection, since the stop will become implied
++                 * by the fencing.
+                  */
+                 pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable");
+-                order_action_then_stop(action, remote_rsc,
+-                                       pe_order_implies_first, data_set);
+ 
+             } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) {
+                 /* State must be remote_state_unknown or remote_state_stopped.
+-- 
+1.8.3.1
+
+
+From 26a28ee80b7fc110125eedac377dfa4c0a8e8294 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 14 Jun 2019 14:08:47 -0500
+Subject: [PATCH 16/96] Test: pengine: update regression tests for remote
+ connection ordering change
+
+---
+ pengine/test10/remote-connection-unrecoverable.dot     | 2 --
+ pengine/test10/remote-connection-unrecoverable.exp     | 6 ------
+ pengine/test10/remote-connection-unrecoverable.summary | 2 +-
+ pengine/test10/remote-fence-before-reconnect.dot       | 1 -
+ pengine/test10/remote-fence-before-reconnect.exp       | 6 +-----
+ pengine/test10/remote-fence-before-reconnect.summary   | 2 +-
+ pengine/test10/remote-recover-all.dot                  | 2 --
+ pengine/test10/remote-recover-all.exp                  | 6 ------
+ pengine/test10/remote-recover-all.summary              | 4 ++--
+ pengine/test10/remote-recover-no-resources.dot         | 1 -
+ pengine/test10/remote-recover-no-resources.exp         | 3 ---
+ pengine/test10/remote-recover-no-resources.summary     | 2 +-
+ pengine/test10/remote-recover-unknown.dot              | 1 -
+ pengine/test10/remote-recover-unknown.exp              | 3 ---
+ pengine/test10/remote-recover-unknown.summary          | 2 +-
+ 15 files changed, 7 insertions(+), 36 deletions(-)
+
+diff --git a/pengine/test10/remote-connection-unrecoverable.dot b/pengine/test10/remote-connection-unrecoverable.dot
+index 0360cd0..b5caca6 100644
+--- a/pengine/test10/remote-connection-unrecoverable.dot
++++ b/pengine/test10/remote-connection-unrecoverable.dot
+@@ -7,14 +7,12 @@ digraph "g" {
+ "remote1_stop_0 node1" [ style=bold color="green" fontcolor="orange"]
+ "rsc1_delete_0 remote1" -> "rsc1_start_0 node2" [ style = dashed]
+ "rsc1_delete_0 remote1" [ style=dashed color="red" fontcolor="black"]
+-"rsc1_monitor_0 node2" -> "remote1_stop_0 node1" [ style = bold]
+ "rsc1_monitor_0 node2" -> "rsc1_start_0 node2" [ style = bold]
+ "rsc1_monitor_0 node2" -> "rsc2-master_demote_0" [ style = bold]
+ "rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+ "rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+ "rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold]
+ "rsc1_start_0 node2" [ style=bold color="green" fontcolor="black"]
+-"rsc1_stop_0 remote1" -> "remote1_stop_0 node1" [ style = bold]
+ "rsc1_stop_0 remote1" -> "rsc1_delete_0 remote1" [ style = dashed]
+ "rsc1_stop_0 remote1" -> "rsc1_start_0 node2" [ style = bold]
+ "rsc1_stop_0 remote1" -> "rsc2-master_demote_0" [ style = bold]
+diff --git a/pengine/test10/remote-connection-unrecoverable.exp b/pengine/test10/remote-connection-unrecoverable.exp
+index 73fa7a1..339ad56 100644
+--- a/pengine/test10/remote-connection-unrecoverable.exp
++++ b/pengine/test10/remote-connection-unrecoverable.exp
+@@ -9,12 +9,6 @@
+       <trigger>
+         <crm_event id="1" operation="stonith" operation_key="stonith-node1-reboot" on_node="node1" on_node_uuid="1"/>
+       </trigger>
+-      <trigger>
+-        <pseudo_event id="6" operation="stop" operation_key="rsc1_stop_0"/>
+-      </trigger>
+-      <trigger>
+-        <rsc_op id="8" operation="monitor" operation_key="rsc1_monitor_0" on_node="node2" on_node_uuid="2"/>
+-      </trigger>
+     </inputs>
+   </synapse>
+   <synapse id="1">
+diff --git a/pengine/test10/remote-connection-unrecoverable.summary b/pengine/test10/remote-connection-unrecoverable.summary
+index efeb765..18f7dc7 100644
+--- a/pengine/test10/remote-connection-unrecoverable.summary
++++ b/pengine/test10/remote-connection-unrecoverable.summary
+@@ -24,12 +24,12 @@ Executing cluster transition:
+  * Resource action: killer          stop on node2
+  * Resource action: rsc1            monitor on node2
+  * Fencing node1 (reboot)
++ * Pseudo action:   remote1_stop_0
+  * Fencing remote1 (reboot)
+  * Resource action: killer          start on node2
+  * Resource action: killer          monitor=60000 on node2
+  * Pseudo action:   rsc1_stop_0
+  * Pseudo action:   rsc2-master_demote_0
+- * Pseudo action:   remote1_stop_0
+  * Resource action: rsc1            start on node2
+  * Pseudo action:   rsc2_demote_0
+  * Pseudo action:   rsc2-master_demoted_0
+diff --git a/pengine/test10/remote-fence-before-reconnect.dot b/pengine/test10/remote-fence-before-reconnect.dot
+index 4ced43e..5812b7f 100644
+--- a/pengine/test10/remote-fence-before-reconnect.dot
++++ b/pengine/test10/remote-fence-before-reconnect.dot
+@@ -3,7 +3,6 @@
+ "fake2_monitor_10000 c7auto1" [ style=bold color="green" fontcolor="black"]
+ "fake2_start_0 c7auto1" -> "fake2_monitor_10000 c7auto1" [ style = bold]
+ "fake2_start_0 c7auto1" [ style=bold color="green" fontcolor="black"]
+-"fake2_stop_0 c7auto4" -> "c7auto4_stop_0 c7auto1" [ style = bold]
+ "fake2_stop_0 c7auto4" -> "fake2_start_0 c7auto1" [ style = bold]
+ "fake2_stop_0 c7auto4" [ style=bold color="green" fontcolor="orange"]
+ "stonith 'reboot' c7auto4" -> "fake2_start_0 c7auto1" [ style = bold]
+diff --git a/pengine/test10/remote-fence-before-reconnect.exp b/pengine/test10/remote-fence-before-reconnect.exp
+index f99d9ef..f506f85 100644
+--- a/pengine/test10/remote-fence-before-reconnect.exp
++++ b/pengine/test10/remote-fence-before-reconnect.exp
+@@ -9,11 +9,7 @@
+         </downed>
+       </rsc_op>
+     </action_set>
+-    <inputs>
+-      <trigger>
+-        <pseudo_event id="13" operation="stop" operation_key="fake2_stop_0"/>
+-      </trigger>
+-    </inputs>
++    <inputs/>
+   </synapse>
+   <synapse id="1">
+     <action_set>
+diff --git a/pengine/test10/remote-fence-before-reconnect.summary b/pengine/test10/remote-fence-before-reconnect.summary
+index f61e18b..03eac20 100644
+--- a/pengine/test10/remote-fence-before-reconnect.summary
++++ b/pengine/test10/remote-fence-before-reconnect.summary
+@@ -17,9 +17,9 @@ Transition Summary:
+  * Move       fake2       ( c7auto4 -> c7auto1 )  
+ 
+ Executing cluster transition:
++ * Resource action: c7auto4         stop on c7auto1
+  * Fencing c7auto4 (reboot)
+  * Pseudo action:   fake2_stop_0
+- * Resource action: c7auto4         stop on c7auto1
+  * Resource action: fake2           start on c7auto1
+  * Resource action: fake2           monitor=10000 on c7auto1
+ 
+diff --git a/pengine/test10/remote-recover-all.dot b/pengine/test10/remote-recover-all.dot
+index 1f967c5..b48b04e 100644
+--- a/pengine/test10/remote-recover-all.dot
++++ b/pengine/test10/remote-recover-all.dot
+@@ -19,7 +19,6 @@ digraph "g" {
+ "galera_demote_0 galera-2" -> "galera_stop_0 galera-2" [ style = bold]
+ "galera_demote_0 galera-2" [ style=bold color="green" fontcolor="orange"]
+ "galera_monitor_10000 galera-0" [ style=bold color="green" fontcolor="black"]
+-"galera_stop_0 galera-2" -> "galera-2_stop_0 controller-1" [ style = bold]
+ "galera_stop_0 galera-2" -> "galera-master_stopped_0" [ style = bold]
+ "galera_stop_0 galera-2" [ style=bold color="green" fontcolor="orange"]
+ "haproxy-clone_stop_0" -> "haproxy-clone_stopped_0" [ style = bold]
+@@ -60,7 +59,6 @@ digraph "g" {
+ "rabbitmq_post_notify_stonith_0" -> "rabbitmq_post_notify_stonith_0 messaging-0" [ style = bold]
+ "rabbitmq_post_notify_stonith_0" -> "rabbitmq_post_notify_stonith_0 messaging-2" [ style = bold]
+ "rabbitmq_post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"]
+-"rabbitmq_stop_0 messaging-1" -> "messaging-1_stop_0 controller-1" [ style = bold]
+ "rabbitmq_stop_0 messaging-1" -> "rabbitmq-clone_stopped_0" [ style = bold]
+ "rabbitmq_stop_0 messaging-1" [ style=bold color="green" fontcolor="orange"]
+ "redis-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
+diff --git a/pengine/test10/remote-recover-all.exp b/pengine/test10/remote-recover-all.exp
+index 900781c..e61ad6a 100644
+--- a/pengine/test10/remote-recover-all.exp
++++ b/pengine/test10/remote-recover-all.exp
+@@ -9,9 +9,6 @@
+       <trigger>
+         <crm_event id="1" operation="stonith" operation_key="stonith-controller-1-reboot" on_node="controller-1" on_node_uuid="2"/>
+       </trigger>
+-      <trigger>
+-        <pseudo_event id="39" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:2_stop_0"/>
+-      </trigger>
+     </inputs>
+   </synapse>
+   <synapse id="1">
+@@ -64,9 +61,6 @@
+       <trigger>
+         <crm_event id="1" operation="stonith" operation_key="stonith-controller-1-reboot" on_node="controller-1" on_node_uuid="2"/>
+       </trigger>
+-      <trigger>
+-        <pseudo_event id="49" operation="stop" operation_key="galera_stop_0" internal_operation_key="galera:1_stop_0"/>
+-      </trigger>
+     </inputs>
+   </synapse>
+   <synapse id="5" priority="1000000">
+diff --git a/pengine/test10/remote-recover-all.summary b/pengine/test10/remote-recover-all.summary
+index 865f39a..cfeac3a 100644
+--- a/pengine/test10/remote-recover-all.summary
++++ b/pengine/test10/remote-recover-all.summary
+@@ -63,6 +63,8 @@ Executing cluster transition:
+  * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
+  * Pseudo action:   stonith-fence_ipmilan-5254005bdbb5_stop_0
+  * Fencing controller-1 (reboot)
++ * Pseudo action:   messaging-1_stop_0
++ * Pseudo action:   galera-2_stop_0
+  * Pseudo action:   redis_post_notify_stop_0
+  * Resource action: redis           notify on controller-0
+  * Resource action: redis           notify on controller-2
+@@ -94,7 +96,6 @@ Executing cluster transition:
+  * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
+  * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
+  * Resource action: galera-0        monitor=20000 on controller-2
+- * Pseudo action:   galera-2_stop_0
+  * Resource action: rabbitmq        notify on messaging-2
+  * Resource action: rabbitmq        notify on messaging-0
+  * Pseudo action:   rabbitmq_notified_0
+@@ -107,7 +108,6 @@ Executing cluster transition:
+  * Resource action: ip-172.17.1.17  start on controller-2
+  * Resource action: ip-172.17.4.11  start on controller-2
+  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
+- * Pseudo action:   messaging-1_stop_0
+  * Pseudo action:   redis_notified_0
+  * Resource action: ip-172.17.1.14  monitor=10000 on controller-2
+  * Resource action: ip-172.17.1.17  monitor=10000 on controller-2
+diff --git a/pengine/test10/remote-recover-no-resources.dot b/pengine/test10/remote-recover-no-resources.dot
+index a46c305..a0b1ecc 100644
+--- a/pengine/test10/remote-recover-no-resources.dot
++++ b/pengine/test10/remote-recover-no-resources.dot
+@@ -45,7 +45,6 @@ digraph "g" {
+ "rabbitmq_post_notify_stonith_0" -> "rabbitmq_post_notify_stonith_0 messaging-0" [ style = bold]
+ "rabbitmq_post_notify_stonith_0" -> "rabbitmq_post_notify_stonith_0 messaging-2" [ style = bold]
+ "rabbitmq_post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"]
+-"rabbitmq_stop_0 messaging-1" -> "messaging-1_stop_0 controller-1" [ style = bold]
+ "rabbitmq_stop_0 messaging-1" -> "rabbitmq-clone_stopped_0" [ style = bold]
+ "rabbitmq_stop_0 messaging-1" [ style=bold color="green" fontcolor="orange"]
+ "redis-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
+diff --git a/pengine/test10/remote-recover-no-resources.exp b/pengine/test10/remote-recover-no-resources.exp
+index 4d82aa4..27f18b5 100644
+--- a/pengine/test10/remote-recover-no-resources.exp
++++ b/pengine/test10/remote-recover-no-resources.exp
+@@ -9,9 +9,6 @@
+       <trigger>
+         <crm_event id="1" operation="stonith" operation_key="stonith-controller-1-reboot" on_node="controller-1" on_node_uuid="2"/>
+       </trigger>
+-      <trigger>
+-        <pseudo_event id="38" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:2_stop_0"/>
+-      </trigger>
+     </inputs>
+   </synapse>
+   <synapse id="1">
+diff --git a/pengine/test10/remote-recover-no-resources.summary b/pengine/test10/remote-recover-no-resources.summary
+index 9527161..c01eb87 100644
+--- a/pengine/test10/remote-recover-no-resources.summary
++++ b/pengine/test10/remote-recover-no-resources.summary
+@@ -60,6 +60,7 @@ Executing cluster transition:
+  * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
+  * Pseudo action:   stonith-fence_ipmilan-5254005bdbb5_stop_0
+  * Fencing controller-1 (reboot)
++ * Pseudo action:   messaging-1_stop_0
+  * Pseudo action:   galera-2_stop_0
+  * Pseudo action:   redis_post_notify_stop_0
+  * Resource action: redis           notify on controller-0
+@@ -92,7 +93,6 @@ Executing cluster transition:
+  * Pseudo action:   ip-172.17.1.17_stop_0
+  * Pseudo action:   ip-172.17.4.11_stop_0
+  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
+- * Pseudo action:   messaging-1_stop_0
+  * Resource action: redis           notify on controller-0
+  * Resource action: redis           notify on controller-2
+  * Pseudo action:   redis-master_confirmed-post_notify_stopped_0
+diff --git a/pengine/test10/remote-recover-unknown.dot b/pengine/test10/remote-recover-unknown.dot
+index a883eb4..1d13e50 100644
+--- a/pengine/test10/remote-recover-unknown.dot
++++ b/pengine/test10/remote-recover-unknown.dot
+@@ -46,7 +46,6 @@ digraph "g" {
+ "rabbitmq_post_notify_stonith_0" -> "rabbitmq_post_notify_stonith_0 messaging-0" [ style = bold]
+ "rabbitmq_post_notify_stonith_0" -> "rabbitmq_post_notify_stonith_0 messaging-2" [ style = bold]
+ "rabbitmq_post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"]
+-"rabbitmq_stop_0 messaging-1" -> "messaging-1_stop_0 controller-1" [ style = bold]
+ "rabbitmq_stop_0 messaging-1" -> "rabbitmq-clone_stopped_0" [ style = bold]
+ "rabbitmq_stop_0 messaging-1" [ style=bold color="green" fontcolor="orange"]
+ "redis-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
+diff --git a/pengine/test10/remote-recover-unknown.exp b/pengine/test10/remote-recover-unknown.exp
+index 65677b4..13bd295 100644
+--- a/pengine/test10/remote-recover-unknown.exp
++++ b/pengine/test10/remote-recover-unknown.exp
+@@ -9,9 +9,6 @@
+       <trigger>
+         <crm_event id="1" operation="stonith" operation_key="stonith-controller-1-reboot" on_node="controller-1" on_node_uuid="2"/>
+       </trigger>
+-      <trigger>
+-        <pseudo_event id="39" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:2_stop_0"/>
+-      </trigger>
+     </inputs>
+   </synapse>
+   <synapse id="1">
+diff --git a/pengine/test10/remote-recover-unknown.summary b/pengine/test10/remote-recover-unknown.summary
+index 78a60d0..64f37cb 100644
+--- a/pengine/test10/remote-recover-unknown.summary
++++ b/pengine/test10/remote-recover-unknown.summary
+@@ -61,6 +61,7 @@ Executing cluster transition:
+  * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
+  * Pseudo action:   stonith-fence_ipmilan-5254005bdbb5_stop_0
+  * Fencing controller-1 (reboot)
++ * Pseudo action:   messaging-1_stop_0
+  * Pseudo action:   galera-2_stop_0
+  * Pseudo action:   redis_post_notify_stop_0
+  * Resource action: redis           notify on controller-0
+@@ -94,7 +95,6 @@ Executing cluster transition:
+  * Pseudo action:   ip-172.17.1.17_stop_0
+  * Pseudo action:   ip-172.17.4.11_stop_0
+  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
+- * Pseudo action:   messaging-1_stop_0
+  * Resource action: redis           notify on controller-0
+  * Resource action: redis           notify on controller-2
+  * Pseudo action:   redis-master_confirmed-post_notify_stopped_0
+-- 
+1.8.3.1
+
+
+From 71142273e6b5108224ecdb0082b36f533b604fad Mon Sep 17 00:00:00 2001
+From: Klaus Wenninger <klaus.wenninger@aon.at>
+Date: Mon, 8 Jul 2019 18:17:28 +0200
+Subject: [PATCH 17/96] Fix: fence-history: fail leftover pending-actions after
+ stonithd-restart
+
+---
+ fencing/history.c  | 15 +++++++++++++++
+ fencing/internal.h |  8 ++++++++
+ fencing/remote.c   |  6 +++---
+ 3 files changed, 26 insertions(+), 3 deletions(-)
+
+diff --git a/fencing/history.c b/fencing/history.c
+index 0f98058..c487848 100644
+--- a/fencing/history.c
++++ b/fencing/history.c
+@@ -347,6 +347,21 @@ stonith_merge_in_history_list(GHashTable *history)
+ 
+         updated = TRUE;
+         g_hash_table_iter_steal(&iter);
++
++        if ((op->state != st_failed) &&
++            (op->state != st_done) &&
++            safe_str_eq(op->originator, stonith_our_uname)) {
++            crm_warn("received pending action we are supposed to be the "
++                     "owner but it's not in our records -> fail it");
++            op->state = st_failed;
++            op->completed = time(NULL);
++            /* Use -EHOSTUNREACH so as not to introduce a new return code
++               that might trigger unexpected results elsewhere, and to prevent
++               remote_op_done from setting the delegate if not present
++             */
++            stonith_bcast_result_to_peers(op, -EHOSTUNREACH);
++        }
++
+         g_hash_table_insert(stonith_remote_op_list, op->id, op);
+         /* we could trim the history here but if we bail
+          * out after trim we might miss more recent entries
+diff --git a/fencing/internal.h b/fencing/internal.h
+index 028137f..cd48b53 100644
+--- a/fencing/internal.h
++++ b/fencing/internal.h
+@@ -142,6 +142,14 @@ typedef struct remote_fencing_op_s {
+ 
+ } remote_fencing_op_t;
+ 
++/*!
++ * \internal
++ * \brief Broadcast the result of an operation to the peers.
++ * \param op, Operation whose result should be broadcast
++ * \param rc, Result of the operation
++ */
++void stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc);
++
+ enum st_callback_flags {
+     st_callback_unknown        = 0x0000,
+     st_callback_notify_fence   = 0x0001,
+diff --git a/fencing/remote.c b/fencing/remote.c
+index 866112f..6c5b9b8 100644
+--- a/fencing/remote.c
++++ b/fencing/remote.c
+@@ -379,8 +379,8 @@ create_op_done_notify(remote_fencing_op_t * op, int rc)
+     return notify_data;
+ }
+ 
+-static void
+-bcast_result_to_peers(remote_fencing_op_t * op, int rc)
++void
++stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc)
+ {
+     static int count = 0;
+     xmlNode *bcast = create_xml_node(NULL, T_STONITH_REPLY);
+@@ -518,7 +518,7 @@ remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup)
+     subt = crm_element_value(data, F_SUBTYPE);
+     if (dup == FALSE && safe_str_neq(subt, "broadcast")) {
+         /* Defer notification until the bcast message arrives */
+-        bcast_result_to_peers(op, rc);
++        stonith_bcast_result_to_peers(op, rc);
+         goto remote_op_done_cleanup;
+     }
+ 
+-- 
+1.8.3.1
+
+
+From 89c87a604b7a318df5a6fd66d5e077362d6717aa Mon Sep 17 00:00:00 2001
+From: Klaus Wenninger <klaus.wenninger@aon.at>
+Date: Mon, 8 Jul 2019 18:49:18 +0200
+Subject: [PATCH 18/96] Fix: st_client: make it safe to remove notifications
+ from within notifications
+
+While cycling over the notification list, just mark entries for deletion
+and delete them afterwards.
+---
+ lib/fencing/st_client.c | 58 +++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 54 insertions(+), 4 deletions(-)
+
+diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
+index c38f356..feb8c73 100644
+--- a/lib/fencing/st_client.c
++++ b/lib/fencing/st_client.c
+@@ -77,6 +77,8 @@ typedef struct stonith_private_s {
+     mainloop_io_t *source;
+     GHashTable *stonith_op_callback_table;
+     GList *notify_list;
++    int notify_refcnt;
++    bool notify_deletes;
+ 
+     void (*op_callback) (stonith_t * st, stonith_callback_data_t * data);
+ 
+@@ -87,6 +89,7 @@ typedef struct stonith_notify_client_s {
+     const char *obj_id;         /* implement one day */
+     const char *obj_type;       /* implement one day */
+     void (*notify) (stonith_t * st, stonith_event_t * e);
++    bool delete;
+ 
+ } stonith_notify_client_t;
+ 
+@@ -223,6 +226,38 @@ log_action(stonith_action_t *action, pid_t pid)
+     }
+ }
+ 
++/* When cycling through the list we don't want to delete items, so we just
++   mark them; once we know nobody is using the list any more, we loop over
++   it and remove the marked items.
++ */
++static void
++foreach_notify_entry (stonith_private_t *private,
++                GFunc func,
++                gpointer user_data)
++{
++    private->notify_refcnt++;
++    g_list_foreach(private->notify_list, func, user_data);
++    private->notify_refcnt--;
++    if ((private->notify_refcnt == 0) &&
++        private->notify_deletes) {
++        GList *list_item = private->notify_list;
++
++        private->notify_deletes = FALSE;
++        while (list_item != NULL)
++        {
++            stonith_notify_client_t *list_client = list_item->data;
++            GList *next = g_list_next(list_item);
++
++            if (list_client->delete) {
++                free(list_client);
++                private->notify_list =
++                    g_list_delete_link(private->notify_list, list_item);
++            }
++            list_item = next;
++        }
++    }
++}
++
+ static void
+ stonith_connection_destroy(gpointer user_data)
+ {
+@@ -242,7 +277,7 @@ stonith_connection_destroy(gpointer user_data)
+     crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY);
+     crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT);
+ 
+-    g_list_foreach(native->notify_list, stonith_send_notification, &blob);
++    foreach_notify_entry(native, stonith_send_notification, &blob);
+     free_xml(blob.xml);
+ }
+ 
+@@ -1244,6 +1279,10 @@ stonithlib_GCompareFunc(gconstpointer a, gconstpointer b)
+     const stonith_notify_client_t *a_client = a;
+     const stonith_notify_client_t *b_client = b;
+ 
++    if (a_client->delete || b_client->delete) {
++        /* make entries marked for deletion not findable */
++        return -1;
++    }
+     CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0);
+     rc = strcmp(a_client->event, b_client->event);
+     if (rc == 0) {
+@@ -1502,8 +1541,13 @@ stonith_api_del_notification(stonith_t * stonith, const char *event)
+     if (list_item != NULL) {
+         stonith_notify_client_t *list_client = list_item->data;
+ 
+-        private->notify_list = g_list_remove(private->notify_list, list_client);
+-        free(list_client);
++        if (private->notify_refcnt) {
++            list_client->delete = TRUE;
++            private->notify_deletes = TRUE;
++        } else {
++            private->notify_list = g_list_remove(private->notify_list, list_client);
++            free(list_client);
++        }
+ 
+         crm_trace("Removed callback");
+ 
+@@ -1807,6 +1851,10 @@ stonith_send_notification(gpointer data, gpointer user_data)
+         crm_warn("Skipping callback - NULL callback client");
+         return;
+ 
++    } else if (entry->delete) {
++        crm_trace("Skipping callback - marked for deletion");
++        return;
++
+     } else if (entry->notify == NULL) {
+         crm_warn("Skipping callback - NULL callback");
+         return;
+@@ -1988,7 +2036,7 @@ stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata)
+         stonith_perform_callback(st, blob.xml, 0, 0);
+ 
+     } else if (safe_str_eq(type, T_STONITH_NOTIFY)) {
+-        g_list_foreach(private->notify_list, stonith_send_notification, &blob);
++        foreach_notify_entry(private, stonith_send_notification, &blob);
+     } else if (safe_str_eq(type, T_STONITH_TIMEOUT_VALUE)) {
+         int call_id = 0;
+         int timeout = 0;
+@@ -2135,6 +2183,8 @@ stonith_api_new(void)
+     private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
+                                                                NULL, stonith_destroy_op_callback);
+     private->notify_list = NULL;
++    private->notify_refcnt = 0;
++    private->notify_deletes = FALSE;
+ 
+     new_stonith->call_id = 1;
+     new_stonith->state = stonith_disconnected;
+-- 
+1.8.3.1
+
+
+From 4625dd976d9e9cd08e0fefa0bbe057fb91510d98 Mon Sep 17 00:00:00 2001
+From: Klaus Wenninger <klaus.wenninger@aon.at>
+Date: Mon, 8 Jul 2019 19:12:53 +0200
+Subject: [PATCH 19/96] Feature: fence-history: add notification upon
+ history-synced
+
+---
+ fencing/history.c        |  5 +++++
+ fencing/internal.h       | 11 ++++++-----
+ fencing/main.c           |  3 +++
+ include/crm/stonith-ng.h |  1 +
+ 4 files changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/fencing/history.c b/fencing/history.c
+index c487848..4f1fc4a 100644
+--- a/fencing/history.c
++++ b/fencing/history.c
+@@ -420,6 +420,11 @@ stonith_fence_history(xmlNode *msg, xmlNode **output,
+         stonith_fence_history_cleanup(target,
+             crm_element_value(msg, F_STONITH_CALLID) != NULL);
+     } else if (options & st_opt_broadcast) {
++        /* There is currently no clear sign of when a history sync
++           is done, so send a notification for anything that looks
++           like a history sync
++         */
++        do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY_SYNCED, 0, NULL);
+         if (crm_element_value(msg, F_STONITH_CALLID)) {
+             /* this is coming from the stonith-API
+             *
+diff --git a/fencing/internal.h b/fencing/internal.h
+index cd48b53..a51b0e6 100644
+--- a/fencing/internal.h
++++ b/fencing/internal.h
+@@ -151,11 +151,12 @@ typedef struct remote_fencing_op_s {
+ void stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc);
+ 
+ enum st_callback_flags {
+-    st_callback_unknown        = 0x0000,
+-    st_callback_notify_fence   = 0x0001,
+-    st_callback_device_add     = 0x0004,
+-    st_callback_device_del     = 0x0010,
+-    st_callback_notify_history = 0x0020
++    st_callback_unknown               = 0x0000,
++    st_callback_notify_fence          = 0x0001,
++    st_callback_device_add            = 0x0004,
++    st_callback_device_del            = 0x0010,
++    st_callback_notify_history        = 0x0020,
++    st_callback_notify_history_synced = 0x0040
+ };
+ 
+ /*
+diff --git a/fencing/main.c b/fencing/main.c
+index 82bee86..624937e 100644
+--- a/fencing/main.c
++++ b/fencing/main.c
+@@ -301,6 +301,9 @@ get_stonith_flag(const char *name)
+     } else if (safe_str_eq(name, T_STONITH_NOTIFY_HISTORY)) {
+         return st_callback_notify_history;
+ 
++    } else if (safe_str_eq(name, T_STONITH_NOTIFY_HISTORY_SYNCED)) {
++        return st_callback_notify_history_synced;
++
+     }
+     return st_callback_unknown;
+ }
+diff --git a/include/crm/stonith-ng.h b/include/crm/stonith-ng.h
+index 045521a..23f879b 100644
+--- a/include/crm/stonith-ng.h
++++ b/include/crm/stonith-ng.h
+@@ -35,6 +35,7 @@
+ #  define T_STONITH_NOTIFY_DISCONNECT     "st_notify_disconnect"
+ #  define T_STONITH_NOTIFY_FENCE          "st_notify_fence"
+ #  define T_STONITH_NOTIFY_HISTORY        "st_notify_history"
++#  define T_STONITH_NOTIFY_HISTORY_SYNCED "st_notify_history_synced"
+ 
+ /* *INDENT-OFF* */
+ enum stonith_state {
+-- 
+1.8.3.1
+
+
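+A consumer-side note on the new event: the patch above only defines
+T_STONITH_NOTIFY_HISTORY_SYNCED and maps it to a callback flag inside the
+fencer.  The sketch below shows how a client of the stonith API could
+subscribe to it, using the same register_notification() call that appears
+elsewhere in this series.  The function names on_history_synced and
+subscribe_history_synced are illustrative only, not part of the patch.
+
+    #include <crm/stonith-ng.h>
+
+    /* Callback with the (stonith_t *, stonith_event_t *) signature used by
+     * the notification handlers in this series */
+    static void
+    on_history_synced(stonith_t *st, stonith_event_t *event)
+    {
+        /* react to a completed history sync, e.g. stop a fallback timer */
+    }
+
+    /* Assumes 'st' is already connected to the fencer */
+    static void
+    subscribe_history_synced(stonith_t *st)
+    {
+        st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY_SYNCED,
+                                        on_history_synced);
+    }
+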
+From 732f069557e490a9ce1d1a5adfc74081c3e309c1 Mon Sep 17 00:00:00 2001
+From: Klaus Wenninger <klaus.wenninger@aon.at>
+Date: Mon, 8 Jul 2019 19:32:28 +0200
+Subject: [PATCH 20/96] Fix: crmd: remove-stonith-notifications upon
+ connection-destroy
+
+---
+ crmd/te_utils.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/crmd/te_utils.c b/crmd/te_utils.c
+index 280fc95..f5bcb84 100644
+--- a/crmd/te_utils.c
++++ b/crmd/te_utils.c
+@@ -183,8 +183,15 @@ tengine_stonith_connection_destroy(stonith_t * st, stonith_event_t * e)
+     }
+ 
+     /* cbchan will be garbage at this point, arrange for it to be reset */
+-    if(stonith_api) {
+-        stonith_api->state = stonith_disconnected;
++    if (stonith_api) {
++        /* the client API won't properly reconnect notifications
++         * if they are still in the table - so remove them
++         */
++        stonith_api->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT);
++        stonith_api->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE);
++        if (stonith_api->state != stonith_disconnected) {
++            stonith_api->cmds->disconnect(st);
++        }
+     }
+ 
+     if (AM_I_DC) {
+-- 
+1.8.3.1
+
+
+From 199d9df653e8264fb09655dd5f8f1e94533612ee Mon Sep 17 00:00:00 2001
+From: Klaus Wenninger <klaus.wenninger@aon.at>
+Date: Mon, 8 Jul 2019 19:36:02 +0200
+Subject: [PATCH 21/96] Fix: crmd: add notice-log for successful fencer-connect
+
+---
+ crmd/te_utils.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/crmd/te_utils.c b/crmd/te_utils.c
+index f5bcb84..2805ea9 100644
+--- a/crmd/te_utils.c
++++ b/crmd/te_utils.c
+@@ -496,7 +496,7 @@ te_connect_stonith(gpointer user_data)
+     stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_FENCE,
+                                              tengine_stonith_notify);
+ 
+-    crm_trace("Connected");
++    crm_notice("Fencer successfully connected");
+     return TRUE;
+ }
+ 
+-- 
+1.8.3.1
+
+
+From ec0bb46caba533fa03b9bf4376f0f1ac5f70d393 Mon Sep 17 00:00:00 2001
+From: Klaus Wenninger <klaus.wenninger@aon.at>
+Date: Mon, 8 Jul 2019 19:40:42 +0200
+Subject: [PATCH 22/96] Test: CTS: new pattern to identify fenced reconnected
+
+Now that notifications are removed upon disconnect, a duplicate
+notification can no longer be used as a sign of reconnection.
+---
+ cts/patterns.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cts/patterns.py b/cts/patterns.py
+index ccd753d..87b44a9 100644
+--- a/cts/patterns.py
++++ b/cts/patterns.py
+@@ -376,7 +376,7 @@ class crm_cs_v0(BasePatterns):
+             "LRMD lost STONITH connection",
+             "Connection to stonith-ng.* closed",
+             "Fencing daemon connection failed",
+-            r"crmd.*:\s*warn.*:\s*Callback already present",
++            r"pacemaker-controld.*Fencer successfully connected",
+         ]
+         self.components["stonith-ignore"] = [
+             r"pengine.*: Recover Fencing",
+-- 
+1.8.3.1
+
+
+From 5141b03fe7bffd493cd26c20412bbcf3041fa495 Mon Sep 17 00:00:00 2001
+From: Klaus Wenninger <klaus.wenninger@aon.at>
+Date: Mon, 8 Jul 2019 21:15:51 +0200
+Subject: [PATCH 23/96] Fix: fence-history: resync fence-history after stonithd
+ crash
+
+Set up a 30s fallback timer to trigger a history sync if the
+sync via the DC doesn't happen.
+---
+ crmd/callbacks.c |  2 +-
+ crmd/control.c   |  9 ++++++-
+ crmd/te_utils.c  | 78 ++++++++++++++++++++++++++++++++++++++++++++++++--------
+ crmd/tengine.h   |  3 ++-
+ 4 files changed, 79 insertions(+), 13 deletions(-)
+
+diff --git a/crmd/callbacks.c b/crmd/callbacks.c
+index c51b215..7560470 100644
+--- a/crmd/callbacks.c
++++ b/crmd/callbacks.c
+@@ -212,7 +212,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d
+                 } else {
+                     crm_info("New peer %s we want to sync fence history with",
+                              node->uname);
+-                    te_trigger_stonith_history_sync();
++                    te_trigger_stonith_history_sync(FALSE);
+                 }
+             }
+             break;
+diff --git a/crmd/control.c b/crmd/control.c
+index 488ea88..04935c7 100644
+--- a/crmd/control.c
++++ b/crmd/control.c
+@@ -192,7 +192,12 @@ do_shutdown(long long action,
+         clear_bit(fsa_input_register, R_ST_REQUIRED);
+ 
+         crm_info("Disconnecting STONITH...");
+-        stonith_api->cmds->disconnect(stonith_api);
++        if (stonith_api->state != stonith_disconnected) {
++            stonith_api->cmds->disconnect(stonith_api);
++        }
++        stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
++        stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_FENCE);
++        stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED);
+     }
+ }
+ 
+@@ -369,6 +374,8 @@ crmd_exit(int rc)
+     crm_timer_stop(wait_timer);
+     crm_timer_stop(recheck_timer);
+ 
++    te_cleanup_stonith_history_sync(NULL, TRUE);
++
+     free(transition_timer); transition_timer = NULL;
+     free(integration_timer); integration_timer = NULL;
+     free(finalization_timer); finalization_timer = NULL;
+diff --git a/crmd/te_utils.c b/crmd/te_utils.c
+index 2805ea9..29b411f 100644
+--- a/crmd/te_utils.c
++++ b/crmd/te_utils.c
+@@ -33,7 +33,33 @@
+ 
+ crm_trigger_t *stonith_reconnect = NULL;
+ static crm_trigger_t *stonith_history_sync_trigger = NULL;
+-static mainloop_timer_t *stonith_history_sync_timer = NULL;
++static mainloop_timer_t *stonith_history_sync_timer_short = NULL;
++static mainloop_timer_t *stonith_history_sync_timer_long = NULL;
++
++void
++te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers)
++{
++    if (free_timers) {
++        mainloop_timer_del(stonith_history_sync_timer_short);
++        stonith_history_sync_timer_short = NULL;
++        mainloop_timer_del(stonith_history_sync_timer_long);
++        stonith_history_sync_timer_long = NULL;
++    } else {
++        mainloop_timer_stop(stonith_history_sync_timer_short);
++        mainloop_timer_stop(stonith_history_sync_timer_long);
++    }
++
++    if (st) {
++        st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY_SYNCED);
++    }
++}
++
++static void
++tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event)
++{
++    te_cleanup_stonith_history_sync(st, FALSE);
++    crm_debug("Fence-history synced - cancel all timers");
++}
+ 
+ /*
+  * stonith cleanup list
+@@ -174,6 +200,8 @@ fail_incompletable_stonith(crm_graph_t * graph)
+ static void
+ tengine_stonith_connection_destroy(stonith_t * st, stonith_event_t * e)
+ {
++    te_cleanup_stonith_history_sync(st, FALSE);
++
+     if (is_set(fsa_input_register, R_ST_REQUIRED)) {
+         crm_crit("Fencing daemon connection failed");
+         mainloop_set_trigger(stonith_reconnect);
+@@ -187,11 +215,12 @@ tengine_stonith_connection_destroy(stonith_t * st, stonith_event_t * e)
+         /* the client API won't properly reconnect notifications
+          * if they are still in the table - so remove them
+          */
+-        stonith_api->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT);
+-        stonith_api->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE);
+         if (stonith_api->state != stonith_disconnected) {
+             stonith_api->cmds->disconnect(st);
+         }
++        stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
++        stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_FENCE);
++        stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED);
+     }
+ 
+     if (AM_I_DC) {
+@@ -212,6 +241,9 @@ char *te_client_id = NULL;
+ #endif
+ 
+ static void
++tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event);
++
++static void
+ tengine_stonith_notify(stonith_t * st, stonith_event_t * st_event)
+ {
+     if(te_client_id == NULL) {
+@@ -404,6 +436,7 @@ do_stonith_history_sync(gpointer user_data)
+     if (stonith_api && (stonith_api->state != stonith_disconnected)) {
+         stonith_history_t *history = NULL;
+ 
++        te_cleanup_stonith_history_sync(stonith_api, FALSE);
+         stonith_api->cmds->history(stonith_api,
+                                    st_opt_sync_call | st_opt_broadcast,
+                                    NULL, &history, 5);
+@@ -423,11 +456,18 @@ stonith_history_sync_set_trigger(gpointer user_data)
+ }
+ 
+ void
+-te_trigger_stonith_history_sync(void)
++te_trigger_stonith_history_sync(bool long_timeout)
+ {
+     /* trigger a sync in 5s to give more nodes the
+      * chance to show up so that we don't create
+      * unnecessary stonith-history-sync traffic
++     *
++     * the long timeout of 30s is there as a fallback
++     * so that after a successful connection to fenced
++     * we will wait for 30s for the DC to trigger a
++     * history-sync
++     * if this doesn't happen we trigger a sync locally
++     * (e.g. fenced segfaults and is restarted by pacemakerd)
+      */
+ 
+     /* as we are finally checking the stonith-connection
+@@ -441,13 +481,25 @@ te_trigger_stonith_history_sync(void)
+                                  do_stonith_history_sync, NULL);
+     }
+ 
+-    if(stonith_history_sync_timer == NULL) {
+-        stonith_history_sync_timer =
+-            mainloop_timer_add("history_sync", 5000,
+-                               FALSE, stonith_history_sync_set_trigger,
+-                               NULL);
++    if (long_timeout) {
++        if(stonith_history_sync_timer_long == NULL) {
++            stonith_history_sync_timer_long =
++                mainloop_timer_add("history_sync_long", 30000,
++                                   FALSE, stonith_history_sync_set_trigger,
++                                   NULL);
++        }
++        crm_info("Fence history will be synchronized cluster-wide within 30 seconds");
++        mainloop_timer_start(stonith_history_sync_timer_long);
++    } else {
++        if(stonith_history_sync_timer_short == NULL) {
++            stonith_history_sync_timer_short =
++                mainloop_timer_add("history_sync_short", 5000,
++                                   FALSE, stonith_history_sync_set_trigger,
++                                   NULL);
++        }
++        crm_info("Fence history will be synchronized cluster-wide within 5 seconds");
++        mainloop_timer_start(stonith_history_sync_timer_short);
+     }
+-    mainloop_timer_start(stonith_history_sync_timer);
+ }
+ 
+ gboolean
+@@ -496,6 +548,12 @@ te_connect_stonith(gpointer user_data)
+     stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_FENCE,
+                                              tengine_stonith_notify);
+ 
++    stonith_api->cmds->register_notification(stonith_api,
++                                             T_STONITH_NOTIFY_HISTORY_SYNCED,
++                                             tengine_stonith_history_synced);
++
++    te_trigger_stonith_history_sync(TRUE);
++
+     crm_notice("Fencer successfully connected");
+     return TRUE;
+ }
+diff --git a/crmd/tengine.h b/crmd/tengine.h
+index 1a9b2d2..a20760c 100644
+--- a/crmd/tengine.h
++++ b/crmd/tengine.h
+@@ -72,7 +72,8 @@ extern void abort_transition_graph(int abort_priority, enum transition_action ab
+ 
+ extern gboolean te_connect_stonith(gpointer user_data);
+ 
+-extern void te_trigger_stonith_history_sync(void);
++extern void te_trigger_stonith_history_sync(bool long_timeout);
++extern void te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers);
+ 
+ extern crm_trigger_t *transition_trigger;
+ extern crm_trigger_t *stonith_reconnect;
+-- 
+1.8.3.1
+
+
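+The fallback logic above boils down to a lazy-create, one-shot mainloop
+timer: create it on first use, start (or restart) it on demand, and let the
+callback return FALSE so it does not repeat.  A condensed sketch of that
+pattern follows; the names fallback_timer/fallback_fired/arm_fallback_timer
+are illustrative, the 30000 ms value matches the long fallback above, and
+the header paths are an assumption.
+
+    #include <glib.h>
+    #include <crm/common/mainloop.h>
+
+    static mainloop_timer_t *fallback_timer = NULL;
+
+    static gboolean
+    fallback_fired(gpointer user_data)
+    {
+        /* trigger the local history sync here */
+        return FALSE;   /* one-shot: do not restart the timer */
+    }
+
+    static void
+    arm_fallback_timer(void)
+    {
+        if (fallback_timer == NULL) {
+            fallback_timer = mainloop_timer_add("history_sync_fallback", 30000,
+                                                FALSE, fallback_fired, NULL);
+        }
+        mainloop_timer_start(fallback_timer);  /* (re)start the 30s countdown */
+    }
+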
+From 926ae820d6406885dce8ba794215002b57bce17f Mon Sep 17 00:00:00 2001
+From: Klaus Wenninger <klaus.wenninger@aon.at>
+Date: Wed, 10 Jul 2019 17:57:02 +0200
+Subject: [PATCH 24/96] Fix: st_client: cleanup token whenever setting api to
+ disconnected
+
+---
+ lib/fencing/st_client.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
+index feb8c73..60c07d5 100644
+--- a/lib/fencing/st_client.c
++++ b/lib/fencing/st_client.c
+@@ -273,6 +273,7 @@ stonith_connection_destroy(gpointer user_data)
+     native->ipc = NULL;
+     native->source = NULL;
+ 
++    free(native->token); native->token = NULL;
+     stonith->state = stonith_disconnected;
+     crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY);
+     crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT);
+@@ -1975,6 +1976,7 @@ stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNod
+   done:
+     if (crm_ipc_connected(native->ipc) == FALSE) {
+         crm_err("STONITH disconnected");
++        free(native->token); native->token = NULL;
+         stonith->state = stonith_disconnected;
+     }
+ 
+-- 
+1.8.3.1
+
+
+From 365c29581f0a88f106622c000e8fe4b3e9edd024 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 27 Feb 2019 15:39:59 -0600
+Subject: [PATCH 25/96] Low: executor: consider stonith resource stopped only
+ if stop succeeded
+
+Previously, the executor would consider a stonith resource stopped whenever a
+stop was requested, regardless of the actual result of remove_device(). One
+could make a case for ignoring device unregistration failures, but it would be
+an unusual enough result that it makes sense to draw attention to it.
+---
+ lrmd/lrmd.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/lrmd/lrmd.c b/lrmd/lrmd.c
+index d6a431d..ecf0cc7 100644
+--- a/lrmd/lrmd.c
++++ b/lrmd/lrmd.c
+@@ -1057,8 +1057,12 @@ stonith_action_complete(lrmd_cmd_t * cmd, int rc)
+     } else {
+         /* command successful */
+         cmd->lrmd_op_status = PCMK_LRM_OP_DONE;
+-        if (safe_str_eq(cmd->action, "start") && rsc) {
+-            rsc->stonith_started = 1;
++        if (rsc) {
++            if (safe_str_eq(cmd->action, "start")) {
++                rsc->stonith_started = 1;
++            } else if (safe_str_eq(cmd->action, "stop")) {
++                rsc->stonith_started = 0;
++            }
+         }
+     }
+ 
+@@ -1160,7 +1164,6 @@ lrmd_rsc_execute_stonith(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
+         }
+     } else if (safe_str_eq(cmd->action, "stop")) {
+         rc = stonith_api->cmds->remove_device(stonith_api, st_opt_sync_call, cmd->rsc_id);
+-        rsc->stonith_started = 0;
+     } else if (safe_str_eq(cmd->action, "monitor")) {
+         if (cmd->interval) {
+             do_monitor = 1;
+-- 
+1.8.3.1
+
+
+From 4e535413aaacb21aa11de3d1b48a90ed3a173825 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 10 Apr 2019 12:43:21 -0500
+Subject: [PATCH 26/96] Fix: executor: return error for stonith probes if
+ stonith connection was lost
+
+Previously, stonith probes could return only PCMK_OCF_OK (if the executor had
+registered the device with the fencer) or PCMK_OCF_NOT_RUNNING (if the executor
+had unregistered or not yet registered the device).
+
+However, if the stonith connection is lost, the executor doesn't know whether
+the device is still registered or not, and thus could be giving wrong
+information back to the controller.
+
+This fixes that by refactoring lrmd_rsc_t's stonith_started member from a
+boolean (0 = not started, 1 = started) to an rc code (pcmk_ok = started,
+-ENODEV = not started, pcmk_err_generic = stonith connection lost).
+stonith_rc2status() will map these to PCMK_OCF_OK, PCMK_OCF_NOT_RUNNING, or
+PCMK_OCF_UNKNOWN_ERROR.
+
+This ensures that probes after the connection is lost will fail, which is
+especially important if the controller respawned at the same time as the fencer
+and so didn't receive client notification of failed monitors.
+
+This means that if the executor loses its stonith connection, probes for *all*
+stonith devices on that node will fail and require a stop on that node, which
+may be unexpected for users accustomed to the old behavior, but is more
+correct.
+---
+ lrmd/lrmd.c         | 15 ++++++++++++---
+ lrmd/lrmd_private.h |  2 +-
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/lrmd/lrmd.c b/lrmd/lrmd.c
+index ecf0cc7..59751ec 100644
+--- a/lrmd/lrmd.c
++++ b/lrmd/lrmd.c
+@@ -158,6 +158,7 @@ build_rsc_from_xml(xmlNode * msg)
+     rsc->provider = crm_element_value_copy(rsc_xml, F_LRMD_PROVIDER);
+     rsc->type = crm_element_value_copy(rsc_xml, F_LRMD_TYPE);
+     rsc->work = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_rsc_dispatch, rsc);
++    rsc->st_probe_rc = -ENODEV; // if stonith, initialize to "not running"
+     return rsc;
+ }
+ 
+@@ -1059,9 +1060,9 @@ stonith_action_complete(lrmd_cmd_t * cmd, int rc)
+         cmd->lrmd_op_status = PCMK_LRM_OP_DONE;
+         if (rsc) {
+             if (safe_str_eq(cmd->action, "start")) {
+-                rsc->stonith_started = 1;
++                rsc->st_probe_rc = pcmk_ok; // maps to PCMK_OCF_OK
+             } else if (safe_str_eq(cmd->action, "stop")) {
+-                rsc->stonith_started = 0;
++                rsc->st_probe_rc = -ENODEV; // maps to PCMK_OCF_NOT_RUNNING
+             }
+         }
+     }
+@@ -1094,6 +1095,14 @@ stonith_connection_failed(void)
+     g_hash_table_iter_init(&iter, rsc_list);
+     while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) {
+         if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) {
++            /* This will cause future probes to return PCMK_OCF_UNKNOWN_ERROR
++             * until the resource is stopped or started successfully. This is
++             * especially important if the controller also went away (possibly
++             * due to a cluster layer restart) and won't receive our client
++             * notification of any monitors finalized below.
++             */
++            rsc->st_probe_rc = pcmk_err_generic;
++
+             if (rsc->active) {
+                 cmd_list = g_list_append(cmd_list, rsc->active);
+             }
+@@ -1168,7 +1177,7 @@ lrmd_rsc_execute_stonith(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
+         if (cmd->interval) {
+             do_monitor = 1;
+         } else {
+-            rc = rsc->stonith_started ? 0 : -ENODEV;
++            rc = rsc->st_probe_rc;
+         }
+     }
+ 
+diff --git a/lrmd/lrmd_private.h b/lrmd/lrmd_private.h
+index 4449bb0..44de453 100644
+--- a/lrmd/lrmd_private.h
++++ b/lrmd/lrmd_private.h
+@@ -54,7 +54,7 @@ typedef struct lrmd_rsc_s {
+      * that have been handed off from the pending ops list. */
+     GList *recurring_ops;
+ 
+-    int stonith_started;
++    int st_probe_rc; // What value should be returned for a probe if stonith
+ 
+     crm_trigger_t *work;
+ } lrmd_rsc_t;
+-- 
+1.8.3.1
+
+
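+The commit message above describes a three-way mapping from the new
+st_probe_rc field to OCF probe results.  A self-contained sketch of that
+mapping is below; the real conversion happens in stonith_rc2status(), the
+enum values are stand-ins for Pacemaker's PCMK_OCF_* constants (0, 7 and 1
+are the standard OCF codes for "ok", "not running" and "unknown error"),
+and the function name is illustrative.
+
+    #include <errno.h>
+
+    /* Stand-ins for PCMK_OCF_OK, PCMK_OCF_NOT_RUNNING, PCMK_OCF_UNKNOWN_ERROR */
+    enum probe_status {
+        PROBE_OK            = 0,
+        PROBE_UNKNOWN_ERROR = 1,
+        PROBE_NOT_RUNNING   = 7,
+    };
+
+    static enum probe_status
+    probe_status_from_rc(int st_probe_rc)
+    {
+        if (st_probe_rc == 0) {              /* pcmk_ok: start succeeded */
+            return PROBE_OK;
+        } else if (st_probe_rc == -ENODEV) { /* stopped / never registered */
+            return PROBE_NOT_RUNNING;
+        }
+        /* anything else, e.g. pcmk_err_generic after losing the fencer */
+        return PROBE_UNKNOWN_ERROR;
+    }
+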
+From 1268ce78d1be9841f5861892e4ed3d9013d38fdb Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 10 Apr 2019 12:51:48 -0500
+Subject: [PATCH 27/96] Fix: executor: don't cancel stonith monitors when
+ device is not registered
+
+This essentially reverts 53532a7. Now that failed stonith connections will
+cause failures of future probes, we can properly leave the decision to cancel
+monitors to the controller. Otherwise, the controller won't know that the
+recurring action is no longer active, and could get action timeouts in some
+cases.
+---
+ lrmd/lrmd.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+diff --git a/lrmd/lrmd.c b/lrmd/lrmd.c
+index 59751ec..5d33324 100644
+--- a/lrmd/lrmd.c
++++ b/lrmd/lrmd.c
+@@ -1029,17 +1029,7 @@ stonith_action_complete(lrmd_cmd_t * cmd, int rc)
+     } else if (rc == -ENODEV && safe_str_eq(cmd->action, "monitor")) {
+         // The device is not registered with the fencer
+ 
+-        if (recurring) {
+-            /* If we get here, the fencer somehow lost the registration of a
+-             * previously active device (possibly due to crash and respawn). In
+-             * that case, we need to indicate that the recurring monitor needs
+-             * to be cancelled.
+-             */
+-            cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED;
+-            recurring = FALSE;
+-        } else {
+-            cmd->lrmd_op_status = PCMK_LRM_OP_DONE;
+-        }
++        cmd->lrmd_op_status = PCMK_LRM_OP_ERROR;
+         cmd->exec_rc = PCMK_OCF_NOT_RUNNING;
+ 
+     } else if (rc) {
+-- 
+1.8.3.1
+
+
+From ffd57df21eff2e0c96966be7069152b5b57c1870 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 28 Jun 2019 15:27:56 -0500
+Subject: [PATCH 28/96] Refactor: controller: functionize access to last
+ scheduler request ID
+
+This will come in handy for adding a new timer.
+---
+ crmd/control.c    |  3 +--
+ crmd/crmd_utils.h |  2 ++
+ crmd/pengine.c    | 49 ++++++++++++++++++++++++++++++++++++++-----------
+ crmd/te_utils.c   |  5 +----
+ 4 files changed, 42 insertions(+), 17 deletions(-)
+
+diff --git a/crmd/control.c b/crmd/control.c
+index 04935c7..73a2b08 100644
+--- a/crmd/control.c
++++ b/crmd/control.c
+@@ -339,7 +339,6 @@ crmd_exit(int rc)
+     clear_bit(fsa_input_register, R_MEMBERSHIP);
+     g_list_free(fsa_message_queue); fsa_message_queue = NULL;
+ 
+-    free(pe_subsystem); pe_subsystem = NULL;
+     free(te_subsystem); te_subsystem = NULL;
+     free(cib_subsystem); cib_subsystem = NULL;
+ 
+@@ -375,6 +374,7 @@ crmd_exit(int rc)
+     crm_timer_stop(recheck_timer);
+ 
+     te_cleanup_stonith_history_sync(NULL, TRUE);
++    controld_sched_cleanup();
+ 
+     free(transition_timer); transition_timer = NULL;
+     free(integration_timer); integration_timer = NULL;
+@@ -393,7 +393,6 @@ crmd_exit(int rc)
+ 
+     free(te_uuid); te_uuid = NULL;
+     free(te_client_id); te_client_id = NULL;
+-    free(fsa_pe_ref); fsa_pe_ref = NULL;
+     free(failed_stop_offset); failed_stop_offset = NULL;
+     free(failed_start_offset); failed_start_offset = NULL;
+ 
+diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h
+index a754487..ce856b7 100644
+--- a/crmd/crmd_utils.h
++++ b/crmd/crmd_utils.h
+@@ -83,6 +83,8 @@ int crmd_exit(int rc);
+ int crmd_fast_exit(int rc);
+ gboolean stop_subsystem(struct crm_subsystem_s *centry, gboolean force_quit);
+ gboolean start_subsystem(struct crm_subsystem_s *centry);
++void controld_expect_sched_reply(xmlNode *msg);
++void controld_sched_cleanup(void);
+ 
+ void fsa_dump_actions(long long action, const char *text);
+ void fsa_dump_inputs(int log_level, const char *text, long long input_register);
+diff --git a/crmd/pengine.c b/crmd/pengine.c
+index 8ecb21d..b7d996a 100644
+--- a/crmd/pengine.c
++++ b/crmd/pengine.c
+@@ -1,5 +1,7 @@
+ /* 
+- * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
++ * Copyright 2004-2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
+  * 
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public
+@@ -172,6 +174,36 @@ do_pe_control(long long action,
+ int fsa_pe_query = 0;
+ char *fsa_pe_ref = NULL;
+ 
++/*!
++ * \internal
++ * \brief Set the scheduler request currently being waited on
++ *
++ * \param[in] msg  Request to expect reply to (or NULL for none)
++ */
++void
++controld_expect_sched_reply(xmlNode *msg)
++{
++    char *ref = NULL;
++
++    if (msg) {
++        ref = crm_element_value_copy(msg, XML_ATTR_REFERENCE);
++        CRM_ASSERT(ref != NULL);
++    }
++    free(fsa_pe_ref);
++    fsa_pe_ref = ref;
++}
++
++/*!
++ * \internal
++ * \brief Clean up all memory used by controller scheduler handling
++ */
++void
++controld_sched_cleanup()
++{
++    free(pe_subsystem); pe_subsystem = NULL;
++    controld_expect_sched_reply(NULL);
++}
++
+ /*	 A_PE_INVOKE	*/
+ void
+ do_pe_invoke(long long action,
+@@ -216,10 +248,7 @@ do_pe_invoke(long long action,
+     crm_debug("Query %d: Requesting the current CIB: %s", fsa_pe_query,
+               fsa_state2string(fsa_state));
+ 
+-    /* Make sure any queued calculations are discarded */
+-    free(fsa_pe_ref);
+-    fsa_pe_ref = NULL;
+-
++    controld_expect_sched_reply(NULL);
+     fsa_register_cib_callback(fsa_pe_query, FALSE, NULL, do_pe_invoke_callback);
+ }
+ 
+@@ -335,16 +364,14 @@ do_pe_invoke_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void
+ 
+     cmd = create_request(CRM_OP_PECALC, output, NULL, CRM_SYSTEM_PENGINE, CRM_SYSTEM_DC, NULL);
+ 
+-    free(fsa_pe_ref);
+-    fsa_pe_ref = crm_element_value_copy(cmd, XML_ATTR_REFERENCE);
+-
+     sent = crm_ipc_send(mainloop_get_ipc_client(pe_subsystem->source), cmd, 0, 0, NULL);
+     if (sent <= 0) {
+         crm_err("Could not contact the pengine: %d", sent);
+         register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __FUNCTION__);
++    } else {
++        controld_expect_sched_reply(cmd);
++        crm_debug("Invoking the PE: query=%d, ref=%s, seq=%llu, quorate=%d",
++                  fsa_pe_query, fsa_pe_ref, crm_peer_seq, fsa_has_quorum);
+     }
+-
+-    crm_debug("Invoking the PE: query=%d, ref=%s, seq=%llu, quorate=%d",
+-              fsa_pe_query, fsa_pe_ref, crm_peer_seq, fsa_has_quorum);
+     free_xml(cmd);
+ }
+diff --git a/crmd/te_utils.c b/crmd/te_utils.c
+index 29b411f..14570cd 100644
+--- a/crmd/te_utils.c
++++ b/crmd/te_utils.c
+@@ -717,10 +717,7 @@ abort_transition_graph(int abort_priority, enum transition_action abort_action,
+     }
+ 
+     abort_timer.aborted = TRUE;
+-
+-    /* Make sure any queued calculations are discarded ASAP */
+-    free(fsa_pe_ref);
+-    fsa_pe_ref = NULL;
++    controld_expect_sched_reply(NULL);
+ 
+     if (transition_graph->complete == FALSE) {
+         if(update_abort_priority(transition_graph, abort_priority, abort_action, abort_text)) {
+-- 
+1.8.3.1
+
+
+From 61000ec5247d77ee539cc6988f826c20a6903105 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 28 Jun 2019 16:20:03 -0500
+Subject: [PATCH 29/96] Fix: controller: set timeout on scheduler responses
+
+Previously, once the DC successfully read the CIB and sent a calculation
+request to the scheduler, it wouldn't do anything further with the request,
+aside from the message handler for the scheduler's response.
+
+This meant that if the scheduler successfully accepted the request, but then
+was unable to reply (such as not getting enough CPU cycles), the controller
+would never detect anything wrong, and the cluster would be blocked.
+
+Now, the controller sets a 2-minute timer after handing off the request to the
+scheduler, and if it doesn't get a response in that time, it exits and stays
+down (if a node is elected DC but can't run the scheduler, we want to ensure it
+doesn't interfere with further elections).
+---
+ crmd/crmd_utils.h |  1 +
+ crmd/election.c   |  1 +
+ crmd/messages.c   |  2 +-
+ crmd/pengine.c    | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 56 insertions(+), 1 deletion(-)
+
+diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h
+index ce856b7..a704380 100644
+--- a/crmd/crmd_utils.h
++++ b/crmd/crmd_utils.h
+@@ -83,6 +83,7 @@ int crmd_exit(int rc);
+ int crmd_fast_exit(int rc);
+ gboolean stop_subsystem(struct crm_subsystem_s *centry, gboolean force_quit);
+ gboolean start_subsystem(struct crm_subsystem_s *centry);
++void controld_stop_sched_timer(void);
+ void controld_expect_sched_reply(xmlNode *msg);
+ void controld_sched_cleanup(void);
+ 
+diff --git a/crmd/election.c b/crmd/election.c
+index cef82d0..a4313cc 100644
+--- a/crmd/election.c
++++ b/crmd/election.c
+@@ -280,6 +280,7 @@ do_dc_release(long long action,
+     if (action & A_DC_RELEASE) {
+         crm_debug("Releasing the role of DC");
+         clear_bit(fsa_input_register, R_THE_DC);
++        controld_expect_sched_reply(NULL);
+ 
+     } else if (action & A_DC_RELEASED) {
+         crm_info("DC role released");
+diff --git a/crmd/messages.c b/crmd/messages.c
+index 24ffac6..f1599ab 100644
+--- a/crmd/messages.c
++++ b/crmd/messages.c
+@@ -1000,9 +1000,9 @@ handle_response(xmlNode * stored_msg)
+         } else if (safe_str_eq(msg_ref, fsa_pe_ref)) {
+             ha_msg_input_t fsa_input;
+ 
++            controld_stop_sched_timer();
+             fsa_input.msg = stored_msg;
+             register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input);
+-            crm_trace("Completed: %s...", fsa_pe_ref);
+ 
+         } else {
+             crm_info("%s calculation %s is obsolete", op, msg_ref);
+diff --git a/crmd/pengine.c b/crmd/pengine.c
+index b7d996a..1630e7b 100644
+--- a/crmd/pengine.c
++++ b/crmd/pengine.c
+@@ -173,6 +173,45 @@ do_pe_control(long long action,
+ 
+ int fsa_pe_query = 0;
+ char *fsa_pe_ref = NULL;
++static mainloop_timer_t *controld_sched_timer = NULL;
++
++// @TODO Make this a configurable cluster option if there's demand for it
++#define SCHED_TIMEOUT_MS (120000)
++
++/*!
++ * \internal
++ * \brief Handle a timeout waiting for scheduler reply
++ *
++ * \param[in] user_data  Ignored
++ *
++ * \return FALSE (indicating that timer should not be restarted)
++ */
++static gboolean
++controld_sched_timeout(gpointer user_data)
++{
++    if (AM_I_DC) {
++        /* If this node is the DC but can't communicate with the scheduler, just
++         * exit (and likely get fenced) so this node doesn't interfere with any
++         * further DC elections.
++         *
++         * @TODO We could try something less drastic first, like disconnecting
++         * and reconnecting to the scheduler, but something is likely going
++         * seriously wrong, so perhaps it's better to just fail as quickly as
++         * possible.
++         */
++        crmd_exit(DAEMON_RESPAWN_STOP);
++    }
++    return FALSE;
++}
++
++void
++controld_stop_sched_timer()
++{
++    if (controld_sched_timer && fsa_pe_ref) {
++        crm_trace("Stopping timer for scheduler reply %s", fsa_pe_ref);
++    }
++    mainloop_timer_stop(controld_sched_timer);
++}
+ 
+ /*!
+  * \internal
+@@ -188,6 +227,16 @@ controld_expect_sched_reply(xmlNode *msg)
+     if (msg) {
+         ref = crm_element_value_copy(msg, XML_ATTR_REFERENCE);
+         CRM_ASSERT(ref != NULL);
++
++        if (controld_sched_timer == NULL) {
++            controld_sched_timer = mainloop_timer_add("scheduler_reply_timer",
++                                                      SCHED_TIMEOUT_MS, FALSE,
++                                                      controld_sched_timeout,
++                                                      NULL);
++        }
++        mainloop_timer_start(controld_sched_timer);
++    } else {
++        controld_stop_sched_timer();
+     }
+     free(fsa_pe_ref);
+     fsa_pe_ref = ref;
+@@ -200,6 +249,10 @@ controld_expect_sched_reply(xmlNode *msg)
+ void
+ controld_sched_cleanup()
+ {
++    if (controld_sched_timer != NULL) {
++        mainloop_timer_del(controld_sched_timer);
++        controld_sched_timer = NULL;
++    }
+     free(pe_subsystem); pe_subsystem = NULL;
+     controld_expect_sched_reply(NULL);
+ }
+-- 
+1.8.3.1
+
+
+From ab4ddb17c9b69f182a71026ee9752eec519fb10c Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 30 May 2019 08:37:52 -0500
+Subject: [PATCH 30/96] Low: libpe_status: offer compile-time option to change
+ concurrent-fencing default
+
+We most likely want to make concurrent-fencing default to true at some point.
+For now, offer that possibility via a compile-time constant, for experimenting.
+---
+ lib/pengine/common.c | 8 +++++++-
+ lib/pengine/status.c | 3 +++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/lib/pengine/common.c b/lib/pengine/common.c
+index d03a6aa..e82434a 100644
+--- a/lib/pengine/common.c
++++ b/lib/pengine/common.c
+@@ -111,7 +111,13 @@ pe_cluster_option pe_opts[] = {
+ 	  "How long to wait for the STONITH action (reboot,on,off) to complete", NULL },
+ 	{ XML_ATTR_HAVE_WATCHDOG, NULL, "boolean", NULL, "false", &check_boolean,
+ 	  "Enable watchdog integration", "Set automatically by the cluster if SBD is detected.  User configured values are ignored." },
+-	{ "concurrent-fencing", NULL, "boolean", NULL, "false", &check_boolean,
++	{ "concurrent-fencing", NULL, "boolean", NULL,
++#ifdef DEFAULT_CONCURRENT_FENCING_TRUE
++      "true",
++#else
++      "false",
++#endif
++      &check_boolean,
+ 	  "Allow performing fencing operations in parallel", NULL },
+ 	{ "startup-fencing", "startup_fencing", "boolean", NULL, "true", &check_boolean,
+ 	  "STONITH unseen nodes", "Advanced Use Only!  Not using the default is very unsafe!" },
+diff --git a/lib/pengine/status.c b/lib/pengine/status.c
+index 6810c78..fdebaa2 100644
+--- a/lib/pengine/status.c
++++ b/lib/pengine/status.c
+@@ -376,6 +376,9 @@ set_working_set_defaults(pe_working_set_t * data_set)
+     set_bit(data_set->flags, pe_flag_symmetric_cluster);
+     set_bit(data_set->flags, pe_flag_is_managed_default);
+     set_bit(data_set->flags, pe_flag_stop_action_orphans);
++#ifdef DEFAULT_CONCURRENT_FENCING_TRUE
++    set_bit(data_set->flags, pe_flag_concurrent_fencing);
++#endif
+ }
+ 
+ resource_t *
+-- 
+1.8.3.1
+
+
+From deac9823e08c51a2683c127527a2b047b516f393 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 6 Jun 2019 14:18:37 -0500
+Subject: [PATCH 31/96] Test: scheduler: explicitly set concurrent-fencing in
+ relevant regression tests
+
+... since concurrent-fencing's default is likely to eventually change,
+which would otherwise affect the results of these tests
+---
+ pengine/test10/rec-node-14.xml                     | 1 +
+ pengine/test10/remote-connection-unrecoverable.xml | 1 +
+ pengine/test10/remote-recover-all.xml              | 1 +
+ pengine/test10/remote-recover-no-resources.xml     | 1 +
+ pengine/test10/remote-recover-unknown.xml          | 1 +
+ pengine/test10/stonith-4.xml                       | 1 +
+ pengine/test10/suicide-needed-inquorate.xml        | 1 +
+ pengine/test10/ticket-clone-21.xml                 | 1 +
+ pengine/test10/ticket-clone-9.xml                  | 1 +
+ 9 files changed, 9 insertions(+)
+
+diff --git a/pengine/test10/rec-node-14.xml b/pengine/test10/rec-node-14.xml
+index 456ea80..8582c17 100644
+--- a/pengine/test10/rec-node-14.xml
++++ b/pengine/test10/rec-node-14.xml
+@@ -4,6 +4,7 @@
+       <cluster_property_set id="cib-bootstrap-options">
+         <nvpair id="nvpair.id21835" name="stonith-enabled" value="true"/>
+         <nvpair id="nvpair.id21844" name="no-quorum-policy" value="ignore"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+     </crm_config>
+     <nodes>
+diff --git a/pengine/test10/remote-connection-unrecoverable.xml b/pengine/test10/remote-connection-unrecoverable.xml
+index 4dda833..8096e25 100644
+--- a/pengine/test10/remote-connection-unrecoverable.xml
++++ b/pengine/test10/remote-connection-unrecoverable.xml
+@@ -7,6 +7,7 @@
+         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+         <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster"/>
+         <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1459735110"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+     </crm_config>
+     <nodes>
+diff --git a/pengine/test10/remote-recover-all.xml b/pengine/test10/remote-recover-all.xml
+index 30d2451..f56e641 100644
+--- a/pengine/test10/remote-recover-all.xml
++++ b/pengine/test10/remote-recover-all.xml
+@@ -10,6 +10,7 @@
+         <nvpair id="cib-bootstrap-options-cluster-recheck-interval" name="cluster-recheck-interval" value="60s"/>
+         <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+         <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1493817755"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+       <cluster_property_set id="redis_replication">
+         <nvpair id="redis_replication-redis_REPL_INFO" name="redis_REPL_INFO" value="controller-0"/>
+diff --git a/pengine/test10/remote-recover-no-resources.xml b/pengine/test10/remote-recover-no-resources.xml
+index d2fa0df..36f424b 100644
+--- a/pengine/test10/remote-recover-no-resources.xml
++++ b/pengine/test10/remote-recover-no-resources.xml
+@@ -10,6 +10,7 @@
+         <nvpair id="cib-bootstrap-options-cluster-recheck-interval" name="cluster-recheck-interval" value="60s"/>
+         <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+         <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1493817755"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+       <cluster_property_set id="redis_replication">
+         <nvpair id="redis_replication-redis_REPL_INFO" name="redis_REPL_INFO" value="controller-0"/>
+diff --git a/pengine/test10/remote-recover-unknown.xml b/pengine/test10/remote-recover-unknown.xml
+index 3992b03..dd7807c 100644
+--- a/pengine/test10/remote-recover-unknown.xml
++++ b/pengine/test10/remote-recover-unknown.xml
+@@ -10,6 +10,7 @@
+         <nvpair id="cib-bootstrap-options-cluster-recheck-interval" name="cluster-recheck-interval" value="60s"/>
+         <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+         <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1493817755"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+       <cluster_property_set id="redis_replication">
+         <nvpair id="redis_replication-redis_REPL_INFO" name="redis_REPL_INFO" value="controller-0"/>
+diff --git a/pengine/test10/stonith-4.xml b/pengine/test10/stonith-4.xml
+index f9a4d44..4f185de 100644
+--- a/pengine/test10/stonith-4.xml
++++ b/pengine/test10/stonith-4.xml
+@@ -4,6 +4,7 @@
+       <cluster_property_set id="cib-bootstrap-options">
+         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.8-0.772.26fe3e5.git.fc17-26fe3e52d259e4726699300d27991fc1a80c556b"/>
+         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+     </crm_config>
+     <nodes>
+diff --git a/pengine/test10/suicide-needed-inquorate.xml b/pengine/test10/suicide-needed-inquorate.xml
+index 160af00..6add7fd 100644
+--- a/pengine/test10/suicide-needed-inquorate.xml
++++ b/pengine/test10/suicide-needed-inquorate.xml
+@@ -7,6 +7,7 @@
+         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.17-1"/>
+         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="suicide"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+     </crm_config>
+     <nodes>
+diff --git a/pengine/test10/ticket-clone-21.xml b/pengine/test10/ticket-clone-21.xml
+index c29d89f..4a9fce9 100644
+--- a/pengine/test10/ticket-clone-21.xml
++++ b/pengine/test10/ticket-clone-21.xml
+@@ -4,6 +4,7 @@
+       <cluster_property_set id="cib-bootstrap-options">
+         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+     </crm_config>
+     <nodes>
+diff --git a/pengine/test10/ticket-clone-9.xml b/pengine/test10/ticket-clone-9.xml
+index 7b2a62f..c6139d3 100644
+--- a/pengine/test10/ticket-clone-9.xml
++++ b/pengine/test10/ticket-clone-9.xml
+@@ -4,6 +4,7 @@
+       <cluster_property_set id="cib-bootstrap-options">
+         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
++        <nvpair id="options-concurrent-fencing" name="concurrent-fencing" value="false"/>
+       </cluster_property_set>
+     </crm_config>
+     <nodes>
+-- 
+1.8.3.1
+
+
+From 7c3bc762a9cede20a0193f64ca1a36f507aeeeb3 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 20 Apr 2018 13:23:10 -0500
+Subject: [PATCH 32/96] Build: libcrmcommon: configure option to specify GnuTLS
+ cipher priorities
+
+Default to current behavior, i.e. "NORMAL". Spec file overrides with "@SYSTEM"
+on distros that have it.
+
+Pacemaker does not use the option value as-is; it adds "+ANON-DH" for CIB remote
+commands and "+DHE-PSK:+PSK" for Pacemaker Remote connections. In the longer
+term, we could consider moving to certificate-based connections in both cases,
+but that has backward compatibility issues as well as additional administrative
+burden.
+---
+ configure.ac        | 9 +++++++++
+ lib/common/remote.c | 4 ++--
+ pacemaker.spec.in   | 4 ++++
+ 3 files changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index ce02777..a7084e2 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -290,6 +290,12 @@ AC_ARG_WITH(cibsecrets,
+     [ SUPPORT_CIBSECRETS=no ],
+ )
+ 
++AC_ARG_WITH(gnutls-priorities,
++    [  --with-gnutls-priorities  GnuTLS cipher priorities @<:@NORMAL@:>@ ],
++    [ PCMK_GNUTLS_PRIORITIES="$withval" ],
++    [ PCMK_GNUTLS_PRIORITIES="NORMAL" ],
++)
++
+ CSPREFIX=""
+ AC_ARG_WITH(ais-prefix,
+     [  --with-ais-prefix=DIR  Prefix used when Corosync was installed [$prefix]],
+@@ -453,6 +459,9 @@ if test x"${BUG_URL}" = x""; then
+ fi
+ AC_SUBST(BUG_URL)
+ 
++AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"],
++		   [GnuTLS cipher priorities])
++
+ for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
+     sharedstatedir localstatedir libdir includedir oldincludedir infodir \
+     mandir INITDIR docdir CONFIGDIR
+diff --git a/lib/common/remote.c b/lib/common/remote.c
+index 12d25fa..1e4f8d8 100644
+--- a/lib/common/remote.c
++++ b/lib/common/remote.c
+@@ -244,9 +244,9 @@ pcmk__new_tls_session(int csock, unsigned int conn_type,
+ #  ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT
+     if (cred_type == GNUTLS_CRD_ANON) {
+         // http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication
+-        prio = "NORMAL:+ANON-DH";
++        prio = PCMK_GNUTLS_PRIORITIES ":+ANON-DH";
+     } else {
+-        prio = "NORMAL:+DHE-PSK:+PSK";
++        prio = PCMK_GNUTLS_PRIORITIES ":+DHE-PSK:+PSK";
+     }
+ #  endif
+ 
+diff --git a/pacemaker.spec.in b/pacemaker.spec.in
+index 3a26572..fd0e3c8 100644
+--- a/pacemaker.spec.in
++++ b/pacemaker.spec.in
+@@ -80,6 +80,9 @@
+   } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \
+   } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?))
+ 
++%if 0%{?fedora} > 20 || 0%{?rhel} > 7
++%global gnutls_priorities @SYSTEM
++%endif
+ 
+ # Definitions for backward compatibility with older RPM versions
+ 
+@@ -403,6 +406,7 @@ export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}"
+         --without-heartbeat                        \
+         %{!?with_doc:        --with-brand=}        \
+         %{!?with_hardening:  --disable-hardening}  \
++        %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \
+         --with-initdir=%{_initrddir}               \
+         --localstatedir=%{_var}                    \
+         --with-version=%{version}-%{release}
+-- 
+1.8.3.1
+
+
+From 99a83b172544102ec32585514e5808585f2ce31c Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 8 Jul 2019 17:39:12 -0500
+Subject: [PATCH 33/96] Feature: remote: allow run-time configurable TLS
+ priorities
+
+This also restores compilability with GnuTLS <2.1.7 (not that anyone is still
+using that ...), unintentionally broken in 5bded36 (1.1.20).
+---
+ lib/common/remote.c     | 34 +++++++++++++++++++++++++++-------
+ mcp/pacemaker.sysconfig |  9 +++++++++
+ 2 files changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/lib/common/remote.c b/lib/common/remote.c
+index 1e4f8d8..ccd0342 100644
+--- a/lib/common/remote.c
++++ b/lib/common/remote.c
+@@ -237,17 +237,25 @@ pcmk__new_tls_session(int csock, unsigned int conn_type,
+ {
+     int rc = GNUTLS_E_SUCCESS;
+ #  ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT
+-    const char *prio = NULL;
++    const char *prio_base = NULL;
++    char *prio = NULL;
+ #  endif
+     gnutls_session_t *session = NULL;
+ 
+ #  ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT
+-    if (cred_type == GNUTLS_CRD_ANON) {
+-        // http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication
+-        prio = PCMK_GNUTLS_PRIORITIES ":+ANON-DH";
+-    } else {
+-        prio = PCMK_GNUTLS_PRIORITIES ":+DHE-PSK:+PSK";
++    /* Determine list of acceptable ciphers, etc. Pacemaker always adds the
++     * values required for its functionality.
++     *
++     * For an example of anonymous authentication, see:
++     * http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication
++     */
++
++    prio_base = getenv("PCMK_tls_priorities");
++    if (prio_base == NULL) {
++        prio_base = PCMK_GNUTLS_PRIORITIES;
+     }
++    prio = crm_strdup_printf("%s:%s", prio_base,
++                             (cred_type == GNUTLS_CRD_ANON)? "+ANON-DH" : "+DHE-PSK:+PSK");
+ #  endif
+ 
+     session = gnutls_malloc(sizeof(gnutls_session_t));
+@@ -285,6 +293,9 @@ pcmk__new_tls_session(int csock, unsigned int conn_type,
+     if (rc != GNUTLS_E_SUCCESS) {
+         goto error;
+     }
++#  ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT
++    free(prio);
++#  endif
+     return session;
+ 
+ error:
+@@ -292,7 +303,16 @@ error:
+             CRM_XS " rc=%d priority='%s'",
+             (cred_type == GNUTLS_CRD_ANON)? "anonymous" : "PSK",
+             (conn_type == GNUTLS_SERVER)? "server" : "client",
+-            gnutls_strerror(rc), rc, prio);
++            gnutls_strerror(rc), rc,
++#  ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT
++            prio
++#  else
++            "default"
++#  endif
++            );
++#  ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT
++    free(prio);
++#  endif
+     if (session != NULL) {
+         gnutls_free(session);
+     }
+diff --git a/mcp/pacemaker.sysconfig b/mcp/pacemaker.sysconfig
+index a983011..0da401e 100644
+--- a/mcp/pacemaker.sysconfig
++++ b/mcp/pacemaker.sysconfig
+@@ -101,6 +101,15 @@
+ # value must be the same on all nodes. The default is "3121".
+ # PCMK_remote_port=3121
+ 
++# Use these GnuTLS cipher priorities for TLS connections. See:
++#
++#   https://gnutls.org/manual/html_node/Priority-Strings.html
++#
++# Pacemaker will append ":+ANON-DH" for remote CIB access (when enabled) and
++# ":+DHE-PSK:+PSK" for Pacemaker Remote connections, as they are required for
++# the respective functionality.
++# PCMK_tls_priorities="NORMAL"
++
+ # Set bounds on the bit length of the prime number generated for Diffie-Hellman
+ # parameters needed by TLS connections. The default is not to set any bounds.
+ #
+-- 
+1.8.3.1
+
+
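+Taken together, the two patches above mean the effective GnuTLS priority
+string is: the PCMK_tls_priorities environment variable if set, otherwise
+the compile-time --with-gnutls-priorities default, with the suffix required
+for the connection type always appended.  The self-contained sketch below
+mirrors that selection with plain snprintf(); the real code uses
+crm_strdup_printf() and the PCMK_GNUTLS_PRIORITIES macro, and the function
+name here is illustrative.
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    #define COMPILE_TIME_DEFAULT "NORMAL"  /* stand-in for PCMK_GNUTLS_PRIORITIES */
+
+    /* anon != 0 selects ":+ANON-DH" (remote CIB access);
+     * anon == 0 selects ":+DHE-PSK:+PSK" (Pacemaker Remote connections) */
+    static void
+    effective_tls_priorities(char *buf, size_t len, int anon)
+    {
+        const char *base = getenv("PCMK_tls_priorities");
+
+        if (base == NULL) {
+            base = COMPILE_TIME_DEFAULT;
+        }
+        snprintf(buf, len, "%s:%s", base, anon ? "+ANON-DH" : "+DHE-PSK:+PSK");
+    }
+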
+From bb7f4be166e4a8d9e851377aeb3b69a2a6b429a4 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 5 Jul 2019 15:34:30 -0500
+Subject: [PATCH 34/96] Low: controller: reset expected reply when
+ disconnecting from scheduler
+
+---
+ crmd/control.c    |  7 ++++++-
+ crmd/crmd_utils.h |  2 +-
+ crmd/pengine.c    | 12 ++++++++----
+ 3 files changed, 15 insertions(+), 6 deletions(-)
+
+diff --git a/crmd/control.c b/crmd/control.c
+index 73a2b08..4b19114 100644
+--- a/crmd/control.c
++++ b/crmd/control.c
+@@ -297,6 +297,10 @@ crmd_exit(int rc)
+ 
+     if (pe_subsystem && pe_subsystem->client && pe_subsystem->client->ipcs) {
+         crm_trace("Disconnecting Policy Engine");
++
++        // If we aren't connected to the scheduler, we can't expect a reply
++        controld_expect_sched_reply(NULL);
++
+         qb_ipcs_disconnect(pe_subsystem->client->ipcs);
+     }
+ 
+@@ -339,6 +343,7 @@ crmd_exit(int rc)
+     clear_bit(fsa_input_register, R_MEMBERSHIP);
+     g_list_free(fsa_message_queue); fsa_message_queue = NULL;
+ 
++    free(pe_subsystem); pe_subsystem = NULL;
+     free(te_subsystem); te_subsystem = NULL;
+     free(cib_subsystem); cib_subsystem = NULL;
+ 
+@@ -374,7 +379,7 @@ crmd_exit(int rc)
+     crm_timer_stop(recheck_timer);
+ 
+     te_cleanup_stonith_history_sync(NULL, TRUE);
+-    controld_sched_cleanup();
++    controld_free_sched_timer();
+ 
+     free(transition_timer); transition_timer = NULL;
+     free(integration_timer); integration_timer = NULL;
+diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h
+index a704380..955d859 100644
+--- a/crmd/crmd_utils.h
++++ b/crmd/crmd_utils.h
+@@ -84,8 +84,8 @@ int crmd_fast_exit(int rc);
+ gboolean stop_subsystem(struct crm_subsystem_s *centry, gboolean force_quit);
+ gboolean start_subsystem(struct crm_subsystem_s *centry);
+ void controld_stop_sched_timer(void);
++void controld_free_sched_timer(void);
+ void controld_expect_sched_reply(xmlNode *msg);
+-void controld_sched_cleanup(void);
+ 
+ void fsa_dump_actions(long long action, const char *text);
+ void fsa_dump_inputs(int log_level, const char *text, long long input_register);
+diff --git a/crmd/pengine.c b/crmd/pengine.c
+index 1630e7b..3512952 100644
+--- a/crmd/pengine.c
++++ b/crmd/pengine.c
+@@ -97,6 +97,9 @@ pe_ipc_destroy(gpointer user_data)
+         crm_info("Connection to the Policy Engine released");
+     }
+ 
++    // If we aren't connected to the scheduler, we can't expect a reply
++    controld_expect_sched_reply(NULL);
++
+     clear_bit(fsa_input_register, pe_subsystem->flag_connected);
+     pe_subsystem->pid = -1;
+     pe_subsystem->source = NULL;
+@@ -137,6 +140,9 @@ do_pe_control(long long action,
+     };
+ 
+     if (action & stop_actions) {
++        // If we aren't connected to the scheduler, we can't expect a reply
++        controld_expect_sched_reply(NULL);
++
+         clear_bit(fsa_input_register, pe_subsystem->flag_required);
+ 
+         mainloop_del_ipc_client(pe_subsystem->source);
+@@ -244,17 +250,15 @@ controld_expect_sched_reply(xmlNode *msg)
+ 
+ /*!
+  * \internal
+- * \brief Clean up all memory used by controller scheduler handling
++ * \brief Free the scheduler reply timer
+  */
+ void
+-controld_sched_cleanup()
++controld_free_sched_timer()
+ {
+     if (controld_sched_timer != NULL) {
+         mainloop_timer_del(controld_sched_timer);
+         controld_sched_timer = NULL;
+     }
+-    free(pe_subsystem); pe_subsystem = NULL;
+-    controld_expect_sched_reply(NULL);
+ }
+ 
+ /*	 A_PE_INVOKE	*/
+-- 
+1.8.3.1
+
+
+From e2981df8681d7721d576eacd443fa3cc08c17a02 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 11 Jul 2019 13:49:11 -0500
+Subject: [PATCH 35/96] Test: CTS: update pattern for 1.1 backport
+
+---
+ cts/patterns.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cts/patterns.py b/cts/patterns.py
+index 87b44a9..00c26ff 100644
+--- a/cts/patterns.py
++++ b/cts/patterns.py
+@@ -376,7 +376,7 @@ class crm_cs_v0(BasePatterns):
+             "LRMD lost STONITH connection",
+             "Connection to stonith-ng.* closed",
+             "Fencing daemon connection failed",
+-            r"pacemaker-controld.*Fencer successfully connected",
++            r"crmd:.*Fencer successfully connected",
+         ]
+         self.components["stonith-ignore"] = [
+             r"pengine.*: Recover Fencing",
+-- 
+1.8.3.1
+
+
+From 7903d2a0ad53f4906248d26e13d5aaf7c4c824e6 Mon Sep 17 00:00:00 2001
+From: "Gao,Yan" <ygao@suse.com>
+Date: Mon, 8 Apr 2019 15:01:20 +0200
+Subject: [PATCH 36/96] Fix: scheduler: wait for probe actions to complete to
+ prevent unnecessary restart/re-promote of dependent resources
+
+This addresses the issue brought up in:
+https://github.com/ClusterLabs/pacemaker/commit/faf44d811e4f5598dae085c61fdef410c8d18882#commitcomment-22262090
+
+Given an ordering chain in a transition graph like:
+
+A.probe -> A.start -> [...] -> B.start
+
+, if B was already started, it would be scheduled to restart.
+
+Previously, B would be directly stopped, which could turn out to be
+unnecessary if the probe found A already started as well. Such an
+unnecessary restart could be very expensive for a heavy workload.
+
+With this commit, a new order will be created:
+
+A.probe -> B.stop
+
+This way, any potential restart of B will wait for A.probe to complete. If
+A turns out to be already started, the transition will abort and the
+restart of B won't need to be performed any more.
+
+Similarly for an ordering chain like:
+
+A.probe -> A.start -> [...] -> B.promote
+
+A new order will be created to prevent an unnecessary re-promotion:
+A.probe -> B.demote
+---
+ pengine/allocate.c | 204 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 202 insertions(+), 2 deletions(-)
+
+diff --git a/pengine/allocate.c b/pengine/allocate.c
+index c7c68f8..d600bbf 100644
+--- a/pengine/allocate.c
++++ b/pengine/allocate.c
+@@ -2294,9 +2294,8 @@ order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action)
+     return FALSE;
+ }
+ 
+-
+ static void
+-order_first_probes(pe_working_set_t * data_set)
++order_first_probes_imply_stops(pe_working_set_t * data_set)
+ {
+     GListPtr gIter = NULL;
+ 
+@@ -2424,6 +2423,207 @@ order_first_probes(pe_working_set_t * data_set)
+ }
+ 
+ static void
++order_first_probe_then_restart_repromote(pe_action_t * probe,
++                                         pe_action_t * after,
++                                         pe_working_set_t * data_set)
++{
++    GListPtr gIter = NULL;
++    bool interleave = FALSE;
++    pe_resource_t *compatible_rsc = NULL;
++
++    if (probe == NULL
++        || probe->rsc == NULL
++        || probe->rsc->variant != pe_native) {
++        return;
++    }
++
++    if (after == NULL
++        // Avoid running into any possible loop
++        || is_set(after->flags, pe_action_tracking)) {
++        return;
++    }
++
++    if (safe_str_neq(probe->task, RSC_STATUS)) {
++        return;
++    }
++
++    pe_set_action_bit(after, pe_action_tracking);
++
++    crm_trace("Processing based on %s %s -> %s %s",
++              probe->uuid,
++              probe->node ? probe->node->details->uname: "",
++              after->uuid,
++              after->node ? after->node->details->uname : "");
++
++    if (after->rsc
++        /* Better not build a dependency directly with a clone/group.
++         * We are going to proceed through the ordering chain and build
++         * dependencies with its children.
++         */
++        && after->rsc->variant == pe_native
++        && probe->rsc != after->rsc) {
++
++            GListPtr then_actions = NULL;
++            enum pe_ordering probe_order_type = pe_order_optional;
++
++            if (safe_str_eq(after->task, RSC_START)) {
++                char *key = generate_op_key(after->rsc->id, RSC_STOP, 0);
++
++                then_actions = find_actions(after->rsc->actions, key, NULL);
++                free(key);
++
++            } else if (safe_str_eq(after->task, RSC_PROMOTE)) {
++                char *key = generate_op_key(after->rsc->id, RSC_DEMOTE, 0);
++
++                then_actions = find_actions(after->rsc->actions, key, NULL);
++                free(key);
++            }
++
++            for (gIter = then_actions; gIter != NULL; gIter = gIter->next) {
++                pe_action_t *then = (pe_action_t *) gIter->data;
++
++                // Skip any pseudo action which for example is implied by fencing
++                if (is_set(then->flags, pe_action_pseudo)) {
++                    continue;
++                }
++
++                order_actions(probe, then, probe_order_type);
++            }
++            g_list_free(then_actions);
++    }
++
++    if (after->rsc
++        && after->rsc->variant > pe_group) {
++        const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
++                                                       XML_RSC_ATTR_INTERLEAVE);
++
++        interleave = crm_is_true(interleave_s);
++
++        if (interleave) {
++            /* For an interleaved clone, we should build a dependency only
++             * with the relevant clone child.
++             */
++            compatible_rsc = find_compatible_child(probe->rsc,
++                                                   after->rsc,
++                                                   RSC_ROLE_UNKNOWN,
++                                                   FALSE);
++        }
++    }
++
++    for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) {
++        action_wrapper_t *after_wrapper = (action_wrapper_t *) gIter->data;
++        /* pe_order_implies_then is the reason why a required A.start
++         * implies/enforces B.start to be required too, which is the cause of
++         * B.restart/re-promote.
++         *
++         * Not sure about pe_order_implies_then_on_node though. It's now only
++         * used for unfencing case, which tends to introduce transition
++         * loops...
++         */
++
++        if (is_not_set(after_wrapper->type, pe_order_implies_then)) {
++            /* The order type between a group/clone and its child such as
++             * B.start-> B_child.start is:
++             * pe_order_implies_first_printed | pe_order_runnable_left
++             *
++             * Proceed through the ordering chain and build dependencies with
++             * its children.
++             */
++            if (after->rsc == NULL
++                || after->rsc->variant < pe_group
++                || probe->rsc->parent == after->rsc
++                || after_wrapper->action->rsc == NULL
++                || after_wrapper->action->rsc->variant > pe_group
++                || after->rsc != after_wrapper->action->rsc->parent) {
++                continue;
++            }
++
++            /* Proceed to the children of a group or a non-interleaved clone.
++             * For an interleaved clone, proceed only to the relevant child.
++             */
++            if (after->rsc->variant > pe_group
++                && interleave == TRUE
++                && (compatible_rsc == NULL
++                    || compatible_rsc != after_wrapper->action->rsc)) {
++                continue;
++            }
++        }
++
++        crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)",
++                  after->uuid,
++                  after->node ? after->node->details->uname: "",
++                  after_wrapper->action->uuid,
++                  after_wrapper->action->node ? after_wrapper->action->node->details->uname : "",
++                  after_wrapper->type);
++
++        order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
++    }
++}
++
++static void clear_actions_tracking_flag(pe_working_set_t * data_set)
++{
++    GListPtr gIter = NULL;
++
++    for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
++        pe_action_t *action = (pe_action_t *) gIter->data;
++
++        if (is_set(action->flags, pe_action_tracking)) {
++            pe_clear_action_bit(action, pe_action_tracking);
++        }
++    }
++}
++
++static void
++order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set)
++{
++    GListPtr gIter = NULL;
++    GListPtr probes = NULL;
++    char *key = NULL;
++
++    for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
++        pe_resource_t * child = (pe_resource_t *) gIter->data;
++
++        order_first_rsc_probes(child, data_set);
++    }
++
++    if (rsc->variant != pe_native) {
++        return;
++    }
++
++    key = generate_op_key(rsc->id, RSC_STATUS, 0);
++    probes = find_actions(rsc->actions, key, NULL);
++    free(key);
++
++    for (gIter = probes; gIter != NULL; gIter= gIter->next) {
++        pe_action_t *probe = (pe_action_t *) gIter->data;
++        GListPtr aIter = NULL;
++
++        for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) {
++            action_wrapper_t *after_wrapper = (action_wrapper_t *) aIter->data;
++
++            order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
++            clear_actions_tracking_flag(data_set);
++        }
++    }
++
++    g_list_free(probes);
++}
++
++static void
++order_first_probes(pe_working_set_t * data_set)
++{
++    GListPtr gIter = NULL;
++
++    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
++        pe_resource_t *rsc = (pe_resource_t *) gIter->data;
++
++        order_first_rsc_probes(rsc, data_set);
++    }
++
++    order_first_probes_imply_stops(data_set);
++}
++
++static void
+ order_then_probes(pe_working_set_t * data_set)
+ {
+ #if 0
+-- 
+1.8.3.1
+
+
+From 3228b6b4df624db4b72d40bd366433f673a8780e Mon Sep 17 00:00:00 2001
+From: "Gao,Yan" <ygao@suse.com>
+Date: Tue, 30 Apr 2019 03:47:07 +0200
+Subject: [PATCH 37/96] Test: scheduler: wait for probe actions to complete to
+ prevent unnecessary restart/re-promote of dependent resources (update tests)
+
+---
+ .../test10/11-a-then-bm-b-move-a-clone-starting.dot |  1 +
+ .../test10/11-a-then-bm-b-move-a-clone-starting.exp |  6 +++++-
+ pengine/test10/bug-n-387749.dot                     |  7 +++++++
+ pengine/test10/bug-n-387749.exp                     | 21 +++++++++++++++++++++
+ pengine/test10/group5.dot                           |  3 +++
+ pengine/test10/group5.exp                           |  9 +++++++++
+ pengine/test10/group6.dot                           |  6 ++++++
+ pengine/test10/group6.exp                           | 18 ++++++++++++++++++
+ pengine/test10/group9.dot                           |  6 ++++++
+ pengine/test10/group9.exp                           | 18 ++++++++++++++++++
+ pengine/test10/order6.dot                           |  2 ++
+ pengine/test10/order6.exp                           | 12 ++++++++++--
+ pengine/test10/reload-becomes-restart.dot           |  1 +
+ pengine/test10/reload-becomes-restart.exp           |  3 +++
+ 14 files changed, 110 insertions(+), 3 deletions(-)
+
+diff --git a/pengine/test10/11-a-then-bm-b-move-a-clone-starting.dot b/pengine/test10/11-a-then-bm-b-move-a-clone-starting.dot
+index 4a89db6..4fd6a7d 100644
+--- a/pengine/test10/11-a-then-bm-b-move-a-clone-starting.dot
++++ b/pengine/test10/11-a-then-bm-b-move-a-clone-starting.dot
+@@ -12,6 +12,7 @@
+ "myclone-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
+ "myclone_monitor_0 f20node2" -> "myclone-clone_start_0" [ style = bold]
+ "myclone_monitor_0 f20node2" -> "myclone-clone_stopped_0" [ style = bold]
++"myclone_monitor_0 f20node2" -> "vm_stop_0 f20node1" [ style = bold]
+ "myclone_monitor_0 f20node2" [ style=bold color="green" fontcolor="black"]
+ "myclone_start_0 f20node2" -> "myclone-clone_running_0" [ style = bold]
+ "myclone_start_0 f20node2" [ style=bold color="green" fontcolor="black"]
+diff --git a/pengine/test10/11-a-then-bm-b-move-a-clone-starting.exp b/pengine/test10/11-a-then-bm-b-move-a-clone-starting.exp
+index 4eeb086..d3ce8b7 100644
+--- a/pengine/test10/11-a-then-bm-b-move-a-clone-starting.exp
++++ b/pengine/test10/11-a-then-bm-b-move-a-clone-starting.exp
+@@ -120,6 +120,10 @@
+         <attributes CRM_meta_on_node="f20node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+       </rsc_op>
+     </action_set>
+-    <inputs/>
++    <inputs>
++      <trigger>
++        <rsc_op id="1" operation="monitor" operation_key="myclone_monitor_0" internal_operation_key="myclone:0_monitor_0" on_node="f20node2" on_node_uuid="2"/>
++      </trigger>
++    </inputs>
+   </synapse>
+ </transition_graph>
+diff --git a/pengine/test10/bug-n-387749.dot b/pengine/test10/bug-n-387749.dot
+index 5095351..a820108 100644
+--- a/pengine/test10/bug-n-387749.dot
++++ b/pengine/test10/bug-n-387749.dot
+@@ -1,17 +1,23 @@
+ digraph "g" {
+ "export_home_ocfs2:0_monitor_0 power720-1" -> "export_home_ocfs2_clone_set_start_0" [ style = bold]
++"export_home_ocfs2:0_monitor_0 power720-1" -> "resource_ipaddr1_single_stop_0 power720-2" [ style = bold]
++"export_home_ocfs2:0_monitor_0 power720-1" -> "resource_nfsserver_single_stop_0 power720-2" [ style = bold]
+ "export_home_ocfs2:0_monitor_0 power720-1" [ style=bold color="green" fontcolor="black" ]
+ "export_home_ocfs2:0_post_notify_start_0 power720-1" -> "export_home_ocfs2_clone_set_confirmed-post_notify_running_0" [ style = bold]
+ "export_home_ocfs2:0_post_notify_start_0 power720-1" [ style=bold color="green" fontcolor="black" ]
+ "export_home_ocfs2:0_start_0 power720-1" -> "export_home_ocfs2_clone_set_running_0" [ style = bold]
+ "export_home_ocfs2:0_start_0 power720-1" [ style=bold color="green" fontcolor="black" ]
+ "export_home_ocfs2:1_monitor_0 power720-1" -> "export_home_ocfs2_clone_set_start_0" [ style = bold]
++"export_home_ocfs2:1_monitor_0 power720-1" -> "resource_ipaddr1_single_stop_0 power720-2" [ style = bold]
++"export_home_ocfs2:1_monitor_0 power720-1" -> "resource_nfsserver_single_stop_0 power720-2" [ style = bold]
+ "export_home_ocfs2:1_monitor_0 power720-1" [ style=bold color="green" fontcolor="black" ]
+ "export_home_ocfs2:1_post_notify_start_0 power720-2" -> "export_home_ocfs2_clone_set_confirmed-post_notify_running_0" [ style = bold]
+ "export_home_ocfs2:1_post_notify_start_0 power720-2" [ style=bold color="green" fontcolor="black" ]
+ "export_home_ocfs2:1_pre_notify_start_0 power720-2" -> "export_home_ocfs2_clone_set_confirmed-pre_notify_start_0" [ style = bold]
+ "export_home_ocfs2:1_pre_notify_start_0 power720-2" [ style=bold color="green" fontcolor="black" ]
+ "export_home_ocfs2:2_monitor_0 power720-1" -> "export_home_ocfs2_clone_set_start_0" [ style = bold]
++"export_home_ocfs2:2_monitor_0 power720-1" -> "resource_ipaddr1_single_stop_0 power720-2" [ style = bold]
++"export_home_ocfs2:2_monitor_0 power720-1" -> "resource_nfsserver_single_stop_0 power720-2" [ style = bold]
+ "export_home_ocfs2:2_monitor_0 power720-1" [ style=bold color="green" fontcolor="black" ]
+ "export_home_ocfs2_clone_set_confirmed-post_notify_running_0" -> "group_nfs_start_0" [ style = bold]
+ "export_home_ocfs2_clone_set_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ]
+@@ -43,6 +49,7 @@ digraph "g" {
+ "group_nfs_stopped_0" [ style=bold color="green" fontcolor="orange" ]
+ "resource_ipaddr1_single_monitor_0 power720-1" -> "group_nfs_stopped_0" [ style = bold]
+ "resource_ipaddr1_single_monitor_0 power720-1" -> "resource_ipaddr1_single_start_0 power720-1" [ style = bold]
++"resource_ipaddr1_single_monitor_0 power720-1" -> "resource_nfsserver_single_stop_0 power720-2" [ style = bold]
+ "resource_ipaddr1_single_monitor_0 power720-1" [ style=bold color="green" fontcolor="black" ]
+ "resource_ipaddr1_single_monitor_5000 power720-1" [ style=bold color="green" fontcolor="black" ]
+ "resource_ipaddr1_single_start_0 power720-1" -> "group_nfs_running_0" [ style = bold]
+diff --git a/pengine/test10/bug-n-387749.exp b/pengine/test10/bug-n-387749.exp
+index d6fe8e4..5b646bc 100644
+--- a/pengine/test10/bug-n-387749.exp
++++ b/pengine/test10/bug-n-387749.exp
+@@ -276,6 +276,15 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="3" operation="monitor" operation_key="export_home_ocfs2:0_monitor_0" on_node="power720-1" on_node_uuid="ac446085-4c9d-4d4a-a94d-5e63b6e421e3"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="4" operation="monitor" operation_key="export_home_ocfs2:1_monitor_0" on_node="power720-1" on_node_uuid="ac446085-4c9d-4d4a-a94d-5e63b6e421e3"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="5" operation="monitor" operation_key="export_home_ocfs2:2_monitor_0" on_node="power720-1" on_node_uuid="ac446085-4c9d-4d4a-a94d-5e63b6e421e3"/>
++      </trigger>
++      <trigger>
+         <rsc_op id="7" operation="monitor" operation_key="resource_nfsserver_single_monitor_0" on_node="power720-1" on_node_uuid="ac446085-4c9d-4d4a-a94d-5e63b6e421e3"/>
+       </trigger>
+       <trigger>
+@@ -339,6 +348,18 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="3" operation="monitor" operation_key="export_home_ocfs2:0_monitor_0" on_node="power720-1" on_node_uuid="ac446085-4c9d-4d4a-a94d-5e63b6e421e3"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="4" operation="monitor" operation_key="export_home_ocfs2:1_monitor_0" on_node="power720-1" on_node_uuid="ac446085-4c9d-4d4a-a94d-5e63b6e421e3"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="5" operation="monitor" operation_key="export_home_ocfs2:2_monitor_0" on_node="power720-1" on_node_uuid="ac446085-4c9d-4d4a-a94d-5e63b6e421e3"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="6" operation="monitor" operation_key="resource_ipaddr1_single_monitor_0" on_node="power720-1" on_node_uuid="ac446085-4c9d-4d4a-a94d-5e63b6e421e3"/>
++      </trigger>
++      <trigger>
+         <pseudo_event id="31" operation="stop" operation_key="group_nfs_stop_0"/>
+        </trigger>
+      </inputs>
+diff --git a/pengine/test10/group5.dot b/pengine/test10/group5.dot
+index 3fe0193..4776b1e 100644
+--- a/pengine/test10/group5.dot
++++ b/pengine/test10/group5.dot
+@@ -1,5 +1,7 @@
+  digraph "g" {
+ "child_rsc1_monitor_0 node2" -> "child_rsc1_start_0 node2" [ style = bold]
++"child_rsc1_monitor_0 node2" -> "child_rsc2_stop_0 node1" [ style = bold]
++"child_rsc1_monitor_0 node2" -> "child_rsc3_stop_0 node1" [ style = bold]
+ "child_rsc1_monitor_0 node2" -> "rsc2_stopped_0" [ style = bold]
+ "child_rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc1_start_0 node2" -> "child_rsc2_start_0 node2" [ style = bold]
+@@ -10,6 +12,7 @@
+ "child_rsc1_stop_0 node1" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc2_monitor_0 node2" -> "child_rsc1_stop_0 node1" [ style = bold]
+ "child_rsc2_monitor_0 node2" -> "child_rsc2_start_0 node2" [ style = bold]
++"child_rsc2_monitor_0 node2" -> "child_rsc3_stop_0 node1" [ style = bold]
+ "child_rsc2_monitor_0 node2" -> "rsc2_stopped_0" [ style = bold]
+ "child_rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc2_start_0 node2" -> "child_rsc3_start_0 node2" [ style = bold]
+diff --git a/pengine/test10/group5.exp b/pengine/test10/group5.exp
+index 4ea2b08..0f55341 100644
+--- a/pengine/test10/group5.exp
++++ b/pengine/test10/group5.exp
+@@ -196,6 +196,9 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="2" operation="monitor" operation_key="child_rsc1_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
+         <rsc_op id="4" operation="monitor" operation_key="child_rsc3_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
+       </trigger>
+       <trigger>
+@@ -246,6 +249,12 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="2" operation="monitor" operation_key="child_rsc1_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="3" operation="monitor" operation_key="child_rsc2_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
+         <pseudo_event id="16" operation="stop" operation_key="rsc2_stop_0"/>
+        </trigger>
+      </inputs>
+diff --git a/pengine/test10/group6.dot b/pengine/test10/group6.dot
+index a563e05..536f56b 100644
+--- a/pengine/test10/group6.dot
++++ b/pengine/test10/group6.dot
+@@ -1,5 +1,7 @@
+  digraph "g" {
+ "child_rsc1_monitor_0 node2" -> "child_rsc1_start_0 node2" [ style = bold]
++"child_rsc1_monitor_0 node2" -> "child_rsc2_stop_0 node1" [ style = bold]
++"child_rsc1_monitor_0 node2" -> "child_rsc3_stop_0 node1" [ style = bold]
+ "child_rsc1_monitor_0 node2" -> "rsc1_stopped_0" [ style = bold]
+ "child_rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc1_start_0 node2" -> "child_rsc2_start_0 node2" [ style = bold]
+@@ -10,6 +12,7 @@
+ "child_rsc1_stop_0 node1" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc2_monitor_0 node2" -> "child_rsc1_stop_0 node1" [ style = bold]
+ "child_rsc2_monitor_0 node2" -> "child_rsc2_start_0 node2" [ style = bold]
++"child_rsc2_monitor_0 node2" -> "child_rsc3_stop_0 node1" [ style = bold]
+ "child_rsc2_monitor_0 node2" -> "rsc1_stopped_0" [ style = bold]
+ "child_rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc2_start_0 node2" -> "child_rsc3_start_0 node2" [ style = bold]
+@@ -30,6 +33,8 @@
+ "child_rsc3_stop_0 node1" -> "rsc1_stopped_0" [ style = bold]
+ "child_rsc3_stop_0 node1" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc4_monitor_0 node2" -> "child_rsc4_start_0 node2" [ style = bold]
++"child_rsc4_monitor_0 node2" -> "child_rsc5_stop_0 node1" [ style = bold]
++"child_rsc4_monitor_0 node2" -> "child_rsc6_stop_0 node1" [ style = bold]
+ "child_rsc4_monitor_0 node2" -> "rsc2_stopped_0" [ style = bold]
+ "child_rsc4_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc4_start_0 node2" -> "child_rsc5_start_0 node2" [ style = bold]
+@@ -40,6 +45,7 @@
+ "child_rsc4_stop_0 node1" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc5_monitor_0 node2" -> "child_rsc4_stop_0 node1" [ style = bold]
+ "child_rsc5_monitor_0 node2" -> "child_rsc5_start_0 node2" [ style = bold]
++"child_rsc5_monitor_0 node2" -> "child_rsc6_stop_0 node1" [ style = bold]
+ "child_rsc5_monitor_0 node2" -> "rsc2_stopped_0" [ style = bold]
+ "child_rsc5_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "child_rsc5_start_0 node2" -> "child_rsc6_start_0 node2" [ style = bold]
+diff --git a/pengine/test10/group6.exp b/pengine/test10/group6.exp
+index cddd6f4..097d23d 100644
+--- a/pengine/test10/group6.exp
++++ b/pengine/test10/group6.exp
+@@ -152,6 +152,9 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="1" operation="monitor" operation_key="child_rsc1_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
+         <rsc_op id="3" operation="monitor" operation_key="child_rsc3_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
+       </trigger>
+       <trigger>
+@@ -202,6 +205,12 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="1" operation="monitor" operation_key="child_rsc1_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="2" operation="monitor" operation_key="child_rsc2_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
+         <pseudo_event id="15" operation="stop" operation_key="rsc1_stop_0"/>
+        </trigger>
+      </inputs>
+@@ -367,6 +376,9 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="4" operation="monitor" operation_key="child_rsc4_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
+         <rsc_op id="6" operation="monitor" operation_key="child_rsc6_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
+       </trigger>
+       <trigger>
+@@ -417,6 +429,12 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="4" operation="monitor" operation_key="child_rsc4_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="5" operation="monitor" operation_key="child_rsc5_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++      <trigger>
+         <pseudo_event id="25" operation="stop" operation_key="rsc2_stop_0"/>
+        </trigger>
+      </inputs>
+diff --git a/pengine/test10/group9.dot b/pengine/test10/group9.dot
+index 610fe93..5a93a31 100644
+--- a/pengine/test10/group9.dot
++++ b/pengine/test10/group9.dot
+@@ -26,9 +26,12 @@
+ "rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "rsc3_monitor_0 node2" -> "foo_stopped_0" [ style = bold]
++"rsc3_monitor_0 node2" -> "rsc4_stop_0 node1" [ style = bold]
++"rsc3_monitor_0 node2" -> "rsc5_stop_0 node1" [ style = bold]
+ "rsc3_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "rsc4_monitor_0 node2" -> "foo_stopped_0" [ style = bold]
+ "rsc4_monitor_0 node2" -> "rsc4_start_0 node1" [ style = bold]
++"rsc4_monitor_0 node2" -> "rsc5_stop_0 node1" [ style = bold]
+ "rsc4_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "rsc4_start_0 node1" -> "foo_running_0" [ style = bold]
+ "rsc4_start_0 node1" -> "rsc5_start_0 node1" [ style = bold]
+@@ -48,6 +51,8 @@
+ "rsc5_stop_0 node1" [ style=bold color="green" fontcolor="black" ]
+ "rsc6_monitor_0 node2" -> "bar_stopped_0" [ style = bold]
+ "rsc6_monitor_0 node2" -> "rsc6_start_0 node2" [ style = bold]
++"rsc6_monitor_0 node2" -> "rsc7_stop_0 node1" [ style = bold]
++"rsc6_monitor_0 node2" -> "rsc8_stop_0 node1" [ style = bold]
+ "rsc6_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "rsc6_start_0 node2" -> "bar_running_0" [ style = bold]
+ "rsc6_start_0 node2" -> "rsc7_start_0 node2" [ style = bold]
+@@ -58,6 +63,7 @@
+ "rsc7_monitor_0 node2" -> "bar_stopped_0" [ style = bold]
+ "rsc7_monitor_0 node2" -> "rsc6_stop_0 node1" [ style = bold]
+ "rsc7_monitor_0 node2" -> "rsc7_start_0 node2" [ style = bold]
++"rsc7_monitor_0 node2" -> "rsc8_stop_0 node1" [ style = bold]
+ "rsc7_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "rsc7_start_0 node2" -> "bar_running_0" [ style = bold]
+ "rsc7_start_0 node2" -> "rsc8_start_0 node2" [ style = bold]
+diff --git a/pengine/test10/group9.exp b/pengine/test10/group9.exp
+index f05c2c2..ac82825 100644
+--- a/pengine/test10/group9.exp
++++ b/pengine/test10/group9.exp
+@@ -128,6 +128,9 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="5" operation="monitor" operation_key="rsc3_monitor_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
+         <rsc_op id="7" operation="monitor" operation_key="rsc5_monitor_0" on_node="node2" on_node_uuid="node2"/>
+       </trigger>
+       <trigger>
+@@ -169,6 +172,12 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="5" operation="monitor" operation_key="rsc3_monitor_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="6" operation="monitor" operation_key="rsc4_monitor_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
+         <pseudo_event id="22" operation="stop" operation_key="foo_stop_0"/>
+        </trigger>
+      </inputs>
+@@ -340,6 +349,9 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="8" operation="monitor" operation_key="rsc6_monitor_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
+         <rsc_op id="10" operation="monitor" operation_key="rsc8_monitor_0" on_node="node2" on_node_uuid="node2"/>
+       </trigger>
+       <trigger>
+@@ -381,6 +393,12 @@
+      </action_set>
+      <inputs>
+        <trigger>
++        <rsc_op id="8" operation="monitor" operation_key="rsc6_monitor_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="9" operation="monitor" operation_key="rsc7_monitor_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
+         <pseudo_event id="31" operation="stop" operation_key="bar_stop_0"/>
+        </trigger>
+      </inputs>
+diff --git a/pengine/test10/order6.dot b/pengine/test10/order6.dot
+index 74f1c5b..0dfd73f 100644
+--- a/pengine/test10/order6.dot
++++ b/pengine/test10/order6.dot
+@@ -1,4 +1,5 @@
+  digraph "g" {
++"rsc1_monitor_0 node2" -> "rsc2_stop_0 node1" [ style = bold]
+ "rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "rsc2_monitor_0 node2" -> "rsc2_start_0 node2" [ style = bold]
+ "rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ]
+@@ -11,6 +12,7 @@
+ "rsc4_start_0 node2" [ style=bold color="green" fontcolor="black" ]
+ "rsc4_stop_0 node1" -> "rsc4_start_0 node2" [ style = bold]
+ "rsc4_stop_0 node1" [ style=bold color="green" fontcolor="black" ]
++"rsc5_monitor_0 node1" -> "rsc6_stop_0 node2" [ style = bold]
+ "rsc5_monitor_0 node1" [ style=bold color="green" fontcolor="black" ]
+ "rsc6_monitor_0 node1" -> "rsc6_start_0 node1" [ style = bold]
+ "rsc6_monitor_0 node1" [ style=bold color="green" fontcolor="black" ]
+diff --git a/pengine/test10/order6.exp b/pengine/test10/order6.exp
+index 47dc227..c3d74e6 100644
+--- a/pengine/test10/order6.exp
++++ b/pengine/test10/order6.exp
+@@ -31,7 +31,11 @@
+         <attributes CRM_meta_on_node="node1" CRM_meta_on_node_uuid="uuid1" CRM_meta_timeout="20000" />
+        </rsc_op>
+      </action_set>
+-    <inputs/>
++    <inputs>
++      <trigger>
++        <rsc_op id="5" operation="monitor" operation_key="rsc1_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
++      </trigger>
++    </inputs>
+    </synapse>
+    <synapse id="3">
+      <action_set>
+@@ -117,7 +121,11 @@
+         <attributes CRM_meta_on_node="node2" CRM_meta_on_node_uuid="uuid2" CRM_meta_timeout="20000" />
+        </rsc_op>
+      </action_set>
+-    <inputs/>
++    <inputs>
++      <trigger>
++        <rsc_op id="1" operation="monitor" operation_key="rsc5_monitor_0" on_node="node1" on_node_uuid="uuid1"/>
++      </trigger>
++    </inputs>
+    </synapse>
+   <synapse id="11">
+      <action_set>
+diff --git a/pengine/test10/reload-becomes-restart.dot b/pengine/test10/reload-becomes-restart.dot
+index a6616f9..36f8372 100644
+--- a/pengine/test10/reload-becomes-restart.dot
++++ b/pengine/test10/reload-becomes-restart.dot
+@@ -30,6 +30,7 @@ digraph "g" {
+ "rsc1:0_start_0 node2" -> "rsc2:1_start_0 node2" [ style = bold]
+ "rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black"]
+ "rsc1:1_monitor_0 node1" -> "cl-rsc1_start_0" [ style = bold]
++"rsc1:1_monitor_0 node1" -> "rsc2_stop_0 node1" [ style = bold]
+ "rsc1:1_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+ "rsc1:1_monitor_120000 node1" [ style=bold color="green" fontcolor="black"]
+ "rsc1:1_start_0 node1" -> "cl-rsc1_running_0" [ style = bold]
+diff --git a/pengine/test10/reload-becomes-restart.exp b/pengine/test10/reload-becomes-restart.exp
+index c3e3721..224b8d2 100644
+--- a/pengine/test10/reload-becomes-restart.exp
++++ b/pengine/test10/reload-becomes-restart.exp
+@@ -177,6 +177,9 @@
+     </action_set>
+     <inputs>
+       <trigger>
++        <rsc_op id="4" operation="monitor" operation_key="rsc1:1_monitor_0" on_node="node1" on_node_uuid="1"/>
++      </trigger>
++      <trigger>
+         <pseudo_event id="24" operation="stop" operation_key="cl-rsc2_stop_0"/>
+       </trigger>
+     </inputs>
+-- 
+1.8.3.1
+
+
+From 95791c21987c82dd5c821569a7f029e876f9bed1 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 12 Jul 2019 11:08:23 -0500
+Subject: [PATCH 38/96] Test: CTS: update patterns for stonith probe change
+
+Since 343ae2a, ComponentFail tests that result in the fencer respawning (i.e.
+when the target is the fencer or the CIB) will return additional (expected)
+errors due to stonith probes (correctly) being considered failed.
+---
+ cts/CTStests.py |  8 ++++++--
+ cts/patterns.py | 26 +++++++++++++++++++++++---
+ 2 files changed, 29 insertions(+), 5 deletions(-)
+
+diff --git a/cts/CTStests.py b/cts/CTStests.py
+index b6cb31d..e30fe86 100644
+--- a/cts/CTStests.py
++++ b/cts/CTStests.py
+@@ -1422,14 +1422,18 @@ class ComponentFail(CTSTest):
+                 self.okerrpatterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name))
+                 self.okerrpatterns.append(self.templates["Pat:ChildExit"])
+ 
+-        if chosen.name == "stonith":
+-            # Ignore actions for STONITH resources
++        # @TODO this should be a flag in the Component
++        if chosen.name in [ "corosync", "cib", "stonith" ]:
++            # Ignore actions for fence devices if fencer will respawn
++            # (their registration will be lost, and probes will fail)
+             (rc, lines) = self.rsh(node, "crm_resource -c", None)
+             for line in lines:
+                 if re.search("^Resource", line):
+                     r = AuditResource(self.CM, line)
+                     if r.rclass == "stonith":
+                         self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id)
++                        self.okerrpatterns.append(self.templates["Pat:Fencing_active"] % r.id)
++                        self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id)
+ 
+         # supply a copy so self.patterns doesn't end up empty
+         tmpPats = []
+diff --git a/cts/patterns.py b/cts/patterns.py
+index 00c26ff..0f9982e 100644
+--- a/cts/patterns.py
++++ b/cts/patterns.py
+@@ -59,9 +59,11 @@ class BasePatterns:
+             "Pat:They_dead"     : "node %s.*: is dead",
+             "Pat:TransitionComplete" : "Transition status: Complete: complete",
+ 
+-            "Pat:Fencing_start" : "(Initiating remote operation|Requesting peer fencing ).* (for|of) %s",
+-            "Pat:Fencing_ok"    : r"stonith.*:\s*Operation .* of %s by .* for .*@.*: OK",
+-            "Pat:Fencing_recover"    : r"pengine.*: Recover %s",
++            "Pat:Fencing_start"   : r"(Initiating remote operation|Requesting peer fencing ).* (for|of) %s",
++            "Pat:Fencing_ok"      : r"stonith.*:\s*Operation .* of %s by .* for .*@.*: OK",
++            "Pat:Fencing_recover" : r"pengine.*: Recover %s",
++            "Pat:Fencing_active"  : r"pengine.*: Resource %s is active on .* nodes",
++            "Pat:Fencing_probe"   : r"crmd.*: Result of probe operation for %s on .*: Error",
+ 
+             "Pat:RscOpOK"       : r"crmd.*:\s+Result of %s operation for %s.*: (0 \()?ok",
+             "Pat:RscRemoteOpOK" : r"crmd.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
+@@ -299,6 +301,12 @@ class crm_cs_v0(BasePatterns):
+             r"error:.*STONITH connection failed",
+             r"error: Connection to stonith-ng.* (failed|closed)",
+             r"crit: Fencing daemon connection failed",
++            # This is overbroad, but we don't have a way to say that only
++            # certain transition errors are acceptable (if the fencer respawns,
++            # fence devices may appear multiply active). We have to rely on
++            # other causes of a transition error logging their own error
++            # message, which is the usual practice.
++            r"pengine.* Calculated transition .*/pe-error",
+             ]
+ 
+         self.components["corosync"] = [
+@@ -316,6 +324,12 @@ class crm_cs_v0(BasePatterns):
+             "lrmd.*Connection to stonith-ng.* closed",
+             "lrmd.*LRMD lost STONITH connection",
+             "lrmd.*STONITH connection failed, finalizing .* pending operations",
++            # This is overbroad, but we don't have a way to say that only
++            # certain transition errors are acceptable (if the fencer respawns,
++            # fence devices may appear multiply active). We have to rely on
++            # other causes of a transition error logging their own error
++            # message, which is the usual practice.
++            r"pengine.* Calculated transition .*/pe-error",
+             ]
+ 
+         self.components["cib"] = [
+@@ -387,6 +401,12 @@ class crm_cs_v0(BasePatterns):
+             r"error:.*Sign-in failed: triggered a retry",
+             "STONITH connection failed, finalizing .* pending operations.",
+             r"crmd.*:\s+Result of .* operation for Fencing.*Error",
++            # This is overbroad, but we don't have a way to say that only
++            # certain transition errors are acceptable (if the fencer respawns,
++            # fence devices may appear multiply active). We have to rely on
++            # other causes of a transition error logging their own error
++            # message, which is the usual practice.
++            r"pengine.* Calculated transition .*/pe-error",
+         ]
+         self.components["stonith-ignore"].extend(self.components["common-ignore"])
+ 
+-- 
+1.8.3.1
+
+
+From 49593689fbe40f02376f8d540c4cbd554f9e8e9f Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 16 Jul 2019 14:03:52 -0500
+Subject: [PATCH 39/96] Test: CTS: update execd ComponentFail ignore patterns
+
+attrd connects to execd to send alerts, so connection failures from attrd are
+expected when killing execd.
+---
+ cts/patterns.py | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/cts/patterns.py b/cts/patterns.py
+index 0f9982e..1e9f0e5 100644
+--- a/cts/patterns.py
++++ b/cts/patterns.py
+@@ -358,7 +358,9 @@ class crm_cs_v0(BasePatterns):
+                     r"crmd.*: Input I_TERMINATE .*from do_recover",
+                     "crmd.*Could not recover from internal error",
+                     ]
+-        self.components["lrmd-ignore"] = []
++        self.components["lrmd-ignore"] = [
++            r"attrd.*Connection to lrmd (failed|closed)",
++        ]
+ 
+         self.components["crmd"] = [
+ #                    "WARN: determine_online_status: Node .* is unclean",
+-- 
+1.8.3.1
+
+
+From e9721446830e98f32be3082138c0a27ccbf6a452 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 23 Jul 2019 10:33:12 -0500
+Subject: [PATCH 40/96] Test: CTS: correct fencer connection pattern
+
+e2981df had a typo
+---
+ cts/patterns.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cts/patterns.py b/cts/patterns.py
+index 1e9f0e5..cf1860a 100644
+--- a/cts/patterns.py
++++ b/cts/patterns.py
+@@ -392,7 +392,7 @@ class crm_cs_v0(BasePatterns):
+             "LRMD lost STONITH connection",
+             "Connection to stonith-ng.* closed",
+             "Fencing daemon connection failed",
+-            r"crmd:.*Fencer successfully connected",
++            r"crmd.*: Fencer successfully connected",
+         ]
+         self.components["stonith-ignore"] = [
+             r"pengine.*: Recover Fencing",
+-- 
+1.8.3.1
+
+
+From 9fe68fdb8c1355e3436934eb5812af696de39dad Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 19 Jul 2019 18:49:21 -0500
+Subject: [PATCH 41/96] Fix: controller: panic local host if notified of own
+ fencing
+
+Previously, we attempted to reboot, but that would always fail because the
+controller doesn't run as root, so it would fall back to exiting CRM_EX_FATAL.
+
+Now, we exit CRM_EX_PANIC, to tell pacemakerd to panic the local host, which
+is a better method of self-fencing.
+
+clbz#5386
+---
+ crmd/te_utils.c | 41 +++++++++++------------------------------
+ 1 file changed, 11 insertions(+), 30 deletions(-)
+
+diff --git a/crmd/te_utils.c b/crmd/te_utils.c
+index 14570cd..6c7f9a0 100644
+--- a/crmd/te_utils.c
++++ b/crmd/te_utils.c
+@@ -235,11 +235,6 @@ tengine_stonith_connection_destroy(stonith_t * st, stonith_event_t * e)
+ 
+ char *te_client_id = NULL;
+ 
+-#ifdef HAVE_SYS_REBOOT_H
+-#  include <unistd.h>
+-#  include <sys/reboot.h>
+-#endif
+-
+ static void
+ tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event);
+ 
+@@ -271,33 +266,19 @@ tengine_stonith_notify(stonith_t * st, stonith_event_t * st_event)
+         return;
+ 
+     } else if (st_event->result == pcmk_ok && crm_str_eq(st_event->target, fsa_our_uname, TRUE)) {
+-        crm_crit("We were allegedly just fenced by %s for %s!",
+-                 st_event->executioner ? st_event->executioner : "<anyone>", st_event->origin); /* Dumps blackbox if enabled */
+-
+-        qb_log_fini(); /* Try to get the above log message to disk - somehow */
+-
+-        /* Get out ASAP and do not come back up.
++        /* We were notified of our own fencing. Most likely, either fencing was
++         * misconfigured, or fabric fencing that doesn't cut cluster
++         * communication is in use.
+          *
+-         * Triggering a reboot is also not the worst idea either since
+-         * the rest of the cluster thinks we're safely down
++         * Either way, shutting down the local host is a good idea, to require
++         * administrator intervention. Also, other nodes would otherwise likely
++         * set our status to lost because of the fencing callback and discard
++         * our subsequent election votes as "not part of our cluster".
+          */
+-
+-#ifdef RB_HALT_SYSTEM
+-        reboot(RB_HALT_SYSTEM);
+-#endif
+-
+-        /*
+-         * If reboot() fails or is not supported, coming back up will
+-         * probably lead to a situation where the other nodes set our
+-         * status to 'lost' because of the fencing callback and will
+-         * discard subsequent election votes with:
+-         *
+-         * Election 87 (current: 5171, owner: 103): Processed vote from east-03 (Peer is not part of our cluster)
+-         *
+-         * So just stay dead, something is seriously messed up anyway.
+-         *
+-         */
+-        exit(100); /* None of our wrappers since we already called qb_log_fini() */
++        crm_crit("We were allegedly just fenced by %s for %s!",
++                 st_event->executioner? st_event->executioner : "the cluster",
++                 st_event->origin); /* Dumps blackbox if enabled */
++        pcmk_panic(__FUNCTION__);
+         return;
+     }
+ 
+-- 
+1.8.3.1
+
+
+From b7214e4b4229f4678e09a81f6afac15ef1690406 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 23 Jul 2019 13:49:33 -0500
+Subject: [PATCH 42/96] Low: executor: stonith probes should fail only if
+ previously registered
+
+343ae2a4 made stonith probes return an error if the executor's fencer
+connection was lost. However this is broader than necessary; we only need
+errors to be returned for devices that were registered. Any that weren't
+registered can still be assumed to be not registered.
+
+There's a theoretical possibility that the fencer connection could somehow be
+severed and some other entity register a device between then and when the
+executor reconnects. But that is not a realistic scenario, whereas probing a
+fence device on a node where the fencer respawned sometime in the past is.
+---
+ lrmd/lrmd.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/lrmd/lrmd.c b/lrmd/lrmd.c
+index 5d33324..2e8ea41 100644
+--- a/lrmd/lrmd.c
++++ b/lrmd/lrmd.c
+@@ -1085,13 +1085,17 @@ stonith_connection_failed(void)
+     g_hash_table_iter_init(&iter, rsc_list);
+     while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) {
+         if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) {
+-            /* This will cause future probes to return PCMK_OCF_UNKNOWN_ERROR
+-             * until the resource is stopped or started successfully. This is
+-             * especially important if the controller also went away (possibly
+-             * due to a cluster layer restart) and won't receive our client
+-             * notification of any monitors finalized below.
++            /* If we registered this fence device, we don't know whether the
++             * fencer still has the registration or not. Cause future probes to
++             * return PCMK_OCF_UNKNOWN_ERROR until the resource is stopped or
++             * started successfully. This is especially important if the
++             * controller also went away (possibly due to a cluster layer
++             * restart) and won't receive our client notification of any
++             * monitors finalized below.
+              */
+-            rsc->st_probe_rc = pcmk_err_generic;
++            if (rsc->st_probe_rc == pcmk_ok) {
++                rsc->st_probe_rc = pcmk_err_generic;
++            }
+ 
+             if (rsc->active) {
+                 cmd_list = g_list_append(cmd_list, rsc->active);
+-- 
+1.8.3.1
+
+
+From f392caf5dc9b26a0ba55474eb79e68c90baaca16 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 23 Jul 2019 14:25:32 -0500
+Subject: [PATCH 43/96] Feature: crmd: allow configurable reaction to local
+ node fencing
+
+9fe68fd fixed a bug so that when the local node is notified of its own fencing,
+it correctly panics.
+
+However, some users may have been relying on the previous behavior. In
+particular, some users may configure fabric fencing because they never
+want the node to be intentionally hard-powered off.
+
+This creates a new cluster property, fence-reaction, that controls the
+behavior ("stop" for the original behavior, "panic" for the more correct
+behavior). It defaults to "stop", to preserve the current behavior.
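+
+As a purely illustrative example (the nvpair ids are arbitrary), the
+property would be set in the CIB like any other cluster option:
+
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cib-bootstrap-options-fence-reaction"
+                name="fence-reaction" value="panic"/>
+      </cluster_property_set>
+    </crm_config>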
+---
+ crmd/control.c        | 10 ++++++++++
+ crmd/te_utils.c       | 22 +++++++++++++++++++++-
+ crmd/tengine.h        |  3 +++
+ include/crm/msg_xml.h |  1 +
+ 4 files changed, 35 insertions(+), 1 deletion(-)
+
+diff --git a/crmd/control.c b/crmd/control.c
+index 4b19114..32340ec 100644
+--- a/crmd/control.c
++++ b/crmd/control.c
+@@ -932,6 +932,14 @@ pe_cluster_option crmd_opts[] = {
+         },
+ 	{ "node-action-limit", NULL, "integer", NULL, "0", &check_number,
+           "The maximum number of jobs that can be scheduled per node. Defaults to 2x cores"},
++    { XML_CONFIG_ATTR_FENCE_REACTION, NULL, "string", NULL, "stop", NULL,
++        "How a cluster node should react if notified of its own fencing",
++        "A cluster node may receive notification of its own fencing if fencing "
++        "is misconfigured, or if fabric fencing is in use that doesn't cut "
++        "cluster communication. Allowed values are \"stop\" to attempt to "
++        "immediately stop pacemaker and stay stopped, or \"panic\" to attempt "
++        "to immediately reboot the local node, falling back to stop on failure."
++    },
+ 	{ XML_CONFIG_ATTR_ELECTION_FAIL, "election_timeout", "time", NULL, "2min", &check_timer,
+           "*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug."
+         },
+@@ -1053,6 +1061,8 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void
+         no_quorum_suicide_escalation = TRUE;
+     }
+ 
++    set_fence_reaction(crmd_pref(config_hash, XML_CONFIG_ATTR_FENCE_REACTION));
++
+     value = crmd_pref(config_hash,"stonith-max-attempts");
+     update_stonith_max_attempts(value);
+ 
+diff --git a/crmd/te_utils.c b/crmd/te_utils.c
+index 6c7f9a0..6052bc0 100644
+--- a/crmd/te_utils.c
++++ b/crmd/te_utils.c
+@@ -35,6 +35,7 @@ crm_trigger_t *stonith_reconnect = NULL;
+ static crm_trigger_t *stonith_history_sync_trigger = NULL;
+ static mainloop_timer_t *stonith_history_sync_timer_short = NULL;
+ static mainloop_timer_t *stonith_history_sync_timer_long = NULL;
++static bool fence_reaction_panic = FALSE;
+ 
+ void
+ te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers)
+@@ -54,6 +55,21 @@ te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers)
+     }
+ }
+ 
++void
++set_fence_reaction(const char *reaction_s)
++{
++    if (safe_str_eq(reaction_s, "panic")) {
++        fence_reaction_panic = TRUE;
++
++    } else {
++        if (safe_str_neq(reaction_s, "stop")) {
++            crm_warn("Invalid value '%s' for %s, using 'stop'",
++                     reaction_s, XML_CONFIG_ATTR_FENCE_REACTION);
++        }
++        fence_reaction_panic = FALSE;
++    }
++}
++
+ static void
+ tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event)
+ {
+@@ -278,7 +294,11 @@ tengine_stonith_notify(stonith_t * st, stonith_event_t * st_event)
+         crm_crit("We were allegedly just fenced by %s for %s!",
+                  st_event->executioner? st_event->executioner : "the cluster",
+                  st_event->origin); /* Dumps blackbox if enabled */
+-        pcmk_panic(__FUNCTION__);
++        if (fence_reaction_panic) {
++            pcmk_panic(__FUNCTION__);
++        } else {
++            crm_exit(DAEMON_RESPAWN_STOP);
++        }
+         return;
+     }
+ 
+diff --git a/crmd/tengine.h b/crmd/tengine.h
+index a20760c..f5491a2 100644
+--- a/crmd/tengine.h
++++ b/crmd/tengine.h
+@@ -32,6 +32,9 @@ void remove_stonith_cleanup(const char *target);
+ void purge_stonith_cleanup(void);
+ void execute_stonith_cleanup(void);
+ 
++// reaction to notification of local node being fenced
++void set_fence_reaction(const char *reaction_s);
++
+ /* tengine */
+ extern crm_action_t *match_down_event(const char *target, bool quiet);
+ extern crm_action_t *get_cancel_action(const char *id, const char *node);
+diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h
+index 55f42c4..de99959 100644
+--- a/include/crm/msg_xml.h
++++ b/include/crm/msg_xml.h
+@@ -377,6 +377,7 @@
+ #  define XML_CONFIG_ATTR_ELECTION_FAIL	"election-timeout"
+ #  define XML_CONFIG_ATTR_FORCE_QUIT	"shutdown-escalation"
+ #  define XML_CONFIG_ATTR_RECHECK	"cluster-recheck-interval"
++#  define XML_CONFIG_ATTR_FENCE_REACTION	"fence-reaction"
+ 
+ #  define XML_ALERT_ATTR_PATH		"path"
+ #  define XML_ALERT_ATTR_TIMEOUT	"timeout"
+-- 
+1.8.3.1
+
+
+From 4fe2b0d9fac09a3228a558aaf5c1adadc7187217 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 25 Jul 2019 10:49:44 -0500
+Subject: [PATCH 44/96] Log: pacemakerd: backport pcmk_child_exit() log message
+ changes
+
+... from 2.0's f7e5558, along with corresponding CTS changes, to make patching
+easier.
+---
+ cts/CM_lha.py   | 27 +++++++++++++--------------
+ cts/patterns.py | 25 +++++++++++--------------
+ mcp/pacemaker.c | 22 +++++++++++++---------
+ 3 files changed, 37 insertions(+), 37 deletions(-)
+
+diff --git a/cts/CM_lha.py b/cts/CM_lha.py
+index 0ba4ba1..2f39063 100755
+--- a/cts/CM_lha.py
++++ b/cts/CM_lha.py
+@@ -377,9 +377,9 @@ class crm_lha(ClusterManager):
+                     "Exiting to recover from CCM connection failure",
+                     r"crmd.*: Could not recover from internal error",
+                     "crmd.*I_ERROR.*(ccm_dispatch|crmd_cib_connection_destroy)",
+-                    "crmd.*exited with return code 2.",
+-                    "attrd.*exited with return code 1.",
+-                    "cib.*exited with return code 2.",
++                    "crmd.*exited with status 2",
++                    "attrd.*exited with status 1",
++                    "cib.*exited with status 2",
+ 
+ # Not if it was fenced
+ #                    "A new node joined the cluster",
+@@ -400,8 +400,8 @@ class crm_lha(ClusterManager):
+                     r"crmd.*: Input I_TERMINATE .*from do_recover",
+                     "crmd.*I_ERROR.*crmd_cib_connection_destroy",
+                     r"crmd.*: Could not recover from internal error",
+-                    "crmd.*exited with return code 2.",
+-                    "attrd.*exited with return code 1.",
++                    "crmd.*exited with status 2",
++                    "attrd.*exited with status 1",
+                     ], badnews_ignore = common_ignore)
+ 
+         lrmd = Process(self, "lrmd", triggersreboot=self.fastfail, pats = [
+@@ -411,7 +411,7 @@ class crm_lha(ClusterManager):
+                     "State transition S_STARTING -> S_PENDING",
+                     r"crmd.*: Input I_TERMINATE .*from do_recover",
+                     r"crmd.*: Could not recover from internal error",
+-                    "crmd.*exited with return code 2.",
++                    "crmd.*exited with status 2",
+                     ], badnews_ignore = common_ignore)
+ 
+         crmd = Process(self, "crmd", triggersreboot=self.fastfail, pats = [
+@@ -425,12 +425,11 @@ class crm_lha(ClusterManager):
+ 
+         pengine = Process(self, "pengine", triggersreboot=self.fastfail, pats = [
+                     "State transition .* S_RECOVERY",
+-                    "crmd.*exited with return code 2.",
+                     r"crmd.*: Input I_TERMINATE .*from do_recover",
+                     r"crmd.*: Could not recover from internal error",
+                     r"crmd.*CRIT.*: Connection to the Policy Engine failed",
+                     "crmd.*I_ERROR.*save_cib_contents",
+-                    "crmd.*exited with return code 2.",
++                    "crmd.*exited with status 2",
+                     ], badnews_ignore = common_ignore, dc_only=1)
+ 
+         if self.Env["DoFencing"] == 1 :
+@@ -441,21 +440,21 @@ class crm_lha(ClusterManager):
+ 
+         if self.fastfail == 0:
+             ccm.pats.extend([
+-                "attrd .* exited with return code 1",
++                "attrd .* exited with status 1",
+                 "(ERROR|error): Respawning client .*attrd",
+-                "cib.* exited with return code 2",
++                "cib.* exited with status 2",
+                 "(ERROR|error): Respawning client .*cib",
+-                "crmd.* exited with return code 2",
++                "crmd.* exited with status 2",
+                 "(ERROR|error): Respawning client .*crmd" 
+                 ])
+             cib.pats.extend([
+-                "attrd.* exited with return code 1",
++                "attrd.* exited with status 1",
+                 "(ERROR|error): Respawning client .*attrd",
+-                "crmd.* exited with return code 2",
++                "crmd.* exited with status 2",
+                 "(ERROR|error): Respawning client .*crmd" 
+                 ])
+             lrmd.pats.extend([
+-                "crmd.* exited with return code 2",
++                "crmd.* exited with status 2",
+                 "(ERROR|error): Respawning client .*crmd" 
+                 ])
+             pengine.pats.extend([
+diff --git a/cts/patterns.py b/cts/patterns.py
+index cf1860a..c7f0035 100644
+--- a/cts/patterns.py
++++ b/cts/patterns.py
+@@ -130,7 +130,7 @@ class crm_lha(BasePatterns):
+                 r"input=I_INTEGRATED cause=C_TIMER_POPPED",
+                 r"input=I_FINALIZED cause=C_TIMER_POPPED",
+                 r"input=I_ERROR",
+-                r", exiting\.",
++                r"(pacemakerd|lrmd|crmd):.*, exiting",
+                 r"WARN.*Ignoring HA message.*vote.*not in our membership list",
+                 r"pengine.*Attempting recovery of resource",
+                 r"is taking more than 2x its timeout",
+@@ -210,7 +210,7 @@ class crm_cs_v0(BasePatterns):
+             r"input=I_INTEGRATED cause=C_TIMER_POPPED",
+             r"input=I_FINALIZED cause=C_TIMER_POPPED",
+             r"input=I_ERROR",
+-            r", exiting\.",
++            r"(pacemakerd|lrmd|crmd):.*, exiting",
+             r"(WARN|warn).*Ignoring HA message.*vote.*not in our membership list",
+             r"pengine.*Attempting recovery of resource",
+             r"is taking more than 2x its timeout",
+@@ -224,7 +224,7 @@ class crm_cs_v0(BasePatterns):
+             r"Faking parameter digest creation",
+             r"Parameters to .* action changed:",
+             r"Parameters to .* changed",
+-            r"The .* process .* terminated with signal",
++            r"\[[0-9]+\] terminated with signal [0-9]+ \(",
+             r"Child process .* terminated with signal",
+             r"pengine:.*Recover .*\(.* -\> .*\)",
+             r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
+@@ -281,13 +281,10 @@ class crm_cs_v0(BasePatterns):
+         
+         self.components["corosync-ignore"] = [
+             r"error:.*Connection to the CPG API failed: Library error",
+-            r"The .* process .* exited",
++            r"\[[0-9]+\] exited with status [0-9]+ \(",
+             r"pacemakerd.*error:.*Child process .* exited",
+             r"cib.*error:.*Corosync connection lost",
+             r"stonith-ng.*error:.*Corosync connection terminated",
+-            r"The cib process .* exited: Invalid argument",
+-            r"The attrd process .* exited: Transport endpoint is not connected",
+-            r"The crmd process .* exited: Link has been severed",
+             r"error:.*Child process cib .* exited: Invalid argument",
+             r"error:.*Child process attrd .* exited: Transport endpoint is not connected",
+             r"error:.*Child process crmd .* exited: Link has been severed",
+@@ -340,8 +337,8 @@ class crm_cs_v0(BasePatterns):
+                     "Connection to cib_.* closed",
+                     r"crmd.*:.*Connection to the CIB terminated...",
+                     r"attrd.*:.*(Lost connection to CIB service|Connection to the CIB terminated)",
+-                    "(Child process|The) crmd .* exited: Generic Pacemaker error",
+-                    "(Child process|The) attrd .* exited: (Connection reset by peer|Transport endpoint is not connected)",
++                    r"crmd\[[0-9]+\] exited with status 2",
++                    r"attrd\[[0-9]+\] exited with status 1",
+                     r"crmd.*: Input I_TERMINATE .*from do_recover",
+                     "crmd.*I_ERROR.*crmd_cib_connection_destroy",
+                     "crmd.*Could not recover from internal error",
+@@ -354,7 +351,7 @@ class crm_cs_v0(BasePatterns):
+                     "Connection to lrmd failed",
+                     "Connection to lrmd.* closed",
+                     "crmd.*I_ERROR.*lrm_connection_destroy",
+-                    "(Child process|The) crmd .* exited: Generic Pacemaker error",
++                    r"crmd\[[0-9]+\] exited with status 2",
+                     r"crmd.*: Input I_TERMINATE .*from do_recover",
+                     "crmd.*Could not recover from internal error",
+                     ]
+@@ -377,7 +374,7 @@ class crm_cs_v0(BasePatterns):
+         self.components["pengine"] = [
+                     "State transition .* S_RECOVERY",
+                     "Respawning .* crmd",
+-                    "(The|Child process) crmd .* exited: Generic Pacemaker error",
++                    r"crmd\[[0-9]+\] exited with status 2",
+                     "Connection to pengine failed",
+                     "Connection to pengine.* closed",
+                     "Connection to the Policy Engine failed",
+@@ -437,8 +434,8 @@ class crm_mcp(crm_cs_v0):
+             "Pat:They_stopped" : "%s\W.*crmd.*Node %s(\[|\s).*state is now lost",
+             "Pat:They_dead"    : "crmd.*Node %s(\[|\s).*state is now lost",
+ 
+-            "Pat:ChildExit"    : "The .* process exited",
+-            "Pat:ChildKilled"  : "%s\W.*pacemakerd.*The %s process .* terminated with signal 9",
++            "Pat:ChildExit"    : r"\[[0-9]+\] exited with status [0-9]+ \(",
++            "Pat:ChildKilled"  : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated with signal 9",
+             "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
+ 
+             "Pat:PacemakerUp"  : "%s\W.*pacemakerd.*Starting Pacemaker",
+@@ -487,7 +484,7 @@ class crm_cman(crm_cs_v0):
+             "Pat:They_stopped" : "%s\W.*crmd.*Node %s(\[|\s).*state is now lost",
+             "Pat:They_dead"    : "crmd.*Node %s(\[|\s).*state is now lost",
+ 
+-            "Pat:ChildKilled"  : "%s\W.*pacemakerd.*The %s process .* terminated with signal 9",
++            "Pat:ChildKilled"  : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated with signal 9",
+             "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
+ 
+             "Pat:PacemakerUp"  : "%s\W.*pacemakerd.*Starting Pacemaker",
+diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c
+index 86df216..6a67b59 100644
+--- a/mcp/pacemaker.c
++++ b/mcp/pacemaker.c
+@@ -217,27 +217,30 @@ pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitco
+     pcmk_child_t *child = mainloop_child_userdata(p);
+     const char *name = mainloop_child_name(p);
+ 
+-    if (signo && signo == SIGKILL) {
+-        crm_warn("The %s process (%d) terminated with signal %d (core=%d)", name, pid, signo, core);
+-
+-    } else if (signo) {
+-        crm_err("The %s process (%d) terminated with signal %d (core=%d)", name, pid, signo, core);
++    if (signo) {
++        do_crm_log(((signo == SIGKILL)? LOG_WARNING : LOG_ERR),
++                   "%s[%d] terminated with signal %d (core=%d)",
++                   name, pid, signo, core);
+ 
+     } else {
+         switch(exitcode) {
+             case pcmk_ok:
+-                crm_info("The %s process (%d) exited: %s (%d)", name, pid, pcmk_strerror(exitcode), exitcode);
++                crm_info("%s[%d] exited with status %d (%s)",
++                         name, pid, exitcode, pcmk_strerror(exitcode));
+                 break;
+ 
+             case DAEMON_RESPAWN_STOP:
+-                crm_warn("The %s process (%d) can no longer be respawned, shutting the cluster down.", name, pid);
++                crm_warn("Shutting cluster down because %s[%d] had fatal failure",
++                         name, pid);
+                 child->respawn = FALSE;
+                 fatal_error = TRUE;
+                 pcmk_shutdown(SIGTERM);
+                 break;
+ 
+             case pcmk_err_panic:
+-                do_crm_log_always(LOG_EMERG, "The %s process (%d) instructed the machine to reset", name, pid);
++                do_crm_log_always(LOG_EMERG,
++                                  "%s[%d] instructed the machine to reset",
++                                  name, pid);
+                 child->respawn = FALSE;
+                 fatal_error = TRUE;
+                 pcmk_panic(__FUNCTION__);
+@@ -245,7 +248,8 @@ pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitco
+                 break;
+ 
+             default:
+-                crm_err("The %s process (%d) exited: %s (%d)", name, pid, pcmk_strerror(exitcode), exitcode);
++                crm_err("%s[%d] exited with status %d (%s)",
++                        name, pid, exitcode, pcmk_strerror(exitcode));
+                 break;
+         }
+     }
+-- 
+1.8.3.1
+
+
+From 2cc1d945e53ea8a8bae38b1d8fe36b78c731bef0 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 25 Jul 2019 10:00:14 -0500
+Subject: [PATCH 45/96] Log: pacemakerd: make daemon exit detection messages
+ more consistent
+
+Lost daemons may be detected via pcmk_child_exit() if the current pacemakerd
+launched the daemon, or check_active_before_startup_processes() if a previous
+pacemakerd launched it.
+
+Update the log messages in these cases to be more consistent, and update CTS
+to detect exit via either path.
+---
+ cts/patterns.py | 10 ++++++----
+ mcp/pacemaker.c | 15 +++++----------
+ 2 files changed, 11 insertions(+), 14 deletions(-)
+
+diff --git a/cts/patterns.py b/cts/patterns.py
+index c7f0035..e50daae 100644
+--- a/cts/patterns.py
++++ b/cts/patterns.py
+@@ -224,9 +224,9 @@ class crm_cs_v0(BasePatterns):
+             r"Faking parameter digest creation",
+             r"Parameters to .* action changed:",
+             r"Parameters to .* changed",
+-            r"\[[0-9]+\] terminated with signal [0-9]+ \(",
++            r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)",
+             r"Child process .* terminated with signal",
+-            r"pengine:.*Recover .*\(.* -\> .*\)",
++            r"pengine.*Recover .*\(.* -\> .*\)",
+             r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
+             r"Peer is not part of our cluster",
+             r"We appear to be in an election loop",
+@@ -435,7 +435,8 @@ class crm_mcp(crm_cs_v0):
+             "Pat:They_dead"    : "crmd.*Node %s(\[|\s).*state is now lost",
+ 
+             "Pat:ChildExit"    : r"\[[0-9]+\] exited with status [0-9]+ \(",
+-            "Pat:ChildKilled"  : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated with signal 9",
++            # "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
++            "Pat:ChildKilled"  : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
+             "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
+ 
+             "Pat:PacemakerUp"  : "%s\W.*pacemakerd.*Starting Pacemaker",
+@@ -484,7 +485,8 @@ class crm_cman(crm_cs_v0):
+             "Pat:They_stopped" : "%s\W.*crmd.*Node %s(\[|\s).*state is now lost",
+             "Pat:They_dead"    : "crmd.*Node %s(\[|\s).*state is now lost",
+ 
+-            "Pat:ChildKilled"  : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated with signal 9",
++            # "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
++            "Pat:ChildKilled"  : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
+             "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
+ 
+             "Pat:PacemakerUp"  : "%s\W.*pacemakerd.*Starting Pacemaker",
+diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c
+index 6a67b59..1cc72af 100644
+--- a/mcp/pacemaker.c
++++ b/mcp/pacemaker.c
+@@ -897,19 +897,14 @@ check_active_before_startup_processes(gpointer user_data)
+                     case 0:
+                     case 2:  /* this very case: it was OK once already */
+                         if (pcmk_children[lpc].respawn == TRUE) {
+-                            /* presumably after crash, hence critical */
+-                            crm_crit("Process %s terminated (pid=%lld)%s", \
+-                                     name, (long long)
+-                                     PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
+-                                     ret ? ", at least per IPC end-point that went AWOL"
+-                                         : "");
++                            crm_err("%s[%d] terminated%s", name,
++                                    PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
++                                    ret ? " as IPC server" : "");
+                         } else {
+                             /* orderly shutdown */
+-                            crm_notice("Process %s terminated (pid=%lld)%s", \
+-                                       name, (long long)
++                            crm_notice("%s[%d] terminated%s", name,
+                                        PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
+-                                       ret ? ", at least per IPC end-point that went AWOL"
+-                                           : "");
++                                       ret ? " as IPC server" : "");
+                         }
+                         pcmk_process_exit(&(pcmk_children[lpc]));
+                         continue;
+-- 
+1.8.3.1
+
+
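+A quick way to sanity-check the alternation introduced above (this sketch is
+not part of the upstream patch series): the rewritten Pat:ChildKilled pattern
+accepts either exit path, "terminated with signal 9" as logged by
+pcmk_child_exit(), or a line that simply ends at "terminated" as logged by
+check_active_before_startup_processes().  CTS itself uses Python's re module;
+the same idea is shown here with POSIX extended regex in C.  The sample log
+lines and the simplified pattern (node-name prefix dropped) are stand-ins,
+not actual Pacemaker output:
+
+#include <regex.h>
+#include <stdio.h>
+
+int main(void)
+{
+    /* Simplified form of the substituted Pat:ChildKilled pattern */
+    const char *pattern =
+        "pacemakerd.*crmd\\[[0-9]+] terminated( with signal 9|$)";
+    const char *logs[] = {
+        "pacemakerd[1000]: warning: crmd[1234] terminated with signal 9 (core=0)",
+        "pacemakerd[1000]: error: crmd[1234] terminated",
+    };
+    regex_t re;
+
+    if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB) != 0) {
+        return 1;
+    }
+    for (int i = 0; i < 2; i++) {
+        printf("%s -> %s\n", logs[i],
+               (regexec(&re, logs[i], 0, NULL, 0) == 0)? "match" : "no match");
+    }
+    regfree(&re);
+    return 0;
+}
+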
+From ab09f0afd0d383da60411c6a719830f415e6102a Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 25 Jul 2019 11:20:04 -0500
+Subject: [PATCH 46/96] Test: CTS: alert failures are expected when executor is
+ killed
+
+---
+ cts/patterns.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/cts/patterns.py b/cts/patterns.py
+index e50daae..87418d4 100644
+--- a/cts/patterns.py
++++ b/cts/patterns.py
+@@ -357,6 +357,7 @@ class crm_cs_v0(BasePatterns):
+                     ]
+         self.components["lrmd-ignore"] = [
+             r"attrd.*Connection to lrmd (failed|closed)",
++            r"(attrd|controld).*Could not execute alert",
+         ]
+ 
+         self.components["crmd"] = [
+-- 
+1.8.3.1
+
+
+From 0654ed94b44930cd2762cad361ee19b229b6bd38 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 24 Jul 2019 12:06:21 -0500
+Subject: [PATCH 47/96] Doc: controller: document the cluster-name cluster
+ property
+
+---
+ crmd/control.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/crmd/control.c b/crmd/control.c
+index 32340ec..cd4223f 100644
+--- a/crmd/control.c
++++ b/crmd/control.c
+@@ -897,6 +897,13 @@ pe_cluster_option crmd_opts[] = {
+ 	{ "cluster-infrastructure", NULL, "string", NULL, "heartbeat", NULL,
+           "The messaging stack on which Pacemaker is currently running.",
+           "Used for informational and diagnostic purposes." },
++    { "cluster-name", NULL, "string", NULL, NULL, NULL,
++        "An arbitrary name for the cluster",
++        "This optional value is mostly for users' convenience as desired "
++        "in administration, but may also be used in Pacemaker configuration "
++        "rules via the #cluster-name node attribute, and by higher-level tools "
++        "and resource agents."
++    },
+ 	{ XML_CONFIG_ATTR_DC_DEADTIME, "dc_deadtime", "time", NULL, "20s", &check_time,
+           "How long to wait for a response from other nodes during startup.",
+           "The \"correct\" value will depend on the speed/load of your network and the type of switches used."
+-- 
+1.8.3.1
+
+
+From 065fabee559a62a400709957f514f33150a91442 Mon Sep 17 00:00:00 2001
+From: aleksei-burlakov <aleksei.burlakov@suse.com>
+Date: Fri, 17 May 2019 18:13:01 +0200
+Subject: [PATCH 48/96] Low: stonith_admin --help: specify the usage of
+ --cleanup
+
+---
+ fencing/admin.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fencing/admin.c b/fencing/admin.c
+index 8aef093..7da43e9 100644
+--- a/fencing/admin.c
++++ b/fencing/admin.c
+@@ -58,7 +58,7 @@ static struct crm_option long_options[] = {
+         "\tBe less descriptive in output."
+     },
+     {   "cleanup", no_argument, NULL, 'c',
+-        "\tCleanup wherever appropriate."
++        "\tCleanup wherever appropriate. Requires: --history."
+     },
+     {   "broadcast", no_argument, NULL, 'b',
+         "Broadcast wherever appropriate."
+-- 
+1.8.3.1
+
+
+From a87da1dc8512e210e892332326684aad60ec7ee0 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 15 Aug 2019 13:48:28 -0500
+Subject: [PATCH 49/96] Test: cts: check correct variable in bandwidth test
+
+found by static analysis (backport of 3b9dc32e from master branch)
+---
+ cts/CTStests.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cts/CTStests.py b/cts/CTStests.py
+index e30fe86..f328170 100644
+--- a/cts/CTStests.py
++++ b/cts/CTStests.py
+@@ -1051,7 +1051,7 @@ class BandwidthTest(CTSTest):
+                 linessplit = string.split(line," ")
+                 for j in range(len(linessplit)-1):
+                     if linessplit[j] == "udp": break
+-                    if linesplit[j] == "length:": break
++                    if linessplit[j] == "length:": break
+                 try:
+                     sum = int(linessplit[j+1]) + sum
+                 except ValueError:
+-- 
+1.8.3.1
+
+
+From 719c34f4b2da87970058326cb14d8488f10f90f5 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 17 Jun 2019 20:30:19 -0500
+Subject: [PATCH 50/96] Refactor: libcrmcommon: functionize freeing an XML
+ subtree
+
+... to reduce code duplication, and draw a distinction between this and freeing
+the entire XML document the element is in (which free_xml() does).
+---
+ include/crm/common/xml.h |  1 +
+ lib/common/acl.c         |  7 ++++---
+ lib/common/xml.c         | 22 ++++++++++++++--------
+ 3 files changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/include/crm/common/xml.h b/include/crm/common/xml.h
+index 11a03f4..a9af963 100644
+--- a/include/crm/common/xml.h
++++ b/include/crm/common/xml.h
+@@ -271,6 +271,7 @@ __xml_next_element(xmlNode * child)
+     return NULL;
+ }
+ 
++void pcmk_free_xml_subtree(xmlNode *xml);
+ void free_xml(xmlNode * child);
+ 
+ xmlNode *first_named_child(xmlNode * parent, const char *name);
+diff --git a/lib/common/acl.c b/lib/common/acl.c
+index 80b1f6f..30adad8 100644
+--- a/lib/common/acl.c
++++ b/lib/common/acl.c
+@@ -1,5 +1,7 @@
+ /*
+- * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
++ * Copyright 2004-2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
+  *
+  * This source code is licensed under the GNU Lesser General Public License
+  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+@@ -501,8 +503,7 @@ pcmk__post_process_acl(xmlNode *xml)
+                           crm_element_name(xml), path);
+ 
+                 if (xml != xmlDocGetRootElement(xml->doc)) {
+-                    xmlUnlinkNode(xml);
+-                    xmlFreeNode(xml);
++                    pcmk_free_xml_subtree(xml);
+                 }
+                 free(path);
+                 return;
+diff --git a/lib/common/xml.c b/lib/common/xml.c
+index 91c0edb..dfa2d77 100644
+--- a/lib/common/xml.c
++++ b/lib/common/xml.c
+@@ -2021,6 +2021,18 @@ xml_get_path(xmlNode *xml)
+     return NULL;
+ }
+ 
++/*!
++ * Free an XML element and all of its children, removing it from its parent
++ *
++ * \param[in] xml  XML element to free
++ */
++void
++pcmk_free_xml_subtree(xmlNode *xml)
++{
++    xmlUnlinkNode(xml); // Detaches from parent and siblings
++    xmlFreeNode(xml);   // Frees
++}
++
+ static void
+ free_xml_with_position(xmlNode * child, int position)
+ {
+@@ -2075,12 +2087,7 @@ free_xml_with_position(xmlNode * child, int position)
+                     pcmk__set_xml_flag(child, xpf_dirty);
+                 }
+             }
+-
+-            /* Free this particular subtree
+-             * Make sure to unlink it from the parent first
+-             */
+-            xmlUnlinkNode(child);
+-            xmlFreeNode(child);
++            pcmk_free_xml_subtree(child);
+         }
+     }
+ }
+@@ -2296,8 +2303,7 @@ strip_text_nodes(xmlNode * xml)
+         switch (iter->type) {
+             case XML_TEXT_NODE:
+                 /* Remove it */
+-                xmlUnlinkNode(iter);
+-                xmlFreeNode(iter);
++                pcmk_free_xml_subtree(iter);
+                 break;
+ 
+             case XML_ELEMENT_NODE:
+-- 
+1.8.3.1
+
+
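+For illustration only (not taken from the upstream patches): the new helper
+wraps exactly the xmlUnlinkNode()/xmlFreeNode() pair shown above, so the
+contrast with free_xml() can be demonstrated with plain libxml2.  One subtree
+is detached and released while the rest of the document stays usable; the
+element names here are arbitrary placeholders:
+
+#include <stdio.h>
+#include <libxml/tree.h>
+
+int main(void)
+{
+    xmlDocPtr doc = xmlNewDoc(BAD_CAST "1.0");
+    xmlNodePtr root = xmlNewDocNode(doc, NULL, BAD_CAST "cib", NULL);
+    xmlNodePtr status = NULL;
+
+    xmlDocSetRootElement(doc, root);
+    xmlNewChild(root, NULL, BAD_CAST "configuration", NULL);
+    status = xmlNewChild(root, NULL, BAD_CAST "status", NULL);
+
+    /* Equivalent of pcmk_free_xml_subtree(status): unlink, then free */
+    xmlUnlinkNode(status);
+    xmlFreeNode(status);
+
+    /* The document and its remaining children are still valid */
+    xmlDocDump(stdout, doc);
+
+    /* Releasing the whole document is a separate, final step */
+    xmlFreeDoc(doc);
+    return 0;
+}
+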
+From 310459de07dc6b3bb6e5a851fff1f25559caee2e Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 17 Jun 2019 21:08:57 -0500
+Subject: [PATCH 51/96] Refactor: libcrmcommon: make ACL creation checks more
+ efficient
+
+This does the simplest checks first, avoids doing the same check repeatedly,
+and doesn't gather information unless needed.
+
+Trace log messages are also improved, and the regression tests updated to
+match.
+---
+ lib/common/acl.c | 76 +++++++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 48 insertions(+), 28 deletions(-)
+
+diff --git a/lib/common/acl.c b/lib/common/acl.c
+index 30adad8..dea67cf 100644
+--- a/lib/common/acl.c
++++ b/lib/common/acl.c
+@@ -471,6 +471,41 @@ xml_acl_filtered_copy(const char *user, xmlNode *acl_source, xmlNode *xml,
+     return TRUE;
+ }
+ 
++/*!
++ * \internal
++ * \brief Check whether creation of an XML element is implicitly allowed
++ *
++ * Check whether XML is a "scaffolding" element whose creation is implicitly
++ * allowed regardless of ACLs (that is, it is not in the ACL section and has
++ * no attributes other than "id").
++ *
++ * \param[in] xml  XML element to check
++ *
++ * \return TRUE if XML element is implicitly allowed, FALSE otherwise
++ */
++static bool
++implicitly_allowed(xmlNode *xml)
++{
++    char *path = NULL;
++
++    for (xmlAttr *prop = xml->properties; prop != NULL; prop = prop->next) {
++        if (strcmp((const char *) prop->name, XML_ATTR_ID) != 0) {
++            return FALSE;
++        }
++    }
++
++    path = xml_get_path(xml);
++    if (strstr(path, "/" XML_CIB_TAG_ACLS "/") != NULL) {
++        free(path);
++        return FALSE;
++    }
++    free(path);
++
++    return TRUE;
++}
++
++#define display_id(xml) (ID(xml)? ID(xml) : "<unset>")
++
+ void
+ pcmk__post_process_acl(xmlNode *xml)
+ {
+@@ -478,38 +513,23 @@ pcmk__post_process_acl(xmlNode *xml)
+     xml_private_t *p = xml->_private;
+ 
+     if (is_set(p->flags, xpf_created)) {
+-        xmlAttr *xIter = NULL;
+-        char *path = xml_get_path(xml);
++        if (implicitly_allowed(xml)) {
++            crm_trace("Creation of <%s> scaffolding with id=\"%s\""
++                      " is implicitly allowed",
++                      crm_element_name(xml), display_id(xml));
+ 
+-        /* Always allow new scaffolding (e.g. node with no attributes or only an
+-         * 'id'), except in the ACLs section
+-         */
+-
+-        for (xIter = xml->properties; xIter != NULL; xIter = xIter->next) {
+-            const char *prop_name = (const char *)xIter->name;
+-
+-            if (!strcmp(prop_name, XML_ATTR_ID)
+-                && !strstr(path, "/"XML_CIB_TAG_ACLS"/")) {
+-                /* Delay the acl check */
+-                continue;
++        } else if (pcmk__check_acl(xml, NULL, xpf_acl_write)) {
++            crm_trace("ACLs allow creation of <%s> with id=\"%s\"",
++                      crm_element_name(xml), display_id(xml));
+ 
+-            } else if (pcmk__check_acl(xml, NULL, xpf_acl_write)) {
+-                crm_trace("Creation of %s=%s is allowed",
+-                          crm_element_name(xml), ID(xml));
+-                break;
+-
+-            } else {
+-                crm_trace("Cannot add new node %s at %s",
+-                          crm_element_name(xml), path);
+-
+-                if (xml != xmlDocGetRootElement(xml->doc)) {
+-                    pcmk_free_xml_subtree(xml);
+-                }
+-                free(path);
+-                return;
++        } else {
++            crm_trace("ACLs disallow creation of <%s> with id=\"%s\"",
++                      crm_element_name(xml), display_id(xml));
++            if (xml != xmlDocGetRootElement(xml->doc)) {
++                pcmk_free_xml_subtree(xml);
+             }
++            return;
+         }
+-        free(path);
+     }
+ 
+     while (cIter != NULL) {
+-- 
+1.8.3.1
+
+
+From fec0a2a12b099a14b7db9397fdba08d5b6c22456 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 15 Aug 2019 14:00:48 -0500
+Subject: [PATCH 52/96] Test: tools: update regression tests for ACL trace
+ message changes
+
+---
+ tools/regression.acls.exp | 70 +++++++++++++++++++++++++----------------------
+ 1 file changed, 37 insertions(+), 33 deletions(-)
+
+diff --git a/tools/regression.acls.exp b/tools/regression.acls.exp
+index 6bc6062..6508b2c 100644
+--- a/tools/regression.acls.exp
++++ b/tools/regression.acls.exp
+@@ -257,6 +257,7 @@ Error performing operation: Permission denied
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	Ordinary user unknownguy cannot access the CIB without any defined ACLs
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	Ordinary user unknownguy cannot access the CIB without any defined ACLs
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	Ordinary user unknownguy cannot access the CIB without any defined ACLs
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of <primitive> scaffolding with id="<unset>" is implicitly allowed
+ Call failed: Permission denied
+ =#=#=#= End test: unknownguy: Create a resource - Permission denied (13) =#=#=#=
+ * Passed: cibadmin       - unknownguy: Create a resource
+@@ -274,7 +275,7 @@ Error performing operation: Permission denied
+ * Passed: crm_attribute  - l33t-haxor: Set stonith-enabled
+ =#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#=
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs disallow creation of <primitive> with id="dummy"
+ Call failed: Permission denied
+ =#=#=#= End test: l33t-haxor: Create a resource - Permission denied (13) =#=#=#=
+ * Passed: cibadmin       - l33t-haxor: Create a resource
+@@ -329,7 +330,7 @@ Error setting enable-acl=false (section=crm_config, set=<null>): Permission deni
+ =#=#=#= End test: niceguy: Set enable-acl - Permission denied (13) =#=#=#=
+ * Passed: crm_attribute  - niceguy: Set enable-acl
+ =#=#=#= Begin test: niceguy: Set stonith-enabled =#=#=#=
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of nvpair=cib-bootstrap-options-stonith-enabled is allowed
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <nvpair> with id="cib-bootstrap-options-stonith-enabled"
+ =#=#=#= Current cib after: niceguy: Set stonith-enabled =#=#=#=
+ <cib epoch="7" num_updates="0" admin_epoch="0">
+   <configuration>
+@@ -377,7 +378,7 @@ Error setting enable-acl=false (section=crm_config, set=<null>): Permission deni
+ * Passed: crm_attribute  - niceguy: Set stonith-enabled
+ =#=#=#= Begin test: niceguy: Create a resource =#=#=#=
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs disallow creation of <primitive> with id="dummy"
+ Call failed: Permission denied
+ =#=#=#= End test: niceguy: Create a resource - Permission denied (13) =#=#=#=
+ * Passed: cibadmin       - niceguy: Create a resource
+@@ -536,7 +537,8 @@ Error performing operation: Permission denied
+ error: unpack_resources:	Resource start-up disabled since no STONITH resources have been defined
+ error: unpack_resources:	Either configure some or disable STONITH with the stonith-enabled option
+ error: unpack_resources:	NOTE: Clusters with shared data need STONITH to ensure data integrity
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of nvpair=dummy-meta_attributes-target-role is allowed
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of <meta_attributes> scaffolding with id="dummy-meta_attributes" is implicitly allowed
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
+ 
+ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Stopped
+ =#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
+@@ -704,7 +706,7 @@ Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
+ error: unpack_resources:	Resource start-up disabled since no STONITH resources have been defined
+ error: unpack_resources:	Either configure some or disable STONITH with the stonith-enabled option
+ error: unpack_resources:	NOTE: Clusters with shared data need STONITH to ensure data integrity
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of nvpair=dummy-meta_attributes-target-role is allowed
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
+ 
+ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Started
+ =#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
+@@ -865,7 +867,7 @@ Call failed: Permission denied
+ =#=#=#= Begin test: niceguy: Replace - create resource =#=#=#=
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	400 access denied to /cib[@epoch]: default
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2']
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs disallow creation of <primitive> with id="dummy2"
+ Call failed: Permission denied
+ =#=#=#= End test: niceguy: Replace - create resource - Permission denied (13) =#=#=#=
+ * Passed: cibadmin       - niceguy: Replace - create resource
+@@ -1184,28 +1186,28 @@ Call failed: Permission denied
+ 
+     !#!#!#!#! Upgrading to pacemaker-2.0 and retesting !#!#!#!#!
+ =#=#=#= Begin test: root: Upgrade to pacemaker-2.0 =#=#=#=
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=observer-read-1 is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=observer-write-1 is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=observer-write-2 is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=admin-read-1 is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=admin-write-1 is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_target=l33t-haxor is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of role=auto-l33t-haxor is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_role=auto-l33t-haxor is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=crook-nothing is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_target=niceguy is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of role=observer is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_target=bob is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of role=admin is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_target=badidea is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of role=auto-badidea is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_role=auto-badidea is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=badidea-resources is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_target=betteridea is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of role=auto-betteridea is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_role=auto-betteridea is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=betteridea-nothing is allowed
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of acl_permission=betteridea-resources is allowed
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="observer-read-1"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="observer-write-1"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="observer-write-2"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="admin-read-1"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="admin-write-1"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_target> with id="l33t-haxor"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <role> with id="auto-l33t-haxor"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_role> with id="auto-l33t-haxor"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="crook-nothing"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_target> with id="niceguy"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <role> with id="observer"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_target> with id="bob"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <role> with id="admin"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_target> with id="badidea"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <role> with id="auto-badidea"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_role> with id="auto-badidea"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="badidea-resources"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_target> with id="betteridea"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <role> with id="auto-betteridea"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_role> with id="auto-betteridea"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="betteridea-nothing"
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <acl_permission> with id="betteridea-resources"
+ =#=#=#= Current cib after: root: Upgrade to pacemaker-2.0 =#=#=#=
+ <cib epoch="2" num_updates="0" admin_epoch="1">
+   <configuration>
+@@ -1279,6 +1281,7 @@ Error performing operation: Permission denied
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	Ordinary user unknownguy cannot access the CIB without any defined ACLs
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	Ordinary user unknownguy cannot access the CIB without any defined ACLs
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	Ordinary user unknownguy cannot access the CIB without any defined ACLs
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of <primitive> scaffolding with id="<unset>" is implicitly allowed
+ Call failed: Permission denied
+ =#=#=#= End test: unknownguy: Create a resource - Permission denied (13) =#=#=#=
+ * Passed: cibadmin       - unknownguy: Create a resource
+@@ -1296,7 +1299,7 @@ Error performing operation: Permission denied
+ * Passed: crm_attribute  - l33t-haxor: Set stonith-enabled
+ =#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#=
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs disallow creation of <primitive> with id="dummy"
+ Call failed: Permission denied
+ =#=#=#= End test: l33t-haxor: Create a resource - Permission denied (13) =#=#=#=
+ * Passed: cibadmin       - l33t-haxor: Create a resource
+@@ -1417,7 +1420,7 @@ Error setting enable-acl=false (section=crm_config, set=<null>): Permission deni
+ * Passed: crm_attribute  - niceguy: Set stonith-enabled
+ =#=#=#= Begin test: niceguy: Create a resource =#=#=#=
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy']
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs disallow creation of <primitive> with id="dummy"
+ Call failed: Permission denied
+ =#=#=#= End test: niceguy: Create a resource - Permission denied (13) =#=#=#=
+ * Passed: cibadmin       - niceguy: Create a resource
+@@ -1603,7 +1606,8 @@ Error performing operation: Permission denied
+ error: unpack_resources:	Resource start-up disabled since no STONITH resources have been defined
+ error: unpack_resources:	Either configure some or disable STONITH with the stonith-enabled option
+ error: unpack_resources:	NOTE: Clusters with shared data need STONITH to ensure data integrity
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of nvpair=dummy-meta_attributes-target-role is allowed
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of <meta_attributes> scaffolding with id="dummy-meta_attributes" is implicitly allowed
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
+ 
+ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Stopped
+ =#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
+@@ -1798,7 +1802,7 @@ Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
+ error: unpack_resources:	Resource start-up disabled since no STONITH resources have been defined
+ error: unpack_resources:	Either configure some or disable STONITH with the stonith-enabled option
+ error: unpack_resources:	NOTE: Clusters with shared data need STONITH to ensure data integrity
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Creation of nvpair=dummy-meta_attributes-target-role is allowed
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
+ 
+ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Started
+ =#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
+@@ -1977,7 +1981,7 @@ Call failed: Permission denied
+ =#=#=#= Begin test: niceguy: Replace - create resource =#=#=#=
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	400 access denied to /cib[@epoch]: default
+ (       acl.c:NNN   )   trace: pcmk__check_acl:	400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default
+-(       acl.c:NNN   )   trace: pcmk__post_process_acl:	Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2']
++(       acl.c:NNN   )   trace: pcmk__post_process_acl:	ACLs disallow creation of <primitive> with id="dummy2"
+ Call failed: Permission denied
+ =#=#=#= End test: niceguy: Replace - create resource - Permission denied (13) =#=#=#=
+ * Passed: cibadmin       - niceguy: Replace - create resource
+-- 
+1.8.3.1
+
+
+From ecefc149f2a7f1212e678ba2824755b2d13d848a Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 18 Jun 2019 17:00:51 -0500
+Subject: [PATCH 53/96] Low: libcrmcommon: avoid use-after-free when enforcing
+ creation ACLs
+
+As detected by static analysis, pcmk__post_process_acl() can free its argument,
+yet some callers continued to use the argument afterward.
+
+The existing code apparently tried to get around this by freeing the argument
+only if it wasn't the root element of the XML document. However some callers
+do pass non-root elements.
+
+The circumstances where the use-after-free could occur seem limited enough
+that it hasn't been seen in practice.
+
+This avoids any chance of use-after-free by adding an argument indicating
+whether the argument itself should be checked, or just its children (replacing
+the root element check). All callers specify just children, except
+pcmk__post_process_acl()'s recursive calls for the children.
+
+__xml_diff_object() gets a similar argument to pass along to
+pcmk__post_process_acl().
+---
+ lib/common/acl.c               | 32 +++++++++++++++++++++++---------
+ lib/common/crmcommon_private.h |  6 ++++--
+ lib/common/xml.c               | 10 +++++-----
+ 3 files changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/lib/common/acl.c b/lib/common/acl.c
+index dea67cf..72dc707 100644
+--- a/lib/common/acl.c
++++ b/lib/common/acl.c
+@@ -506,10 +506,22 @@ implicitly_allowed(xmlNode *xml)
+ 
+ #define display_id(xml) (ID(xml)? ID(xml) : "<unset>")
+ 
++/*!
++ * \internal
++ * \brief Drop XML nodes created in violation of ACLs
++ *
++ * Given an XML element, free all of its descendant nodes created in violation
++ * of ACLs, with the exception of allowing "scaffolding" elements (i.e. those
++ * that aren't in the ACL section and don't have any attributes other than
++ * "id").
++ *
++ * \param[in,out] xml        XML to check
++ * \param[in]     check_top  Whether to apply checks to argument itself
++ *                           (if TRUE, xml might get freed)
++ */
+ void
+-pcmk__post_process_acl(xmlNode *xml)
++pcmk__post_process_acl(xmlNode *xml, bool check_top)
+ {
+-    xmlNode *cIter = __xml_first_child(xml);
+     xml_private_t *p = xml->_private;
+ 
+     if (is_set(p->flags, xpf_created)) {
+@@ -522,20 +534,22 @@ pcmk__post_process_acl(xmlNode *xml)
+             crm_trace("ACLs allow creation of <%s> with id=\"%s\"",
+                       crm_element_name(xml), display_id(xml));
+ 
+-        } else {
++        } else if (check_top) {
+             crm_trace("ACLs disallow creation of <%s> with id=\"%s\"",
+                       crm_element_name(xml), display_id(xml));
+-            if (xml != xmlDocGetRootElement(xml->doc)) {
+-                pcmk_free_xml_subtree(xml);
+-            }
++            pcmk_free_xml_subtree(xml);
+             return;
++
++        } else {
++            crm_trace("ACLs would disallow creation of <%s> with id=\"%s\"",
++                      crm_element_name(xml), display_id(xml));
+         }
+     }
+ 
+-    while (cIter != NULL) {
++    for (xmlNode *cIter = __xml_first_child(xml); cIter != NULL; ) {
+         xmlNode *child = cIter;
+         cIter = __xml_next(cIter); /* In case it is free'd */
+-        pcmk__post_process_acl(child);
++        pcmk__post_process_acl(child, TRUE);
+     }
+ }
+ 
+@@ -558,7 +572,7 @@ xml_acl_disable(xmlNode *xml)
+ 
+         /* Catch anything that was created but shouldn't have been */
+         pcmk__apply_acl(xml);
+-        pcmk__post_process_acl(xml);
++        pcmk__post_process_acl(xml, FALSE);
+         clear_bit(p->flags, xpf_acl_enabled);
+     }
+ }
+diff --git a/lib/common/crmcommon_private.h b/lib/common/crmcommon_private.h
+index 113f525..b153873 100644
+--- a/lib/common/crmcommon_private.h
++++ b/lib/common/crmcommon_private.h
+@@ -1,5 +1,7 @@
+ /*
+- * Copyright 2018-2019 Andrew Beekhof <andrew@beekhof.net>
++ * Copyright 2018-2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
+  *
+  * This source code is licensed under the GNU Lesser General Public License
+  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+@@ -66,7 +68,7 @@ G_GNUC_INTERNAL
+ void pcmk__apply_acl(xmlNode *xml);
+ 
+ G_GNUC_INTERNAL
+-void pcmk__post_process_acl(xmlNode *xml);
++void pcmk__post_process_acl(xmlNode *xml, bool check_top);
+ 
+ G_GNUC_INTERNAL
+ void pcmk__mark_xml_attr_dirty(xmlAttr *a);
+diff --git a/lib/common/xml.c b/lib/common/xml.c
+index dfa2d77..9fd83a8 100644
+--- a/lib/common/xml.c
++++ b/lib/common/xml.c
+@@ -3368,7 +3368,7 @@ apply_xml_diff(xmlNode * old, xmlNode * diff, xmlNode ** new)
+ }
+ 
+ static void
+-__xml_diff_object(xmlNode * old, xmlNode * new)
++__xml_diff_object(xmlNode * old, xmlNode * new, bool check_top)
+ {
+     xmlNode *cIter = NULL;
+     xmlAttr *pIter = NULL;
+@@ -3376,7 +3376,7 @@ __xml_diff_object(xmlNode * old, xmlNode * new)
+     CRM_CHECK(new != NULL, return);
+     if(old == NULL) {
+         crm_node_created(new);
+-        pcmk__post_process_acl(new); // Check creation is allowed
++        pcmk__post_process_acl(new, check_top); // Check creation is allowed
+         return;
+ 
+     } else {
+@@ -3483,7 +3483,7 @@ __xml_diff_object(xmlNode * old, xmlNode * new)
+ 
+         cIter = __xml_next(cIter);
+         if(new_child) {
+-            __xml_diff_object(old_child, new_child);
++            __xml_diff_object(old_child, new_child, TRUE);
+ 
+         } else {
+             /* Create then free (which will check the acls if necessary) */
+@@ -3511,7 +3511,7 @@ __xml_diff_object(xmlNode * old, xmlNode * new)
+         if(old_child == NULL) {
+             xml_private_t *p = new_child->_private;
+             p->flags |= xpf_skip;
+-            __xml_diff_object(old_child, new_child);
++            __xml_diff_object(old_child, new_child, TRUE);
+ 
+         } else {
+             /* Check for movement, we already checked for differences */
+@@ -3554,7 +3554,7 @@ xml_calculate_changes(xmlNode * old, xmlNode * new)
+         xml_track_changes(new, NULL, NULL, FALSE);
+     }
+ 
+-    __xml_diff_object(old, new);
++    __xml_diff_object(old, new, FALSE);
+ }
+ 
+ xmlNode *
+-- 
+1.8.3.1
+
+
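+As a side illustration (again not from the upstream series), the child loop
+rewritten above relies on a common libxml2 idiom: when a per-child call may
+free the child it is handed, the next sibling has to be saved before the
+call.  The "forbidden" attribute and maybe_drop() below are invented
+stand-ins for pcmk__post_process_acl(child, TRUE), used only to show the
+iteration pattern:
+
+#include <stdio.h>
+#include <libxml/tree.h>
+
+/* Hypothetical check that may free the node it is given */
+static void maybe_drop(xmlNodePtr node)
+{
+    if (xmlHasProp(node, BAD_CAST "forbidden") != NULL) {
+        xmlUnlinkNode(node);
+        xmlFreeNode(node);  /* node is invalid to the caller from here on */
+    }
+}
+
+static void check_children(xmlNodePtr parent)
+{
+    for (xmlNodePtr child = parent->children; child != NULL; ) {
+        xmlNodePtr next = child->next;  /* saved before child may be freed */
+
+        maybe_drop(child);
+        child = next;
+    }
+}
+
+int main(void)
+{
+    xmlDocPtr doc = xmlNewDoc(BAD_CAST "1.0");
+    xmlNodePtr root = xmlNewDocNode(doc, NULL, BAD_CAST "resources", NULL);
+
+    xmlDocSetRootElement(doc, root);
+    xmlNewChild(root, NULL, BAD_CAST "ok", NULL);
+    xmlSetProp(xmlNewChild(root, NULL, BAD_CAST "bad", NULL),
+               BAD_CAST "forbidden", BAD_CAST "true");
+
+    check_children(root);   /* drops <bad/>, keeps <ok/> */
+    xmlDocDump(stdout, doc);
+    xmlFreeDoc(doc);
+    return 0;
+}
+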
+From b24cd86fa98fa042d40e6f0820c36d12e14ff3ed Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 15 Aug 2019 14:13:32 -0500
+Subject: [PATCH 54/96] Fix: extra: handle run-as-user properly in ClusterMon
+
+7b303943 improperly searched for the crm_mon process when the user option was
+set (regression since 1.1.16)
+---
+ extra/resources/ClusterMon | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/extra/resources/ClusterMon b/extra/resources/ClusterMon
+index 0604a26..0f4eb55 100755
+--- a/extra/resources/ClusterMon
++++ b/extra/resources/ClusterMon
+@@ -152,14 +152,24 @@ ClusterMon_stop() {
+ }
+ 
+ ClusterMon_monitor() {
++    local USERARG=""
++    local header
++    local pid
++
+     if [ -f $OCF_RESKEY_pidfile ]; then
+ 	pid=`cat $OCF_RESKEY_pidfile`
+ 	if [ ! -z $pid ]; then
+-	    str=$(echo "su - $OCF_RESKEY_user -c \"$CMON_CMD\"" | tr 'crmon, \t' 'xxxxxxxx')
+-	    ps -o "args=${str}" -p $pid 2>/dev/null | \
++            if [ -n "$OCF_RESKEY_user" ]; then
++                USERARG="-u $OCF_RESKEY_user"
++            fi
++
++            # Use column header wide as command, to ensure it's shown in full
++            header=$(echo $CMON_CMD | tr 'crmon, \t' 'xxxxxxxx')
++
++            ps $USERARG -o "args=${header}" -p $pid 2>/dev/null | \
+ 		grep -qE "[c]rm_mon.*${OCF_RESKEY_pidfile}"
+-	    rc=$?
+-	    case $rc in
++
++            case $? in
+ 		0) exit $OCF_SUCCESS;;
+ 		1) exit $OCF_NOT_RUNNING;;
+ 		*) exit $OCF_ERR_GENERIC;;
+-- 
+1.8.3.1
+
+
+From cdf23bd35084a13a02a394cada2d5baae857f47f Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 18 Jul 2019 20:36:16 -0500
+Subject: [PATCH 55/96] Fix: extra: calculate #health_disk correctly in SysInfo
+
+Previously, if SysInfo monitored multiple disks, the status of the last disk in
+the list was used as the value of #health_disk. Now, #health_disk matches the
+documentation in setting #health_disk red if any disk in the list is low on
+space.
+---
+ extra/resources/SysInfo | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/extra/resources/SysInfo b/extra/resources/SysInfo
+index 7d1c0a3..4441026 100755
+--- a/extra/resources/SysInfo
++++ b/extra/resources/SysInfo
+@@ -157,6 +157,7 @@ UpdateStat() {
+ }
+ 
+ SysInfoStats() {
++    local DISK_STATUS="green"
+ 
+     UpdateStat arch "`uname -m`"
+     UpdateStat os "`uname -s`-`uname -r`"
+@@ -241,15 +242,12 @@ SysInfoStats() {
+ 	    disk_label=`echo $disk | sed -e 's#^/$#root#;s#^/*##;s#/#_#g'`
+ 	    disk_free=`SysInfo_hdd_units $disk_free`
+ 	    UpdateStat ${disk_label}_free $disk_free
+-	    if [ -n "$MIN_FREE" ]; then
+-		if [ $disk_free -le $MIN_FREE ]; then
+-		    UpdateStat "#health_disk" "red"
+-		else
+-		    UpdateStat "#health_disk" "green"
+-		fi
++            if [ -n "$MIN_FREE" ] && [ $disk_free -le $MIN_FREE ]; then
++                DISK_STATUS="red"
+ 	    fi
+ 	fi
+     done
++    UpdateStat "#health_disk" "$DISK_STATUS"
+ }
+ 
+ SysInfo_megabytes() {
+-- 
+1.8.3.1
+
+
+From dd8c31f794eb3cbbc37bf1abd6a2ec90374b7c36 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 15 Aug 2019 14:43:09 -0500
+Subject: [PATCH 56/96] Test: cts-cli: add test for crm_diff
+
+---
+ tools/Makefile.am          |  8 ++++--
+ tools/crm_diff_new.xml     | 54 +++++++++++++++++++++++++++++++++++++
+ tools/crm_diff_old.xml     | 54 +++++++++++++++++++++++++++++++++++++
+ tools/regression.sh        |  5 ++++
+ tools/regression.tools.exp | 67 ++++++++++++++++++++++++++++++++++++++++++++--
+ 5 files changed, 184 insertions(+), 4 deletions(-)
+ create mode 100644 tools/crm_diff_new.xml
+ create mode 100644 tools/crm_diff_old.xml
+
+diff --git a/tools/Makefile.am b/tools/Makefile.am
+index 3548035..d8c3215 100644
+--- a/tools/Makefile.am
++++ b/tools/Makefile.am
+@@ -42,8 +42,12 @@ sbin_PROGRAMS		= crm_simulate crmadmin cibadmin crm_node crm_attribute crm_resou
+ 
+ testdir			= $(datadir)/$(PACKAGE)/tests/cli
+ test_SCRIPTS		= regression.sh
+-test_DATA               = regression.dates.exp regression.tools.exp regression.acls.exp \
+-			  regression.validity.exp
++test_DATA		= regression.dates.exp		\
++			  regression.tools.exp		\
++			  regression.acls.exp		\
++			  regression.validity.exp	\
++			  crm_diff_new.xml		\
++			  crm_diff_old.xml
+ 
+ if BUILD_HEARTBEAT_SUPPORT
+ sbin_PROGRAMS           += crm_uuid
+diff --git a/tools/crm_diff_new.xml b/tools/crm_diff_new.xml
+new file mode 100644
+index 0000000..7c2ec22
+--- /dev/null
++++ b/tools/crm_diff_new.xml
+@@ -0,0 +1,54 @@
++<cib crm_feature_set="3.0.14" validate-with="pacemaker-2.10" epoch="1" num_updates="0" admin_epoch="0">
++  <configuration>
++    <crm_config>
++      <cluster_property_set id="cib-bootstrap-options">
++        <!-- test: move attribute "value" before "name" -->
++        <nvpair id="cib-bootstrap-options-cluster-name" value="mycluster" name="cluster-name" />
++        <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="1" />
++      </cluster_property_set>
++    </crm_config>
++    <nodes>
++      <node id="1" uname="node1"/>
++      <node id="2" uname="node2"/>
++      <node id="3" uname="node3"/>
++      <!-- test: add element for node4 -->
++      <node id="4" uname="node4"/>
++    </nodes>
++    <!-- test: add a new comment below this one -->
++    <!-- hello world -->
++    <resources>
++      <!-- test: modify this comment to say something different -->
++      <primitive id="Fencing" class="stonith" type="fence_xvm">
++        <meta_attributes id="Fencing-meta">
++          <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
++        </meta_attributes>
++        <instance_attributes id="Fencing-params">
++          <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
++          <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
++          <!-- test: modify attribute value to add node4 -->
++          <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4"/>
++        </instance_attributes>
++        <operations>
++          <!-- test: add attribute timeout="120s" -->
++          <op id="Fencing-monitor-120s" interval="120s" timeout="120s" name="monitor" />
++          <op id="Fencing-stop-0" interval="0" name="stop" timeout="60s"/>
++          <!-- test: delete element Fencing-start-0 -->
++        </operations>
++      </primitive>
++      <primitive id="dummy" class="ocf" type="pacemaker" provider="Dummy">
++        <instance_attributes id="dummy-params">
++          <!-- test: move element dummy-fake below dummy-op_sleep -->
++          <nvpair id="dummy-op_sleep" name="op_sleep" value="3"/>
++          <nvpair id="dummy-fake" name="fake" value="0"/>
++        </instance_attributes>
++        <operations>
++          <!-- test: delete attribute timeout -->
++          <op id="dummy-monitor-5s" interval="5s" name="monitor"/>
++        </operations>
++      </primitive>
++    </resources>
++    <constraints/>
++    <!-- test: move this comment to end of configuration -->
++  </configuration>
++  <status/>
++</cib>
+diff --git a/tools/crm_diff_old.xml b/tools/crm_diff_old.xml
+new file mode 100644
+index 0000000..8a92edd
+--- /dev/null
++++ b/tools/crm_diff_old.xml
+@@ -0,0 +1,54 @@
++<cib crm_feature_set="3.0.14" validate-with="pacemaker-2.10" epoch="1" num_updates="0" admin_epoch="0">
++  <configuration>
++    <!-- test: move this comment to end of configuration -->
++    <crm_config>
++      <cluster_property_set id="cib-bootstrap-options">
++        <!-- test: move attribute "value" before "name" -->
++        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster" />
++        <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="1" />
++      </cluster_property_set>
++    </crm_config>
++    <!-- test: delete this comment -->
++    <nodes>
++      <node id="1" uname="node1"/>
++      <node id="2" uname="node2"/>
++      <node id="3" uname="node3"/>
++      <!-- test: add element for node4 -->
++    </nodes>
++    <!-- test: add a new comment below this one -->
++    <resources>
++      <!-- test: modify this comment -->
++      <primitive id="Fencing" class="stonith" type="fence_xvm">
++        <meta_attributes id="Fencing-meta">
++          <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
++        </meta_attributes>
++        <instance_attributes id="Fencing-params">
++          <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
++          <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
++          <!-- test: modify attribute value to add node4 -->
++          <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
++        </instance_attributes>
++        <operations>
++          <!-- test: add attribute timeout="120s" -->
++          <op id="Fencing-monitor-120s" interval="120s" name="monitor" />
++          <op id="Fencing-stop-0" interval="0" name="stop" timeout="60s"/>
++          <!-- test: delete element Fencing-start-0 -->
++          <op id="Fencing-start-0" interval="0" name="start" timeout="60s"/>
++        </operations>
++      </primitive>
++      <primitive id="dummy" class="ocf" type="pacemaker" provider="Dummy">
++        <instance_attributes id="dummy-params">
++          <!-- test: move element dummy-fake below dummy-op_sleep -->
++          <nvpair id="dummy-fake" name="fake" value="0"/>
++          <nvpair id="dummy-op_sleep" name="op_sleep" value="3"/>
++        </instance_attributes>
++        <operations>
++          <!-- test: delete attribute timeout -->
++          <op id="dummy-monitor-5s" interval="5s" timeout="10s" name="monitor"/>
++        </operations>
++      </primitive>
++    </resources>
++    <constraints/>
++  </configuration>
++  <status/>
++</cib>
+diff --git a/tools/regression.sh b/tools/regression.sh
+index 3680f13..2765595 100755
+--- a/tools/regression.sh
++++ b/tools/regression.sh
+@@ -9,6 +9,7 @@ verbose=0
+ tests="dates tools acls validity"
+ 
+ CRM_EX_OK=0
++CRM_EX_ERROR=1
+ 
+ function test_assert() {
+     target=$1; shift
+@@ -388,6 +389,10 @@ function test_tools() {
+     test_assert $CRM_EX_OK
+ 
+     rm -f /tmp/$$.existing.xml /tmp/$$.resources.xml
++
++    desc="Create an XML patchset"
++    cmd="crm_diff -o $test_home/crm_diff_old.xml -n $test_home/crm_diff_new.xml"
++    test_assert $CRM_EX_ERROR 0
+ }
+ 
+ function test_dates() {
+diff --git a/tools/regression.tools.exp b/tools/regression.tools.exp
+index 5be42c8..900544a 100644
+--- a/tools/regression.tools.exp
++++ b/tools/regression.tools.exp
+@@ -3074,8 +3074,8 @@ Migration will take effect until:
+ * Passed: crm_resource   - Try to move a resource previously moved with a lifetime
+ =#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#=
+ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
+-       This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
+-       This will be the case even if node1 is the last node in the cluster
++	This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
++	This will be the case even if node1 is the last node in the cluster
+ Migration will take effect until:
+ =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#=
+ <cib epoch="46" num_updates="0" admin_epoch="1">
+@@ -3174,3 +3174,66 @@ Migration will take effect until:
+ </cib>
+ =#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
+ * Passed: crm_resource   - Remove expired constraints
++=#=#=#= Begin test: Create an XML patchset =#=#=#=
++<diff format="2">
++  <version>
++    <source admin_epoch="0" epoch="1" num_updates="0"/>
++    <target admin_epoch="0" epoch="1" num_updates="0"/>
++  </version>
++  <change operation="delete" path="/cib/configuration/comment" position="0"/>
++  <change operation="delete" path="/cib/configuration/comment" position="1"/>
++  <change operation="delete" path="/cib/configuration/resources/comment" position="0"/>
++  <change operation="delete" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/operations/op[@id=&apos;Fencing-start-0&apos;]"/>
++  <change operation="modify" path="/cib/configuration/crm_config/cluster_property_set[@id=&apos;cib-bootstrap-options&apos;]/nvpair[@id=&apos;cib-bootstrap-options-cluster-name&apos;]">
++    <change-list>
++      <change-attr name="value" operation="set" value="mycluster"/>
++      <change-attr name="name" operation="set" value="cluster-name"/>
++    </change-list>
++    <change-result>
++      <nvpair id="cib-bootstrap-options-cluster-name" value="mycluster" name="cluster-name"/>
++    </change-result>
++  </change>
++  <change operation="create" path="/cib/configuration/nodes" position="4">
++    <node id="4" uname="node4"/>
++  </change>
++  <change operation="create" path="/cib/configuration" position="3">
++    <!-- hello world -->
++  </change>
++  <change operation="create" path="/cib/configuration/resources" position="0">
++    <!-- test: modify this comment to say something different -->
++  </change>
++  <change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/instance_attributes[@id=&apos;Fencing-params&apos;]/nvpair[@id=&apos;Fencing-pcmk_host_list&apos;]">
++    <change-list>
++      <change-attr name="value" operation="set" value="node1 node2 node3 node4"/>
++    </change-list>
++    <change-result>
++      <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4"/>
++    </change-result>
++  </change>
++  <change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/operations/op[@id=&apos;Fencing-monitor-120s&apos;]">
++    <change-list>
++      <change-attr name="timeout" operation="set" value="120s"/>
++      <change-attr name="name" operation="set" value="monitor"/>
++    </change-list>
++    <change-result>
++      <op id="Fencing-monitor-120s" interval="120s" timeout="120s" name="monitor"/>
++    </change-result>
++  </change>
++  <change operation="move" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/instance_attributes[@id=&apos;dummy-params&apos;]/nvpair[@id=&apos;dummy-op_sleep&apos;]" position="1"/>
++  <change operation="move" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/instance_attributes[@id=&apos;dummy-params&apos;]/nvpair[@id=&apos;dummy-fake&apos;]" position="2"/>
++  <change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/operations/op[@id=&apos;dummy-monitor-5s&apos;]">
++    <change-list>
++      <change-attr name="name" operation="set" value="monitor"/>
++      <change-attr name="timeout" operation="unset"/>
++    </change-list>
++    <change-result>
++      <op id="dummy-monitor-5s" interval="5s" name="monitor"/>
++    </change-result>
++  </change>
++  <change operation="create" path="/cib/configuration" position="6">
++    <!-- test: move this comment to end of configuration -->
++  </change>
++</diff>
++
++=#=#=#= End test: Create an XML patchset - Operation not permitted (1) =#=#=#=
++* Passed: crm_diff       - Create an XML patchset
+-- 
+1.8.3.1
+
+
+From d542a0579095471a5e3e21d7de2918051ab95ef1 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 15 Aug 2019 15:04:24 -0500
+Subject: [PATCH 57/96] Refactor: libcrmcommon: add assertion
+
+not really needed, but will hopefully make static analysis happy
+---
+ lib/common/xml.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/lib/common/xml.c b/lib/common/xml.c
+index 9fd83a8..ae4cc6a 100644
+--- a/lib/common/xml.c
++++ b/lib/common/xml.c
+@@ -3372,22 +3372,23 @@ __xml_diff_object(xmlNode * old, xmlNode * new, bool check_top)
+ {
+     xmlNode *cIter = NULL;
+     xmlAttr *pIter = NULL;
++    xml_private_t *p = NULL;
+ 
+     CRM_CHECK(new != NULL, return);
+     if(old == NULL) {
+         crm_node_created(new);
+         pcmk__post_process_acl(new, check_top); // Check creation is allowed
+         return;
++    }
+ 
+-    } else {
+-        xml_private_t *p = new->_private;
++    p = new->_private;
++    CRM_CHECK(p != NULL, return);
+ 
+-        if(p->flags & xpf_processed) {
+-            /* Avoid re-comparing nodes */
+-            return;
+-        }
+-        p->flags |= xpf_processed;
++    if(p->flags & xpf_processed) {
++        /* Avoid re-comparing nodes */
++        return;
+     }
++    p->flags |= xpf_processed;
+ 
+     for (pIter = pcmk__first_xml_attr(new); pIter != NULL; pIter = pIter->next) {
+         xml_private_t *p = pIter->_private;
+-- 
+1.8.3.1
+
+
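The refactor above replaces the nested else branch with an early return and adds CRM_CHECK(p != NULL, return) so an analyzer can see that new->_private is never dereferenced when NULL. Below is a minimal, self-contained sketch of the same guard-and-early-return shape; the CHECK_OR macro, struct node and process_once() are illustrative stand-ins, not Pacemaker code (the real CRM_CHECK goes through Pacemaker's logging rather than fprintf).

    #include <stdio.h>

    /* Stand-in for a CRM_CHECK-style guard: if expr is false, report it and
     * run the recovery action instead of continuing.
     */
    #define CHECK_OR(expr, action) do {                         \
            if (!(expr)) {                                      \
                fprintf(stderr, "check failed: %s\n", #expr);   \
                action;                                         \
            }                                                   \
        } while (0)

    struct node {
        struct node *private_data;   /* may legitimately be NULL */
        unsigned int flags;
    };

    #define FLAG_PROCESSED 0x1U

    /* Early-return style used by the patch: guard each pointer before use so
     * the "p might be NULL" path is visibly impossible to an analyzer.
     */
    static void
    process_once(struct node *new_node)
    {
        struct node *p = NULL;

        CHECK_OR(new_node != NULL, return);

        p = new_node->private_data;
        CHECK_OR(p != NULL, return);

        if (p->flags & FLAG_PROCESSED) {
            return;                      /* avoid re-processing */
        }
        p->flags |= FLAG_PROCESSED;
        /* ... real comparison work would go here ... */
    }

    int
    main(void)
    {
        struct node inner = { NULL, 0 };
        struct node outer = { &inner, 0 };

        process_once(&outer);            /* marks inner as processed */
        process_once(&outer);            /* second call returns early */
        process_once(NULL);              /* guarded, no crash */
        return 0;
    }
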
+From 01f56916870c72f054829b2276111729fc9e52de Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 12 Aug 2019 13:18:24 -0500
+Subject: [PATCH 58/96] Log: pacemakerd: log a better warning if unable to
+ create /var/run/crm
+
+also makes static analysis happy
+---
+ mcp/pacemaker.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c
+index 1cc72af..dbe6220 100644
+--- a/mcp/pacemaker.c
++++ b/mcp/pacemaker.c
+@@ -1346,8 +1346,12 @@ main(int argc, char **argv)
+         crm_exit(ENOKEY);
+     }
+ 
+-    mkdir(CRM_STATE_DIR, 0750);
+-    mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid);
++    // Used by some resource agents
++    if ((mkdir(CRM_STATE_DIR, 0750) < 0) && (errno != EEXIST)) {
++        crm_warn("Could not create " CRM_STATE_DIR ": %s", pcmk_strerror(errno));
++    } else {
++        mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid);
++    }
+ 
+     /* Used to store core/blackbox/pengine/cib files in */
+     crm_build_path(CRM_PACEMAKER_DIR, 0750);
+-- 
+1.8.3.1
+
+
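The change above is the standard mkdir() idiom: treat EEXIST as success, warn on any other failure, and only proceed (here with ownership changes) when the directory is actually usable. A self-contained sketch of that idiom, using strerror() and an arbitrary example path instead of pcmk_strerror() and CRM_STATE_DIR:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    /* Create a directory, tolerating the case where it already exists.
     * Returns 0 on success (created or already present), -1 otherwise.
     */
    static int
    create_state_dir(const char *path, mode_t mode)
    {
        if ((mkdir(path, mode) < 0) && (errno != EEXIST)) {
            fprintf(stderr, "Could not create %s: %s\n", path, strerror(errno));
            return -1;
        }
        /* Only adjust ownership here; the real code calls mcp_chown() at
         * this point, and skips it when the directory is unusable.
         */
        return 0;
    }

    int
    main(void)
    {
        /* "/tmp/example-state-dir" is an arbitrary example path. */
        return (create_state_dir("/tmp/example-state-dir", 0750) == 0) ? 0 : 1;
    }
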
+From b7b7e5ca6df11a9a21b9b61352741f4c3d9f5bf6 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 12 Aug 2019 13:29:44 -0500
+Subject: [PATCH 59/96] Log: pacemakerd: tweak messages for checking for
+ existing instance
+
+and silence a static analysis warning about unused return value
+---
+ mcp/pacemaker.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c
+index dbe6220..6ec25be 100644
+--- a/mcp/pacemaker.c
++++ b/mcp/pacemaker.c
+@@ -1280,12 +1280,12 @@ main(int argc, char **argv)
+     /* Restore the original facility so that mcp_read_config() does the right thing */
+     set_daemon_option("logfacility", facility);
+ 
+-    crm_debug("Checking for old instances of %s", CRM_SYSTEM_MCP);
++    crm_debug("Checking for existing Pacemaker instance");
+     old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0);
+-    crm_ipc_connect(old_instance);
++    (void) crm_ipc_connect(old_instance);
+ 
+     if (shutdown) {
+-        crm_debug("Terminating previous instance");
++        crm_debug("Shutting down existing Pacemaker instance by request");
+         while (crm_ipc_connected(old_instance)) {
+             xmlNode *cmd =
+                 create_request(CRM_OP_QUIT, NULL, NULL, CRM_SYSTEM_MCP, CRM_SYSTEM_MCP, NULL);
+@@ -1303,7 +1303,7 @@ main(int argc, char **argv)
+     } else if (crm_ipc_connected(old_instance)) {
+         crm_ipc_close(old_instance);
+         crm_ipc_destroy(old_instance);
+-        crm_err("Pacemaker is already active, aborting startup");
++        crm_err("Aborting start-up because active Pacemaker instance found");
+         crm_exit(DAEMON_RESPAWN_STOP);
+     }
+ 
+-- 
+1.8.3.1
+
+
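Apart from the message wording, the patch above casts the crm_ipc_connect() result to (void): the branches that follow test crm_ipc_connected() anyway, so the cast simply tells static analysis that ignoring the return value is deliberate. The same pattern in isolation; try_connect() is an arbitrary example function, not a Pacemaker API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Example function whose result is sometimes legitimately uninteresting. */
    static bool
    try_connect(const char *name)
    {
        printf("connecting to %s\n", name);
        return false;   /* pretend the peer is not running */
    }

    int
    main(void)
    {
        /* Explicitly discard the result: success or failure is decided by a
         * later "are we connected?" check, so ignoring it here is intentional
         * rather than an oversight.
         */
        (void) try_connect("example-daemon");
        return 0;
    }
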
+From e32a3350ade774c1f10e397c994bfeb96e459a73 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 12 Aug 2019 13:41:43 -0500
+Subject: [PATCH 60/96] Refactor: controller: avoid memcpy() for two characters
+
+makes static analysis happy (which complained about copying 2 characters of a
+3-character literal)
+---
+ crmd/lrm.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/crmd/lrm.c b/crmd/lrm.c
+index 776c02b..437840f 100644
+--- a/crmd/lrm.c
++++ b/crmd/lrm.c
+@@ -2480,7 +2480,11 @@ unescape_newlines(const char *string)
+     ret = strdup(string);
+     pch = strstr(ret, escaped_newline);
+     while (pch != NULL) {
+-        memcpy(pch, "\n ", 2);
++        /* Replace newline escape pattern with actual newline (and a space so we
++         * don't have to shuffle the rest of the buffer)
++         */
++        pch[0] = '\n';
++        pch[1] = ' ';
+         pch = strstr(pch, escaped_newline);
+     }
+ 
+-- 
+1.8.3.1
+
+
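The rewrite above assigns the two replacement bytes directly instead of memcpy()ing two characters out of a three-character string literal, which is what the analyzer objected to. A simplified, self-contained rendering of the loop follows; the escaped_newline pattern is assumed here to be the two-character sequence backslash plus n, which may differ from the actual definition in lrm.c, and the NULL checks are additions for the sake of a standalone program:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Replace each two-character "\n" escape in a copy of the input with a
     * real newline plus a space.  Writing the two bytes individually keeps
     * the buffer the same length, so nothing has to be shuffled.
     * Caller must free() the result.
     */
    static char *
    unescape_newlines_example(const char *string)
    {
        static const char escaped_newline[] = "\\n";  /* backslash, 'n' */
        char *ret = NULL;
        char *pch = NULL;

        if (string == NULL) {
            return NULL;
        }
        ret = strdup(string);
        if (ret == NULL) {
            return NULL;
        }
        pch = strstr(ret, escaped_newline);
        while (pch != NULL) {
            pch[0] = '\n';   /* real newline ... */
            pch[1] = ' ';    /* ... plus a space in place of the 'n' */
            pch = strstr(pch, escaped_newline);
        }
        return ret;
    }

    int
    main(void)
    {
        char *s = unescape_newlines_example("line one\\nline two\\nline three");

        printf("%s\n", s);
        free(s);
        return 0;
    }
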
+From e47691249062f46c7e2719994871717e5d42fb8f Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 12 Aug 2019 19:40:31 -0500
+Subject: [PATCH 61/96] Refactor: libcrmcommon: use constant for all uses of
+ XML parse options
+
+Mainly so we can comment an issue with it.
+---
+ lib/common/xml.c | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+diff --git a/lib/common/xml.c b/lib/common/xml.c
+index ae4cc6a..26cd78d 100644
+--- a/lib/common/xml.c
++++ b/lib/common/xml.c
+@@ -44,6 +44,17 @@
+ #define XML_BUFFER_SIZE	4096
+ #define XML_PARSER_DEBUG 0
+ 
++/* @TODO XML_PARSE_RECOVER allows some XML errors to be silently worked around
++ * by libxml2, which is potentially ambiguous and dangerous. We should drop it
++ * when we can break backward compatibility with configurations that might be
++ * relying on it (i.e. pacemaker 3.0.0).
++ *
++ * It might be a good idea to have a transitional period where we first try
++ * parsing without XML_PARSE_RECOVER, and if that fails, try parsing again with
++ * it, logging a warning if it succeeds.
++ */
++#define PCMK__XML_PARSE_OPTS    (XML_PARSE_NOBLANKS | XML_PARSE_RECOVER)
++
+ typedef struct {
+     int found;
+     const char *string;
+@@ -2154,14 +2165,10 @@ string2xml(const char *input)
+     ctxt = xmlNewParserCtxt();
+     CRM_CHECK(ctxt != NULL, return NULL);
+ 
+-    /* xmlCtxtUseOptions(ctxt, XML_PARSE_NOBLANKS|XML_PARSE_RECOVER); */
+-
+     xmlCtxtResetLastError(ctxt);
+     xmlSetGenericErrorFunc(ctxt, crm_xml_err);
+-    /* initGenericErrorDefaultFunc(crm_xml_err); */
+-    output =
+-        xmlCtxtReadDoc(ctxt, (const xmlChar *)input, NULL, NULL,
+-                       XML_PARSE_NOBLANKS | XML_PARSE_RECOVER);
++    output = xmlCtxtReadDoc(ctxt, (const xmlChar *) input, NULL, NULL,
++                            PCMK__XML_PARSE_OPTS);
+     if (output) {
+         xml = xmlDocGetRootElement(output);
+     }
+@@ -2328,17 +2335,13 @@ filename2xml(const char *filename)
+     gboolean uncompressed = TRUE;
+     xmlParserCtxtPtr ctxt = NULL;
+     xmlErrorPtr last_error = NULL;
+-    static int xml_options = XML_PARSE_NOBLANKS | XML_PARSE_RECOVER;
+ 
+     /* create a parser context */
+     ctxt = xmlNewParserCtxt();
+     CRM_CHECK(ctxt != NULL, return NULL);
+ 
+-    /* xmlCtxtUseOptions(ctxt, XML_PARSE_NOBLANKS|XML_PARSE_RECOVER); */
+-
+     xmlCtxtResetLastError(ctxt);
+     xmlSetGenericErrorFunc(ctxt, crm_xml_err);
+-    /* initGenericErrorDefaultFunc(crm_xml_err); */
+ 
+     if (filename) {
+         uncompressed = !crm_ends_with_ext(filename, ".bz2");
+@@ -2346,15 +2349,17 @@ filename2xml(const char *filename)
+ 
+     if (filename == NULL) {
+         /* STDIN_FILENO == fileno(stdin) */
+-        output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL, xml_options);
++        output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL,
++                               PCMK__XML_PARSE_OPTS);
+ 
+     } else if (uncompressed) {
+-        output = xmlCtxtReadFile(ctxt, filename, NULL, xml_options);
++        output = xmlCtxtReadFile(ctxt, filename, NULL, PCMK__XML_PARSE_OPTS);
+ 
+     } else {
+         char *input = decompress_file(filename);
+ 
+-        output = xmlCtxtReadDoc(ctxt, (const xmlChar *)input, NULL, NULL, xml_options);
++        output = xmlCtxtReadDoc(ctxt, (const xmlChar *) input, NULL, NULL,
++                                PCMK__XML_PARSE_OPTS);
+         free(input);
+     }
+ 
+-- 
+1.8.3.1
+
+
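The PCMK__XML_PARSE_OPTS constant above centralizes the parser flags, and its comment suggests a possible transition away from XML_PARSE_RECOVER: parse strictly first, and retry in recovery mode, with a warning, only if the strict parse fails. A minimal libxml2 sketch of that two-pass idea, not Pacemaker code (plain fprintf() stands in for crm_warn(), and no parser context is reused):

    /* Compile with: cc two_pass_parse.c $(xml2-config --cflags --libs) */
    #include <stdio.h>
    #include <libxml/parser.h>

    /* Try a strict parse first; fall back to XML_PARSE_RECOVER only if that
     * fails, warning that the input needed recovery.  Returns NULL if even
     * the recovering parse fails.
     */
    static xmlDoc *
    parse_with_fallback(const char *input)
    {
        const int strict_opts = XML_PARSE_NOBLANKS;
        xmlDoc *doc = xmlReadDoc((const xmlChar *) input, NULL, NULL,
                                 strict_opts);

        if (doc == NULL) {
            doc = xmlReadDoc((const xmlChar *) input, NULL, NULL,
                             strict_opts | XML_PARSE_RECOVER);
            if (doc != NULL) {
                fprintf(stderr, "warning: input only parsed in recovery mode\n");
            }
        }
        return doc;
    }

    int
    main(void)
    {
        /* The unclosed <b> forces the recovery path. */
        xmlDoc *doc = parse_with_fallback("<a><b></a>");

        if (doc != NULL) {
            xmlFreeDoc(doc);
        }
        xmlCleanupParser();
        return 0;
    }
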
+From ff79b7755d6debd12329632e5a14f8dd1e827e96 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 13 Aug 2019 16:32:47 -0500
+Subject: [PATCH 62/96] Refactor: tools: avoid use-of-NULL false positives in
+ stonith_admin
+
+to make static analysis happy
+---
+ fencing/admin.c | 46 ++++++++++++++++++++++++++++++----------------
+ 1 file changed, 30 insertions(+), 16 deletions(-)
+
+diff --git a/fencing/admin.c b/fencing/admin.c
+index 7da43e9..5fedb7b 100644
+--- a/fencing/admin.c
++++ b/fencing/admin.c
+@@ -312,9 +312,15 @@ handle_level(stonith_t *st, char *target, int fence_level,
+     char *node = NULL;
+     char *pattern = NULL;
+     char *name = NULL;
+-    char *value = strchr(target, '=');
++    char *value = NULL;
++
++    if (target == NULL) {
++        // Not really possible, but makes static analysis happy
++        return -EINVAL;
++    }
+ 
+     /* Determine if targeting by attribute, node name pattern or node name */
++    value = strchr(target, '=');
+     if (value != NULL)  {
+         name = target;
+         *value++ = '\0';
+@@ -453,6 +459,28 @@ validate(stonith_t *st, const char *agent, const char *id,
+     return rc;
+ }
+ 
++static void
++show_last_fenced(int as_nodeid, const char *target)
++{
++    time_t when = 0;
++
++    if (target == NULL) {
++        // Not really possible, but makes static analysis happy
++        return;
++    }
++    if (as_nodeid) {
++        uint32_t nodeid = atol(target);
++        when = stonith_api_time(nodeid, NULL, FALSE);
++    } else {
++        when = stonith_api_time(0, target, FALSE);
++    }
++    if(when) {
++        printf("Node %s last kicked at: %s\n", target, ctime(&when));
++    } else {
++        printf("Node %s has never been kicked\n", target);
++    }
++}
++
+ int
+ main(int argc, char **argv)
+ {
+@@ -741,21 +769,7 @@ main(int argc, char **argv)
+             rc = mainloop_fencing(st, target, "on", timeout, tolerance);
+             break;
+         case 'h':
+-            {
+-                time_t when = 0;
+-
+-                if(as_nodeid) {
+-                    uint32_t nodeid = atol(target);
+-                    when = stonith_api_time(nodeid, NULL, FALSE);
+-                } else {
+-                    when = stonith_api_time(0, target, FALSE);
+-                }
+-                if(when) {
+-                    printf("Node %s last kicked at: %s\n", target, ctime(&when));
+-                } else {
+-                    printf("Node %s has never been kicked\n", target);
+-                }
+-            }
++            show_last_fenced(as_nodeid, target);
+             break;
+         case 'H':
+             rc = handle_history(st, target, timeout, quiet,
+-- 
+1.8.3.1
+
+
+From 9fe5ad7d746ed60e304fa5420f434fa7750289ca Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 13 Aug 2019 16:59:13 -0500
+Subject: [PATCH 63/96] Low: libcrmcommon: handle pcmk_strerror(INT_MIN)
+
+not realistic, but makes static analysis happy
+---
+ lib/common/logging.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/lib/common/logging.c b/lib/common/logging.c
+index b8b0f6f..c392468 100644
+--- a/lib/common/logging.c
++++ b/lib/common/logging.c
+@@ -1149,7 +1149,9 @@ pcmk_strerror(int rc)
+ 
+     if (error == 0) {
+         return "OK";
+-    } else if (error < PCMK_ERROR_OFFSET) {
++
++    // Of course error > 0 ... unless someone passed INT_MIN as rc
++    } else if ((error > 0) && (error < PCMK_ERROR_OFFSET)) {
+         return strerror(error);
+     }
+ 
+-- 
+1.8.3.1
+
+
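The extra (error > 0) test above matters because the function maps negative return codes to positive errno-style values, and the negation of INT_MIN does not fit in an int, so the strerror() branch could otherwise be reached with a nonsensical value. A small, self-contained illustration of the same guard follows; it is restructured to avoid the overflowing negation altogether, and EXAMPLE_ERROR_OFFSET is a stand-in value, not the real PCMK_ERROR_OFFSET:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: 190 stands in for the offset where library-specific
     * return codes begin and plain errno values end.
     */
    #define EXAMPLE_ERROR_OFFSET 190

    static const char *
    describe_rc(int rc)
    {
        int error = rc;

        /* Map negative return codes back to positive errno-style values,
         * guarding INT_MIN explicitly because -INT_MIN does not fit in an int.
         */
        if ((rc < 0) && (rc > INT_MIN)) {
            error = -rc;
        }

        if (error == 0) {
            return "OK";

        /* error is only positive here if rc was a sane value to begin with */
        } else if ((error > 0) && (error < EXAMPLE_ERROR_OFFSET)) {
            return strerror(error);
        }
        return "Unknown error";
    }

    int
    main(void)
    {
        printf("%s\n", describe_rc(0));        /* OK */
        printf("%s\n", describe_rc(-EINVAL));  /* Invalid argument */
        printf("%s\n", describe_rc(INT_MIN));  /* Unknown error, no overflow */
        return 0;
    }
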
+From 8201550e12973ace36684156d14262f34386cfb7 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 15 Aug 2019 15:33:18 -0500
+Subject: [PATCH 64/96] Build: GNUmakefile: improve coverity targets
+
+Most importantly, add the ability to specify the coverity aggressiveness level,
+and put outputs in the build directory. Otherwise mostly refactoring for
+best practices.
+
+The public coverity instance changed its upload process, so we can't do a
+simple curl anymore. We haven't been using it anyway, so just echo what needs
+to be done for that case.
+---
+ GNUmakefile | 95 ++++++++++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 65 insertions(+), 30 deletions(-)
+
+diff --git a/GNUmakefile b/GNUmakefile
+index a084150..0822890 100644
+--- a/GNUmakefile
++++ b/GNUmakefile
+@@ -122,6 +122,11 @@ rpmbuild-with = \
+ init:
+ 	./autogen.sh init
+ 
++.PHONY: init-if-needed
++init-if-needed:
++	test -e configure || ./autogen.sh
++	test -e Makefile || ./configure
++
+ export:
+ 	rm -f $(PACKAGE)-dirty.tar.* $(PACKAGE)-tip.tar.* $(PACKAGE)-HEAD.tar.*
+ 	if [ ! -f $(TARFILE) ]; then						\
+@@ -265,37 +270,67 @@ rc:
+ dirty:
+ 	make TAG=dirty mock
+ 
+-COVERITY_DIR	 = $(shell pwd)/coverity-$(TAG)
+-COVFILE          = $(PACKAGE)-coverity-$(TAG).tgz
+-COVHOST		?= scan5.coverity.com
+-COVPASS		?= password
+ 
+-# Public coverity
+-coverity:
+-	test -e configure || ./autogen.sh
+-	test -e Makefile || ./configure
+-	make core-clean
+-	rm -rf $(COVERITY_DIR)
+-	cov-build --dir $(COVERITY_DIR) make core
+-	tar czf $(COVFILE) --transform=s@.*$(TAG)@cov-int@ $(COVERITY_DIR)
+-	@echo "Uploading to public Coverity instance..."
+-	curl --form file=@$(COVFILE) --form project=$(PACKAGE) --form password=$(COVPASS) --form email=andrew@beekhof.net http://$(COVHOST)/cgi-bin/upload.py
+-	rm -rf $(COVFILE) $(COVERITY_DIR)
+-
+-coverity-corp:
+-	test -e configure || ./autogen.sh
+-	test -e Makefile || ./configure
+-	make core-clean
+-	rm -rf $(COVERITY_DIR)
+-	cov-build --dir $(COVERITY_DIR) make core
+-	@echo "Waiting for a corporate Coverity license..."
+-	cov-analyze --dir $(COVERITY_DIR) --wait-for-license
+-	cov-format-errors --dir $(COVERITY_DIR) --emacs-style > $(TAG).coverity
+-	cov-format-errors --dir $(COVERITY_DIR)
+-	rsync $(RSYNC_OPTS) "$(COVERITY_DIR)/c/output/errors/" "$(RSYNC_DEST)/coverity/$(PACKAGE)/$(TAG)"
+-	make core-clean
+-#	cov-commit-defects --host $(COVHOST) --dir $(COVERITY_DIR) --stream $(PACKAGE) --user auto --password $(COVPASS)
+-	rm -rf $(COVERITY_DIR)
++## Static analysis via coverity
++
++# Aggressiveness (low, medium, or high)
++COVLEVEL	?= low
++
++# Generated outputs
++COVERITY_DIR	= $(builddir)/coverity-$(TAG)
++COVTAR		= $(builddir)/$(PACKAGE)-coverity-$(TAG).tgz
++COVEMACS	= $(builddir)/$(TAG).coverity
++COVHTML		= $(COVERITY_DIR)/output/errors
++
++# Coverity outputs are phony so they get rebuilt every invocation
++
++.PHONY: $(COVERITY_DIR)
++$(COVERITY_DIR): init-if-needed core-clean coverity-clean
++	$(AM_V_GEN)cov-build --dir "$@" $(MAKE) $(AM_MAKEFLAGS) core
++
++.PHONY: $(COVTAR)
++$(COVTAR): $(COVERITY_DIR)
++	$(AM_V_GEN)tar czf "$@" --transform="s@.*$(TAG)@cov-int@" "$<"
++
++# emacs/html output assume $(COVERITY_DIR) has been built (don't want rebuild)
++
++.PHONY: $(COVEMACS)
++$(COVEMACS):
++	$(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --emacs-style > "$@"
++
++.PHONY: $(COVHTML)
++$(COVHTML):
++	$(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --html-output "$@"
++
++# Public coverity instance
++.PHONY: coverity
++coverity: $(COVTAR)
++	@echo "Now go to https://scan.coverity.com/users/sign_in and upload:"
++	@echo "  $(COVTAR)"
++	@echo "then make core-clean coverity-clean"
++
++# Licensed coverity instance
++
++.PHONY: coverity-analyze
++coverity-analyze: $(COVERITY_DIR)
++	@echo ""
++	@echo "Analyzing (waiting for coverity license if necessary) ..."
++	cov-analyze --dir "$<" --wait-for-license --security		\
++		--aggressiveness-level "$(COVLEVEL)"
++
++.PHONY: coverity-corp
++coverity-corp: coverity-analyze $(COVEMACS) $(COVHTML) core-clean
++	@echo "Done. See:"
++	@echo "  file://$(abs_builddir)/$(COVERITY_DIR)/output/errors/index.html"
++	@echo "When no longer needed, make coverity-clean"
++
++# Remove all outputs regardless of tag
++.PHONY: coverity-clean
++coverity-clean:
++	-rm -rf "$(builddir)"/coverity-*			\
++		"$(builddir)"/$(PACKAGE)-coverity-*.tgz		\
++		"$(builddir)"/*.coverity
++
+ 
+ global: clean-generic
+ 	gtags -q
+-- 
+1.8.3.1
+
+
+From 6659ba3cf69f95e0cd724b55a29c926010b68df8 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 15 Aug 2019 15:50:46 -0500
+Subject: [PATCH 66/96] Build: GNUmakefile: don't depend on prerequisite order
+ for coverity target
+
+---
+ GNUmakefile | 30 +++++++++++++++++-------------
+ 1 file changed, 17 insertions(+), 13 deletions(-)
+
+diff --git a/GNUmakefile b/GNUmakefile
+index 0822890..b2d5a28 100644
+--- a/GNUmakefile
++++ b/GNUmakefile
+@@ -288,21 +288,12 @@ COVHTML		= $(COVERITY_DIR)/output/errors
+ $(COVERITY_DIR): init-if-needed core-clean coverity-clean
+ 	$(AM_V_GEN)cov-build --dir "$@" $(MAKE) $(AM_MAKEFLAGS) core
+ 
++# Public coverity instance
++
+ .PHONY: $(COVTAR)
+ $(COVTAR): $(COVERITY_DIR)
+ 	$(AM_V_GEN)tar czf "$@" --transform="s@.*$(TAG)@cov-int@" "$<"
+ 
+-# emacs/html output assume $(COVERITY_DIR) has been built (don't want rebuild)
+-
+-.PHONY: $(COVEMACS)
+-$(COVEMACS):
+-	$(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --emacs-style > "$@"
+-
+-.PHONY: $(COVHTML)
+-$(COVHTML):
+-	$(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --html-output "$@"
+-
+-# Public coverity instance
+ .PHONY: coverity
+ coverity: $(COVTAR)
+ 	@echo "Now go to https://scan.coverity.com/users/sign_in and upload:"
+@@ -310,6 +301,10 @@ coverity: $(COVTAR)
+ 	@echo "then make core-clean coverity-clean"
+ 
+ # Licensed coverity instance
++#
++# The prerequisites are a little hacky; rather than actually required, some
++# of them are designed so that things execute in the proper order (which is
++# not the same as GNU make's order-only prerequisites).
+ 
+ .PHONY: coverity-analyze
+ coverity-analyze: $(COVERITY_DIR)
+@@ -318,10 +313,19 @@ coverity-analyze: $(COVERITY_DIR)
+ 	cov-analyze --dir "$<" --wait-for-license --security		\
+ 		--aggressiveness-level "$(COVLEVEL)"
+ 
++.PHONY: $(COVEMACS)
++$(COVEMACS): coverity-analyze
++	$(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --emacs-style > "$@"
++
++.PHONY: $(COVHTML)
++$(COVHTML): $(COVEMACS)
++	$(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --html-output "$@"
++
+ .PHONY: coverity-corp
+-coverity-corp: coverity-analyze $(COVEMACS) $(COVHTML) core-clean
++coverity-corp: $(COVHTML)
++	$(MAKE) $(AM_MAKEFLAGS) core-clean
+ 	@echo "Done. See:"
+-	@echo "  file://$(abs_builddir)/$(COVERITY_DIR)/output/errors/index.html"
++	@echo "  file://$(abs_builddir)/$(COVHTML)/index.html"
+ 	@echo "When no longer needed, make coverity-clean"
+ 
+ # Remove all outputs regardless of tag
+-- 
+1.8.3.1
+
+
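Taken together, the two GNUmakefile patches above leave the intended workflow as follows: "make coverity" builds the tree under cov-build and produces the tarball to be uploaded manually at scan.coverity.com; "make coverity-corp COVLEVEL=medium" (or low/high, with low as the default) runs cov-analyze locally against a license and formats the results as emacs-style and HTML output under the coverity-$(TAG) directory; "make coverity-clean" removes all of these outputs regardless of tag. The COVLEVEL=medium invocation is only an example of overriding the default.
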
+From 0158f59b11b3e14f86258265a491f81d83cc6fcf Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 15 Aug 2019 17:38:41 -0500
+Subject: [PATCH 67/96] Fix: all: restrict XML children loops to XML elements
+ where appropriate
+
+__xml_first_child() and __xml_next() are intended to be used in "for" loops
+where all XML node types (elements, comments, etc.) are desired.
+
+__xml_first_child_element() and __xml_next_element() are intended when
+only element children are desired.
+
+Previously, many element-only loops properly used __xml_next_element() but
+started with __xml_first_child(). In most cases, this would (by lucky
+circumstance) work without harm. However, there were cases where a comment as
+the first child of an element would case problems (for example,
+unpack_resources() would log a configuration error).
+
+Now, __xml_first_child_element() is always used in such cases.
+
+Additionally, there were some loops using __xml_first_child()/__xml_next() that
+clearly were expecting only elements. These have been converted to
+__xml_first_child_element()/__xml_next_element().
+
+Many more cases exist where __xml_first_child()/__xml_next() is used with IPC
+messages and patchsets. This commit does not convert those, though that would
+probably be a good idea for the future.
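+
The distinction is easy to reproduce with libxml2 directly: the raw first child of an element may be a comment (or text) node, so a loop that starts from it but then advances with an element-only stepper silently depends on the first child happening to be an element. The sketch below is not Pacemaker code; first_child_element() and next_element() are stand-ins for the __xml_first_child_element()/__xml_next_element() helpers, filtering on XML_ELEMENT_NODE:

    /* Compile with: cc element_loops.c $(xml2-config --cflags --libs) */
    #include <stdio.h>
    #include <libxml/parser.h>
    #include <libxml/tree.h>

    /* Skip anything that is not an XML element (comments, text, ...). */
    static xmlNode *
    first_child_element(xmlNode *parent)
    {
        xmlNode *child = (parent != NULL) ? parent->children : NULL;

        while ((child != NULL) && (child->type != XML_ELEMENT_NODE)) {
            child = child->next;
        }
        return child;
    }

    static xmlNode *
    next_element(xmlNode *node)
    {
        xmlNode *next = (node != NULL) ? node->next : NULL;

        while ((next != NULL) && (next->type != XML_ELEMENT_NODE)) {
            next = next->next;
        }
        return next;
    }

    int
    main(void)
    {
        const char *input =
            "<resources><!-- a comment --><primitive id=\"d\"/></resources>";
        xmlDoc *doc = xmlReadDoc((const xmlChar *) input, NULL, NULL,
                                 XML_PARSE_NOBLANKS);
        xmlNode *top = xmlDocGetRootElement(doc);

        /* The raw first child is the comment node, which is what tripped up
         * element-only loops that merely started with the first child.
         */
        printf("first child node type: %d\n", top->children->type);

        /* Element-only iteration sees just <primitive>. */
        for (xmlNode *child = first_child_element(top); child != NULL;
             child = next_element(child)) {
            printf("element child: %s\n", (const char *) child->name);
        }

        xmlFreeDoc(doc);
        xmlCleanupParser();
        return 0;
    }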
+---
+ lib/common/acl.c           |  12 ++---
+ lib/common/xml.c           |   7 +--
+ lib/pengine/clone.c        |   2 +-
+ lib/pengine/complex.c      |  10 ++--
+ lib/pengine/group.c        |   6 ++-
+ lib/pengine/rules.c        |  51 ++++++++++---------
+ lib/pengine/rules_alerts.c |   4 +-
+ lib/pengine/unpack.c       |  61 ++++++++++++++++-------
+ lib/pengine/utils.c        |  14 ++++--
+ pengine/allocate.c         |  10 ++--
+ pengine/constraints.c      | 120 +++++++++++++++++++++++++++++++++------------
+ pengine/native.c           |  11 +++--
+ tools/crm_mon.c            |  19 ++++---
+ tools/crm_resource_print.c |   4 +-
+ tools/crmadmin.c           |   4 +-
+ tools/fake_transition.c    |   4 +-
+ 16 files changed, 226 insertions(+), 113 deletions(-)
+
+diff --git a/lib/common/acl.c b/lib/common/acl.c
+index 72dc707..b5c20bb 100644
+--- a/lib/common/acl.c
++++ b/lib/common/acl.c
+@@ -131,8 +131,8 @@ __xml_acl_parse_entry(xmlNode *acl_top, xmlNode *acl_entry, GList *acls)
+ {
+     xmlNode *child = NULL;
+ 
+-    for (child = __xml_first_child(acl_entry); child;
+-         child = __xml_next(child)) {
++    for (child = __xml_first_child_element(acl_entry); child;
++         child = __xml_next_element(child)) {
+         const char *tag = crm_element_name(child);
+         const char *kind = crm_element_value(child, XML_ACL_ATTR_KIND);
+ 
+@@ -151,8 +151,8 @@ __xml_acl_parse_entry(xmlNode *acl_top, xmlNode *acl_entry, GList *acls)
+             if (ref_role) {
+                 xmlNode *role = NULL;
+ 
+-                for (role = __xml_first_child(acl_top); role;
+-                     role = __xml_next(role)) {
++                for (role = __xml_first_child_element(acl_top); role;
++                     role = __xml_next_element(role)) {
+                     if (!strcmp(XML_ACL_TAG_ROLE, (const char *) role->name)) {
+                         const char *role_id = crm_element_value(role,
+                                                                 XML_ATTR_ID);
+@@ -306,8 +306,8 @@ pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user)
+         if (acls) {
+             xmlNode *child = NULL;
+ 
+-            for (child = __xml_first_child(acls); child;
+-                 child = __xml_next(child)) {
++            for (child = __xml_first_child_element(acls); child;
++                 child = __xml_next_element(child)) {
+                 const char *tag = crm_element_name(child);
+ 
+                 if (!strcmp(tag, XML_ACL_TAG_USER)
+diff --git a/lib/common/xml.c b/lib/common/xml.c
+index 26cd78d..2c4238d 100644
+--- a/lib/common/xml.c
++++ b/lib/common/xml.c
+@@ -4242,7 +4242,8 @@ first_named_child(xmlNode * parent, const char *name)
+ {
+     xmlNode *match = NULL;
+ 
+-    for (match = __xml_first_child(parent); match != NULL; match = __xml_next(match)) {
++    for (match = __xml_first_child_element(parent); match != NULL;
++         match = __xml_next_element(match)) {
+         /*
+          * name == NULL gives first child regardless of name; this is
+          * semantically incorrect in this function, but may be necessary
+@@ -4265,14 +4266,14 @@ first_named_child(xmlNode * parent, const char *name)
+ xmlNode *
+ crm_next_same_xml(xmlNode *sibling)
+ {
+-    xmlNode *match = __xml_next(sibling);
++    xmlNode *match = __xml_next_element(sibling);
+     const char *name = crm_element_name(sibling);
+ 
+     while (match != NULL) {
+         if (!strcmp(crm_element_name(match), name)) {
+             return match;
+         }
+-        match = __xml_next(match);
++        match = __xml_next_element(match);
+     }
+     return NULL;
+ }
+diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
+index 88015a9..07a383c 100644
+--- a/lib/pengine/clone.c
++++ b/lib/pengine/clone.c
+@@ -172,7 +172,7 @@ clone_unpack(resource_t * rsc, pe_working_set_t * data_set)
+                  is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
+ 
+     // Clones may contain a single group or primitive
+-    for (a_child = __xml_first_child(xml_obj); a_child != NULL;
++    for (a_child = __xml_first_child_element(xml_obj); a_child != NULL;
+          a_child = __xml_next_element(a_child)) {
+ 
+         if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_RESOURCE, TRUE)
+diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
+index cdd409a..42492c9 100644
+--- a/lib/pengine/complex.c
++++ b/lib/pengine/complex.c
+@@ -282,7 +282,7 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
+ 
+     template_ops = find_xml_node(new_xml, "operations", FALSE);
+ 
+-    for (child_xml = __xml_first_child(xml_obj); child_xml != NULL;
++    for (child_xml = __xml_first_child_element(xml_obj); child_xml != NULL;
+          child_xml = __xml_next_element(child_xml)) {
+         xmlNode *new_child = NULL;
+ 
+@@ -298,13 +298,17 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
+         GHashTable *rsc_ops_hash =
+             g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, NULL);
+ 
+-        for (op = __xml_first_child(rsc_ops); op != NULL; op = __xml_next_element(op)) {
++        for (op = __xml_first_child_element(rsc_ops); op != NULL;
++             op = __xml_next_element(op)) {
++
+             char *key = template_op_key(op);
+ 
+             g_hash_table_insert(rsc_ops_hash, key, op);
+         }
+ 
+-        for (op = __xml_first_child(template_ops); op != NULL; op = __xml_next_element(op)) {
++        for (op = __xml_first_child_element(template_ops); op != NULL;
++             op = __xml_next_element(op)) {
++
+             char *key = template_op_key(op);
+ 
+             if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
+diff --git a/lib/pengine/group.c b/lib/pengine/group.c
+index 258c6b5..72f066e 100644
+--- a/lib/pengine/group.c
++++ b/lib/pengine/group.c
+@@ -1,5 +1,7 @@
+ /*
+- * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
++ * Copyright 2004-2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
+  *
+  * This source code is licensed under the GNU Lesser General Public License
+  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+@@ -46,7 +48,7 @@ group_unpack(resource_t * rsc, pe_working_set_t * data_set)
+ 
+     clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
+ 
+-    for (xml_native_rsc = __xml_first_child(xml_obj); xml_native_rsc != NULL;
++    for (xml_native_rsc = __xml_first_child_element(xml_obj); xml_native_rsc != NULL;
+          xml_native_rsc = __xml_next_element(xml_native_rsc)) {
+         if (crm_str_eq((const char *)xml_native_rsc->name, XML_CIB_TAG_RESOURCE, TRUE)) {
+             resource_t *new_rsc = NULL;
+diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
+index 2d948b9..523ed15 100644
+--- a/lib/pengine/rules.c
++++ b/lib/pengine/rules.c
+@@ -1,19 +1,10 @@
+-/* 
+- * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
+- * 
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Lesser General Public
+- * License as published by the Free Software Foundation; either
+- * version 2.1 of the License, or (at your option) any later version.
+- * 
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+- * Lesser General Public License for more details.
+- * 
+- * You should have received a copy of the GNU Lesser General Public
+- * License along with this library; if not, write to the Free Software
+- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++/*
++ * Copyright 2004-2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
++ *
++ * This source code is licensed under the GNU Lesser General Public License
++ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+  */
+ 
+ #include <crm_internal.h>
+@@ -46,7 +37,9 @@ test_ruleset(xmlNode * ruleset, GHashTable * node_hash, crm_time_t * now)
+     gboolean ruleset_default = TRUE;
+     xmlNode *rule = NULL;
+ 
+-    for (rule = __xml_first_child(ruleset); rule != NULL; rule = __xml_next_element(rule)) {
++    for (rule = __xml_first_child_element(ruleset); rule != NULL;
++         rule = __xml_next_element(rule)) {
++
+         if (crm_str_eq((const char *)rule->name, XML_TAG_RULE, TRUE)) {
+             ruleset_default = FALSE;
+             if (test_rule(rule, node_hash, RSC_ROLE_UNKNOWN, now)) {
+@@ -93,7 +86,9 @@ pe_test_rule_full(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role,
+     }
+ 
+     crm_trace("Testing rule %s", ID(rule));
+-    for (expr = __xml_first_child(rule); expr != NULL; expr = __xml_next_element(expr)) {
++    for (expr = __xml_first_child_element(rule); expr != NULL;
++         expr = __xml_next_element(expr)) {
++
+         test = pe_test_expression_full(expr, node_hash, role, now, match_data);
+         empty = FALSE;
+ 
+@@ -722,7 +717,9 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN
+         list = list->children;
+     }
+ 
+-    for (an_attr = __xml_first_child(list); an_attr != NULL; an_attr = __xml_next_element(an_attr)) {
++    for (an_attr = __xml_first_child_element(list); an_attr != NULL;
++         an_attr = __xml_next_element(an_attr)) {
++
+         if (crm_str_eq((const char *)an_attr->name, XML_CIB_TAG_NVPAIR, TRUE)) {
+             xmlNode *ref_nvpair = expand_idref(an_attr, top);
+ 
+@@ -769,9 +766,13 @@ get_versioned_rule(xmlNode * attr_set)
+     xmlNode * rule = NULL;
+     xmlNode * expr = NULL;
+ 
+-    for (rule = __xml_first_child(attr_set); rule != NULL; rule = __xml_next_element(rule)) {
++    for (rule = __xml_first_child_element(attr_set); rule != NULL;
++         rule = __xml_next_element(rule)) {
++
+         if (crm_str_eq((const char *)rule->name, XML_TAG_RULE, TRUE)) {
+-            for (expr = __xml_first_child(rule); expr != NULL; expr = __xml_next_element(expr)) {
++            for (expr = __xml_first_child_element(rule); expr != NULL;
++                 expr = __xml_next_element(expr)) {
++
+                 if (find_expression_type(expr) == version_expr) {
+                     return rule;
+                 }
+@@ -801,7 +802,7 @@ add_versioned_attributes(xmlNode * attr_set, xmlNode * versioned_attrs)
+         return;
+     }
+ 
+-    expr = __xml_first_child(rule);
++    expr = __xml_first_child_element(rule);
+     while (expr != NULL) {
+         if (find_expression_type(expr) != version_expr) {
+             xmlNode *node = expr;
+@@ -879,7 +880,9 @@ make_pairs_and_populate_data(xmlNode * top, xmlNode * xml_obj, const char *set_n
+     }
+ 
+     crm_trace("Checking for attributes");
+-    for (attr_set = __xml_first_child(xml_obj); attr_set != NULL; attr_set = __xml_next_element(attr_set)) {
++    for (attr_set = __xml_first_child_element(xml_obj); attr_set != NULL;
++         attr_set = __xml_next_element(attr_set)) {
++
+         /* Uncertain if set_name == NULL check is strictly necessary here */
+         if (set_name == NULL || crm_str_eq((const char *)attr_set->name, set_name, TRUE)) {
+             pair = NULL;
+@@ -1011,7 +1014,7 @@ pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version
+ 
+     if (versioned_params && ra_version) {
+         GHashTable *node_hash = crm_str_table_new();
+-        xmlNode *attr_set = __xml_first_child(versioned_params);
++        xmlNode *attr_set = __xml_first_child_element(versioned_params);
+ 
+         if (attr_set) {
+             g_hash_table_insert(node_hash, strdup(CRM_ATTR_RA_VERSION),
+diff --git a/lib/pengine/rules_alerts.c b/lib/pengine/rules_alerts.c
+index e2bab58..00be330 100644
+--- a/lib/pengine/rules_alerts.c
++++ b/lib/pengine/rules_alerts.c
+@@ -127,8 +127,8 @@ unpack_alert_filter(xmlNode *basenode, crm_alert_entry_t *entry)
+     xmlNode *event_type = NULL;
+     uint32_t flags = crm_alert_none;
+ 
+-    for (event_type = __xml_first_child(select); event_type != NULL;
+-         event_type = __xml_next(event_type)) {
++    for (event_type = __xml_first_child_element(select); event_type != NULL;
++         event_type = __xml_next_element(event_type)) {
+ 
+         const char *tagname = crm_element_name(event_type);
+ 
+diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
+index cf725a1..4282a7c 100644
+--- a/lib/pengine/unpack.c
++++ b/lib/pengine/unpack.c
+@@ -430,7 +430,9 @@ remote_id_conflict(const char *remote_name, pe_working_set_t *data)
+ #else
+     if (data->name_check == NULL) {
+         data->name_check = g_hash_table_new(crm_str_hash, g_str_equal);
+-        for (xml_rsc = __xml_first_child(parent); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(parent); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             const char *id = ID(xml_rsc);
+ 
+             /* avoiding heap allocation here because we know the duration of this hashtable allows us to */
+@@ -464,12 +466,14 @@ expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data
+     const char *remote_allow_migrate=NULL;
+     const char *container_managed = NULL;
+ 
+-    for (attr_set = __xml_first_child(xml_obj); attr_set != NULL; attr_set = __xml_next_element(attr_set)) {
++    for (attr_set = __xml_first_child_element(xml_obj); attr_set != NULL;
++         attr_set = __xml_next_element(attr_set)) {
+         if (safe_str_neq((const char *)attr_set->name, XML_TAG_META_SETS)) {
+             continue;
+         }
+ 
+-        for (attr = __xml_first_child(attr_set); attr != NULL; attr = __xml_next_element(attr)) {
++        for (attr = __xml_first_child_element(attr_set); attr != NULL;
++             attr = __xml_next_element(attr)) {
+             const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
+             const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
+ 
+@@ -538,7 +542,9 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
+     const char *type = NULL;
+     const char *score = NULL;
+ 
+-    for (xml_obj = __xml_first_child(xml_nodes); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) {
++    for (xml_obj = __xml_first_child_element(xml_nodes); xml_obj != NULL;
++         xml_obj = __xml_next_element(xml_obj)) {
++
+         if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, TRUE)) {
+             new_node = NULL;
+ 
+@@ -620,7 +626,9 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
+     xmlNode *xml_obj = NULL;
+ 
+     /* generate remote nodes from resource config before unpacking resources */
+-    for (xml_obj = __xml_first_child(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) {
++    for (xml_obj = __xml_first_child_element(xml_resources); xml_obj != NULL;
++         xml_obj = __xml_next_element(xml_obj)) {
++
+         const char *new_node_id = NULL;
+ 
+         /* first check if this is a bare metal remote node. Bare metal remote nodes
+@@ -659,7 +667,8 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
+         } else if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, TRUE)) {
+             xmlNode *xml_obj2 = NULL;
+             /* search through a group to see if any of the primitive contain a remote node. */
+-            for (xml_obj2 = __xml_first_child(xml_obj); xml_obj2 != NULL; xml_obj2 = __xml_next_element(xml_obj2)) {
++            for (xml_obj2 = __xml_first_child_element(xml_obj); xml_obj2 != NULL;
++                 xml_obj2 = __xml_next_element(xml_obj2)) {
+ 
+                 new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);
+ 
+@@ -750,7 +759,9 @@ unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set)
+         g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str,
+                               destroy_tag);
+ 
+-    for (xml_obj = __xml_first_child(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) {
++    for (xml_obj = __xml_first_child_element(xml_resources); xml_obj != NULL;
++         xml_obj = __xml_next_element(xml_obj)) {
++
+         resource_t *new_rsc = NULL;
+ 
+         if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, TRUE)) {
+@@ -808,7 +819,9 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
+     data_set->tags =
+         g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_tag);
+ 
+-    for (xml_tag = __xml_first_child(xml_tags); xml_tag != NULL; xml_tag = __xml_next_element(xml_tag)) {
++    for (xml_tag = __xml_first_child_element(xml_tags); xml_tag != NULL;
++         xml_tag = __xml_next_element(xml_tag)) {
++
+         xmlNode *xml_obj_ref = NULL;
+         const char *tag_id = ID(xml_tag);
+ 
+@@ -822,7 +835,9 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
+             continue;
+         }
+ 
+-        for (xml_obj_ref = __xml_first_child(xml_tag); xml_obj_ref != NULL; xml_obj_ref = __xml_next_element(xml_obj_ref)) {
++        for (xml_obj_ref = __xml_first_child_element(xml_tag); xml_obj_ref != NULL;
++             xml_obj_ref = __xml_next_element(xml_obj_ref)) {
++
+             const char *obj_ref = ID(xml_obj_ref);
+ 
+             if (crm_str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, TRUE) == FALSE) {
+@@ -916,7 +931,9 @@ unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
+ {
+     xmlNode *xml_obj = NULL;
+ 
+-    for (xml_obj = __xml_first_child(xml_tickets); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) {
++    for (xml_obj = __xml_first_child_element(xml_tickets); xml_obj != NULL;
++         xml_obj = __xml_next_element(xml_obj)) {
++
+         if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, TRUE) == FALSE) {
+             continue;
+         }
+@@ -1075,7 +1092,9 @@ unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set)
+     bool changed = false;
+     xmlNode *lrm_rsc = NULL;
+ 
+-    for (xmlNode *state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) {
++    for (xmlNode *state = __xml_first_child_element(status); state != NULL;
++         state = __xml_next_element(state)) {
++
+         const char *id = NULL;
+         const char *uname = NULL;
+         node_t *this_node = NULL;
+@@ -1174,7 +1193,9 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set)
+             g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket);
+     }
+ 
+-    for (state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) {
++    for (state = __xml_first_child_element(status); state != NULL;
++         state = __xml_next_element(state)) {
++
+         if (crm_str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, TRUE)) {
+             xmlNode *xml_tickets = state;
+             GHashTable *state_hash = NULL;
+@@ -2285,7 +2306,8 @@ unpack_lrm_rsc_state(node_t * node, xmlNode * rsc_entry, pe_working_set_t * data
+     op_list = NULL;
+     sorted_op_list = NULL;
+ 
+-    for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
++    for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL;
++         rsc_op = __xml_next_element(rsc_op)) {
+         if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
+             op_list = g_list_prepend(op_list, rsc_op);
+         }
+@@ -2354,8 +2376,8 @@ static void
+ handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set)
+ {
+     xmlNode *rsc_entry = NULL;
+-    for (rsc_entry = __xml_first_child(lrm_rsc_list); rsc_entry != NULL;
+-        rsc_entry = __xml_next_element(rsc_entry)) {
++    for (rsc_entry = __xml_first_child_element(lrm_rsc_list); rsc_entry != NULL;
++         rsc_entry = __xml_next_element(rsc_entry)) {
+ 
+         resource_t *rsc;
+         resource_t *container;
+@@ -2400,7 +2422,7 @@ unpack_lrm_resources(node_t * node, xmlNode * lrm_rsc_list, pe_working_set_t * d
+ 
+     crm_trace("Unpacking resources on %s", node->details->uname);
+ 
+-    for (rsc_entry = __xml_first_child(lrm_rsc_list); rsc_entry != NULL;
++    for (rsc_entry = __xml_first_child_element(lrm_rsc_list); rsc_entry != NULL;
+          rsc_entry = __xml_next_element(rsc_entry)) {
+ 
+         if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
+@@ -3490,7 +3512,8 @@ extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gbool
+     op_list = NULL;
+     sorted_op_list = NULL;
+ 
+-    for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
++    for (rsc_op = __xml_first_child_element(rsc_entry);
++         rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
+         if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
+             crm_xml_add(rsc_op, "resource", rsc);
+             crm_xml_add(rsc_op, XML_ATTR_UNAME, node);
+@@ -3548,7 +3571,7 @@ find_operations(const char *rsc, const char *node, gboolean active_filter,
+ 
+     xmlNode *node_state = NULL;
+ 
+-    for (node_state = __xml_first_child(status); node_state != NULL;
++    for (node_state = __xml_first_child_element(status); node_state != NULL;
+          node_state = __xml_next_element(node_state)) {
+ 
+         if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
+@@ -3580,7 +3603,7 @@ find_operations(const char *rsc, const char *node, gboolean active_filter,
+                 tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
+                 tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE);
+ 
+-                for (lrm_rsc = __xml_first_child(tmp); lrm_rsc != NULL;
++                for (lrm_rsc = __xml_first_child_element(tmp); lrm_rsc != NULL;
+                      lrm_rsc = __xml_next_element(lrm_rsc)) {
+                     if (crm_str_eq((const char *)lrm_rsc->name, XML_LRM_TAG_RESOURCE, TRUE)) {
+ 
+diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
+index 8eac2ce..67c1c3d 100644
+--- a/lib/pengine/utils.c
++++ b/lib/pengine/utils.c
+@@ -646,7 +646,7 @@ unpack_operation_on_fail(action_t * action)
+ 
+         CRM_CHECK(action->rsc != NULL, return NULL);
+ 
+-        for (operation = __xml_first_child(action->rsc->ops_xml);
++        for (operation = __xml_first_child_element(action->rsc->ops_xml);
+              operation && !value; operation = __xml_next_element(operation)) {
+ 
+             if (!crm_str_eq((const char *)operation->name, "op", TRUE)) {
+@@ -685,7 +685,7 @@ find_min_interval_mon(resource_t * rsc, gboolean include_disabled)
+     xmlNode *op = NULL;
+     xmlNode *operation = NULL;
+ 
+-    for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
++    for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
+          operation = __xml_next_element(operation)) {
+ 
+         if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
+@@ -873,8 +873,12 @@ unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, unsigned long l
+     xmlNode *attrs = NULL;
+     xmlNode *attr = NULL;
+ 
+-    for (attrs = __xml_first_child(versioned_meta); attrs != NULL; attrs = __xml_next_element(attrs)) {
+-        for (attr = __xml_first_child(attrs); attr != NULL; attr = __xml_next_element(attr)) {
++    for (attrs = __xml_first_child_element(versioned_meta); attrs != NULL;
++         attrs = __xml_next_element(attrs)) {
++
++        for (attr = __xml_first_child_element(attrs); attr != NULL;
++             attr = __xml_next_element(attr)) {
++
+             const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
+             const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
+ 
+@@ -1233,7 +1237,7 @@ find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_dis
+     xmlNode *operation = NULL;
+ 
+   retry:
+-    for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
++    for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
+          operation = __xml_next_element(operation)) {
+         if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
+             name = crm_element_value(operation, "name");
+diff --git a/pengine/allocate.c b/pengine/allocate.c
+index d600bbf..28f58b3 100644
+--- a/pengine/allocate.c
++++ b/pengine/allocate.c
+@@ -465,7 +465,9 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_worki
+         DeleteRsc(rsc, node, FALSE, data_set);
+     }
+ 
+-    for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
++    for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL;
++         rsc_op = __xml_next_element(rsc_op)) {
++
+         if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
+             op_list = g_list_prepend(op_list, rsc_op);
+         }
+@@ -596,7 +598,7 @@ check_actions(pe_working_set_t * data_set)
+ 
+     xmlNode *node_state = NULL;
+ 
+-    for (node_state = __xml_first_child(status); node_state != NULL;
++    for (node_state = __xml_first_child_element(status); node_state != NULL;
+          node_state = __xml_next_element(node_state)) {
+         if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
+             id = crm_element_value(node_state, XML_ATTR_ID);
+@@ -619,8 +621,10 @@ check_actions(pe_working_set_t * data_set)
+             if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
+                 xmlNode *rsc_entry = NULL;
+ 
+-                for (rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL;
++                for (rsc_entry = __xml_first_child_element(lrm_rscs);
++                     rsc_entry != NULL;
+                      rsc_entry = __xml_next_element(rsc_entry)) {
++
+                     if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
+ 
+                         if (xml_has_children(rsc_entry)) {
+diff --git a/pengine/constraints.c b/pengine/constraints.c
+index 6bf5adf..776eee1 100644
+--- a/pengine/constraints.c
++++ b/pengine/constraints.c
+@@ -1,5 +1,7 @@
+ /*
+- * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
++ * Copyright 2004-2019 the Pacemaker project contributors
++ *
++ * The version control history for this file may have further details.
+  *
+  * This source code is licensed under the GNU General Public License version 2
+  * or later (GPLv2+) WITHOUT ANY WARRANTY.
+@@ -53,7 +55,7 @@ unpack_constraints(xmlNode * xml_constraints, pe_working_set_t * data_set)
+     xmlNode *xml_obj = NULL;
+     xmlNode *lifetime = NULL;
+ 
+-    for (xml_obj = __xml_first_child(xml_constraints); xml_obj != NULL;
++    for (xml_obj = __xml_first_child_element(xml_constraints); xml_obj != NULL;
+          xml_obj = __xml_next_element(xml_obj)) {
+         const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
+         const char *tag = crm_element_name(xml_obj);
+@@ -489,7 +491,9 @@ expand_tags_in_sets(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t
+     new_xml = copy_xml(xml_obj);
+     cons_id = ID(new_xml);
+ 
+-    for (set = __xml_first_child(new_xml); set != NULL; set = __xml_next_element(set)) {
++    for (set = __xml_first_child_element(new_xml); set != NULL;
++         set = __xml_next_element(set)) {
++
+         xmlNode *xml_rsc = NULL;
+         GListPtr tag_refs = NULL;
+         GListPtr gIter = NULL;
+@@ -498,7 +502,9 @@ expand_tags_in_sets(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t
+             continue;
+         }
+ 
+-        for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             resource_t *rsc = NULL;
+             tag_t *tag = NULL;
+             const char *id = ID(xml_rsc);
+@@ -783,7 +789,7 @@ unpack_rsc_location(xmlNode * xml_obj, resource_t * rsc_lh, const char * role,
+     } else {
+         xmlNode *rule_xml = NULL;
+ 
+-        for (rule_xml = __xml_first_child(xml_obj); rule_xml != NULL;
++        for (rule_xml = __xml_first_child_element(xml_obj); rule_xml != NULL;
+              rule_xml = __xml_next_element(rule_xml)) {
+             if (crm_str_eq((const char *)rule_xml->name, XML_TAG_RULE, TRUE)) {
+                 empty = FALSE;
+@@ -927,7 +933,9 @@ unpack_location_set(xmlNode * location, xmlNode * set, pe_working_set_t * data_s
+     role = crm_element_value(set, "role");
+     local_score = crm_element_value(set, XML_RULE_ATTR_SCORE);
+ 
+-    for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++    for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL;
++         xml_rsc = __xml_next_element(xml_rsc)) {
++
+         if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+             EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
+             unpack_rsc_location(location, resource, role, local_score, data_set, NULL);
+@@ -955,7 +963,9 @@ unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set)
+         xml_obj = expanded_xml;
+     }
+ 
+-    for (set = __xml_first_child(xml_obj); set != NULL; set = __xml_next_element(set)) {
++    for (set = __xml_first_child_element(xml_obj); set != NULL;
++         set = __xml_next_element(set)) {
++
+         if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) {
+             any_sets = TRUE;
+             set = expand_idref(set, data_set->input);
+@@ -1628,7 +1638,9 @@ unpack_order_set(xmlNode * set, enum pe_order_kind parent_kind, resource_t ** rs
+         flags = get_asymmetrical_flags(local_kind);
+     }
+ 
+-    for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++    for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL;
++         xml_rsc = __xml_next_element(xml_rsc)) {
++
+         if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+             EXPAND_CONSTRAINT_IDREF(id, resource, ID(xml_rsc));
+             resources = g_list_append(resources, resource);
+@@ -1805,7 +1817,9 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+         free(task);
+         update_action_flags(unordered_action, pe_action_requires_any, __FUNCTION__, __LINE__);
+ 
+-        for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (!crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 continue;
+             }
+@@ -1818,7 +1832,9 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+                                 NULL, NULL, unordered_action,
+                                 pe_order_one_or_more | pe_order_implies_then_printed, data_set);
+         }
+-        for (xml_rsc_2 = __xml_first_child(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next_element(xml_rsc_2)) {
++        for (xml_rsc_2 = __xml_first_child_element(set2); xml_rsc_2 != NULL;
++             xml_rsc_2 = __xml_next_element(xml_rsc_2)) {
++
+             if (!crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 continue;
+             }
+@@ -1840,7 +1856,9 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+             /* get the last one */
+             const char *rid = NULL;
+ 
+-            for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++            for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL;
++                 xml_rsc = __xml_next_element(xml_rsc)) {
++
+                 if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                     rid = ID(xml_rsc);
+                 }
+@@ -1849,7 +1867,9 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+ 
+         } else {
+             /* get the first one */
+-            for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++            for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL;
++                 xml_rsc = __xml_next_element(xml_rsc)) {
++
+                 if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                     EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+                     break;
+@@ -1861,7 +1881,9 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+     if (crm_is_true(sequential_2)) {
+         if (invert == FALSE) {
+             /* get the first one */
+-            for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++            for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL;
++                 xml_rsc = __xml_next_element(xml_rsc)) {
++
+                 if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                     EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
+                     break;
+@@ -1872,7 +1894,9 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+             /* get the last one */
+             const char *rid = NULL;
+ 
+-            for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++            for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL;
++                 xml_rsc = __xml_next_element(xml_rsc)) {
++
+                 if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                     rid = ID(xml_rsc);
+                 }
+@@ -1885,7 +1909,9 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+         new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set);
+ 
+     } else if (rsc_1 != NULL) {
+-        for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
+                 new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set);
+@@ -1895,7 +1921,9 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+     } else if (rsc_2 != NULL) {
+         xmlNode *xml_rsc = NULL;
+ 
+-        for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+                 new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set);
+@@ -1903,14 +1931,18 @@ order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kin
+         }
+ 
+     } else {
+-        for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 xmlNode *xml_rsc_2 = NULL;
+ 
+                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+ 
+-                for (xml_rsc_2 = __xml_first_child(set2); xml_rsc_2 != NULL;
++                for (xml_rsc_2 = __xml_first_child_element(set2);
++                     xml_rsc_2 != NULL;
+                      xml_rsc_2 = __xml_next_element(xml_rsc_2)) {
++
+                     if (crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                         EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
+                         new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set);
+@@ -2078,7 +2110,9 @@ unpack_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set)
+         return FALSE;
+     }
+ 
+-    for (set = __xml_first_child(xml_obj); set != NULL; set = __xml_next_element(set)) {
++    for (set = __xml_first_child_element(xml_obj); set != NULL;
++         set = __xml_next_element(set)) {
++
+         if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) {
+             any_sets = TRUE;
+             set = expand_idref(set, data_set->input);
+@@ -2183,7 +2217,9 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set)
+         return TRUE;
+ 
+     } else if (local_score >= 0 && safe_str_eq(ordering, "group")) {
+-        for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
+                 if (with != NULL) {
+@@ -2197,7 +2233,9 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set)
+         }
+     } else if (local_score >= 0) {
+         resource_t *last = NULL;
+-        for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
+                 if (last != NULL) {
+@@ -2216,14 +2254,18 @@ unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set)
+          * (i.e. that no one in the set can run with anyone else in the set)
+          */
+ 
+-        for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 xmlNode *xml_rsc_with = NULL;
+ 
+                 EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
+ 
+-                for (xml_rsc_with = __xml_first_child(set); xml_rsc_with != NULL;
++                for (xml_rsc_with = __xml_first_child_element(set);
++                     xml_rsc_with != NULL;
+                      xml_rsc_with = __xml_next_element(xml_rsc_with)) {
++
+                     if (crm_str_eq((const char *)xml_rsc_with->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                         if (safe_str_eq(resource->id, ID(xml_rsc_with))) {
+                             break;
+@@ -2258,7 +2300,9 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score,
+ 
+     if (sequential_1 == NULL || crm_is_true(sequential_1)) {
+         /* get the first one */
+-        for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+                 break;
+@@ -2270,7 +2314,9 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score,
+         /* get the last one */
+         const char *rid = NULL;
+ 
+-        for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 rid = ID(xml_rsc);
+             }
+@@ -2282,7 +2328,9 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score,
+         rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set);
+ 
+     } else if (rsc_1 != NULL) {
+-        for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set2); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
+                 rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set);
+@@ -2290,7 +2338,9 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score,
+         }
+ 
+     } else if (rsc_2 != NULL) {
+-        for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+                 rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set);
+@@ -2298,14 +2348,18 @@ colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score,
+         }
+ 
+     } else {
+-        for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) {
++        for (xml_rsc = __xml_first_child_element(set1); xml_rsc != NULL;
++             xml_rsc = __xml_next_element(xml_rsc)) {
++
+             if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                 xmlNode *xml_rsc_2 = NULL;
+ 
+                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+ 
+-                for (xml_rsc_2 = __xml_first_child(set2); xml_rsc_2 != NULL;
++                for (xml_rsc_2 = __xml_first_child_element(set2);
++                     xml_rsc_2 != NULL;
+                      xml_rsc_2 = __xml_next_element(xml_rsc_2)) {
++
+                     if (crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) {
+                         EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
+                         rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set);
+@@ -2537,7 +2591,9 @@ unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set)
+         return FALSE;
+     }
+ 
+-    for (set = __xml_first_child(xml_obj); set != NULL; set = __xml_next_element(set)) {
++    for (set = __xml_first_child_element(xml_obj); set != NULL;
++         set = __xml_next_element(set)) {
++
+         if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) {
+             any_sets = TRUE;
+             set = expand_idref(set, data_set->input);
+@@ -2885,7 +2941,9 @@ unpack_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set)
+         return FALSE;
+     }
+ 
+-    for (set = __xml_first_child(xml_obj); set != NULL; set = __xml_next_element(set)) {
++    for (set = __xml_first_child_element(xml_obj); set != NULL;
++         set = __xml_next_element(set)) {
++
+         if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) {
+             any_sets = TRUE;
+             set = expand_idref(set, data_set->input);
+diff --git a/pengine/native.c b/pengine/native.c
+index 747cb10..10f1264 100644
+--- a/pengine/native.c
++++ b/pengine/native.c
+@@ -606,8 +606,9 @@ is_op_dup(resource_t * rsc, const char *name, const char *interval)
+     xmlNode *operation = NULL;
+ 
+     CRM_ASSERT(rsc);
+-    for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
++    for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
+          operation = __xml_next_element(operation)) {
++
+         if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
+             value = crm_element_value(operation, "name");
+             if (safe_str_neq(value, name)) {
+@@ -840,8 +841,10 @@ Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t *
+         (node == NULL || node->details->maintenance == FALSE)) {
+         xmlNode *operation = NULL;
+ 
+-        for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
++        for (operation = __xml_first_child_element(rsc->ops_xml);
++             operation != NULL;
+              operation = __xml_next_element(operation)) {
++
+             if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
+                 RecurringOp(rsc, start, node, operation, data_set);
+             }
+@@ -1064,8 +1067,10 @@ Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_
+         (node == NULL || node->details->maintenance == FALSE)) {
+         xmlNode *operation = NULL;
+ 
+-        for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
++        for (operation = __xml_first_child_element(rsc->ops_xml);
++             operation != NULL;
+              operation = __xml_next_element(operation)) {
++
+             if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
+                 RecurringOp_Stopped(rsc, start, node, operation, data_set);
+             }
+diff --git a/tools/crm_mon.c b/tools/crm_mon.c
+index 82e691e..ee9a3ef 100644
+--- a/tools/crm_mon.c
++++ b/tools/crm_mon.c
+@@ -1685,7 +1685,8 @@ print_rsc_history(FILE *stream, pe_working_set_t *data_set, node_t *node,
+     }
+ 
+     /* Create a list of this resource's operations */
+-    for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) {
++    for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL;
++         rsc_op = __xml_next_element(rsc_op)) {
+         if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
+             op_list = g_list_append(op_list, rsc_op);
+         }
+@@ -1753,8 +1754,8 @@ print_node_history(FILE *stream, pe_working_set_t *data_set,
+         lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
+ 
+         /* Print history of each of the node's resources */
+-        for (rsc_entry = __xml_first_child(lrm_rsc); rsc_entry != NULL;
+-             rsc_entry = __xml_next(rsc_entry)) {
++        for (rsc_entry = __xml_first_child_element(lrm_rsc); rsc_entry != NULL;
++             rsc_entry = __xml_next_element(rsc_entry)) {
+ 
+             if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
+                 print_rsc_history(stream, data_set, node, rsc_entry, operations);
+@@ -1974,8 +1975,8 @@ print_node_summary(FILE *stream, pe_working_set_t * data_set, gboolean operation
+     }
+ 
+     /* Print each node in the CIB status */
+-    for (node_state = __xml_first_child(cib_status); node_state != NULL;
+-         node_state = __xml_next(node_state)) {
++    for (node_state = __xml_first_child_element(cib_status); node_state != NULL;
++         node_state = __xml_next_element(node_state)) {
+         if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
+             print_node_history(stream, data_set, node_state, operations);
+         }
+@@ -4590,7 +4591,9 @@ static void crm_diff_update_v2(const char *event, xmlNode * msg)
+             xmlNode *state = NULL;
+             xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS);
+ 
+-            for (state = __xml_first_child(status); state != NULL; state = __xml_next(state)) {
++            for (state = __xml_first_child_element(status); state != NULL;
++                 state = __xml_next_element(state)) {
++
+                 node = crm_element_value(state, XML_ATTR_UNAME);
+                 if (node == NULL) {
+                     node = ID(state);
+@@ -4601,7 +4604,9 @@ static void crm_diff_update_v2(const char *event, xmlNode * msg)
+         } else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) {
+             xmlNode *state = NULL;
+ 
+-            for (state = __xml_first_child(match); state != NULL; state = __xml_next(state)) {
++            for (state = __xml_first_child_element(match); state != NULL;
++                 state = __xml_next_element(state)) {
++
+                 node = crm_element_value(state, XML_ATTR_UNAME);
+                 if (node == NULL) {
+                     node = ID(state);
+diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
+index c4f96cd..906ea75 100644
+--- a/tools/crm_resource_print.c
++++ b/tools/crm_resource_print.c
+@@ -29,8 +29,8 @@ cli_resource_print_cts_constraints(pe_working_set_t * data_set)
+     xmlNode *lifetime = NULL;
+     xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
+ 
+-    for (xml_obj = __xml_first_child(cib_constraints); xml_obj != NULL;
+-         xml_obj = __xml_next(xml_obj)) {
++    for (xml_obj = __xml_first_child_element(cib_constraints); xml_obj != NULL;
++         xml_obj = __xml_next_element(xml_obj)) {
+         const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
+ 
+         if (id == NULL) {
+diff --git a/tools/crmadmin.c b/tools/crmadmin.c
+index 920c262..c91807f 100644
+--- a/tools/crmadmin.c
++++ b/tools/crmadmin.c
+@@ -546,7 +546,9 @@ do_find_node_list(xmlNode * xml_node)
+     xmlNode *node = NULL;
+     xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node);
+ 
+-    for (node = __xml_first_child(nodes); node != NULL; node = __xml_next(node)) {
++    for (node = __xml_first_child_element(nodes); node != NULL;
++         node = __xml_next_element(node)) {
++
+         if (crm_str_eq((const char *)node->name, XML_CIB_TAG_NODE, TRUE)) {
+ 
+             if (BASH_EXPORT) {
+diff --git a/tools/fake_transition.c b/tools/fake_transition.c
+index 5741fed..3cdd1f1 100644
+--- a/tools/fake_transition.c
++++ b/tools/fake_transition.c
+@@ -160,7 +160,9 @@ create_op(xmlNode * cib_resource, const char *task, int interval, int outcome)
+     op->t_rcchange = op->t_run;
+ 
+     op->call_id = 0;
+-    for (xop = __xml_first_child(cib_resource); xop != NULL; xop = __xml_next(xop)) {
++    for (xop = __xml_first_child_element(cib_resource); xop != NULL;
++         xop = __xml_next_element(xop)) {
++
+         int tmp = 0;
+ 
+         crm_element_value_int(xop, XML_LRM_ATTR_CALLID, &tmp);
+-- 
+1.8.3.1
+
+
+From b05f992627cf468c815370010b5f9e2e58d122d5 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 29 Jul 2019 11:52:51 -0500
+Subject: [PATCH 68/96] Build: include,doc: ensure non-installed files are
+ listed in makefiles
+
+This is especially important for headers, so they are distributed with
+"make dist" for building.
+---
+ cts/Makefile.am                |  2 ++
+ include/Makefile.am            | 20 ++++++--------------
+ include/crm/Makefile.am        |  2 ++
+ include/crm/common/Makefile.am |  6 ++----
+ 4 files changed, 12 insertions(+), 18 deletions(-)
+
+diff --git a/cts/Makefile.am b/cts/Makefile.am
+index bf56215..f3f169c 100644
+--- a/cts/Makefile.am
++++ b/cts/Makefile.am
+@@ -22,6 +22,8 @@ MAINTAINERCLEANFILES    = Makefile.in
+ CLEANFILES      = LSBDummy HBDummy
+ 
+ EXTRA_DIST      = $(cts_SCRIPTS) $(cts_DATA)
++noinst_SCRIPTS		= cluster_test		\
++			  OCFIPraTest.py
+ 
+ ctsdir		= $(datadir)/$(PACKAGE)/tests/cts
+ ctslibdir	= $(pyexecdir)/cts
+diff --git a/include/Makefile.am b/include/Makefile.am
+index f7f5714..e6444ea 100644
+--- a/include/Makefile.am
++++ b/include/Makefile.am
+@@ -1,24 +1,16 @@
+ #
+ # Copyright (C) 2004-2009 Andrew Beekhof
+ #
+-# This program is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU General Public License
+-# as published by the Free Software Foundation; either version 2
+-# of the License, or (at your option) any later version.
+-# 
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-# GNU General Public License for more details.
+-# 
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, write to the Free Software
+-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++# This source code is licensed under the GNU General Public License version 2
++# or later (GPLv2+) WITHOUT ANY WARRANTY.
+ #
+ 
+ MAINTAINERCLEANFILES    = Makefile.in config.h.in
+ 
+-noinst_HEADERS	        = portability.h config.h crm_internal.h
++noinst_HEADERS	        = config.h 			\
++			  crm_internal.h		\
++			  doxygen.h			\
++			  portability.h
+ pkginclude_HEADERS	= crm_config.h
+ 
+ SUBDIRS                 =  crm
+diff --git a/include/crm/Makefile.am b/include/crm/Makefile.am
+index 951d483..1226537 100644
+--- a/include/crm/Makefile.am
++++ b/include/crm/Makefile.am
+@@ -23,4 +23,6 @@ header_HEADERS		= attrd.h cib.h cluster.h compatibility.h crm.h \
+ 			  error.h lrmd.h msg_xml.h services.h stonith-ng.h \
+ 			  transition.h
+ 
++noinst_HEADERS		= lrmd_alerts_internal.h
++
+ SUBDIRS                 = common pengine cib fencing cluster
+diff --git a/include/crm/common/Makefile.am b/include/crm/common/Makefile.am
+index aacb6ff..7ed2360 100644
+--- a/include/crm/common/Makefile.am
++++ b/include/crm/common/Makefile.am
+@@ -13,8 +13,6 @@ headerdir=$(pkgincludedir)/crm/common
+ 
+ header_HEADERS = xml.h ipc.h util.h iso8601.h mainloop.h logging.h \
+ 		 nvpair.h
+-noinst_HEADERS = ipcs.h internal.h remote_internal.h xml_internal.h \
++noinst_HEADERS = cib_secrets.h ipcs.h internal.h alerts_internal.h \
++		 iso8601_internal.h remote_internal.h xml_internal.h \
+ 		 ipc_internal.h
+-if BUILD_CIBSECRETS
+-noinst_HEADERS += cib_secrets.h
+-endif
+-- 
+1.8.3.1
+
+
+From af96be2dd205fecee505ee9c86d21ed6123c466e Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 26 Jun 2019 16:44:30 -0500
+Subject: [PATCH 69/96] Build: libpe_status,libpe_rules: make sure pkg-config
+ files are built
+
+---
+ lib/Makefile.am | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/lib/Makefile.am b/lib/Makefile.am
+index d73bf2e..b00db48 100644
+--- a/lib/Makefile.am
++++ b/lib/Makefile.am
+@@ -25,9 +25,9 @@ target_LIBS		= $(LIBS:%=pacemaker-%.pc)
+ 
+ target_PACKAGE		= pacemaker.pc
+ 
+-all-local: $(target_LIBS) $(target_PACKAGE)
++all-local: $(target_LIBS) $(target_PACKAGE) pacemaker-pe_rules.pc pacemaker-pe_status.pc
+ 
+-install-exec-local: $(target_LIBS) $(target_PACKAGE)
++install-exec-local: $(target_LIBS) $(target_PACKAGE) pacemaker-pe_rules.pc pacemaker-pe_status.pc
+ 	$(INSTALL) -d $(DESTDIR)/$(libdir)/pkgconfig
+ 	$(INSTALL) -m 644 $(target_LIBS) $(target_PACKAGE) $(DESTDIR)/$(libdir)/pkgconfig
+ 
+-- 
+1.8.3.1
+
+
+From 941de1727bd07c2cc3f30f1f5508370896a8de53 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 22 Aug 2019 13:06:39 -0500
+Subject: [PATCH 70/96] Build: lib: reorganize makefile
+
+... for readability and simplicity. This takes care of a minor issue where not
+all installed files were uninstalled.
+---
+ lib/Makefile.am | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/lib/Makefile.am b/lib/Makefile.am
+index b00db48..617008a 100644
+--- a/lib/Makefile.am
++++ b/lib/Makefile.am
+@@ -17,26 +17,25 @@
+ #
+ MAINTAINERCLEANFILES    = Makefile.in
+ 
+-EXTRA_DIST		= pacemaker.pc.in $(target_LIBS:%=%.in)
+-
+ LIBS			= cib lrmd service pengine fencing cluster
+ 
+-target_LIBS		= $(LIBS:%=pacemaker-%.pc)
++PC_FILES		= $(LIBS:%=pacemaker-%.pc)	\
++			  pacemaker.pc
+ 
+-target_PACKAGE		= pacemaker.pc
++EXTRA_DIST		= $(PC_FILES:%=%.in)
+ 
+-all-local: $(target_LIBS) $(target_PACKAGE) pacemaker-pe_rules.pc pacemaker-pe_status.pc
++all-local: $(PC_FILES)
+ 
+-install-exec-local: $(target_LIBS) $(target_PACKAGE) pacemaker-pe_rules.pc pacemaker-pe_status.pc
++install-exec-local: $(PC_FILES)
+ 	$(INSTALL) -d $(DESTDIR)/$(libdir)/pkgconfig
+-	$(INSTALL) -m 644 $(target_LIBS) $(target_PACKAGE) $(DESTDIR)/$(libdir)/pkgconfig
++	$(INSTALL) -m 644 $(PC_FILES) $(DESTDIR)/$(libdir)/pkgconfig
+ 
+ uninstall-local:
+-	cd $(DESTDIR)/$(libdir)/pkgconfig && rm -f $(target_LIBS) $(target_PACKAGE)
+-	rmdir $(DESTDIR)/$(libdir)/pkgconfig 2> /dev/null || :
++	-cd $(DESTDIR)/$(libdir)/pkgconfig && rm -f $(PC_FILES)
++	-rmdir $(DESTDIR)/$(libdir)/pkgconfig 2> /dev/null
+ 
+ clean-local:
+-	rm -f *.pc
++	rm -f $(PC_FILES)
+ 
+ ## Subdirectories...
+ SUBDIRS	= gnu common pengine transition cib services fencing lrmd cluster
+-- 
+1.8.3.1
+
+
+From 021dadf515c547a82dd00ad33dded91f9f8dac10 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 9 Jan 2018 18:27:09 -0600
+Subject: [PATCH 71/96] Build: www: update makefiles for website restructuring
+
+---
+ GNUmakefile                                | 10 +++++-----
+ abi-check                                  |  7 +++++--
+ doc/Makefile.am                            |  6 +++---
+ doc/Pacemaker_Explained/en-US/Ch-Intro.txt |  4 +++-
+ 4 files changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/GNUmakefile b/GNUmakefile
+index b2d5a28..352903f 100644
+--- a/GNUmakefile
++++ b/GNUmakefile
+@@ -341,17 +341,17 @@ global: clean-generic
+ 
+ global-upload: global
+ 	htags -sanhIT
+-	rsync $(RSYNC_OPTS) HTML/ "$(RSYNC_DEST)/global/$(PACKAGE)/$(TAG)"
++	rsync $(RSYNC_OPTS) HTML/ "$(RSYNC_DEST)/$(PACKAGE)/global/$(TAG)/"
+ 
+ %.8.html: %.8
+ 	echo groff -mandoc `man -w ./$<` -T html > $@
+ 	groff -mandoc `man -w ./$<` -T html > $@
+-	rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/man/$(PACKAGE)/"
++	rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/$(PACKAGE)/man/"
+ 
+ %.7.html: %.7
+ 	echo groff -mandoc `man -w ./$<` -T html > $@
+ 	groff -mandoc `man -w ./$<` -T html > $@
+-	rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/man/$(PACKAGE)/"
++	rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/$(PACKAGE)/man/"
+ 
+ manhtml-upload: all
+ 	find . -name "[a-z]*.[78]" -exec make \{\}.html \;
+@@ -360,12 +360,12 @@ doxygen: Doxyfile
+ 	doxygen Doxyfile
+ 
+ doxygen-upload: doxygen
+-	rsync $(RSYNC_OPTS) doc/api/html/ "$(RSYNC_DEST)/doxygen/$(PACKAGE)/$(TAG)"
++	rsync $(RSYNC_OPTS) doc/api/html/ "$(RSYNC_DEST)/$(PACKAGE)/doxygen/$(TAG)/"
+ 
+ abi:
+ 	./abi-check pacemaker $(LAST_RELEASE) $(TAG)
+ abi-www:
+-	./abi-check -u pacemaker $(LAST_RELEASE) $(TAG)
++	export RSYNC_DEST=$(RSYNC_DEST); ./abi-check -u pacemaker $(LAST_RELEASE) $(TAG)
+ 
+ www:	manhtml-upload global-upload doxygen-upload
+ 	make RSYNC_DEST=$(RSYNC_DEST) -C doc www
+diff --git a/abi-check b/abi-check
+index d28192b..18002c8 100755
+--- a/abi-check
++++ b/abi-check
+@@ -1,6 +1,9 @@
+ #!/bin/bash
+-UPLOAD=0
+ 
++# toplevel rsync destination for www targets (without trailing slash)
++: ${RSYNC_DEST:=root@www.clusterlabs.org:/var/www/html}
++
++UPLOAD=0
+ if [ $1 = "-u" ]; then
+     UPLOAD=1; shift
+ fi
+@@ -98,6 +101,6 @@ if [ $# = 2 ]; then
+ 	-d2 abi_dumps/${PACKAGE}/${PACKAGE}_${V2}.abi.tar.gz
+ 
+     if [ $UPLOAD = 1 -a -d compat_reports/pacemaker/${V1}_to_${V2} ]; then
+-	rsync -azxlSD --progress compat_reports/pacemaker/${V1}_to_${V2} root@www.clusterlabs.org:/var/www/html/abi/pacemaker/
++        rsync -azxlSD --progress compat_reports/pacemaker/${V1}_to_${V2} ${RSYNC_DEST}/${PACKAGE}/abi/
+     fi
+ fi
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index 8b04007..98ae680 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -29,7 +29,7 @@ docbook		=	Clusters_from_Scratch		\
+ doc_DATA	= $(ascii) $(generated_docs)
+ 
+ # toplevel rsync destination for www targets (without trailing slash)
+-RSYNC_DEST      ?= root@www.clusterlabs.org:/var/www/html/
++RSYNC_DEST      ?= root@www.clusterlabs.org:/var/www/html
+ 
+ # recursive, preserve symlinks/permissions/times, verbose, compress,
+ # don't cross filesystems, sparse, show progress
+@@ -294,7 +294,7 @@ pdf:
+ 
+ www: clean-local $(generated_docs) $(ascii)
+ 	make www-cli
+-	rsync $(RSYNC_OPTS) $(generated_docs) $(ascii) $(asciiman) "$(RSYNC_DEST)/doc/"
++	rsync $(RSYNC_OPTS) $(generated_docs) $(ascii) $(asciiman) "$(RSYNC_DEST)/$(PACKAGE)/doc/"
+ 
+ www-pcs: www-cli
+ 
+@@ -313,7 +313,7 @@ if BUILD_DOCBOOK
+ 			mv $$book/publish/$$lang/Pacemaker/$(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE)/epub/$$book/Pacemaker-1.1{-$(ASCIIDOC_CLI_TYPE),}-$$book-$$lang.epub;	\
+ 			mv $$book/publish/$$lang/Pacemaker/$(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE)/pdf/$$book/Pacemaker-1.1{-$(ASCIIDOC_CLI_TYPE),}-$$book-$$lang.pdf;	\
+ 		done;											\
+-		rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/doc/";				\
++		rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/$(PACKAGE)/doc/";				\
+ 		sed -i.sed 's@version:.*@version: $(PACKAGE_SERIES)@' $$book/publican.cfg;		\
+ 	done
+ endif
+diff --git a/doc/Pacemaker_Explained/en-US/Ch-Intro.txt b/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
+index e610651..dad0635 100644
+--- a/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
++++ b/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
+@@ -16,7 +16,9 @@ Additionally, this document is NOT a step-by-step how-to guide for
+ configuring a specific clustering scenario.
+ 
+ Although such guides exist,
+-footnote:[For example, see the http://www.clusterlabs.org/doc/[Clusters from Scratch] guide.]
++footnote:[
++For example, see https://www.clusterlabs.org/pacemaker/doc/[Clusters from Scratch]
++]
+ the purpose of this document is to provide an understanding of the building
+ blocks that can be used to construct any type of Pacemaker cluster.
+ 
+-- 
+1.8.3.1
+
+
+From 3d7c882c22bcb4f68a549297c11288aacd54fad9 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 11 Jan 2018 12:45:33 -0600
+Subject: [PATCH 72/96] Build: www: get rid of ASCIIDOC_CLI_TYPE
+
+Only pcs has been supported for a long while, and the setting only ever
+mattered for "Clusters from Scratch" anyway, even though it was applied
+to all books.
+---
+ doc/Makefile.am | 28 ++++++++--------------------
+ 1 file changed, 8 insertions(+), 20 deletions(-)
+
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index 98ae680..a00b704 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -39,7 +39,6 @@ publican_docs   =
+ generated_docs	=
+ generated_mans	=
+ 
+-ASCIIDOC_CLI_TYPE := pcs
+ 
+ # What formats to build: pdf,html,html-single,html-desktop,epub
+ DOCBOOK_FORMATS := html-desktop
+@@ -290,33 +289,22 @@ brand-rpm-install: brand-rpm-build
+ 	find publican-clusterlabs -name "*.noarch.rpm" -exec sudo rpm -Uvh --force \{\} \;
+ 
+ pdf:
+-	make DOCBOOK_FORMATS="pdf" ASCIIDOC_CLI_TYPE=$(ASCIIDOC_CLI_TYPE) all-local
++	make DOCBOOK_FORMATS="pdf" all-local
+ 
+ www: clean-local $(generated_docs) $(ascii)
+-	make www-cli
+-	rsync $(RSYNC_OPTS) $(generated_docs) $(ascii) $(asciiman) "$(RSYNC_DEST)/$(PACKAGE)/doc/"
+-
+-www-pcs: www-cli
+-
+-www-cli:
+-	for book in $(docbook); do 										\
+-		sed -i.sed 's@brand:.*@brand: clusterlabs@' $$book/publican.cfg;				\
+-		sed -i.sed 's@version:.*@version: $(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE)@' $$book/publican.cfg;	\
++	for book in $(docbook); do 							\
++		sed -i.sed 's@^brand:.*@brand: clusterlabs@' $$book/publican.cfg;	\
+ 	done
+-	make DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="$(UPLOAD_LANGS)" ASCIIDOC_CLI_TYPE=$(ASCIIDOC_CLI_TYPE) all-local
+-	echo Uploading current $(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE) documentation set to clusterlabs.org
++	make DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="$(UPLOAD_LANGS)" all-local
++	echo Uploading current $(PACKAGE_SERIES) documentation set to clusterlabs.org
+ if BUILD_DOCBOOK
+ 	for book in $(docbook); do 									\
+ 		echo Uploading $$book...;								\
+-		echo "Generated on `date` from version: $(shell git log --pretty="format:%h %d" -n 1)" >> $$book/publish/build-$(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE).txt;	\
+-		for lang in `ls -1 $$book/publish | grep [a-z][a-z]-[A-Z][A-Z]`; do							\
+-			mv $$book/publish/$$lang/Pacemaker/$(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE)/epub/$$book/Pacemaker-1.1{-$(ASCIIDOC_CLI_TYPE),}-$$book-$$lang.epub;	\
+-			mv $$book/publish/$$lang/Pacemaker/$(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE)/pdf/$$book/Pacemaker-1.1{-$(ASCIIDOC_CLI_TYPE),}-$$book-$$lang.pdf;	\
+-		done;											\
+-		rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/$(PACKAGE)/doc/";				\
+-		sed -i.sed 's@version:.*@version: $(PACKAGE_SERIES)@' $$book/publican.cfg;		\
++		echo "Generated on `date` from version: $(shell git log --pretty="format:%h %d" -n 1)" >> $$book/publish/build-$(PACKAGE_SERIES).txt;	\
++		rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/$(PACKAGE)/doc/";			\
+ 	done
+ endif
++	rsync $(RSYNC_OPTS) $(generated_docs) $(ascii) "$(RSYNC_DEST)/$(PACKAGE)/doc/"
+ 
+ clean-local:
+ 	-rm -f $(PNGS_GENERATED)
+-- 
+1.8.3.1
+
+
+From c4b5a54893334bc11a7ceec31b780f664d315629 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Tue, 7 Aug 2018 18:10:42 +0200
+Subject: [PATCH 73/96] Build: Makefile.common: use determined symbolic
+ reference for AsciiDoc
+
+Also fix a typo and drop the attribute passing related to the crm/pcs
+duality in the former documentation, something finally ditched in
+81b00e6da, which left just this last reference behind.
+---
+ Makefile.common | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Makefile.common b/Makefile.common
+index 469417f..feb68bd 100644
+--- a/Makefile.common
++++ b/Makefile.common
+@@ -52,11 +52,11 @@ endif
+ # Build docbook from asciidoc because XML is a PITA to edit
+ #
+ # Build each chapter as a book (since the numbering isn't right for
+-# articles and only books can have appendicies) and then strip out the
++# articles and only books can have appendices) and then strip out the
+ # bits we don't want/need
+ #
+ %.xml:  %.txt
+-	$(AM_V_ASCII)asciidoc -b docbook -a cli_name=$(ASCIIDOC_CLI_TYPE) -a $(ASCIIDOC_CLI_TYPE)=true -d book -o $@ $<
++	$(AM_V_ASCII)$(ASCIIDOC) -b docbook -d book -o $@ $<
+ 	$(AM_V_at)sed -i 's///' $@
+ 	$(AM_V_at)sed -i 's/
//' $@                 # Fix line endings
+ 	$(AM_V_at)sed -i 's/\ lang="en"//' $@        # Never specify a language in the chapters
+-- 
+1.8.3.1
+
+
+From 242bd026c7b5e815c8b5b135341837bb33a9a827 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Mon, 20 Aug 2018 20:29:27 +0200
+Subject: [PATCH 74/96] Build: Makefile.common: avoid using verbatim control
+ characters
+
+This is not a good practice, since it poses an imminent risk of
+"deceptive display (rendering)" at various places, especially in
+tools that may be used for change reviews, incl. GitHub[1].
+
+While at it, arrange for periodic CI to capture undesired
+occurrences of such characters in the code base, and, related
+to that, obtain a pristine LGPLv2.1 license text anew using
+
+  curl  https://www.gnu.org/licenses/lgpl-2.1.txt \
+    | tr -d '\f' > licenses/LGPLv2.1
+
+so that the embedded "form feed" characters are removed (besides
+that, tabulators are also changed into spaces, which is how this
+authoritative server currently serves the text).
+
+[1] https://github.com/isaacs/github/issues/1329
+---
+ Makefile.common   |  5 ++---
+ licenses/LGPLv2.1 | 32 +++++++++++++++-----------------
+ 2 files changed, 17 insertions(+), 20 deletions(-)
+
+diff --git a/Makefile.common b/Makefile.common
+index feb68bd..e50441b 100644
+--- a/Makefile.common
++++ b/Makefile.common
+@@ -56,9 +56,8 @@ endif
+ # bits we don't want/need
+ #
+ %.xml:  %.txt
+-	$(AM_V_ASCII)$(ASCIIDOC) -b docbook -d book -o $@ $<
+-	$(AM_V_at)sed -i 's///' $@
+-	$(AM_V_at)sed -i 's/
//' $@                 # Fix line endings
++	$(AM_V_ASCII)$(ASCIIDOC) -b docbook -d book -o $@-t $<
++	$(AM_V_at)tr -d '\036\r' <$@-t >$@; rm "$@-t"# Fix line endings
+ 	$(AM_V_at)sed -i 's/\ lang="en"//' $@        # Never specify a language in the chapters
+ 	$(AM_V_at)sed -i 's/simpara/para/g' $@       # publican doesn't correctly render footnotes with simpara
+ 	$(AM_V_at)sed -i 's/.*<date>.*//g' $@	       # Remove dangling tag
+diff --git a/licenses/LGPLv2.1 b/licenses/LGPLv2.1
+index 602bfc9..e5ab03e 100644
+--- a/licenses/LGPLv2.1
++++ b/licenses/LGPLv2.1
+@@ -1,5 +1,5 @@
+-		  GNU LESSER GENERAL PUBLIC LICENSE
+-		       Version 2.1, February 1999
++                  GNU LESSER GENERAL PUBLIC LICENSE
++                       Version 2.1, February 1999
+ 
+  Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+  51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+@@ -10,7 +10,7 @@
+  as the successor of the GNU Library Public License, version 2, hence
+  the version number 2.1.]
+ 
+-			    Preamble
++                            Preamble
+ 
+   The licenses for most software are designed to take away your
+ freedom to share and change it.  By contrast, the GNU General Public
+@@ -55,7 +55,7 @@ modified by someone else and passed on, the recipients should know
+ that what they have is not the original version, so that the original
+ author's reputation will not be affected by problems that might be
+ introduced by others.
+-
++
+   Finally, software patents pose a constant threat to the existence of
+ any free program.  We wish to make sure that a company cannot
+ effectively restrict the users of a free program by obtaining a
+@@ -111,8 +111,8 @@ modification follow.  Pay close attention to the difference between a
+ "work based on the library" and a "work that uses the library".  The
+ former contains code derived from the library, whereas the latter must
+ be combined with the library in order to run.
+-
+-		  GNU LESSER GENERAL PUBLIC LICENSE
++
++                  GNU LESSER GENERAL PUBLIC LICENSE
+    TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+ 
+   0. This License Agreement applies to any software library or other
+@@ -158,7 +158,7 @@ Library.
+   You may charge a fee for the physical act of transferring a copy,
+ and you may at your option offer warranty protection in exchange for a
+ fee.
+-
++
+   2. You may modify your copy or copies of the Library or any portion
+ of it, thus forming a work based on the Library, and copy and
+ distribute such modifications or work under the terms of Section 1
+@@ -216,7 +216,7 @@ instead of to this License.  (If a newer version than version 2 of the
+ ordinary GNU General Public License has appeared, then you can specify
+ that version instead if you wish.)  Do not make any other change in
+ these notices.
+-
++
+   Once this change is made in a given copy, it is irreversible for
+ that copy, so the ordinary GNU General Public License applies to all
+ subsequent copies and derivative works made from that copy.
+@@ -267,7 +267,7 @@ Library will still fall under Section 6.)
+ distribute the object code for the work under the terms of Section 6.
+ Any executables containing that work also fall under Section 6,
+ whether or not they are linked directly with the Library itself.
+-
++
+   6. As an exception to the Sections above, you may also combine or
+ link a "work that uses the Library" with the Library to produce a
+ work containing portions of the Library, and distribute that work
+@@ -329,7 +329,7 @@ restrictions of other proprietary libraries that do not normally
+ accompany the operating system.  Such a contradiction means you cannot
+ use both them and the Library together in an executable that you
+ distribute.
+-
++
+   7. You may place library facilities that are a work based on the
+ Library side-by-side in a single library together with other library
+ facilities not covered by this License, and distribute such a combined
+@@ -370,7 +370,7 @@ subject to these terms and conditions.  You may not impose any further
+ restrictions on the recipients' exercise of the rights granted herein.
+ You are not responsible for enforcing compliance by third parties with
+ this License.
+-
++
+   11. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+@@ -422,7 +422,7 @@ conditions either of that version or of any later version published by
+ the Free Software Foundation.  If the Library does not specify a
+ license version number, you may choose any version ever published by
+ the Free Software Foundation.
+-
++
+   14. If you wish to incorporate parts of the Library into other free
+ programs whose distribution conditions are incompatible with these,
+ write to the author to ask for permission.  For software which is
+@@ -432,7 +432,7 @@ decision will be guided by the two goals of preserving the free status
+ of all derivatives of our free software and of promoting the sharing
+ and reuse of software generally.
+ 
+-			    NO WARRANTY
++                            NO WARRANTY
+ 
+   15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+ WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+@@ -455,8 +455,8 @@ FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ DAMAGES.
+ 
+-		     END OF TERMS AND CONDITIONS
+-
++                     END OF TERMS AND CONDITIONS
++
+            How to Apply These Terms to Your New Libraries
+ 
+   If you develop a new library, and you want it to be of the greatest
+@@ -500,5 +500,3 @@ necessary.  Here is a sample; alter the names:
+   Ty Coon, President of Vice
+ 
+ That's all there is to it!
+-
+-
+-- 
+1.8.3.1
+
+
+From 73fa92b8d95b2f16cf8257504b5a7e5c8b6c5099 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Tue, 7 Aug 2018 21:45:51 +0200
+Subject: [PATCH 75/96] Build: add support for AsciiDoc's successor,
+ Asciidoctor
+
+Everything seems to be built just fine when Asciidoctor is present in
+the build environment and AsciiDoc is not (as AsciiDoc takes precedence
+when present), except for Pacemaker Explained, which needs some more
+tweaking to be usable with Asciidoctor:
+
+> Ch-Options.xml:125: validity error : Element emphasis is not declared in literal list of possible children
+> Ch-Options.xml:419: validity error : Element xref is not declared in literal list of possible children
+> Ch-Options.xml:716: validity error : Element xref is not declared in literal list of possible children
+> Ch-Options.xml:824: validity error : Element emphasis is not declared in literal list of possible children
+> Ch-Options.xml:860: validity error : Element emphasis is not declared in literal list of possible children
+> Ch-Options.xml:896: validity error : Element emphasis is not declared in literal list of possible children
+> Ch-Resources.xml:608: validity error : Element xref is not declared in literal list of possible children
+> Ch-Constraints.xml:525: validity error : Element xref is not declared in literal list of possible children
+> Ch-Advanced-Options.xml:662: validity error : Element xref is not declared in literal list of possible children
+> Ch-Stonith.xml:153: validity error : Element emphasis is not declared in literal list of possible children
+> Ch-Stonith.xml:153: validity error : Element emphasis is not declared in literal list of possible children
+
+Also remove the superfluous "inverse grep" conditionalizing, which
+amounted to a thinko about how that's supposed to work.  Another logical
+problem was the broken assumption that it's OK to generate the final
+product of the makefile's target early and adjust it in-place in the
+subsequent steps of the recipe.
+---
+ INSTALL.md      |  2 +-
+ Makefile.common | 31 ++++++++++++++++++++-----------
+ configure.ac    |  7 ++++---
+ doc/Makefile.am |  6 +++++-
+ 4 files changed, 30 insertions(+), 16 deletions(-)
+
+diff --git a/INSTALL.md b/INSTALL.md
+index f02b589..8671ac2 100644
+--- a/INSTALL.md
++++ b/INSTALL.md
+@@ -29,7 +29,7 @@
+ * libesmtp-devel (crm_mon --mail-to option)
+ * lm_sensors-devel (crm_mon --snmp-traps option)
+ * net-snmp-devel (crm_mon --snmp-traps option)
+-* asciidoc (documentation)
++* asciidoc or asciidoctor (documentation)
+ * help2man (documentation)
+ * publican (documentation)
+ * inkscape (documentation)
+diff --git a/Makefile.common b/Makefile.common
+index e50441b..0b06ec4 100644
+--- a/Makefile.common
++++ b/Makefile.common
+@@ -55,17 +55,26 @@ endif
+ # articles and only books can have appendices) and then strip out the
+ # bits we don't want/need
+ #
++# XXX Sequence of tr/sed commands should be replaced with a single XSLT
++#
+ %.xml:  %.txt
+-	$(AM_V_ASCII)$(ASCIIDOC) -b docbook -d book -o $@-t $<
+-	$(AM_V_at)tr -d '\036\r' <$@-t >$@; rm "$@-t"# Fix line endings
+-	$(AM_V_at)sed -i 's/\ lang="en"//' $@        # Never specify a language in the chapters
+-	$(AM_V_at)sed -i 's/simpara/para/g' $@       # publican doesn't correctly render footnotes with simpara
+-	$(AM_V_at)sed -i 's/.*<date>.*//g' $@	       # Remove dangling tag
+-	$(AM_V_at)sed -i 's/.*preface>//g' $@        # Remove preface elements
+-	$(AM_V_at)sed -i 's:<title></title>::g' $@   # Remove empty title
+-	$(AM_V_at)sed -i 's/chapter/section/g' $@    # Chapters become sections, so that books can become chapters
+-	$(AM_V_at)sed -i 's/<.*bookinfo.*>//g' $@    # Strip out bookinfo, we don't need it
+-	-grep -qis "<appendix" $@ && sed -i 's/.*book>//' $@         # We just want the appendix tag
+-	-grep -vqis "<appendix" $@ && sed -i 's/book>/chapter>/g' $@ # Rename to chapter
++if IS_ASCIIDOC
++	$(AM_V_ASCII)$(ASCIIDOC_CONV) -b docbook -d book -o $@-tt $<
++else
++	$(AM_V_ASCII)$(ASCIIDOC_CONV) -b docbook45 -d book -o $@-tt $<
++endif
++	$(AM_V_at)tr -d '\036\r' <$@-tt >$@-t; rm -f $@-tt  # Fix line endings
++	$(AM_V_at)sed -i 's/\ lang="en"//' $@-t      # Never specify a language in the chapters
++	$(AM_V_at)sed -i 's/simpara/para/g' $@-t     # publican doesn't correctly render footnotes with simpara
++	$(AM_V_at)sed -i 's/.*<date>.*//g' $@-t      # Remove dangling tag
++	$(AM_V_at)sed -i 's/.*preface>//g' $@-t      # Remove preface elements
++	$(AM_V_at)sed -i 's:<title></title>::g' $@-t # Remove empty title
++	$(AM_V_at)sed -i 's/chapter/section/g' $@-t  # Chapters become sections, so that books can become chapters
++	$(AM_V_at)sed -i 's/<.*bookinfo.*>//g' $@-t  # Strip out bookinfo, we don't need it
++	$(AM_V_at)! grep -q "<appendix" $@-t || sed -i \
++	  's/.*book>//;tb;bf;:b;N;s/.*<title>.*<\/title>.*//;tb;/<appendix/{:i;n;/<\/appendix/{p;d};bi};bb;:f;p;d' \
++	  $@-t  # We just want the appendix tag (asciidoctor adds non-empty book-level title)
++	$(AM_V_at)sed -i 's/book>/chapter>/g' $@-t   # Rename to chapter (won't trigger if previous sed did)
++	$(AM_V_GEN)mv $@-t $@
+ 
+ #	echo Rebuilt $@ from $<
+diff --git a/configure.ac b/configure.ac
+index a7084e2..c58b556 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -605,7 +605,7 @@ AC_CHECK_PROGS(MAKE, gmake make)
+ AC_PATH_PROGS(HTML2TXT, lynx w3m)
+ AC_PATH_PROGS(HELP2MAN, help2man)
+ AC_PATH_PROGS(POD2MAN, pod2man, pod2man)
+-AC_PATH_PROGS(ASCIIDOC, asciidoc)
++AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor])
+ AC_PATH_PROGS(PUBLICAN, publican)
+ AC_PATH_PROGS(INKSCAPE, inkscape)
+ AC_PATH_PROGS(XSLTPROC, xsltproc)
+@@ -667,8 +667,9 @@ if test x"${MANPAGE_XSLT}" != x""; then
+    PCMK_FEATURES="$PCMK_FEATURES agent-manpages"
+ fi
+ 
+-AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"")
+-if test x"${ASCIIDOC}" != x""; then
++AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$'])
++AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x])
++if test "x${ASCIIDOC_CONV}" != x; then
+    PCMK_FEATURES="$PCMK_FEATURES ascii-docs"
+ fi
+ 
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index a00b704..d59cdcd 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -103,7 +103,11 @@ endif
+ EXTRA_DIST	= $(docbook:%=%.xml)
+ 
+ %.html: %.txt
+-	$(AM_V_ASCII)$(ASCIIDOC) --unsafe --backend=xhtml11 $<
++if IS_ASCIIDOC
++	$(AM_V_ASCII)$(ASCIIDOC_CONV) --unsafe --backend=xhtml11 $<
++else
++	$(AM_V_ASCII)$(ASCIIDOC_CONV) --backend=html5 $<
++endif
+ 
+ # publican-clusterlabs/xsl/{html,html-single,pdf}.xsl refer to URIs
+ # requiring Internet access, hence we shadow that with a XML catalog-based
+-- 
+1.8.3.1
+
+
+From 2032f3e990c8fd39c8618dd15e9f8738290ec30f Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 29 Jan 2018 14:01:01 -0600
+Subject: [PATCH 76/96] Doc: clear detritus from previous failed builds when
+ building
+
+---
+ doc/Makefile.am | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index d59cdcd..f1a3c63 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -152,7 +152,7 @@ endif
+ # With '%' the test for 'newness' fails
+ Clusters_from_Scratch.build: $(PNGS) $(wildcard Clusters_from_Scratch/en-US/*.xml) $(CFS_XML) $(CFS_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
+ 	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
+-	rm -rf $(@:%.build=%)/publish/*
++	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+ 	$(AM_V_PUB)cd $(@:%.build=%) \
+ 	&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
+@@ -175,7 +175,7 @@ PD_XML=$(PD_TXT:%.txt=%.xml)
+ # With '%' the test for 'newness' fails
+ Pacemaker_Development.build: $(wildcard Pacemaker_Development/en-US/*.xml) $(PD_XML) $(PUBLICAN_INTREE_DEPS)
+ 	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
+-	rm -rf $(@:%.build=%)/publish/*
++	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+ 	$(AM_V_PUB)cd $(@:%.build=%) \
+ 	&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
+@@ -202,7 +202,7 @@ $(PE_XML): $(PE_SHARED_XML)
+ # With '%' the test for 'newness' fails
+ Pacemaker_Explained.build: $(PNGS) $(wildcard Pacemaker_Explained/en-US/*.xml) $(PE_XML) $(PE_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
+ 	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
+-	rm -rf $(@:%.build=%)/publish/*
++	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+ 	$(AM_V_PUB)cd $(@:%.build=%) \
+ 	&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
+@@ -225,7 +225,7 @@ PR_XML=$(PR_TXT:%.txt=%.xml)
+ # With '%' the test for 'newness' fails
+ Pacemaker_Remote.build: $(PNGS) $(wildcard Pacemaker_Remote/en-US/*.xml) $(PR_XML) $(PUBLICAN_INTREE_DEPS)
+ 	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
+-	rm -rf $(@:%.build=%)/publish/*
++	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+ 	$(AM_V_PUB)cd $(@:%.build=%) \
+ 	&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
+-- 
+1.8.3.1
+
+
+From 0e072d3debfc2c03751ff69add211a20c9db01cb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Thu, 9 May 2019 17:41:21 +0200
+Subject: [PATCH 77/96] Build: configure: let "make dist" tarballs be
+ ustar/posix, not v7 format
+
+This avoids problem with "file name is too long (max 99); not dumped"
+-- we don't suffer from this currently, but eventually could (see also
+the subsequent, related change in the set).  Not also that some git
+hosting sites (GitHub) will also offer tarballs (effectively our current
+official, blessed and authoritative redistributables) using *only* even
+newer format (pax), which may effectively cause portability issues(!).
+---
+ configure.ac | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index c58b556..e21eb03 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -59,7 +59,8 @@ AC_ARG_WITH(pkg-name,
+     [ PACKAGE_NAME="$withval" ])
+ 
+ dnl Older distros may need: AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION)
+-AM_INIT_AUTOMAKE([foreign])
++dnl tar-ustar:      use (older) POSIX variant of generated tar rather than v7
++AM_INIT_AUTOMAKE([foreign tar-ustar])
+ AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version)
+ 
+ dnl Versioned attributes implementation is not yet production-ready
+-- 
+1.8.3.1
+
+
+From 9ddc343d609e3082ec8baead451456a7b7c959de Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Thu, 9 May 2019 17:47:48 +0200
+Subject: [PATCH 78/96] Build: fix "make dist" not working in one go (and,
+ beyond that, its brokenness)
+
+The problem was with the EXTRA_DIST variable that got rightfully
+assigned "$(docbook:%=%.xml)" in e99ccd1b0, but frightfully stayed
+unadjusted in 7382e6241 -- actually it sort of worked, but only as a
+byproduct of generating spurious empty <publication dir>.xml files,
+which moreover required multiple iterations of "make dist" for it to
+finish successfully -- in a limited sense of "successfully", since
+it didn't include the publications' sources at all.
+
+All this is now rectified.  Also note that the preceding "Build:
+configure: let "make dist" tarballs be ustar/posix, not v7 format"
+commit paved the way for us, otherwise we'd be getting something like:
+
+pacemaker-<GITHASH>/doc/Pacemaker_Administration/en-US/Ch-Upgrading.txt:
+  file name is too long (max 99); not dumped
+---
+ doc/Makefile.am | 25 ++++++++++++++++++++-----
+ 1 file changed, 20 insertions(+), 5 deletions(-)
+
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index f1a3c63..c981009 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -100,7 +100,12 @@ publican_docs	+= $(docbook)
+ endif
+ endif
+ 
+-EXTRA_DIST	= $(docbook:%=%.xml)
++EXTRA_DIST	= $(ascii) $(SHARED_TXT) $(PNGS_ORIGINAL) $(SVGS)
++EXTRA_DIST	+= $(CFS_TXT) $(CFS_XML_ONLY)
++EXTRA_DIST	+= $(PA_TXT) $(PA_XML_ONLY)
++EXTRA_DIST	+= $(PD_TXT) $(PD_XML_ONLY)
++EXTRA_DIST	+= $(PE_TXT) $(PE_XML_ONLY)
++EXTRA_DIST	+= $(PR_TXT) $(PR_XML_ONLY)
+ 
+ %.html: %.txt
+ if IS_ASCIIDOC
+@@ -132,6 +137,8 @@ publican-catalog: publican-catalog-fallback
+ 	&& echo '</catalog>'
+ 	$(AM_V_GEN)mv $@-t $@
+ 
++COMMON_XML = Author_Group.xml Book_Info.xml Revision_History.xml
++
+ SHARED_TXT=$(wildcard shared/en-US/*.txt)
+ SHARED_XML=$(SHARED_TXT:%.txt=%.xml)
+ 
+@@ -140,6 +147,8 @@ CFS_SHARED_TXT=$(addprefix shared/en-US/,pacemaker-intro.txt)
+ CFS_SHARED_XML=$(CFS_SHARED_TXT:%.txt=%.xml)
+ CFS_TXT=$(wildcard Clusters_from_Scratch/en-US/*.txt)
+ CFS_XML=$(CFS_TXT:%.txt=%.xml)
++CFS_XML_ONLY=$(addprefix Clusters_from_Scratch/en-US/,$(COMMON_XML) \
++  Clusters_from_Scratch.ent Clusters_from_Scratch.xml Preface.xml)
+ 
+ $(CFS_XML): $(CFS_SHARED_XML)
+ 
+@@ -150,7 +159,7 @@ endif
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Clusters_from_Scratch.build: $(PNGS) $(wildcard Clusters_from_Scratch/en-US/*.xml) $(CFS_XML) $(CFS_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
++Clusters_from_Scratch.build: $(PNGS)  $(CFS_XML_ONLY) $(CFS_XML) $(CFS_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
+ 	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+@@ -170,10 +179,12 @@ endif
+ 
+ PD_TXT=$(wildcard Pacemaker_Development/en-US/*.txt)
+ PD_XML=$(PD_TXT:%.txt=%.xml)
++PD_XML_ONLY=$(addprefix Pacemaker_Development/en-US/,$(COMMON_XML) \
++  Pacemaker_Development.ent Pacemaker_Development.xml)
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Pacemaker_Development.build: $(wildcard Pacemaker_Development/en-US/*.xml) $(PD_XML) $(PUBLICAN_INTREE_DEPS)
++Pacemaker_Development.build: $(PD_XML_ONLY) $(PD_XML) $(PUBLICAN_INTREE_DEPS)
+ 	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+@@ -195,12 +206,14 @@ PE_SHARED_TXT=$(addprefix shared/en-US/,pacemaker-intro.txt)
+ PE_SHARED_XML=$(PE_SHARED_TXT:%.txt=%.xml)
+ PE_TXT=$(wildcard Pacemaker_Explained/en-US/*.txt)
+ PE_XML=$(PE_TXT:%.txt=%.xml)
++PE_XML_ONLY=$(addprefix Pacemaker_Explained/en-US/,$(COMMON_XML) \
++  Pacemaker_Explained.ent Pacemaker_Explained.xml Preface.xml)
+ 
+ $(PE_XML): $(PE_SHARED_XML)
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Pacemaker_Explained.build: $(PNGS) $(wildcard Pacemaker_Explained/en-US/*.xml) $(PE_XML) $(PE_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
++Pacemaker_Explained.build: $(PNGS) $(PE_XML_ONLY) $(PE_XML) $(PE_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
+ 	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+@@ -220,10 +233,12 @@ endif
+ 
+ PR_TXT=$(wildcard Pacemaker_Remote/en-US/*.txt)
+ PR_XML=$(PR_TXT:%.txt=%.xml)
++PR_XML_ONLY=$(addprefix Pacemaker_Remote/en-US/,$(COMMON_XML) \
++  Pacemaker_Remote.ent Pacemaker_Remote.xml)
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Pacemaker_Remote.build: $(PNGS) $(wildcard Pacemaker_Remote/en-US/*.xml) $(PR_XML) $(PUBLICAN_INTREE_DEPS)
++Pacemaker_Remote.build: $(PNGS) $(PR_XML_ONLY) $(PR_XML) $(PUBLICAN_INTREE_DEPS)
+ 	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+-- 
+1.8.3.1
+
+
+From 54b8f258f864d63596b71f38db49491873bddbdf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
+Date: Tue, 5 Jun 2018 17:10:28 +0200
+Subject: [PATCH 79/96] Build: xml: *.rng: ship needed non-generated (& only
+ such) RNG schemas
+
+Previously, "make dist" would omit those(!), and an over-approximation
+of such files would, undesirably, get selected (since 0a7a4b4d7,
+incl. cibtr-2.rng) in the respective source variable.
+Also fix some whitespace issues.
+---
+ xml/Makefile.am | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/xml/Makefile.am b/xml/Makefile.am
+index ffd09e3..1e5649f 100644
+--- a/xml/Makefile.am
++++ b/xml/Makefile.am
+@@ -58,14 +58,15 @@ RNG_version_pairs_last  = $(wordlist \
+ 
+ RNG_generated		= pacemaker.rng $(foreach base,$(RNG_versions),pacemaker-$(base).rng) versions.rng
+ 
+-RNG_cfg_base	 	= options nodes resources constraints fencing acls tags alerts
+-RNG_base	 	= cib $(RNG_cfg_base) status score rule nvset
+-RNG_files	 	= $(foreach base,$(RNG_base),$(wildcard $(base)*.rng))
++RNG_cfg_base		= options nodes resources constraints fencing acls tags alerts
++RNG_base		= cib $(RNG_cfg_base) status score rule nvset
++RNG_files		= $(foreach base,$(RNG_base),$(wildcard $(base).rng $(base)-*.rng))
+ 
+ # List of non-Pacemaker RNGs
+ RNG_extra		= crm_mon.rng
+ 
+-RNG_DATA		= $(RNG_files) $(RNG_generated) $(RNG_extra)
++dist_RNG_DATA		= $(RNG_files) $(RNG_extra)
++nodist_RNG_DATA		= $(RNG_generated)
+ 
+ EXTRA_DIST		= best-match.sh
+ 
+-- 
+1.8.3.1
+
+
+From 898accf5febfac5ddc4304ad4a0f92b8fc866964 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 5 Aug 2019 10:48:18 -0500
+Subject: [PATCH 80/96] Build: makefiles: make sure all files that should be
+ distributed are
+
+... so the result of "make dist" has everything needed to build,
+and everything of interest to end users
+---
+ Makefile.am                 | 19 +++++++++++---
+ Makefile.common             |  2 +-
+ cts/Makefile.am             | 56 ++++++++++++++++++++--------------------
+ cts/benchmark/Makefile.am   | 25 ++++--------------
+ doc/Makefile.am             |  9 ++++---
+ extra/Makefile.am           | 25 ++++++------------
+ extra/alerts/Makefile.am    | 23 ++++++-----------
+ extra/logrotate/Makefile.am | 21 +++++----------
+ extra/resources/Makefile.am | 62 ++++++++++++++++++---------------------------
+ fencing/Makefile.am         |  4 ++-
+ lrmd/Makefile.am            |  2 +-
+ mcp/Makefile.am             |  4 ++-
+ pengine/Makefile.am         |  6 ++---
+ tools/Makefile.am           | 11 ++++----
+ xml/Makefile.am             | 14 ++++++----
+ 15 files changed, 127 insertions(+), 156 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 874f6ed..3080445 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -18,7 +18,15 @@
+ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ #
+ 
+-EXTRA_DIST              = autogen.sh ConfigureMe README.in m4/gnulib-cache.m4
++EXTRA_DIST		= CONTRIBUTING.md	\
++			  GNUmakefile		\
++			  INSTALL.md		\
++			  README.markdown	\
++			  autogen.sh		\
++			  pacemaker.spec.in	\
++			  rpmlintrc		\
++			  m4/gnulib-cache.m4	\
++			  m4/gnulib-tool.m4
+ 
+ MAINTAINERCLEANFILES    = Makefile.in aclocal.m4 configure DRF/config-h.in \
+                         DRF/stamp-h.in libtool.m4 ltdl.m4
+@@ -33,14 +41,17 @@ doc_DATA = README.markdown COPYING
+ ACLOCAL_AMFLAGS  = -I m4
+ 
+ licensedir              = $(docdir)/licenses/
+-license_DATA            = $(wildcard licenses/*)
++dist_license_DATA	= $(wildcard licenses/*)
+ 
+ # Test components
+ SUBDIRS	+= cts
+ 
+ testdir			= $(datadir)/$(PACKAGE)/tests/
+-test_SCRIPTS		= coverage.sh BasicSanity.sh
+-test_DATA		= valgrind-pcmk.suppressions
++test_SCRIPTS		= coverage.sh
++dist_test_SCRIPTS	= BasicSanity.sh
++dist_test_DATA		= valgrind-pcmk.suppressions
++
++EXTRA_SCRIPTS		= abi-check bumplibs.sh
+ 
+ # Scratch file for ad-hoc testing
+ noinst_PROGRAMS = scratch
+diff --git a/Makefile.common b/Makefile.common
+index 0b06ec4..a4842b7 100644
+--- a/Makefile.common
++++ b/Makefile.common
+@@ -33,7 +33,7 @@ AM_CPPFLAGS		= -I$(top_builddir)/include -I$(top_srcdir)/include   \
+ 			  -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
+ 
+ if BUILD_HELP
+-man8_MANS		= $(sbin_PROGRAMS:%=%.8) $(sbin_SCRIPTS:%=%.8)
++man8_MANS		= $(sbin_PROGRAMS:%=%.8) $(sbin_SCRIPTS:%=%.8) $(dist_sbin_SCRIPTS:%=%.8)
+ endif
+ 
+ %.8:	% $(MAN8DEPS)
+diff --git a/cts/Makefile.am b/cts/Makefile.am
+index f3f169c..c0c5707 100644
+--- a/cts/Makefile.am
++++ b/cts/Makefile.am
+@@ -21,37 +21,37 @@ MAINTAINERCLEANFILES    = Makefile.in
+ 
+ CLEANFILES      = LSBDummy HBDummy
+ 
+-EXTRA_DIST      = $(cts_SCRIPTS) $(cts_DATA)
+-noinst_SCRIPTS		= cluster_test		\
++EXTRA_SCRIPTS		= cluster_test		\
+ 			  OCFIPraTest.py
+ 
+-ctsdir		= $(datadir)/$(PACKAGE)/tests/cts
+-ctslibdir	= $(pyexecdir)/cts
++testdir		= $(datadir)/$(PACKAGE)/tests
+ 
+-ctslib_PYTHON	=	__init__.py		\
+-			CTSvars.py		\
+-			CM_lha.py		\
+-			CM_ais.py		\
+-			CTS.py			\
+-			CTSaudits.py		\
+-			CTStests.py		\
+-			CTSscenarios.py		\
+-			CIB.py			\
+-			cib_xml.py		\
+-			environment.py		\
+-			logging.py		\
+-			patterns.py		\
+-			remote.py		\
+-			watcher.py
++ctslibdir		= $(pyexecdir)/cts
++ctslib_PYTHON		= __init__.py		\
++			  CIB.py		\
++			  cib_xml.py		\
++			  CM_lha.py		\
++			  CM_ais.py		\
++			  CTS.py		\
++			  CTSaudits.py		\
++			  CTSscenarios.py	\
++			  CTStests.py		\
++			  environment.py	\
++			  logging.py		\
++			  patterns.py		\
++			  remote.py		\
++			  watcher.py
++nodist_ctslib_PYTHON	= CTSvars.py
+ 
+-cts_DATA	=	README.md cts.supp pacemaker-cts-dummyd.service
+-
+-cts_SCRIPTS	=	cts		\
+-			CTSlab.py		\
+-			lxc_autogen.sh	\
+-			LSBDummy		\
+-			HBDummy		\
+-			pacemaker-cts-dummyd	\
+-			$(top_srcdir)/fencing/fence_dummy
++ctsdir		= 	$(testdir)/cts
++cts_DATA	=	pacemaker-cts-dummyd.service
++dist_cts_DATA	=	README.md cts.supp
++dist_cts_SCRIPTS	= cts			\
++			  CTSlab.py		\
++			  $(top_srcdir)/fencing/fence_dummy
++cts_SCRIPTS		= HBDummy		\
++			  LSBDummy		\
++			  lxc_autogen.sh	\
++			  pacemaker-cts-dummyd
+ 
+ SUBDIRS	= benchmark
+diff --git a/cts/benchmark/Makefile.am b/cts/benchmark/Makefile.am
+index 8a50ac7..1fd6171 100644
+--- a/cts/benchmark/Makefile.am
++++ b/cts/benchmark/Makefile.am
+@@ -3,26 +3,11 @@
+ #
+ # Copyright (C) 2001 Michael Moerz
+ #
+-# This program is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU General Public License
+-# as published by the Free Software Foundation; either version 2
+-# of the License, or (at your option) any later version.
+-# 
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-# GNU General Public License for more details.
+-# 
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, write to the Free Software
+-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++# This source code is licensed under the GNU General Public License version 2
++# or later (GPLv2+) WITHOUT ANY WARRANTY.
+ #
+ MAINTAINERCLEANFILES    = Makefile.in
+ 
+-EXTRA_DIST      = $(bench_SCRIPTS) $(bench_DATA)
+-
+-benchdir		= $(datadir)/$(PACKAGE)/tests/cts/benchmark
+-
+-bench_DATA	=	README.benchmark control
+-
+-bench_SCRIPTS	=	clubench
++benchdir	= $(datadir)/$(PACKAGE)/tests/cts/benchmark
++dist_bench_DATA	= README.benchmark control
++bench_SCRIPTS	= clubench
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index c981009..a929acb 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -56,11 +56,13 @@ UPLOAD_LANGS    = en-US
+ 
+ # Scheduler transition graphs
+ # @TODO Add original XML, and generate DOTs via crm_simulate
+-DOTS = $(wildcard shared/en-US/images/*.dot)
++DOTS	= $(wildcard Clusters_from_Scratch/en-US/images/*.dot)	\
++	  $(wildcard Pacemaker_Explained/en-US/images/*.dot)
+ 
+ # Vector sources for images
+ # @TODO Generate transition SVGs from DOTs via dot
+-SVGS =	$(wildcard shared/en-US/images/pcmk-*.svg)	\
++SVGS =	$(wildcard Clusters_from_Scratch/en-US/images/pcmk-*.svg)	\
++	$(wildcard Pacemaker_Explained/en-US/images/pcmk-*.svg)	\
+ 	$(DOTS:%.dot=%.svg)
+ 
+ # Final images
+@@ -100,12 +102,13 @@ publican_docs	+= $(docbook)
+ endif
+ endif
+ 
+-EXTRA_DIST	= $(ascii) $(SHARED_TXT) $(PNGS_ORIGINAL) $(SVGS)
++EXTRA_DIST	= $(ascii) $(SHARED_TXT) $(PNGS_ORIGINAL) $(DOTS) $(SVGS)
+ EXTRA_DIST	+= $(CFS_TXT) $(CFS_XML_ONLY)
+ EXTRA_DIST	+= $(PA_TXT) $(PA_XML_ONLY)
+ EXTRA_DIST	+= $(PD_TXT) $(PD_XML_ONLY)
+ EXTRA_DIST	+= $(PE_TXT) $(PE_XML_ONLY)
+ EXTRA_DIST	+= $(PR_TXT) $(PR_XML_ONLY)
++EXTRA_DIST	+= pcs-crmsh-quick-ref.md
+ 
+ %.html: %.txt
+ if IS_ASCIIDOC
+diff --git a/extra/Makefile.am b/extra/Makefile.am
+index d742ae2..fb23caf 100644
+--- a/extra/Makefile.am
++++ b/extra/Makefile.am
+@@ -1,24 +1,15 @@
+ #
+-# Copyright (C) 2004-2009 Andrew Beekhof
++# Copyright 2004-2019 the Pacemaker project contributors
+ #
+-# This program is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU General Public License
+-# as published by the Free Software Foundation; either version 2
+-# of the License, or (at your option) any later version.
+-# 
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-# GNU General Public License for more details.
+-# 
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, write to the Free Software
+-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++# The version control history for this file may have further details.
++#
++# This source code is licensed under the GNU General Public License version 2
++# or later (GPLv2+) WITHOUT ANY WARRANTY.
+ #
+ 
+ MAINTAINERCLEANFILES    = Makefile.in
+ 
+-SUBDIRS                 =  alerts resources logrotate
++SUBDIRS		= alerts resources logrotate
+ 
+-mibdir = $(datadir)/snmp/mibs
+-mib_DATA = PCMK-MIB.txt
++mibdir		= $(datadir)/snmp/mibs
++dist_mib_DATA	= PCMK-MIB.txt
+diff --git a/extra/alerts/Makefile.am b/extra/alerts/Makefile.am
+index 2cd3bd6..e798ae9 100644
+--- a/extra/alerts/Makefile.am
++++ b/extra/alerts/Makefile.am
+@@ -1,22 +1,15 @@
+ #
+-# Copyright (C) 2016 Ken Gaillot <kgaillot@redhat.com>
++# Copyright 2016-2019 the Pacemaker project contributors
+ #
+-# This program is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU General Public License
+-# as published by the Free Software Foundation; either version 2
+-# of the License, or (at your option) any later version.
++# The version control history for this file may have further details.
+ #
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-# GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, write to the Free Software
+-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++# This source code is licensed under the GNU General Public License version 2
++# or later (GPLv2+) WITHOUT ANY WARRANTY.
+ #
+ 
+ MAINTAINERCLEANFILES    = Makefile.in
+ 
+-samplesdir = $(datadir)/$(PACKAGE)/alerts/
+-samples_DATA = alert_file.sh.sample alert_smtp.sh.sample alert_snmp.sh.sample
++samplesdir		= $(datadir)/$(PACKAGE)/alerts/
++dist_samples_DATA	= alert_file.sh.sample	\
++			  alert_smtp.sh.sample	\
++			  alert_snmp.sh.sample
+diff --git a/extra/logrotate/Makefile.am b/extra/logrotate/Makefile.am
+index 55c669c..cafd0d5 100644
+--- a/extra/logrotate/Makefile.am
++++ b/extra/logrotate/Makefile.am
+@@ -1,22 +1,13 @@
+ #
+-# Copyright (C) 2014 Gao,Yan <ygao@suse.com>
++# Copyright 2014-2019 the Pacemaker project contributors
+ #
+-# This program is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU General Public License
+-# as published by the Free Software Foundation; either version 2
+-# of the License, or (at your option) any later version.
++# The version control history for this file may have further details.
+ #
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-# GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, write to the Free Software
+-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++# This source code is licensed under the GNU General Public License version 2
++# or later (GPLv2+) WITHOUT ANY WARRANTY.
+ #
+ 
+ MAINTAINERCLEANFILES    = Makefile.in
+ 
+-logrotatedir = $(sysconfdir)/logrotate.d
+-logrotate_DATA = pacemaker
++logrotatedir		= $(sysconfdir)/logrotate.d
++logrotate_DATA		= pacemaker
+diff --git a/extra/resources/Makefile.am b/extra/resources/Makefile.am
+index c84dfdf..e4b54cc 100644
+--- a/extra/resources/Makefile.am
++++ b/extra/resources/Makefile.am
+@@ -1,22 +1,12 @@
+-# Makefile.am for OCF RAs
+ #
+-# Author: Andrew Beekhof
+-# Copyright (C) 2008 Andrew Beekhof
++# Copyright 2008-2019 the Pacemaker project contributors
+ #
+-# This program is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU General Public License
+-# as published by the Free Software Foundation; either version 2
+-# of the License, or (at your option) any later version.
+-# 
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-# GNU General Public License for more details.
+-# 
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, write to the Free Software
+-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++# The version control history for this file may have further details.
+ #
++# This source code is licensed under the GNU General Public License version 2
++# or later (GPLv2+) WITHOUT ANY WARRANTY.
++#
++
+ include $(top_srcdir)/Makefile.common
+ 
+ EXTRA_DIST		= $(ocf_SCRIPTS)
+@@ -24,28 +14,27 @@ EXTRA_DIST		= $(ocf_SCRIPTS)
+ 
+ isolationtechdir    = @OCF_RA_DIR@/.isolation
+ 
+-ocfdir		    = @OCF_RA_DIR@/pacemaker
+-
+-ocf_SCRIPTS	     =  ClusterMon 	\
+-			controld	\
+-			Dummy		\
+-			HealthCPU	\
+-			HealthSMART	\
+-			ifspeed		\
+-			o2cb		\
+-			ping		\
+-			pingd		\
+-			Stateful	\
+-			SysInfo		\
+-			SystemHealth \
+-			attribute	\
+-			remote
+-
+-isolationtech_SCRIPTS	= docker-wrapper
++ocfdir			= @OCF_RA_DIR@/pacemaker
++dist_ocf_SCRIPTS	= attribute	\
++			  ClusterMon	\
++			  controld	\
++			  Dummy		\
++			  HealthCPU	\
++			  HealthSMART	\
++			  ifspeed	\
++			  o2cb		\
++			  ping		\
++			  pingd		\
++			  remote	\
++			  Stateful	\
++			  SysInfo	\
++			  SystemHealth
++
++dist_isolationtech_SCRIPTS	= docker-wrapper
+ 
+ if BUILD_XML_HELP
+ 
+-man7_MANS = $(ocf_SCRIPTS:%=ocf_pacemaker_%.7)
++man7_MANS = $(ocf_SCRIPTS:%=ocf_pacemaker_%.7) $(dist_ocf_SCRIPTS:%=ocf_pacemaker_%.7)
+ DBOOK_OPTS = --stringparam command.prefix ocf_pacemaker_ --stringparam variable.prefix OCF_RESKEY_ --param man.vol 7
+ 
+ ocf_pacemaker_%.xml:  %
+@@ -54,5 +43,4 @@ ocf_pacemaker_%.xml:  %
+ endif
+ 
+ clean-generic:
+-	rm -f $(man7_MANS) $(ocf_SCRIPTS:%=%.xml) *~
+-
++	rm -f $(man7_MANS) $(ocf_SCRIPTS:%=%.xml) $(dist_ocf_SCRIPTS:%=%.xml) *~
+diff --git a/fencing/Makefile.am b/fencing/Makefile.am
+index cb7b551..6cda8ef 100644
+--- a/fencing/Makefile.am
++++ b/fencing/Makefile.am
+@@ -25,7 +25,7 @@ halibdir	= $(CRM_DAEMON_DIR)
+ halib_PROGRAMS	= stonithd stonith-test
+ 
+ sbin_PROGRAMS	= stonith_admin
+-sbin_SCRIPTS	= fence_legacy fence_pcmk
++dist_sbin_SCRIPTS	= fence_legacy fence_pcmk
+ 
+ noinst_HEADERS	= internal.h standalone_config.h
+ 
+@@ -33,6 +33,8 @@ if BUILD_XML_HELP
+ man7_MANS	= stonithd.7
+ endif
+ 
++EXTRA_DIST	= README.md
++
+ stonith_test_SOURCES	= test.c
+ 
+ stonith_test_LDADD	= $(top_builddir)/lib/common/libcrmcommon.la	\
+diff --git a/lrmd/Makefile.am b/lrmd/Makefile.am
+index 33611cb..7aa5414 100644
+--- a/lrmd/Makefile.am
++++ b/lrmd/Makefile.am
+@@ -26,7 +26,7 @@ testdir			= $(datadir)/$(PACKAGE)/tests/lrmd
+ test_SCRIPTS		= regression.py
+ 
+ initdir			= $(INITDIR)
+-init_SCRIPTS		= pacemaker_remote
++dist_init_SCRIPTS	= pacemaker_remote
+ sbin_PROGRAMS		= pacemaker_remoted
+ 
+ if BUILD_SYSTEMD
+diff --git a/mcp/Makefile.am b/mcp/Makefile.am
+index 074d251..5e1147d 100644
+--- a/mcp/Makefile.am
++++ b/mcp/Makefile.am
+@@ -20,13 +20,15 @@ include $(top_srcdir)/Makefile.common
+ if BUILD_CS_SUPPORT
+ 
+ initdir			= $(INITDIR)
+-init_SCRIPTS		= pacemaker
++dist_init_SCRIPTS	= pacemaker
+ sbin_PROGRAMS		= pacemakerd
+ 
+ if BUILD_SYSTEMD
+ systemdunit_DATA	= pacemaker.service
+ endif
+ 
++EXTRA_DIST		= pacemaker.sysconfig
++
+ ## SOURCES
+ 
+ noinst_HEADERS		= pacemaker.h
+diff --git a/pengine/Makefile.am b/pengine/Makefile.am
+index fdac3e3..c121ab5 100644
+--- a/pengine/Makefile.am
++++ b/pengine/Makefile.am
+@@ -24,11 +24,11 @@ halibdir	= $(CRM_DAEMON_DIR)
+ PE_TESTS	= $(wildcard test10/*.scores)
+ 
+ testdir			= $(datadir)/$(PACKAGE)/tests/pengine
+-test_SCRIPTS		= regression.sh
+-test_DATA		= regression.core.sh
++dist_test_SCRIPTS	= regression.sh
++dist_test_DATA		= regression.core.sh
+ 
+ test10dir		= $(datadir)/$(PACKAGE)/tests/pengine/test10
+-test10_DATA		= $(PE_TESTS) $(PE_TESTS:%.scores=%.xml) $(PE_TESTS:%.scores=%.exp) $(PE_TESTS:%.scores=%.dot) $(PE_TESTS:%.scores=%.summary) $(wildcard test10/*.stderr)
++dist_test10_DATA	= $(PE_TESTS) $(PE_TESTS:%.scores=%.xml) $(PE_TESTS:%.scores=%.exp) $(PE_TESTS:%.scores=%.dot) $(PE_TESTS:%.scores=%.summary) $(wildcard test10/*.stderr)
+ 
+ beekhof:
+ 	echo $(shell ls -1 test10/*.xml)
+diff --git a/tools/Makefile.am b/tools/Makefile.am
+index d8c3215..6960548 100644
+--- a/tools/Makefile.am
++++ b/tools/Makefile.am
+@@ -31,18 +31,20 @@ noinst_HEADERS		= crm_resource.h fake_transition.h
+ pcmkdir			= $(datadir)/$(PACKAGE)
+ pcmk_DATA		= report.common report.collector
+ 
+-sbin_SCRIPTS		= crm_report crm_standby crm_master crm_failcount
++sbin_SCRIPTS		= crm_report
+ if BUILD_CIBSECRETS
+ sbin_SCRIPTS		+= cibsecret
+ endif
+-EXTRA_DIST		= $(sbin_SCRIPTS)
++dist_sbin_SCRIPTS	= crm_standby crm_master crm_failcount
++
++EXTRA_DIST		= crm_mon.sysconfig
+ 
+ sbin_PROGRAMS		= crm_simulate crmadmin cibadmin crm_node crm_attribute crm_resource crm_verify \
+ 			 crm_shadow attrd_updater crm_diff crm_mon iso8601 crm_ticket crm_error
+ 
+ testdir			= $(datadir)/$(PACKAGE)/tests/cli
+-test_SCRIPTS		= regression.sh
+-test_DATA		= regression.dates.exp		\
++dist_test_SCRIPTS	= regression.sh
++dist_test_DATA		= regression.dates.exp		\
+ 			  regression.tools.exp		\
+ 			  regression.acls.exp		\
+ 			  regression.validity.exp	\
+@@ -102,7 +104,6 @@ crm_mon_LDADD		= $(top_builddir)/lib/pengine/libpe_status.la		\
+ 			  $(top_builddir)/pengine/libpengine.la \
+ 			  $(COMMONLIBS) $(SNMPLIBS) $(ESMTPLIBS)
+ 
+-# Arguments could be made that this should live in crm/pengine
+ crm_verify_SOURCES	= crm_verify.c
+ crm_verify_LDADD	= $(top_builddir)/lib/pengine/libpe_status.la 	\
+ 			$(top_builddir)/pengine/libpengine.la		\
+diff --git a/xml/Makefile.am b/xml/Makefile.am
+index 1e5649f..c801842 100644
+--- a/xml/Makefile.am
++++ b/xml/Makefile.am
+@@ -18,12 +18,11 @@
+ MAINTAINERCLEANFILES    = Makefile.in
+ 
+ dtddir			= $(CRM_DTD_DIRECTORY)
+-dtd_DATA		= crm.dtd crm-transitional.dtd
++dist_dtd_DATA		= crm.dtd crm-transitional.dtd
+ 
+ xsltdir			= $(dtddir)
+-xslt_DATA		= $(top_srcdir)/xml/upgrade06.xsl $(top_srcdir)/xml/upgrade-*.xsl
+-
+-noinst_DATA		= context-of.xsl
++dist_xslt_DATA		= $(top_srcdir)/xml/upgrade06.xsl \
++			  $(top_srcdir)/xml/upgrade-*.xsl
+ 
+ RNGdir			= $(dtddir)
+ 
+@@ -68,7 +67,12 @@ RNG_extra		= crm_mon.rng
+ dist_RNG_DATA		= $(RNG_files) $(RNG_extra)
+ nodist_RNG_DATA		= $(RNG_generated)
+ 
+-EXTRA_DIST		= best-match.sh
++EXTRA_DIST		= Readme.md			\
++			  best-match.sh			\
++			  context-of.xsl		\
++			  ocf-meta2man.xsl		\
++			  regression.core.sh		\
++			  regression.sh
+ 
+ versions:
+ 	echo "Max: $(RNG_max)"
+-- 
+1.8.3.1
+
+
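A note on the automake convention the patch above applies throughout: prefixing a primary with dist_ tells "make dist" to ship the listed files in the release tarball, while nodist_ keeps build-time-generated files out of it. A minimal sketch, with made-up file names:

    # shipped in the tarball and installed into $(docdir)
    dist_doc_DATA   = notes.txt
    # installed, but generated during the build, so excluded from the tarball
    nodist_doc_DATA = generated-notes.txt
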
+From 50825612a2d3922fbeabef390303ca2ac596e846 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 5 Aug 2019 17:38:42 -0500
+Subject: [PATCH 81/96] Build: GNUmakefile: allow all/clean to work without git
+
+e.g. in a distribution rather than a checkout
+---
+ GNUmakefile | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/GNUmakefile b/GNUmakefile
+index 352903f..d790865 100644
+--- a/GNUmakefile
++++ b/GNUmakefile
+@@ -45,12 +45,12 @@ ARCH    ?= $(shell test -e /etc/fedora-release && rpm --eval %{_arch})
+ MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH))
+ DISTRO  ?= $(shell test -e /etc/SuSE-release && echo suse; echo fedora)
+ COMMIT  ?= HEAD
+-TAG     ?= $(shell T=$$(git describe --all '$(COMMIT)' | sed -n 's|tags/\(.*\)|\1|p'); \
++TAG     ?= $(shell T=$$(git describe --all '$(COMMIT)' 2>/dev/null | sed -n 's|tags/\(.*\)|\1|p'); \
+ 	     test -n "$${T}" && echo "$${T}" \
+-	       || git log --pretty=format:%H -n 1 '$(COMMIT)')
++	       || git log --pretty=format:%H -n 1 '$(COMMIT)' 2>/dev/null || echo DIST)
+ lparen = (
+ rparen = )
+-SHORTTAG ?= $(shell case $(TAG) in Pacemaker-*$(rparen) echo '$(TAG)' | cut -c11-;; \
++SHORTTAG ?= $(shell case $(TAG) in Pacemaker-*|DIST$(rparen) echo '$(TAG)' | cut -c11-;; \
+ 	      *$(rparen) git log --pretty=format:%h -n 1 '$(TAG)';; esac)
+ SHORTTAG_ABBREV = $(shell printf %s '$(SHORTTAG)' | wc -c)
+ WITH    ?= --without doc
+-- 
+1.8.3.1
+
+
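Unrolled into plain shell, the fallback chain the patch above gives TAG reads roughly as follows (a paraphrase of the make fragment, not a literal quote):

    T=$(git describe --all "$COMMIT" 2>/dev/null | sed -n 's|tags/\(.*\)|\1|p')
    test -n "$T" && echo "$T" \
      || git log --pretty=format:%H -n 1 "$COMMIT" 2>/dev/null \
      || echo DIST   # no git metadata at all, e.g. in an unpacked tarball
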
+From d545f94c95dbf9c205b458aee6bdff49fa9c3dd9 Mon Sep 17 00:00:00 2001
+From: Chris Lumens <clumens@redhat.com>
+Date: Thu, 28 Feb 2019 10:28:35 -0500
+Subject: [PATCH 82/96] Feature: xml: Add a schema for API results.
+
+This describes the layout of all the XML that will be emitted by the
+formatted output patches.  That output can be validated against the
+schema with xmllint.
+
+[small portion of commit backported to 1.1]
+---
+ xml/Makefile.am | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+diff --git a/xml/Makefile.am b/xml/Makefile.am
+index c801842..09d3503 100644
+--- a/xml/Makefile.am
++++ b/xml/Makefile.am
+@@ -30,31 +30,36 @@ RNGdir			= $(dtddir)
+ 
+ # Sorted list of available numeric RNG versions,
+ # extracted from filenames like NAME-MAJOR[.MINOR][.MINOR-MINOR].rng
+-RNG_numeric_versions    = $(shell ls -1 $(top_srcdir)/xml/*.rng \
++numeric_versions = $(shell ls -1 $(1) \
+ 			  | sed -n -e 's/^.*-\([0-9][0-9.]*\).rng$$/\1/p' \
+ 			  | sort -u -t. -k 1,1n -k 2,2n -k 3,3n)
+ 
+-# The highest numeric version
+-RNG_max			?= $(lastword $(RNG_numeric_versions))
+-
+-# A sorted list of all RNG versions (numeric and "next")
+-RNG_versions		= next $(RNG_numeric_versions)
+-RNG_version_pairs	= $(join \
+-			    ${RNG_numeric_versions},$(addprefix \
++version_pairs = $(join \
++			    $(1),$(addprefix \
+ 			      -,$(wordlist \
+-			        2,$(words ${RNG_numeric_versions}),${RNG_numeric_versions} \
++			        2,$(words $(1)),$(1) \
+ 			      ) next \
+ 			    ) \
+ 			  )
+-RNG_version_pairs_cnt	= $(words ${RNG_version_pairs})
+-RNG_version_pairs_last  = $(wordlist \
++
++version_pairs_last = $(wordlist \
+ 			    $(words \
+ 			      $(wordlist \
+-			        2,${RNG_version_pairs_cnt},${RNG_version_pairs} \
++			        2,$(1),$(2) \
+ 			      ) \
+-			    ),${RNG_version_pairs_cnt},${RNG_version_pairs} \
++			    ),$(1),$(2) \
+ 			  )
+ 
++RNG_numeric_versions = $(call numeric_versions,${RNG_files})
++
++# The highest numeric version
++RNG_max			?= $(lastword $(RNG_numeric_versions))
++
++RNG_versions		= next $(RNG_numeric_versions)
++RNG_version_pairs	= $(call version_pairs,${RNG_numeric_versions})
++RNG_version_pairs_cnt	= $(words ${RNG_version_pairs})
++RNG_version_pairs_last  = $(call version_pairs_last,${RNG_version_pairs_cnt},${RNG_version_pairs})
++
+ RNG_generated		= pacemaker.rng $(foreach base,$(RNG_versions),pacemaker-$(base).rng) versions.rng
+ 
+ RNG_cfg_base		= options nodes resources constraints fencing acls tags alerts
+-- 
+1.8.3.1
+
+
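For reference, the xmllint validation the commit message above alludes to is a one-liner; the file names below are placeholders rather than files added by this backport:

    xmllint --noout --relaxng api-result.rng output.xml
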
+From fd3587f67b405c08473a636554f4822e72149495 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 2 Aug 2019 14:41:25 -0500
+Subject: [PATCH 83/96] Build: xml: remove broken and unneeded "make sync"
+ target
+
+---
+ xml/Makefile.am | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/xml/Makefile.am b/xml/Makefile.am
+index 09d3503..5734979 100644
+--- a/xml/Makefile.am
++++ b/xml/Makefile.am
+@@ -178,8 +178,4 @@ fulldiff: best-match.sh
+ 	@echo "#  Comparing all changes across all the subsequent increments"
+ 	$(call version_diff,${RNG_version_pairs})
+ 
+-sync:
+-	git rm -f $(wildcard *-next.rng)
+-	make pacemaker-next.rng
+-
+ CLEANFILES = $(RNG_generated)
+-- 
+1.8.3.1
+
+
+From 2e8384cef56365c187c3b80719cd49f72d66b8e4 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 6 Aug 2019 14:52:20 -0500
+Subject: [PATCH 84/96] Build: xml: rearrange and comment Makefile for
+ readability
+
+Similarly, rename some variables, and rename the versions target to
+cib-versions.
+---
+ xml/Makefile.am | 147 +++++++++++++++++++++++++++++---------------------------
+ 1 file changed, 75 insertions(+), 72 deletions(-)
+
+diff --git a/xml/Makefile.am b/xml/Makefile.am
+index 5734979..8ff805b 100644
+--- a/xml/Makefile.am
++++ b/xml/Makefile.am
+@@ -1,35 +1,22 @@
+ #
+ # Copyright (C) 2004 Andrew Beekhof
+ #
+-# This program is free software; you can redistribute it and/or
+-# modify it under the terms of the GNU General Public License
+-# as published by the Free Software Foundation; either version 2
+-# of the License, or (at your option) any later version.
+-# 
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-# GNU General Public License for more details.
+-# 
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, write to the Free Software
+-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++# This source code is licensed under the GNU General Public License version 2
++# or later (GPLv2+) WITHOUT ANY WARRANTY.
+ #
+ MAINTAINERCLEANFILES    = Makefile.in
+ 
+-dtddir			= $(CRM_DTD_DIRECTORY)
+-dist_dtd_DATA		= crm.dtd crm-transitional.dtd
+-
+-xsltdir			= $(dtddir)
+-dist_xslt_DATA		= $(top_srcdir)/xml/upgrade06.xsl \
+-			  $(top_srcdir)/xml/upgrade-*.xsl
+-
+-RNGdir			= $(dtddir)
++# Pacemaker 1.1 has 2 schemas: the CIB schema, and a schema for
++# crm_mon --as-xml.
++#
++# See Readme.md for details on updating CIB schema files
+ 
+-# See Readme.md for details on updating schema files
++# The CIB and crm_mon schemas are installed directly in CRM_DTD_DIRECTORY.
++CIBdir	= $(CRM_DTD_DIRECTORY)
++MONdir	= $(CRM_DTD_DIRECTORY)
+ 
+-# Sorted list of available numeric RNG versions,
+-# extracted from filenames like NAME-MAJOR[.MINOR][.MINOR-MINOR].rng
++# Extract a sorted list of available numeric schema versions
++# from filenames like NAME-MAJOR[.MINOR][.MINOR-MINOR].rng
+ numeric_versions = $(shell ls -1 $(1) \
+ 			  | sed -n -e 's/^.*-\([0-9][0-9.]*\).rng$$/\1/p' \
+ 			  | sort -u -t. -k 1,1n -k 2,2n -k 3,3n)
+@@ -50,27 +37,40 @@ version_pairs_last = $(wordlist \
+ 			    ),$(1),$(2) \
+ 			  )
+ 
+-RNG_numeric_versions = $(call numeric_versions,${RNG_files})
++# Names of CIB schemas that form the choices for cib/configuration content
++CIB_cfg_base		= options nodes resources constraints fencing acls tags alerts
++
++# Names of all schemas (including top level and those included by others)
++CIB_base		= cib $(CIB_cfg_base) status score rule nvset
++
++# All static schema files
++CIB_files		= $(foreach base,$(CIB_base),$(wildcard $(base).rng $(base)-*.rng))
++MON_files		= crm_mon.rng
++
++# Sorted lists of all numeric schema versions
++CIB_numeric_versions	= $(call numeric_versions,${CIB_files})
+ 
+-# The highest numeric version
+-RNG_max			?= $(lastword $(RNG_numeric_versions))
++# The highest numeric schema version
++CIB_max			?= $(lastword $(CIB_numeric_versions))
+ 
+-RNG_versions		= next $(RNG_numeric_versions)
+-RNG_version_pairs	= $(call version_pairs,${RNG_numeric_versions})
+-RNG_version_pairs_cnt	= $(words ${RNG_version_pairs})
+-RNG_version_pairs_last  = $(call version_pairs_last,${RNG_version_pairs_cnt},${RNG_version_pairs})
++# Sorted lists of all schema versions (including "next")
++CIB_versions		= next $(CIB_numeric_versions)
+ 
+-RNG_generated		= pacemaker.rng $(foreach base,$(RNG_versions),pacemaker-$(base).rng) versions.rng
++# Dynamically generated schema files
++CIB_generated		= pacemaker.rng $(foreach base,$(CIB_versions),pacemaker-$(base).rng) versions.rng
+ 
+-RNG_cfg_base		= options nodes resources constraints fencing acls tags alerts
+-RNG_base		= cib $(RNG_cfg_base) status score rule nvset
+-RNG_files		= $(foreach base,$(RNG_base),$(wildcard $(base).rng $(base)-*.rng))
++CIB_version_pairs	= $(call version_pairs,${CIB_numeric_versions})
++CIB_version_pairs_cnt	= $(words ${CIB_version_pairs})
++CIB_version_pairs_last  = $(call version_pairs_last,${CIB_version_pairs_cnt},${CIB_version_pairs})
+ 
+-# List of non-Pacemaker RNGs
+-RNG_extra		= crm_mon.rng
++dist_CIB_DATA		= $(CIB_files)					\
++			  upgrade06.xsl					\
++			  upgrade-1.3.xsl				\
++			  crm.dtd					\
++			  crm-transitional.dtd
++dist_MON_DATA		= $(MON_files)
+ 
+-dist_RNG_DATA		= $(RNG_files) $(RNG_extra)
+-nodist_RNG_DATA		= $(RNG_generated)
++nodist_CIB_DATA		= $(CIB_generated)
+ 
+ EXTRA_DIST		= Readme.md			\
+ 			  best-match.sh			\
+@@ -79,10 +79,38 @@ EXTRA_DIST		= Readme.md			\
+ 			  regression.core.sh		\
+ 			  regression.sh
+ 
+-versions:
+-	echo "Max: $(RNG_max)"
+-	echo "Available: $(RNG_versions)"
++cib-versions:
++	@echo "Max: $(CIB_max)"
++	@echo "Available: $(CIB_versions)"
+ 
++
++# Dynamically generated top-level CIB schema
++pacemaker.rng: pacemaker-$(CIB_max).rng
++	echo "  RNG      $@"
++	cp $(top_builddir)/xml/$< $@
++
++pacemaker-%.rng: $(CIB_files) best-match.sh Makefile.am
++	echo "  RNG      $@"
++	echo '<?xml version="1.0" encoding="UTF-8"?>' > $@
++	echo '<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">' >> $@
++	echo '  <start>' >> $@
++	echo '    <element name="cib">' >> $@
++	$(srcdir)/best-match.sh cib $(*) $(@) "      "
++	echo '      <element name="configuration">' >> $@
++	echo '        <interleave>' >> $@
++	for rng in $(CIB_cfg_base); do $(srcdir)/best-match.sh $$rng $(*) $(@) "          " || :; done
++	echo '        </interleave>' >> $@
++	echo '      </element>' >> $@
++	echo '      <optional>' >> $@
++	echo '        <element name="status">' >> $@
++	$(srcdir)/best-match.sh status $(*) $(@) "          "
++	echo '        </element>' >> $@
++	echo '      </optional>' >> $@
++	echo '    </element>' >> $@
++	echo '  </start>' >> $@
++	echo '</grammar>' >> $@
++
++# Dynamically generated CIB schema listing all pacemaker versions
+ versions.rng: Makefile.am
+ 	echo "  RNG      $@"
+ 	echo '<?xml version="1.0" encoding="UTF-8"?>' > $@
+@@ -97,7 +125,7 @@ versions.rng: Makefile.am
+ 	echo '          <value>transitional-0.6</value>' >> $@
+ 	echo '          <value>pacemaker-0.7</value>' >> $@
+ 	echo '          <value>pacemaker-1.1</value>' >> $@
+-	for rng in $(RNG_versions); do echo "          <value>pacemaker-$$rng</value>" >> $@; done
++	for rng in $(CIB_versions); do echo "          <value>pacemaker-$$rng</value>" >> $@; done
+ 	echo '        </choice>' >> $@
+ 	echo '      </attribute>' >> $@
+ 	echo '    </optional>' >> $@
+@@ -108,31 +136,6 @@ versions.rng: Makefile.am
+ 	echo '  </start>' >> $@
+ 	echo '</grammar>' >> $@
+ 
+-pacemaker.rng: pacemaker-$(RNG_max).rng
+-	echo "  RNG      $@"
+-	cp $(top_builddir)/xml/$< $@
+-
+-pacemaker-%.rng: $(RNG_files) best-match.sh Makefile.am
+-	echo "  RNG      $@"
+-	echo '<?xml version="1.0" encoding="UTF-8"?>' > $@
+-	echo '<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">' >> $@
+-	echo '  <start>' >> $@
+-	echo '    <element name="cib">' >> $@
+-	$(top_srcdir)/xml/best-match.sh cib $(*) $(@) "      "
+-	echo '      <element name="configuration">' >> $@
+-	echo '        <interleave>' >> $@
+-	for rng in $(RNG_cfg_base); do $(top_srcdir)/xml/best-match.sh $$rng $(*) $(@) "          " || :; done
+-	echo '        </interleave>' >> $@
+-	echo '      </element>' >> $@
+-	echo '      <optional>' >> $@
+-	echo '        <element name="status">' >> $@
+-	$(top_srcdir)/xml/best-match.sh status $(*) $(@) "          "
+-	echo '        </element>' >> $@
+-	echo '      </optional>' >> $@
+-	echo '    </element>' >> $@
+-	echo '  </start>' >> $@
+-	echo '</grammar>' >> $@
+-
+ # diff fails with ec=2 if no predecessor is found;
+ # this uses '=' GNU extension to sed, if that's not available,
+ # one can use: hline=`echo "$${p}" | grep -Fn "$${hunk}" | cut -d: -f1`;
+@@ -171,11 +174,11 @@ version_diff = \
+ 	done
+ 
+ diff: best-match.sh
+-	@echo "#  Comparing changes in + since $(RNG_max)"
+-	$(call version_diff,${RNG_version_pairs_last})
++	@echo "#  Comparing changes in + since $(CIB_max)"
++	$(call version_diff,${CIB_version_pairs_last})
+ 
+ fulldiff: best-match.sh
+ 	@echo "#  Comparing all changes across all the subsequent increments"
+-	$(call version_diff,${RNG_version_pairs})
++	$(call version_diff,${CIB_version_pairs})
+ 
+-CLEANFILES = $(RNG_generated)
++CLEANFILES = $(CIB_generated)
+-- 
+1.8.3.1
+
+
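The refactoring above leans on GNU make's $(call ...) mechanism, which turns a recursively expanded variable into a function whose arguments become $(1), $(2), and so on. A toy example of the mechanism, unrelated to the schema names:

    # $(1) and $(2) are the call arguments; demo expands to "bar foo"
    reverse = $(2) $(1)
    demo   := $(call reverse,foo,bar)
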
+From da2dd7c8aef6b19fe7b45a0df6ec2d042a3d0049 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 29 Jul 2019 18:33:29 -0500
+Subject: [PATCH 85/96] Build: xml: make schema files work with VPATH builds
+
+a.k.a. pristine builds, where the source and build directories are different
+---
+ xml/Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xml/Makefile.am b/xml/Makefile.am
+index 8ff805b..972750f 100644
+--- a/xml/Makefile.am
++++ b/xml/Makefile.am
+@@ -44,7 +44,7 @@ CIB_cfg_base		= options nodes resources constraints fencing acls tags alerts
+ CIB_base		= cib $(CIB_cfg_base) status score rule nvset
+ 
+ # All static schema files
+-CIB_files		= $(foreach base,$(CIB_base),$(wildcard $(base).rng $(base)-*.rng))
++CIB_files		= $(foreach base,$(CIB_base),$(wildcard $(srcdir)/$(base).rng $(srcdir)/$(base)-*.rng))
+ MON_files		= crm_mon.rng
+ 
+ # Sorted lists of all numeric schema versions
+-- 
+1.8.3.1
+
+
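For anyone unfamiliar with the term: a VPATH build keeps the build tree separate from the source tree, which is why the wildcard above must search $(srcdir) explicitly. A typical invocation, with an arbitrary build directory name:

    mkdir build && cd build
    ../configure
    make
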
+From a08ebb7599992a783dadb7cf74f83e127c29f8cc Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 30 Jul 2019 12:27:06 -0500
+Subject: [PATCH 86/96] Build: doc: make HTML documents compatible with VPATH
+ builds
+
+---
+ doc/Makefile.am | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index a929acb..6c2a3c7 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -110,13 +110,15 @@ EXTRA_DIST	+= $(PE_TXT) $(PE_XML_ONLY)
+ EXTRA_DIST	+= $(PR_TXT) $(PR_XML_ONLY)
+ EXTRA_DIST	+= pcs-crmsh-quick-ref.md
+ 
+-%.html: %.txt
+ if IS_ASCIIDOC
+-	$(AM_V_ASCII)$(ASCIIDOC_CONV) --unsafe --backend=xhtml11 $<
++ASCIIDOC_HTML_ARGS	= --unsafe --backend=xhtml11
+ else
+-	$(AM_V_ASCII)$(ASCIIDOC_CONV) --backend=html5 $<
++ASCIIDOC_HTML_ARGS	= --backend=html5
+ endif
+ 
++%.html: %.txt
++	$(AM_V_ASCII)$(ASCIIDOC_CONV) $(ASCIIDOC_HTML_ARGS) --out-file=$@ $<
++
+ # publican-clusterlabs/xsl/{html,html-single,pdf}.xsl refer to URIs
+ # requiring Internet access, hence we shadow that with a XML catalog-based
+ # redirect to local files brought with Publican installation;
+-- 
+1.8.3.1
+
+
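The key to the change above is asciidoc's -o/--out-file option: by default the tool writes the HTML next to the input .txt, which in a VPATH build means writing into the source tree. Condensed, the resulting rule is:

    %.html: %.txt
    	$(ASCIIDOC_CONV) $(ASCIIDOC_HTML_ARGS) --out-file=$@ $<
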
+From 736051ab087c97372891591b0aa907b5d7c1f2dc Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 1 Aug 2019 10:44:26 -0500
+Subject: [PATCH 87/96] Build: makefiles: simplify silent rules usage
+
+make output is terser with --enable-silent-rules at configure time or make V=0
+at make run-time. This simplifies our handling of such silent rules, mainly by
+using AM_V_at and AM_V_GEN appropriately.
+
+This gets rid of AM_V_IMG (which we never defined) and AM_V_ASCII and AM_V_XSL
+(which weren't particularly useful), and adds AM_V_SCHEMA (to replace the
+half-hearted attempt at RNG handling). Our PCMK_quiet now just silences stdout,
+not stderr.
+---
+ Makefile.common | 52 ++++++++++++++++++++--------------
+ doc/Makefile.am | 18 ++++++------
+ xml/Makefile.am | 88 ++++++++++++++++++++++++++++-----------------------------
+ 3 files changed, 83 insertions(+), 75 deletions(-)
+
+diff --git a/Makefile.common b/Makefile.common
+index a4842b7..386d59d 100644
+--- a/Makefile.common
++++ b/Makefile.common
+@@ -1,27 +1,37 @@
+-# Not all current distros support AM_V_P
++#
++# Copyright 2014-2019 the Pacemaker project contributors
++#
++# The version control history for this file may have further details.
++#
++# This source code is licensed under the GNU General Public License version 2
++# or later (GPLv2+) WITHOUT ANY WARRANTY.
++#
++
++#
++# Some variables to help with silent rules
+ # https://www.gnu.org/software/automake/manual/html_node/Automake-silent_002drules-Option.html
++#
++# We require a minimum automake version of 1.11, which includes AM_V_GEN and
++# AM_V_at, but AM_V_P is not available until 1.13.
+ 
+ V ?= $(AM_DEFAULT_VERBOSITY)
+ 
+-PCMK_V = $(pcmk__v_$(V))
+-pcmk__v_0 = :
+-pcmk__v_1 =
+-
++# When a make command is prefixed with one of the AM_V_* macros, it may also be
++# desirable to suffix the command with this, to silence stdout.
+ PCMK_quiet = $(pcmk_quiet_$(V))
+-pcmk_quiet_0 = >/dev/null 2>&1
++pcmk_quiet_0 = >/dev/null
+ pcmk_quiet_1 = 
+ 
+-AM_V_XSL = $(am__v_XSL_$(V))
+-am__v_XSL_0 = @echo "  XSL     " $@;
+-am__v_XSL_1 = 
+-
++# AM_V_GEN is intended to be used in custom pattern rules, and replaces echoing
++# the command used with a more concise line with "GEN" and the name of the file
++# being generated. Our AM_V_* macros are similar but more descriptive.
+ AM_V_MAN = $(am__v_MAN_$(V))
+-am__v_MAN_0 = @echo "  MAN     " $@;
++am__v_MAN_0 = @echo "  MAN      $@";
+ am__v_MAN_1 = 
+ 
+-AM_V_ASCII = $(am__v_ASCII_$(V))
+-am__v_ASCII_0 = @echo "  ASCII   " $@;
+-am__v_ASCII_1 = 
++AM_V_SCHEMA = $(am__v_SCHEMA_$(V))
++am__v_SCHEMA_0 = @echo "  SCHEMA   $@";
++am__v_SCHEMA_1 = 
+ 
+ AM_V_PUB = $(am__v_PUB_$(V))
+ am__v_PUB_0 = @echo "  PUB     $@: $(DOCBOOK_FORMATS)";
+@@ -37,18 +47,18 @@ man8_MANS		= $(sbin_PROGRAMS:%=%.8) $(sbin_SCRIPTS:%=%.8) $(dist_sbin_SCRIPTS:%=
+ endif
+ 
+ %.8:	% $(MAN8DEPS)
+-	chmod a+x $(abs_builddir)/$<
++	$(AM_V_at)chmod a+x $(abs_builddir)/$<
+ 	$(AM_V_MAN)PATH=$(abs_builddir):$$PATH $(HELP2MAN) --output $@ --no-info --section 8 --name "Part of the Pacemaker cluster resource manager" $(abs_builddir)/$<
+ 
+ %.xml:  %
+-	$(AM_V_GEN)$(abs_builddir)/$< metadata > $@
++	$(AM_V_at)$(abs_builddir)/$< metadata > $@
+ 
+ %.dbook: %.xml
+-	$(AM_V_XSL)$(XSLTPROC) --nonet --novalid --stringparam man.name $* $(DBOOK_OPTS) $(top_srcdir)/xml/ocf-meta2man.xsl $(abs_builddir)/$< > $(abs_builddir)/$@
++	$(AM_V_at)$(XSLTPROC) --nonet --novalid --stringparam man.name $* $(DBOOK_OPTS) $(top_srcdir)/xml/ocf-meta2man.xsl $(abs_builddir)/$< > $(abs_builddir)/$@
+ 
+ %.7:	%.dbook
+-	$(AM_V_XSL)$(XSLTPROC) $(MANPAGE_XSLT) $(abs_builddir)/$< $(PCMK_quiet)
+-
++	$(AM_V_MAN)$(XSLTPROC) $(MANPAGE_XSLT) $(abs_builddir)/$< $(PCMK_quiet)
++# 
+ # Build docbook from asciidoc because XML is a PITA to edit
+ #
+ # Build each chapter as a book (since the numbering isn't right for
+@@ -59,9 +69,9 @@ endif
+ #
+ %.xml:  %.txt
+ if IS_ASCIIDOC
+-	$(AM_V_ASCII)$(ASCIIDOC_CONV) -b docbook -d book -o $@-tt $<
++	$(AM_V_GEN)$(ASCIIDOC_CONV) -b docbook -d book -o $@-tt $<
+ else
+-	$(AM_V_ASCII)$(ASCIIDOC_CONV) -b docbook45 -d book -o $@-tt $<
++	$(AM_V_GEN)$(ASCIIDOC_CONV) -b docbook45 -d book -o $@-tt $<
+ endif
+ 	$(AM_V_at)tr -d '\036\r' <$@-tt >$@-t; rm -f $@-tt  # Fix line endings
+ 	$(AM_V_at)sed -i 's/\ lang="en"//' $@-t      # Never specify a language in the chapters
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index 6c2a3c7..a01423d 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -86,13 +86,13 @@ PNGS = $(PNGS_ORIGINAL) $(PNGS_GENERATED)
+ graphics: $(PNGS)
+ 
+ %.png: %.svg
+-	$(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=90 -C --export-png=$@
++	$(AM_V_GEN)$(INKSCAPE) --file=$< --export-dpi=90 -C --export-png=$@ $(PCMK_quiet)
+ 
+ %-small.png: %.svg
+-	$(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=45 -C --export-png=$@
++	$(AM_V_GEN)$(INKSCAPE) --file=$< --export-dpi=45 -C --export-png=$@ $(PCMK_quiet)
+ 
+ %-large.png: %.svg
+-	$(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=180 -C --export-png=$@
++	$(AM_V_GEN)$(INKSCAPE) --file=$< --export-dpi=180 -C --export-png=$@ $(PCMK_quiet)
+ 
+ if BUILD_ASCIIDOC
+ generated_docs	+= $(ascii:%.txt=%.html)
+@@ -117,7 +117,7 @@ ASCIIDOC_HTML_ARGS	= --backend=html5
+ endif
+ 
+ %.html: %.txt
+-	$(AM_V_ASCII)$(ASCIIDOC_CONV) $(ASCIIDOC_HTML_ARGS) --out-file=$@ $<
++	$(AM_V_GEN)$(ASCIIDOC_CONV) $(ASCIIDOC_HTML_ARGS) --out-file=$@ $< $(PCMK_quiet)
+ 
+ # publican-clusterlabs/xsl/{html,html-single,pdf}.xsl refer to URIs
+ # requiring Internet access, hence we shadow that with a XML catalog-based
+@@ -164,8 +164,8 @@ endif
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Clusters_from_Scratch.build: $(PNGS)  $(CFS_XML_ONLY) $(CFS_XML) $(CFS_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
+-	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
++Clusters_from_Scratch.build: $(PNGS) $(CFS_XML_ONLY) $(CFS_XML) $(CFS_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
++	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+ 	$(AM_V_PUB)cd $(@:%.build=%) \
+@@ -190,7 +190,7 @@ PD_XML_ONLY=$(addprefix Pacemaker_Development/en-US/,$(COMMON_XML) \
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+ Pacemaker_Development.build: $(PD_XML_ONLY) $(PD_XML) $(PUBLICAN_INTREE_DEPS)
+-	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
++	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+ 	$(AM_V_PUB)cd $(@:%.build=%) \
+@@ -219,7 +219,7 @@ $(PE_XML): $(PE_SHARED_XML)
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+ Pacemaker_Explained.build: $(PNGS) $(PE_XML_ONLY) $(PE_XML) $(PE_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
+-	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
++	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+ 	$(AM_V_PUB)cd $(@:%.build=%) \
+@@ -244,7 +244,7 @@ PR_XML_ONLY=$(addprefix Pacemaker_Remote/en-US/,$(COMMON_XML) \
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+ Pacemaker_Remote.build: $(PNGS) $(PR_XML_ONLY) $(PR_XML) $(PUBLICAN_INTREE_DEPS)
+-	$(PCMK_V) @echo Building $(@:%.build=%) because of $?
++	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ if PUBLICAN_INTREE_BRAND
+ 	$(AM_V_PUB)cd $(@:%.build=%) \
+diff --git a/xml/Makefile.am b/xml/Makefile.am
+index 972750f..af53a6d 100644
+--- a/xml/Makefile.am
++++ b/xml/Makefile.am
+@@ -4,7 +4,8 @@
+ # This source code is licensed under the GNU General Public License version 2
+ # or later (GPLv2+) WITHOUT ANY WARRANTY.
+ #
+-MAINTAINERCLEANFILES    = Makefile.in
++
++include $(top_srcdir)/Makefile.common
+ 
+ # Pacemaker 1.1 has 2 schemas: the CIB schema, and a schema for
+ # crm_mon --as-xml.
+@@ -86,55 +87,52 @@ cib-versions:
+ 
+ # Dynamically generated top-level CIB schema
+ pacemaker.rng: pacemaker-$(CIB_max).rng
+-	echo "  RNG      $@"
+-	cp $(top_builddir)/xml/$< $@
++	$(AM_V_SCHEMA)cp $(top_builddir)/xml/$< $@
+ 
+ pacemaker-%.rng: $(CIB_files) best-match.sh Makefile.am
+-	echo "  RNG      $@"
+-	echo '<?xml version="1.0" encoding="UTF-8"?>' > $@
+-	echo '<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">' >> $@
+-	echo '  <start>' >> $@
+-	echo '    <element name="cib">' >> $@
+-	$(srcdir)/best-match.sh cib $(*) $(@) "      "
+-	echo '      <element name="configuration">' >> $@
+-	echo '        <interleave>' >> $@
+-	for rng in $(CIB_cfg_base); do $(srcdir)/best-match.sh $$rng $(*) $(@) "          " || :; done
+-	echo '        </interleave>' >> $@
+-	echo '      </element>' >> $@
+-	echo '      <optional>' >> $@
+-	echo '        <element name="status">' >> $@
+-	$(srcdir)/best-match.sh status $(*) $(@) "          "
+-	echo '        </element>' >> $@
+-	echo '      </optional>' >> $@
+-	echo '    </element>' >> $@
+-	echo '  </start>' >> $@
+-	echo '</grammar>' >> $@
++	$(AM_V_at)echo '<?xml version="1.0" encoding="UTF-8"?>' > $@
++	$(AM_V_at)echo '<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">' >> $@
++	$(AM_V_at)echo '  <start>' >> $@
++	$(AM_V_at)echo '    <element name="cib">' >> $@
++	$(AM_V_at)$(srcdir)/best-match.sh cib $(*) $(@) "      "
++	$(AM_V_at)echo '      <element name="configuration">' >> $@
++	$(AM_V_at)echo '        <interleave>' >> $@
++	$(AM_V_at)for rng in $(CIB_cfg_base); do $(srcdir)/best-match.sh $$rng $(*) $(@) "          " || :; done
++	$(AM_V_at)echo '        </interleave>' >> $@
++	$(AM_V_at)echo '      </element>' >> $@
++	$(AM_V_at)echo '      <optional>' >> $@
++	$(AM_V_at)echo '        <element name="status">' >> $@
++	$(AM_V_at)$(srcdir)/best-match.sh status $(*) $(@) "          "
++	$(AM_V_at)echo '        </element>' >> $@
++	$(AM_V_at)echo '      </optional>' >> $@
++	$(AM_V_at)echo '    </element>' >> $@
++	$(AM_V_at)echo '  </start>' >> $@
++	$(AM_V_SCHEMA)echo '</grammar>' >> $@
+ 
+ # Dynamically generated CIB schema listing all pacemaker versions
+ versions.rng: Makefile.am
+-	echo "  RNG      $@"
+-	echo '<?xml version="1.0" encoding="UTF-8"?>' > $@
+-	echo '<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">' >> $@
+-	echo '  <start>' >> $@
+-	echo '   <interleave>' >> $@
+-	echo '    <optional>' >> $@
+-	echo '      <attribute name="validate-with">' >> $@
+-	echo '        <choice>' >> $@
+-	echo '          <value>none</value>' >> $@
+-	echo '          <value>pacemaker-0.6</value>' >> $@
+-	echo '          <value>transitional-0.6</value>' >> $@
+-	echo '          <value>pacemaker-0.7</value>' >> $@
+-	echo '          <value>pacemaker-1.1</value>' >> $@
+-	for rng in $(CIB_versions); do echo "          <value>pacemaker-$$rng</value>" >> $@; done
+-	echo '        </choice>' >> $@
+-	echo '      </attribute>' >> $@
+-	echo '    </optional>' >> $@
+-	echo '    <attribute name="admin_epoch"><data type="nonNegativeInteger"/></attribute>' >> $@
+-	echo '    <attribute name="epoch"><data type="nonNegativeInteger"/></attribute>' >> $@
+-	echo '    <attribute name="num_updates"><data type="nonNegativeInteger"/></attribute>' >> $@
+-	echo '   </interleave>' >> $@
+-	echo '  </start>' >> $@
+-	echo '</grammar>' >> $@
++	$(AM_V_at)echo '<?xml version="1.0" encoding="UTF-8"?>' > $@
++	$(AM_V_at)echo '<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">' >> $@
++	$(AM_V_at)echo '  <start>' >> $@
++	$(AM_V_at)echo '   <interleave>' >> $@
++	$(AM_V_at)echo '    <optional>' >> $@
++	$(AM_V_at)echo '      <attribute name="validate-with">' >> $@
++	$(AM_V_at)echo '        <choice>' >> $@
++	$(AM_V_at)echo '          <value>none</value>' >> $@
++	$(AM_V_at)echo '          <value>pacemaker-0.6</value>' >> $@
++	$(AM_V_at)echo '          <value>transitional-0.6</value>' >> $@
++	$(AM_V_at)echo '          <value>pacemaker-0.7</value>' >> $@
++	$(AM_V_at)echo '          <value>pacemaker-1.1</value>' >> $@
++	$(AM_V_at)for rng in $(CIB_versions); do echo "          <value>pacemaker-$$rng</value>" >> $@; done
++	$(AM_V_at)echo '        </choice>' >> $@
++	$(AM_V_at)echo '      </attribute>' >> $@
++	$(AM_V_at)echo '    </optional>' >> $@
++	$(AM_V_at)echo '    <attribute name="admin_epoch"><data type="nonNegativeInteger"/></attribute>' >> $@
++	$(AM_V_at)echo '    <attribute name="epoch"><data type="nonNegativeInteger"/></attribute>' >> $@
++	$(AM_V_at)echo '    <attribute name="num_updates"><data type="nonNegativeInteger"/></attribute>' >> $@
++	$(AM_V_at)echo '   </interleave>' >> $@
++	$(AM_V_at)echo '  </start>' >> $@
++	$(AM_V_SCHEMA)echo '</grammar>' >> $@
+ 
+ # diff fails with ec=2 if no predecessor is found;
+ # this uses '=' GNU extension to sed, if that's not available,
+-- 
+1.8.3.1
+
+
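The patch above standardizes on a pattern that selects each helper variable by the verbosity level V; condensed, with only the schema variant shown:

    V ?= $(AM_DEFAULT_VERBOSITY)
    # V=0: print a terse "SCHEMA <target>" line and hide the command itself
    AM_V_SCHEMA    = $(am__v_SCHEMA_$(V))
    am__v_SCHEMA_0 = @echo "  SCHEMA   $@";
    # V=1: expand to nothing, so the command is echoed as usual
    am__v_SCHEMA_1 =

The level is chosen with "make V=0" or "make V=1" at build time, or by default via --enable-silent-rules at configure time.
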
+From d801c43b35b457170604f2e9d0c16c81cf2c0f98 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 1 Aug 2019 11:09:59 -0500
+Subject: [PATCH 88/96] Build: makefiles: don't echo echo
+
+echo should usually be used as @echo in makefiles; there's no point in seeing:
+
+  echo blah
+  blah
+---
+ GNUmakefile     | 16 ++++++++--------
+ Makefile.am     | 15 ++++++++++++---
+ doc/Makefile.am | 14 +++++++-------
+ 3 files changed, 27 insertions(+), 18 deletions(-)
+
+diff --git a/GNUmakefile b/GNUmakefile
+index d790865..3dd1055 100644
+--- a/GNUmakefile
++++ b/GNUmakefile
+@@ -207,7 +207,7 @@ srpm-%:	export $(PACKAGE)-%.spec
+ 	$(call rpmbuild-with,$(WITH),-bs --define "dist .$*" $(RPM_OPTS),$(PACKAGE).spec)
+ 
+ chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG)
+-	echo "Done"
++	@echo "Done"
+ 
+ mock-next:
+ 	make F=$(shell expr 1 + $(F)) mock
+@@ -216,19 +216,19 @@ mock-rawhide:
+ 	make F=rawhide mock
+ 
+ mock-install-%:
+-	echo "Installing packages"
++	@echo "Installing packages"
+ 	mock --root=$* $(MOCK_OPTIONS) --install $(RPM_ROOT)/mock/*.rpm vi sudo valgrind lcov gdb fence-agents psmisc
+ 
+ mock-install: mock-install-$(MOCK_CFG)
+-	echo "Done"
++	@echo "Done"
+ 
+ mock-sh: mock-sh-$(MOCK_CFG)
+-	echo "Done"
++	@echo "Done"
+ 
+ mock-sh-%:
+-	echo "Connecting"
++	@echo "Connecting"
+ 	mock --root=$* $(MOCK_OPTIONS) --shell
+-	echo "Done"
++	@echo "Done"
+ 
+ # eg. WITH="--with cman" make rpm
+ mock-%:
+@@ -238,10 +238,10 @@ mock-%:
+ 	mock --root=$* --no-cleanup-after --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm
+ 
+ srpm:	srpm-$(DISTRO)
+-	echo "Done"
++	@echo "Done"
+ 
+ mock:   mock-$(MOCK_CFG)
+-	echo "Done"
++	@echo "Done"
+ 
+ rpm-dep: $(PACKAGE)-$(DISTRO).spec
+ 	if [ x != x`which yum-builddep 2>/dev/null` ]; then			\
+diff --git a/Makefile.am b/Makefile.am
+index 3080445..b47f488 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -63,15 +63,24 @@ scratch.c:
+ 
+ core:
+ 	@echo "Building only core components: $(CORE)"
+-	list='$(CORE)'; for subdir in $$list; do echo "Building $$subdir"; $(MAKE) -C $$subdir all || exit 1; done
++	@for subdir in $(CORE); do \
++		echo "Building $$subdir"; \
++		$(MAKE) -C $$subdir all || exit 1; \
++	done
+ 
+ core-install:
+ 	@echo "Installing only core components: $(CORE)"
+-	list='$(CORE)'; for subdir in $$list; do echo "Installing $$subdir"; $(MAKE) -C $$subdir install || exit 1; done
++	@for subdir in $(CORE); do \
++	    echo "Installing $$subdir"; \
++	    $(MAKE) -C $$subdir install || exit 1; \
++	done
+ 
+ core-clean:
+ 	@echo "Cleaning only core components: $(CORE)"
+-	list='$(CORE)'; for subdir in $$list; do echo "Cleaning $$subdir"; $(MAKE) -C $$subdir clean || exit 1; done
++	@for subdir in $(CORE); do \
++		echo "Cleaning $$subdir"; \
++		$(MAKE) -C $$subdir clean || exit 1; \
++	done
+ 
+ install-exec-local:
+ 	$(INSTALL) -d $(DESTDIR)/$(LCRSODIR)
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index a01423d..8389054 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -262,14 +262,14 @@ endif
+ 
+ # Update the translation template
+ pot:
+-	for book in $(docbook); do 				      \
++	@for book in $(docbook); do 				      \
+ 		echo "Updating translation templates in: $$book";     \
+ 		( cd $$book && RPM_BUILD_DIR="" $(PUBLICAN) update_pot ); \
+ 	done
+ 
+ # Update the actual translations
+ po: pot
+-	for book in $(docbook); do 				      \
++	@for book in $(docbook); do 				      \
+ 		echo "Updating translations in: $$book";     \
+ 		( cd $$book && RPM_BUILD_DIR="" $(PUBLICAN) update_po --langs=all );\
+ 	done
+@@ -300,7 +300,7 @@ brand-build: $(BRAND_DEPS)
+ 	cd publican-clusterlabs && publican build --formats=xml --langs=all --publish
+ 
+ brand: brand-build
+-	echo "Installing..."
++	@echo "Installing branded content..."
+ 	cd publican-clusterlabs && sudo publican install_brand --path=$(datadir)/publican/Common_Content
+ 
+ brand-rpm-clean:
+@@ -315,14 +315,14 @@ brand-rpm-install: brand-rpm-build
+ pdf:
+ 	make DOCBOOK_FORMATS="pdf" all-local
+ 
+-www: clean-local $(generated_docs) $(ascii)
++www: clean-local $(doc_DATA)
+ 	for book in $(docbook); do 							\
+ 		sed -i.sed 's@^brand:.*@brand: clusterlabs@' $$book/publican.cfg;	\
+ 	done
+-	make DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="$(UPLOAD_LANGS)" all-local
+-	echo Uploading current $(PACKAGE_SERIES) documentation set to clusterlabs.org
++	$(MAKE) DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="$(UPLOAD_LANGS)" all-local
++	@echo Uploading current $(PACKAGE_SERIES) documentation set to clusterlabs.org
+ if BUILD_DOCBOOK
+-	for book in $(docbook); do 									\
++	@for book in $(docbook); do 									\
+ 		echo Uploading $$book...;								\
+ 		echo "Generated on `date` from version: $(shell git log --pretty="format:%h %d" -n 1)" >> $$book/publish/build-$(PACKAGE_SERIES).txt;	\
+ 		rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/$(PACKAGE)/doc/";			\
+-- 
+1.8.3.1
+
+
+From 1ac3f3ec7702e5aaff3c77e75da817370602eab7 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 1 Aug 2019 11:11:25 -0500
+Subject: [PATCH 89/96] Build: doc: reorganize and comment makefile
+
+for simplicity and readability
+---
+ doc/Makefile.am | 208 +++++++++++++++++++++++++-------------------------------
+ 1 file changed, 94 insertions(+), 114 deletions(-)
+
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index 8389054..b1c9e06 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -19,14 +19,29 @@
+ #
+ include $(top_srcdir)/Makefile.common
+ 
+-helpdir		= $(datadir)/$(PACKAGE)
++# Deprecated plaintext documents (also dynamically converted to HTML)
++ascii		= acls.txt			\
++		  crm_fencing.txt
++generated_docs	=
++if BUILD_ASCIIDOC
++generated_docs	+= $(ascii:%.txt=%.html)
++endif
++
++# Current Publican/docbook-based documentation
++docbook		= Clusters_from_Scratch		\
++		  Pacemaker_Development		\
++		  Pacemaker_Explained		\
++		  Pacemaker_Remote
++docbook_build = $(docbook:%=%.build)
++
++doc_DATA		= $(ascii) $(generated_docs)
+ 
+-ascii		= crm_fencing.txt acls.txt
+-docbook		=	Clusters_from_Scratch		\
+-			Pacemaker_Development		\
+-			Pacemaker_Explained		\
+-			Pacemaker_Remote
+-doc_DATA	= $(ascii) $(generated_docs)
++EXTRA_DIST	= $(ascii) $(SHARED_TXT) $(PNGS_ORIGINAL) $(DOTS) $(SVGS)
++EXTRA_DIST	+= $(CFS_TXT) $(CFS_XML_ONLY)
++EXTRA_DIST	+= $(PD_TXT) $(PD_XML_ONLY)
++EXTRA_DIST	+= $(PE_TXT) $(PE_XML_ONLY)
++EXTRA_DIST	+= $(PR_TXT) $(PR_XML_ONLY)
++EXTRA_DIST	+= pcs-crmsh-quick-ref.md
+ 
+ # toplevel rsync destination for www targets (without trailing slash)
+ RSYNC_DEST      ?= root@www.clusterlabs.org:/var/www/html
+@@ -35,21 +50,13 @@ RSYNC_DEST      ?= root@www.clusterlabs.org:/var/www/html
+ # don't cross filesystems, sparse, show progress
+ RSYNC_OPTS      = -rlptvzxS --progress
+ 
+-publican_docs   =
+-generated_docs	=
+-generated_mans	=
+-
+-
+-# What formats to build: pdf,html,html-single,html-desktop,epub
++# What formats to build by default: pdf,html,html-single,html-desktop,epub
+ DOCBOOK_FORMATS := html-desktop
+ 
+-# What languages to build
++# What languages to build and upload to website by default
++# (currently only en-US because translations are out of date)
+ DOCBOOK_LANGS   := en-US
+ 
+-# What languages to build for uploading to website
+-# (currently only en-US because translations aren't up-to-date)
+-UPLOAD_LANGS    = en-US
+-
+ # @TODO We could simplify this (and .gitignore) by establishing a convention
+ # that original image source begins with an uppercase letter and generated
+ # files with lowercase.
+@@ -65,7 +72,7 @@ SVGS =	$(wildcard Clusters_from_Scratch/en-US/images/pcmk-*.svg)	\
+ 	$(wildcard Pacemaker_Explained/en-US/images/pcmk-*.svg)	\
+ 	$(DOTS:%.dot=%.svg)
+ 
+-# Final images
++# Final images (some originally in PNG, others generated from SVG)
+ PNGS_ORIGINAL =	Pacemaker_Remote/en-US/images/pcmk-ha-cluster-stack.png	\
+ 		Pacemaker_Remote/en-US/images/pcmk-ha-remote-stack.png	\
+ 		Clusters_from_Scratch/en-US/images/Console.png		\
+@@ -94,22 +101,6 @@ graphics: $(PNGS)
+ %-large.png: %.svg
+ 	$(AM_V_GEN)$(INKSCAPE) --file=$< --export-dpi=180 -C --export-png=$@ $(PCMK_quiet)
+ 
+-if BUILD_ASCIIDOC
+-generated_docs	+= $(ascii:%.txt=%.html)
+-
+-if BUILD_DOCBOOK
+-publican_docs	+= $(docbook)
+-endif
+-endif
+-
+-EXTRA_DIST	= $(ascii) $(SHARED_TXT) $(PNGS_ORIGINAL) $(DOTS) $(SVGS)
+-EXTRA_DIST	+= $(CFS_TXT) $(CFS_XML_ONLY)
+-EXTRA_DIST	+= $(PA_TXT) $(PA_XML_ONLY)
+-EXTRA_DIST	+= $(PD_TXT) $(PD_XML_ONLY)
+-EXTRA_DIST	+= $(PE_TXT) $(PE_XML_ONLY)
+-EXTRA_DIST	+= $(PR_TXT) $(PR_XML_ONLY)
+-EXTRA_DIST	+= pcs-crmsh-quick-ref.md
+-
+ if IS_ASCIIDOC
+ ASCIIDOC_HTML_ARGS	= --unsafe --backend=xhtml11
+ else
+@@ -147,119 +138,107 @@ COMMON_XML = Author_Group.xml Book_Info.xml Revision_History.xml
+ SHARED_TXT=$(wildcard shared/en-US/*.txt)
+ SHARED_XML=$(SHARED_TXT:%.txt=%.xml)
+ 
++if PUBLICAN_INTREE_BRAND
++PUBLICAN_INTREE_DEPS	= publican-catalog
++PUBLICAN_INTREE_ENV	= XML_CATALOG_FILES="$(CURDIR)/publican-catalog"
++PUBLICAN_INTREE_OPT	= --brand_dir=../publican-clusterlabs 
++else
++PUBLICAN_INTREE_DEPS	=
++PUBLICAN_INTREE_ENV	=
++PUBLICAN_INTREE_OPT	=
++endif
+ 
+-CFS_SHARED_TXT=$(addprefix shared/en-US/,pacemaker-intro.txt)
+-CFS_SHARED_XML=$(CFS_SHARED_TXT:%.txt=%.xml)
+-CFS_TXT=$(wildcard Clusters_from_Scratch/en-US/*.txt)
+-CFS_XML=$(CFS_TXT:%.txt=%.xml)
+-CFS_XML_ONLY=$(addprefix Clusters_from_Scratch/en-US/,$(COMMON_XML) \
+-  Clusters_from_Scratch.ent Clusters_from_Scratch.xml Preface.xml)
+ 
+-$(CFS_XML): $(CFS_SHARED_XML)
++# Clusters From Scratch
+ 
+-PUBLICAN_INTREE_DEPS =
+-if PUBLICAN_INTREE_BRAND
+-PUBLICAN_INTREE_DEPS += publican-catalog
+-endif
++CFS_SHARED_TXT	= $(addprefix shared/en-US/,pacemaker-intro.txt)
++CFS_SHARED_XML	= $(CFS_SHARED_TXT:%.txt=%.xml)
++CFS_TXT		= $(wildcard Clusters_from_Scratch/en-US/*.txt)
++CFS_XML_GEN	= $(CFS_TXT:%.txt=%.xml)
++CFS_XML_ONLY	= $(addprefix Clusters_from_Scratch/en-US/,$(COMMON_XML) \
++		  Clusters_from_Scratch.ent \
++		  Clusters_from_Scratch.xml \
++		  Preface.xml)
++CFS_DEPS	= $(PNGS) $(CFS_SHARED_XML) $(CFS_XML_ONLY) $(CFS_XML_GEN)
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Clusters_from_Scratch.build: $(PNGS) $(CFS_XML_ONLY) $(CFS_XML) $(CFS_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
++Clusters_from_Scratch.build: $(CFS_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+-if PUBLICAN_INTREE_BRAND
+-	$(AM_V_PUB)cd $(@:%.build=%) \
+-	&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
+-	   $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \
+-	   $(PCMK_quiet)
+-else
+-	$(AM_V_PUB)cd $(@:%.build=%) \
+-	&& RPM_BUILD_DIR="" \
+-	   $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \
+-	   $(PCMK_quiet)
+-endif
++	$(AM_V_PUB)cd $(@:%.build=%) && RPM_BUILD_DIR="" $(PUBLICAN_INTREE_ENV)	\
++	  $(PUBLICAN) build --publish --langs="$(DOCBOOK_LANGS)"		\
++	    --formats="$(DOCBOOK_FORMATS)" $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
+ 	rm -rf $(@:%.build=%)/tmp
+ 	touch $@
+ 
+ 
+-PD_TXT=$(wildcard Pacemaker_Development/en-US/*.txt)
+-PD_XML=$(PD_TXT:%.txt=%.xml)
+-PD_XML_ONLY=$(addprefix Pacemaker_Development/en-US/,$(COMMON_XML) \
+-  Pacemaker_Development.ent Pacemaker_Development.xml)
++# Pacemaker Development
++
++PD_TXT		= $(wildcard Pacemaker_Development/en-US/*.txt)
++PD_XML_GEN	= $(PD_TXT:%.txt=%.xml)
++PD_XML_ONLY	= $(addprefix Pacemaker_Development/en-US/,$(COMMON_XML) \
++		  Pacemaker_Development.ent \
++		  Pacemaker_Development.xml)
++PD_DEPS		= $(PD_XML_ONLY) $(PD_XML_GEN)
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Pacemaker_Development.build: $(PD_XML_ONLY) $(PD_XML) $(PUBLICAN_INTREE_DEPS)
++Pacemaker_Development.build: $(PD_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+-if PUBLICAN_INTREE_BRAND
+-	$(AM_V_PUB)cd $(@:%.build=%) \
+-	&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
+-	   $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \
+-	   $(PCMK_quiet)
+-else
+-	$(AM_V_PUB)cd $(@:%.build=%) \
+-	&& RPM_BUILD_DIR="" \
+-	   $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \
+-	   $(PCMK_quiet)
+-endif
++	$(AM_V_PUB)cd $(@:%.build=%) && RPM_BUILD_DIR="" $(PUBLICAN_INTREE_ENV)	\
++	  $(PUBLICAN) build --publish --langs="$(DOCBOOK_LANGS)"		\
++	    --formats="$(DOCBOOK_FORMATS)" $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
+ 	rm -rf $(@:%.build=%)/tmp
+ 	touch $@
+ 
+ 
+-PE_SHARED_TXT=$(addprefix shared/en-US/,pacemaker-intro.txt)
+-PE_SHARED_XML=$(PE_SHARED_TXT:%.txt=%.xml)
+-PE_TXT=$(wildcard Pacemaker_Explained/en-US/*.txt)
+-PE_XML=$(PE_TXT:%.txt=%.xml)
+-PE_XML_ONLY=$(addprefix Pacemaker_Explained/en-US/,$(COMMON_XML) \
+-  Pacemaker_Explained.ent Pacemaker_Explained.xml Preface.xml)
++# Pacemaker Explained
+ 
+-$(PE_XML): $(PE_SHARED_XML)
++PE_SHARED_TXT	= $(addprefix shared/en-US/,pacemaker-intro.txt)
++PE_SHARED_XML	= $(PE_SHARED_TXT:%.txt=%.xml)
++PE_TXT		= $(wildcard Pacemaker_Explained/en-US/*.txt)
++PE_XML_GEN	= $(PE_TXT:%.txt=%.xml)
++PE_XML_ONLY	= $(addprefix Pacemaker_Explained/en-US/,$(COMMON_XML) \
++		  Pacemaker_Explained.ent \
++		  Pacemaker_Explained.xml \
++		  Preface.xml)
++PE_DEPS		= $(PNGS) $(PE_SHARED_XML) $(PE_XML_ONLY) $(PE_XML_GEN)
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Pacemaker_Explained.build: $(PNGS) $(PE_XML_ONLY) $(PE_XML) $(PE_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
++Pacemaker_Explained.build: $(PE_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+-if PUBLICAN_INTREE_BRAND
+-	$(AM_V_PUB)cd $(@:%.build=%) \
+-	&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
+-	   $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \
+-	   $(PCMK_quiet)
+-else
+-	$(AM_V_PUB)cd $(@:%.build=%) \
+-	&& RPM_BUILD_DIR="" \
+-	   $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \
+-	   $(PCMK_quiet)
+-endif
++	$(AM_V_PUB)cd $(@:%.build=%) && RPM_BUILD_DIR="" $(PUBLICAN_INTREE_ENV)	\
++	  $(PUBLICAN) build --publish --langs="$(DOCBOOK_LANGS)"		\
++	    --formats="$(DOCBOOK_FORMATS)" $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
+ 	rm -rf $(@:%.build=%)/tmp
+ 	touch $@
+ 
+ 
+-PR_TXT=$(wildcard Pacemaker_Remote/en-US/*.txt)
+-PR_XML=$(PR_TXT:%.txt=%.xml)
+-PR_XML_ONLY=$(addprefix Pacemaker_Remote/en-US/,$(COMMON_XML) \
+-  Pacemaker_Remote.ent Pacemaker_Remote.xml)
++# Pacemaker Remote
++
++PR_TXT		= $(wildcard Pacemaker_Remote/en-US/*.txt)
++PR_XML_GEN	= $(PR_TXT:%.txt=%.xml)
++PR_XML_ONLY	= $(addprefix Pacemaker_Remote/en-US/,$(COMMON_XML) \
++		  Pacemaker_Remote.ent \
++		  Pacemaker_Remote.xml)
++PR_DEPS		= $(PR_XML_ONLY) $(PR_XML_GEN)
+ 
+ # We have to hardcode the book name
+ # With '%' the test for 'newness' fails
+-Pacemaker_Remote.build: $(PNGS) $(PR_XML_ONLY) $(PR_XML) $(PUBLICAN_INTREE_DEPS)
++Pacemaker_Remote.build: $(PNGS) $(PR_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+-if PUBLICAN_INTREE_BRAND
+-	$(AM_V_PUB)cd $(@:%.build=%) \
+-	&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
+-	   $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \
+-	   $(PCMK_quiet)
+-else
+-	$(AM_V_PUB)cd $(@:%.build=%) \
+-	&& RPM_BUILD_DIR="" \
+-	   $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \
+-	   $(PCMK_quiet)
+-endif
++	$(AM_V_PUB)cd $(@:%.build=%) && RPM_BUILD_DIR="" $(PUBLICAN_INTREE_ENV)	\
++	  $(PUBLICAN) build --publish --langs="$(DOCBOOK_LANGS)"		\
++	    --formats="$(DOCBOOK_FORMATS)" $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
+ 	rm -rf $(@:%.build=%)/tmp
+ 	touch $@
+ 
++
+ # Update the translation template
+ pot:
+ 	@for book in $(docbook); do 				      \
+@@ -275,8 +254,6 @@ po: pot
+ 	done
+ 
+ if BUILD_DOCBOOK
+-docbook_build = $(docbook:%=%.build)
+-
+ all-local: $(docbook_build) */publican.cfg
+ 
+ install-data-local: all-local
+@@ -316,10 +293,12 @@ pdf:
+ 	make DOCBOOK_FORMATS="pdf" all-local
+ 
+ www: clean-local $(doc_DATA)
++if BUILD_DOCBOOK
+ 	for book in $(docbook); do 							\
+ 		sed -i.sed 's@^brand:.*@brand: clusterlabs@' $$book/publican.cfg;	\
+ 	done
+-	$(MAKE) DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="$(UPLOAD_LANGS)" all-local
++endif
++	$(MAKE) DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="$(DOCBOOK_LANGS)" all-local
+ 	@echo Uploading current $(PACKAGE_SERIES) documentation set to clusterlabs.org
+ if BUILD_DOCBOOK
+ 	@for book in $(docbook); do 									\
+@@ -328,11 +307,12 @@ if BUILD_DOCBOOK
+ 		rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/$(PACKAGE)/doc/";			\
+ 	done
+ endif
+-	rsync $(RSYNC_OPTS) $(generated_docs) $(ascii) "$(RSYNC_DEST)/$(PACKAGE)/doc/"
++	rsync $(RSYNC_OPTS) $(doc_DATA) "$(RSYNC_DEST)/$(PACKAGE)/doc/"
++
+ 
+ clean-local:
+ 	-rm -f $(PNGS_GENERATED)
+ 	-rm -rf $(generated_docs) $(generated_mans) $(docbook_build)
+-	-rm -rf $(SHARED_XML) $(CFS_XML) $(PE_XML) $(PR_XML)
++	-rm -rf $(SHARED_XML) $(CFS_XML_GEN) $(PD_XML_GEN) $(PE_XML_GEN) $(PR_XML_GEN)
+ 	-rm -rf  publican-catalog-fallback publican-catalog
+ 	for book in $(docbook); do rm -rf $$book/tmp $$book/publish; done
+-- 
+1.8.3.1
+
+
+From 00c927f263760bc3163f45fabc2744842f128d3f Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 1 Aug 2019 14:29:41 -0500
+Subject: [PATCH 90/96] Build: doc: properly clean all generated files
+
+---
+ doc/Makefile.am | 25 +++++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index b1c9e06..dfc6732 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -281,7 +281,7 @@ brand: brand-build
+ 	cd publican-clusterlabs && sudo publican install_brand --path=$(datadir)/publican/Common_Content
+ 
+ brand-rpm-clean:
+-	find publican-clusterlabs -name "*.noarch.rpm" -exec rm -f \{\} \;
++	-find publican-clusterlabs -name "*.noarch.rpm" -exec rm -f \{\} \;
+ 
+ brand-rpm-build: brand-rpm-clean brand-build
+ 	cd publican-clusterlabs && $(PUBLICAN) package --binary
+@@ -309,10 +309,19 @@ if BUILD_DOCBOOK
+ endif
+ 	rsync $(RSYNC_OPTS) $(doc_DATA) "$(RSYNC_DEST)/$(PACKAGE)/doc/"
+ 
+-
+-clean-local:
+-	-rm -f $(PNGS_GENERATED)
+-	-rm -rf $(generated_docs) $(generated_mans) $(docbook_build)
+-	-rm -rf $(SHARED_XML) $(CFS_XML_GEN) $(PD_XML_GEN) $(PE_XML_GEN) $(PR_XML_GEN)
+-	-rm -rf  publican-catalog-fallback publican-catalog
+-	for book in $(docbook); do rm -rf $$book/tmp $$book/publish; done
++ALL_GEN	= $(generated_docs)		\
++	  $(docbook_build)		\
++	  $(PNGS_GENERATED)		\
++	  $(SHARED_XML)			\
++	  $(CFS_XML_GEN)		\
++	  $(PD_XML_GEN)			\
++	  $(PE_XML_GEN)			\
++	  $(PR_XML_GEN)			\
++	  publican-catalog-fallback	\
++	  publican-catalog
++
++clean-local: brand-rpm-clean
++	-rm -f $(ALL_GEN)
++	-for book in $(docbook); do			\
++		rm -rf $$book/tmp $$book/publish;	\
++	done
+-- 
+1.8.3.1
+
+
+From b022f4fe58424fe9149aebeef45f85c2abd6fcf9 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 1 Aug 2019 15:56:20 -0500
+Subject: [PATCH 91/96] Build: clean up Makefile.common
+
+Reorganize and comment for readability
+---
+ Makefile.common   | 43 ++++++++++++++++++++++++++++++++++++++-----
+ tools/Makefile.am |  5 ++++-
+ 2 files changed, 42 insertions(+), 6 deletions(-)
+
+diff --git a/Makefile.common b/Makefile.common
+index 386d59d..f7661c9 100644
+--- a/Makefile.common
++++ b/Makefile.common
+@@ -42,22 +42,57 @@ MAINTAINERCLEANFILES	= Makefile.in
+ AM_CPPFLAGS		= -I$(top_builddir)/include -I$(top_srcdir)/include   \
+ 			  -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
+ 
++#
++# Man page builders
++#
++# We have three types of man pages:
++# - man pages for the tools
++# - man pages for OCF agents
++# - man pages for cluster properties used by daemons
++#
++# "BUILD_HELP" actually means "help2man is available", so it only controls the
++# tool man pages, which are generated by help2man. The other man pages are
++# generated via XSL transforms.
++#
++
+ if BUILD_HELP
+ man8_MANS		= $(sbin_PROGRAMS:%=%.8) $(sbin_SCRIPTS:%=%.8) $(dist_sbin_SCRIPTS:%=%.8)
+-endif
+ 
++HELP2MAN_ARGS = -N --section 8 --name "Part of the Pacemaker cluster resource manager"
++
++# Some of our tools' help output is just a shell script invocation of another tool's
++# help. Putting the real tool in MAN8DEPS helps detect when the wrapped help
++# needs updating.
++#
++# If a ".inc" file exists, the tool has been converted to use glib for
++# argument parsing, otherwise it still uses the libcrmcommon functions.
++#
++# @TODO Drop MAN8DEPS once we've converted all tools to libpacemaker API calls
++#       and all wrappers to C code.
+ %.8:	% $(MAN8DEPS)
+ 	$(AM_V_at)chmod a+x $(abs_builddir)/$<
+-	$(AM_V_MAN)PATH=$(abs_builddir):$$PATH $(HELP2MAN) --output $@ --no-info --section 8 --name "Part of the Pacemaker cluster resource manager" $(abs_builddir)/$<
++	$(AM_V_MAN)PATH=$(abs_builddir):$$PATH $(HELP2MAN) --output $@		\
++		$(HELP2MAN_ARGS) $(abs_builddir)/$<
++endif
+ 
++# Save raw XML meta-data from daemon executables, for later conversion into man
++# pages. (Note that more specific rules may override this for creating other
++# types of XML files.)
+ %.xml:  %
+ 	$(AM_V_at)$(abs_builddir)/$< metadata > $@
+ 
++# Process the raw daemon and OCF agent meta-data output using our
++# meta-data-to-docbook-XML transform.
+ %.dbook: %.xml
+-	$(AM_V_at)$(XSLTPROC) --nonet --novalid --stringparam man.name $* $(DBOOK_OPTS) $(top_srcdir)/xml/ocf-meta2man.xsl $(abs_builddir)/$< > $(abs_builddir)/$@
++	$(AM_V_at)$(XSLTPROC) --nonet --novalid --stringparam man.name $* \
++		$(DBOOK_OPTS) $(top_srcdir)/xml/ocf-meta2man.xsl \
++		$(abs_builddir)/$< > $(abs_builddir)/$@
+ 
++# Generate the actual man page for an OCF resource agent from the intermediate
++# docbook XML.
+ %.7:	%.dbook
+ 	$(AM_V_MAN)$(XSLTPROC) $(MANPAGE_XSLT) $(abs_builddir)/$< $(PCMK_quiet)
++
+ # 
+ # Build docbook from asciidoc because XML is a PITA to edit
+ #
+@@ -86,5 +121,3 @@ endif
+ 	  $@-t  # We just want the appendix tag (asciidoctor adds non-empty book-level title)
+ 	$(AM_V_at)sed -i 's/book>/chapter>/g' $@-t   # Rename to chapter (won't trigger if previous sed did)
+ 	$(AM_V_GEN)mv $@-t $@
+-
+-#	echo Rebuilt $@ from $<
+diff --git a/tools/Makefile.am b/tools/Makefile.am
+index 6960548..e403849 100644
+--- a/tools/Makefile.am
++++ b/tools/Makefile.am
+@@ -63,7 +63,10 @@ endif
+ 
+ ## SOURCES
+ 
+-MAN8DEPS		= crm_attribute crm_node
++# A few tools are just thin wrappers around crm_attribute.
++# This makes their help get updated when crm_attribute changes
++# (see Makefile.common).
++MAN8DEPS		= crm_attribute
+ 
+ crmadmin_SOURCES	= crmadmin.c
+ crmadmin_LDADD		= $(top_builddir)/lib/pengine/libpe_status.la \
+-- 
+1.8.3.1
+
+
+From 49e892641dce5870407dcd3bd5322e04893461da Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 1 Aug 2019 16:50:21 -0500
+Subject: [PATCH 92/96] Build: doc: move text-to-DocBook rule from common to
+ doc
+
+... since it's only used there, and was a bit confusing next to the
+meta-data-to-DocBook rule in common.
+---
+ Makefile.common | 29 -----------------------------
+ doc/Makefile.am | 26 ++++++++++++++++++++++++++
+ 2 files changed, 26 insertions(+), 29 deletions(-)
+
+diff --git a/Makefile.common b/Makefile.common
+index f7661c9..2731922 100644
+--- a/Makefile.common
++++ b/Makefile.common
+@@ -92,32 +92,3 @@ endif
+ # docbook XML.
+ %.7:	%.dbook
+ 	$(AM_V_MAN)$(XSLTPROC) $(MANPAGE_XSLT) $(abs_builddir)/$< $(PCMK_quiet)
+-
+-# 
+-# Build docbook from asciidoc because XML is a PITA to edit
+-#
+-# Build each chapter as a book (since the numbering isn't right for
+-# articles and only books can have appendices) and then strip out the
+-# bits we don't want/need
+-#
+-# XXX Sequence of tr/sed commands should be replaced with a single XSLT
+-#
+-%.xml:  %.txt
+-if IS_ASCIIDOC
+-	$(AM_V_GEN)$(ASCIIDOC_CONV) -b docbook -d book -o $@-tt $<
+-else
+-	$(AM_V_GEN)$(ASCIIDOC_CONV) -b docbook45 -d book -o $@-tt $<
+-endif
+-	$(AM_V_at)tr -d '\036\r' <$@-tt >$@-t; rm -f $@-tt  # Fix line endings
+-	$(AM_V_at)sed -i 's/\ lang="en"//' $@-t      # Never specify a language in the chapters
+-	$(AM_V_at)sed -i 's/simpara/para/g' $@-t     # publican doesn't correctly render footnotes with simpara
+-	$(AM_V_at)sed -i 's/.*<date>.*//g' $@-t      # Remove dangling tag
+-	$(AM_V_at)sed -i 's/.*preface>//g' $@-t      # Remove preface elements
+-	$(AM_V_at)sed -i 's:<title></title>::g' $@-t # Remove empty title
+-	$(AM_V_at)sed -i 's/chapter/section/g' $@-t  # Chapters become sections, so that books can become chapters
+-	$(AM_V_at)sed -i 's/<.*bookinfo.*>//g' $@-t  # Strip out bookinfo, we don't need it
+-	$(AM_V_at)! grep -q "<appendix" $@-t || sed -i \
+-	  's/.*book>//;tb;bf;:b;N;s/.*<title>.*<\/title>.*//;tb;/<appendix/{:i;n;/<\/appendix/{p;d};bi};bb;:f;p;d' \
+-	  $@-t  # We just want the appendix tag (asciidoctor adds non-empty book-level title)
+-	$(AM_V_at)sed -i 's/book>/chapter>/g' $@-t   # Rename to chapter (won't trigger if previous sed did)
+-	$(AM_V_GEN)mv $@-t $@
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index dfc6732..5ff350c 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -103,13 +103,39 @@ graphics: $(PNGS)
+ 
+ if IS_ASCIIDOC
+ ASCIIDOC_HTML_ARGS	= --unsafe --backend=xhtml11
++ASCIIDOC_DBOOK_ARGS	= -b docbook -d book
+ else
+ ASCIIDOC_HTML_ARGS	= --backend=html5
++ASCIIDOC_DBOOK_ARGS	= -b docbook45 -d book
+ endif
+ 
+ %.html: %.txt
+ 	$(AM_V_GEN)$(ASCIIDOC_CONV) $(ASCIIDOC_HTML_ARGS) --out-file=$@ $< $(PCMK_quiet)
+ 
++# 
++# Generate DocBook XML from asciidoc text.
++#
++# Build each chapter as a book (since the numbering isn't right for
++# articles and only books can have appendices) and then strip out the
++# bits we don't want or need.
++#
++# XXX Sequence of tr/sed commands should be replaced with a single XSLT
++#
++%.xml:  %.txt
++	$(AM_V_at)$(ASCIIDOC_CONV) $(ASCIIDOC_DBOOK_ARGS) -o - $< | tr -d '\036\r' >$@-t # Convert, fix line endings
++	$(AM_V_at)sed -i 's/\ lang="en"//' $@-t      # Never specify a language in the chapters
++	$(AM_V_at)sed -i 's/simpara/para/g' $@-t     # publican doesn't correctly render footnotes with simpara
++	$(AM_V_at)sed -i 's/.*<date>.*//g' $@-t      # Remove dangling tag
++	$(AM_V_at)sed -i 's/.*preface>//g' $@-t      # Remove preface elements
++	$(AM_V_at)sed -i 's:<title></title>::g' $@-t # Remove empty title
++	$(AM_V_at)sed -i 's/chapter/section/g' $@-t  # Chapters become sections, so that books can become chapters
++	$(AM_V_at)sed -i 's/<.*bookinfo.*>//g' $@-t  # Strip out bookinfo, we don't need it
++	$(AM_V_at)! grep -q "<appendix" $@-t || sed -i \
++	  's/.*book>//;tb;bf;:b;N;s/.*<title>.*<\/title>.*//;tb;/<appendix/{:i;n;/<\/appendix/{p;d};bi};bb;:f;p;d' \
++	  $@-t  # We just want the appendix tag (asciidoctor adds non-empty book-level title)
++	$(AM_V_at)sed -i 's/book>/chapter>/g' $@-t   # Rename to chapter (won't trigger if previous sed did)
++	$(AM_V_GEN)mv $@-t $@
++
+ # publican-clusterlabs/xsl/{html,html-single,pdf}.xsl refer to URIs
+ # requiring Internet access, hence we shadow that with a XML catalog-based
+ # redirect to local files brought with Publican installation;
+-- 
+1.8.3.1
+
+
+From 2aa5764c84c7fac7c06ccebccee7295872ec8b40 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 1 Aug 2019 17:47:53 -0500
+Subject: [PATCH 93/96] Build: doc: skip publican documentation with "make
+ distcheck"
+
+I got publican partly working with VPATH builds, but ran into an issue
+that wasn't worth spending more time on.
+---
+ Makefile.am     |  9 +++++++++
+ doc/Makefile.am | 44 ++++++++++++++++++++++++++------------------
+ 2 files changed, 35 insertions(+), 18 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index b47f488..5db35c2 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -31,6 +31,15 @@ EXTRA_DIST		= CONTRIBUTING.md	\
+ MAINTAINERCLEANFILES    = Makefile.in aclocal.m4 configure DRF/config-h.in \
+                         DRF/stamp-h.in libtool.m4 ltdl.m4
+ 
++# Disable building Publican documentation when doing "make distcheck", because
++# some of our book sources are in the source directory, while others are
++# dynamically generated in the build directory, and publican can't handle that.
++#
++# @TODO To support VPATH builds for Publican, we'd probably have to create
++# a separate subtree of the build directory to use as Publican's source
++# directory, and copy the static sources into it.
++AM_DISTCHECK_CONFIGURE_FLAGS	= --with-brand=""
++
+ CORE	= replace include lib mcp attrd pengine cib crmd fencing lrmd tools xml
+ SUBDIRS	= $(CORE) extra doc
+ 
+diff --git a/doc/Makefile.am b/doc/Makefile.am
+index 5ff350c..3d4be7f 100644
+--- a/doc/Makefile.am
++++ b/doc/Makefile.am
+@@ -122,6 +122,7 @@ endif
+ # XXX Sequence of tr/sed commands should be replaced with a single XSLT
+ #
+ %.xml:  %.txt
++	$(AM_V_at)$(MKDIR_P) $(shell dirname $@)     # might not exist in VPATH build
+ 	$(AM_V_at)$(ASCIIDOC_CONV) $(ASCIIDOC_DBOOK_ARGS) -o - $< | tr -d '\036\r' >$@-t # Convert, fix line endings
+ 	$(AM_V_at)sed -i 's/\ lang="en"//' $@-t      # Never specify a language in the chapters
+ 	$(AM_V_at)sed -i 's/simpara/para/g' $@-t     # publican doesn't correctly render footnotes with simpara
+@@ -167,7 +168,7 @@ SHARED_XML=$(SHARED_TXT:%.txt=%.xml)
+ if PUBLICAN_INTREE_BRAND
+ PUBLICAN_INTREE_DEPS	= publican-catalog
+ PUBLICAN_INTREE_ENV	= XML_CATALOG_FILES="$(CURDIR)/publican-catalog"
+-PUBLICAN_INTREE_OPT	= --brand_dir=../publican-clusterlabs 
++PUBLICAN_INTREE_OPT	= --brand_dir="$(top_srcdir)/publican-clusterlabs"
+ else
+ PUBLICAN_INTREE_DEPS	=
+ PUBLICAN_INTREE_ENV	=
+@@ -181,7 +182,7 @@ CFS_SHARED_TXT	= $(addprefix shared/en-US/,pacemaker-intro.txt)
+ CFS_SHARED_XML	= $(CFS_SHARED_TXT:%.txt=%.xml)
+ CFS_TXT		= $(wildcard Clusters_from_Scratch/en-US/*.txt)
+ CFS_XML_GEN	= $(CFS_TXT:%.txt=%.xml)
+-CFS_XML_ONLY	= $(addprefix Clusters_from_Scratch/en-US/,$(COMMON_XML) \
++CFS_XML_ONLY	= $(addprefix $(srcdir)/Clusters_from_Scratch/en-US/,$(COMMON_XML) \
+ 		  Clusters_from_Scratch.ent \
+ 		  Clusters_from_Scratch.xml \
+ 		  Preface.xml)
+@@ -193,8 +194,9 @@ Clusters_from_Scratch.build: $(CFS_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ 	$(AM_V_PUB)cd $(@:%.build=%) && RPM_BUILD_DIR="" $(PUBLICAN_INTREE_ENV)	\
+-	  $(PUBLICAN) build --publish --langs="$(DOCBOOK_LANGS)"		\
+-	    --formats="$(DOCBOOK_FORMATS)" $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
++	  $(PUBLICAN) build --src_dir="$(srcdir)" --publish 			\
++	    --langs="$(DOCBOOK_LANGS)" --formats="$(DOCBOOK_FORMATS)"		\
++	    $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
+ 	rm -rf $(@:%.build=%)/tmp
+ 	touch $@
+ 
+@@ -203,7 +205,7 @@ Clusters_from_Scratch.build: $(CFS_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 
+ PD_TXT		= $(wildcard Pacemaker_Development/en-US/*.txt)
+ PD_XML_GEN	= $(PD_TXT:%.txt=%.xml)
+-PD_XML_ONLY	= $(addprefix Pacemaker_Development/en-US/,$(COMMON_XML) \
++PD_XML_ONLY	= $(addprefix $(srcdir)/Pacemaker_Development/en-US/,$(COMMON_XML) \
+ 		  Pacemaker_Development.ent \
+ 		  Pacemaker_Development.xml)
+ PD_DEPS		= $(PD_XML_ONLY) $(PD_XML_GEN)
+@@ -226,7 +228,7 @@ PE_SHARED_TXT	= $(addprefix shared/en-US/,pacemaker-intro.txt)
+ PE_SHARED_XML	= $(PE_SHARED_TXT:%.txt=%.xml)
+ PE_TXT		= $(wildcard Pacemaker_Explained/en-US/*.txt)
+ PE_XML_GEN	= $(PE_TXT:%.txt=%.xml)
+-PE_XML_ONLY	= $(addprefix Pacemaker_Explained/en-US/,$(COMMON_XML) \
++PE_XML_ONLY	= $(addprefix $(srcdir)/Pacemaker_Explained/en-US/,$(COMMON_XML) \
+ 		  Pacemaker_Explained.ent \
+ 		  Pacemaker_Explained.xml \
+ 		  Preface.xml)
+@@ -238,8 +240,9 @@ Pacemaker_Explained.build: $(PE_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ 	$(AM_V_PUB)cd $(@:%.build=%) && RPM_BUILD_DIR="" $(PUBLICAN_INTREE_ENV)	\
+-	  $(PUBLICAN) build --publish --langs="$(DOCBOOK_LANGS)"		\
+-	    --formats="$(DOCBOOK_FORMATS)" $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
++	  $(PUBLICAN) build --src_dir="$(srcdir)" --publish 			\
++	    --langs="$(DOCBOOK_LANGS)" --formats="$(DOCBOOK_FORMATS)"		\
++	    $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
+ 	rm -rf $(@:%.build=%)/tmp
+ 	touch $@
+ 
+@@ -248,7 +251,7 @@ Pacemaker_Explained.build: $(PE_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 
+ PR_TXT		= $(wildcard Pacemaker_Remote/en-US/*.txt)
+ PR_XML_GEN	= $(PR_TXT:%.txt=%.xml)
+-PR_XML_ONLY	= $(addprefix Pacemaker_Remote/en-US/,$(COMMON_XML) \
++PR_XML_ONLY	= $(addprefix $(srcdir)/Pacemaker_Remote/en-US/,$(COMMON_XML) \
+ 		  Pacemaker_Remote.ent \
+ 		  Pacemaker_Remote.xml)
+ PR_DEPS		= $(PR_XML_ONLY) $(PR_XML_GEN)
+@@ -259,24 +262,28 @@ Pacemaker_Remote.build: $(PNGS) $(PR_DEPS) $(PUBLICAN_INTREE_DEPS)
+ 	@echo Building $(@:%.build=%) because of $?
+ 	rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp
+ 	$(AM_V_PUB)cd $(@:%.build=%) && RPM_BUILD_DIR="" $(PUBLICAN_INTREE_ENV)	\
+-	  $(PUBLICAN) build --publish --langs="$(DOCBOOK_LANGS)"		\
+-	    --formats="$(DOCBOOK_FORMATS)" $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
++	  $(PUBLICAN) build --src_dir="$(srcdir)" --publish 			\
++	    --langs="$(DOCBOOK_LANGS)" --formats="$(DOCBOOK_FORMATS)"		\
++	    $(PUBLICAN_INTREE_OPT) $(PCMK_quiet)
+ 	rm -rf $(@:%.build=%)/tmp
+ 	touch $@
+ 
+ 
+ # Update the translation template
+ pot:
+-	@for book in $(docbook); do 				      \
+-		echo "Updating translation templates in: $$book";     \
+-		( cd $$book && RPM_BUILD_DIR="" $(PUBLICAN) update_pot ); \
++	@for book in $(docbook); do					\
++		echo "Updating translation templates in: $$book";	\
++		( cd $$book && RPM_BUILD_DIR=""				\
++		  $(PUBLICAN) --src_dir="$(srcdir)" update_pot );	\
+ 	done
+ 
+ # Update the actual translations
+ po: pot
+-	@for book in $(docbook); do 				      \
+-		echo "Updating translations in: $$book";     \
+-		( cd $$book && RPM_BUILD_DIR="" $(PUBLICAN) update_po --langs=all );\
++	@for book in $(docbook); do					\
++		echo "Updating translations in: $$book";		\
++		( cd $$book && RPM_BUILD_DIR=""				\
++		  $(PUBLICAN) --src_dir="$(srcdir)" update_po		\
++		  --langs=all );					\
+ 	done
+ 
+ if BUILD_DOCBOOK
+@@ -310,7 +317,8 @@ brand-rpm-clean:
+ 	-find publican-clusterlabs -name "*.noarch.rpm" -exec rm -f \{\} \;
+ 
+ brand-rpm-build: brand-rpm-clean brand-build
+-	cd publican-clusterlabs && $(PUBLICAN) package --binary
++	cd publican-clusterlabs && \
++		$(PUBLICAN) --src_dir="$(srcdir)" package --binary
+ 
+ brand-rpm-install: brand-rpm-build
+ 	find publican-clusterlabs -name "*.noarch.rpm" -exec sudo rpm -Uvh --force \{\} \;
+-- 
+1.8.3.1
+
+
+From bc0bd42b67c80102a4a838319bb8aa0a1310c76b Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 23 Aug 2019 17:28:49 -0500
+Subject: [PATCH 94/96] Fix: tools: correct crm_report argument parsing
+
+There were a few instances where crm_report's option names passed to getopt,
+option names listed in help, and option names checked for did not match.
+
+Where getopt and checks matched, I went with that, so that anything that
+worked before continues to work.
+---
+ tools/crm_report.in | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/tools/crm_report.in b/tools/crm_report.in
+index 63c137c..947140f 100755
+--- a/tools/crm_report.in
++++ b/tools/crm_report.in
+@@ -20,7 +20,7 @@
+ 
+ TEMP=`getopt				\
+     -o hv?xl:f:t:n:T:L:p:c:dSACHu:D:MVse:	\
+-    --long help,cts:,cts-log:,dest:,node:,nodes:,from:,to:,sos-mode,logfile:,as-directory,single-node,cluster:,user:,max-depth:,version,features,rsh:	\
++    --long help,corosync,cts:,cts-log:,dest:,heartbeat,node:,nodes:,openais,from:,to:,sos-mode,logfile:,as-directory,single-node,cluster:,user:,max-depth:,version,features,rsh:	\
+     -n 'crm_report' -- "$@"`
+ # The quotes around $TEMP are essential
+ eval set -- "$TEMP"
+@@ -54,6 +54,7 @@ Required option:
+ 
+ Options:
+   -V                    increase verbosity (may be specified multiple times)
++  -h, --help            display this message
+   -v, --version         display software version
+   --features            display software features
+   -t, --to TIME         time at which all problems were resolved
+@@ -77,9 +78,10 @@ Options:
+   -H, --heartbeat       force the cluster type to be heartbeat
+   -u, --user USER       username to use when collecting data from other nodes
+                         (default root)
+-  -D, --depth           search depth to use when attempting to locate files
++  -D, --max-depth       search depth to use when attempting to locate files
+   -e, --rsh             command to use to run commands on other nodes
+                         (default ssh -T)
++  -d, --as-directory    leave result as a directory tree instead of archiving
+   --sos-mode            use defaults suitable for being called by sosreport tool
+                         (behavior subject to change and not useful to end users)
+   DEST, --dest DEST     custom destination directory or file name
+@@ -119,13 +121,13 @@ while true; do
+     case "$1" in
+ 	-x) set -x; shift;;
+ 	-V) verbose=`expr $verbose + 1`; shift;;
+-	-T|--cts-test) tests="$tests $2"; shift; shift;;
++	-T|--cts) tests="$tests $2"; shift; shift;;
+ 	   --cts-log) ctslog="$2"; shift; shift;;
+ 	-f|--from) start_time=`get_time "$2"`; shift; shift;;
+ 	-t|--to) end_time=`get_time "$2"`; shift; shift;;
+ 	-n|--node|--nodes) nodes="$nodes $2"; shift; shift;;
+ 	-S|--single-node) nodes="$host"; shift;;
+-	-E|-l|--logfile) extra_logs="$extra_logs $2"; shift; shift;;
++	-l|--logfile) extra_logs="$extra_logs $2"; shift; shift;;
+ 	-p) sanitize_patterns="$sanitize_patterns $2"; shift; shift;;
+ 	-L) log_patterns="$log_patterns `echo $2 | sed 's/ /\\\W/g'`"; shift; shift;;
+ 	-d|--as-directory) compress=0; shift;;
+-- 
+1.8.3.1
+
+
+From 49c3055b932b732e0904d91cf49d4f80b7aa0e7d Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 23 Aug 2019 17:39:45 -0500
+Subject: [PATCH 95/96] Fix: tools: don't ignore log if unrelated file is too
+ large
+
+This fixes a regression in 1.1.12: since cb420a04, findln_by_time() would skip
+a log if any file in the current working directory (rather than the log itself)
+was larger than 1GB.
+---
+ tools/report.common.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/report.common.in b/tools/report.common.in
+index ebdd8df..55b37da 100644
+--- a/tools/report.common.in
++++ b/tools/report.common.in
+@@ -535,7 +535,7 @@ findln_by_time() {
+     # Some logs can be massive (over 1,500,000,000 lines have been seen in the wild) 
+     # Even just 'wc -l' on these files can take 10+ minutes 
+ 
+-    local fileSize=`ls -lh | awk '{ print $5 }' | grep -ie G`
++    local fileSize=`ls -lh "$logf" | awk '{ print $5 }' | grep -ie G`
+     if [ x$fileSize != x ]; then
+         warning "$logf is ${fileSize} in size and could take many hours to process. Skipping."
+         return
+-- 
+1.8.3.1
+
+
+From 456668b5afd781c61576c9b2d2feaf058fe1cc22 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 23 Aug 2019 22:38:51 -0500
+Subject: [PATCH 96/96] Fix: tools: check for tar in crm_report
+
+crm_report requires tar, so check for its existence up front.
+---
+ tools/crm_report.in       |  4 ++++
+ tools/report.collector.in |  2 ++
+ tools/report.common.in    | 10 ++++++++++
+ 3 files changed, 16 insertions(+)
+
+diff --git a/tools/crm_report.in b/tools/crm_report.in
+index 947140f..0ef4e6f 100755
+--- a/tools/crm_report.in
++++ b/tools/crm_report.in
+@@ -475,6 +475,10 @@ getnodes() {
+     fi
+ }
+ 
++if [ $compress -eq 1 ]; then
++    require_tar
++fi
++
+ if [ "x$tests" != "x" ]; then
+     do_cts
+ 
+diff --git a/tools/report.collector.in b/tools/report.collector.in
+index 48ee075..ab41df1 100644
+--- a/tools/report.collector.in
++++ b/tools/report.collector.in
+@@ -821,6 +821,8 @@ collect_logs() {
+     trap "" 0
+ }
+ 
++require_tar
++
+ debug "Initializing $REPORT_TARGET subdir"
+ if [ "$REPORT_MASTER" != "$REPORT_TARGET" ]; then
+   if [ -e $REPORT_HOME/$REPORT_TARGET ]; then
+diff --git a/tools/report.common.in b/tools/report.common.in
+index 55b37da..6d4f193 100644
+--- a/tools/report.common.in
++++ b/tools/report.common.in
+@@ -128,6 +128,13 @@ fatal() {
+     exit 1
+ }
+ 
++require_tar() {
++    which tar >/dev/null 2>&1
++    if [ $? -ne 0 ]; then
++        fatal "Required program 'tar' not found, please install and re-run"
++    fi
++}
++
+ is_running() {
+     ps -ef | egrep -qs $(echo "$1" | sed -e 's/^\(.\)/[\1]/')
+ }
+@@ -522,6 +529,9 @@ shrink() {
+ 
+     cd $dir  >/dev/null 2>&1
+     tar $tar_options $target $base >/dev/null 2>&1
++    if [ $? -ne 0 ]; then
++        fatal "Could not archive $base, please investigate and collect manually"
++    fi
+     cd $olddir  >/dev/null 2>&1
+ 
+     echo $target
+-- 
+1.8.3.1
+
diff --git a/SOURCES/2.0-cleanup-behavior.patch b/SOURCES/2.0-cleanup-behavior.patch
new file mode 100644
index 0000000..039079e
--- /dev/null
+++ b/SOURCES/2.0-cleanup-behavior.patch
@@ -0,0 +1,76 @@
+From 612fcbb399cfaa558504eeaa6ab16064e9013238 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 7 Aug 2018 11:56:06 -0500
+Subject: [PATCH] Feature: tools: enable 2.0 behavior of crm_resource clean-up
+
+---
+ tools/crm_resource.c | 49 +++++----------
+ 1 file changed, 20 insertions(+), 29 deletions(-)
+
+diff --git a/tools/crm_resource.c b/tools/crm_resource.c
+index 128d075..bbdba25 100644
+--- a/tools/crm_resource.c
++++ b/tools/crm_resource.c
+@@ -214,8 +214,6 @@ static struct crm_option long_options[] = {
+     },
+     {
+         "cleanup", no_argument, NULL, 'C',
+-#if 0
+-        // new behavior disabled until 2.0.0
+         "\t\tIf resource has any past failures, clear its history and fail count.\n"
+         "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n"
+         "\t\t\t\t--operation and --interval apply to fail counts, but entire history is always cleared,\n"
+@@ -221,7 +219,6 @@ static struct crm_option long_options[] = {
+     },
+     {
+         "refresh", no_argument, NULL, 'R',
+-#endif
+         "\t\tDelete resource's history (including failures) so its current state is rechecked.\n"
+         "\t\t\t\tOptionally filtered by --resource and --node (otherwise all).\n"
+         "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be refreshed."
+@@ -352,13 +349,11 @@ static struct crm_option long_options[] = {
+     },
+     {
+         "operation", required_argument, NULL, 'n',
+-        "\tOperation to clear instead of all (with -C -r)",
+-        pcmk_option_hidden // only used with 2.0 -C behavior
++        "\tOperation to clear instead of all (with -C -r)"
+     },
+     {
+         "interval", required_argument, NULL, 'I',
+-        "\tInterval of operation to clear (default 0) (with -C -r -n)",
+-        pcmk_option_hidden // only used with 2.0 -C behavior
++        "\tInterval of operation to clear (default 0) (with -C -r -n)"
+     },
+     {
+         "set-name", required_argument, NULL, 's',
+@@ -388,7 +383,6 @@ static struct crm_option long_options[] = {
+     {"un-migrate", no_argument, NULL, 'U', NULL, pcmk_option_hidden},
+     {"un-move", no_argument, NULL, 'U', NULL, pcmk_option_hidden},
+ 
+-    {"refresh",    0, 0, 'R', NULL, pcmk_option_hidden}, // remove this line for 2.0.0
+     {"reprobe", no_argument, NULL, 'P', NULL, pcmk_option_hidden},
+ 
+     {"-spacer-", 1, NULL, '-', "\nExamples:", pcmk_option_paragraph},
+@@ -630,15 +624,16 @@ main(int argc, char **argv)
+                 require_resource = FALSE;
+                 break;
+ 
++            case 'P':
++                flag = 'R';
+             case 'C':
+             case 'R':
+-            case 'P':
+                 crm_log_args(argc, argv);
+                 require_resource = FALSE;
+                 if (cib_file == NULL) {
+                     require_crmd = TRUE;
+                 }
+-                rsc_cmd = 'R'; // disable new behavior until 2.0
++                rsc_cmd = flag;
+                 find_flags = pe_find_renamed|pe_find_anon;
+                 break;
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/2.0-record-pending-behavior.patch b/SOURCES/2.0-record-pending-behavior.patch
new file mode 100644
index 0000000..22f49f7
--- /dev/null
+++ b/SOURCES/2.0-record-pending-behavior.patch
@@ -0,0 +1,48 @@
+From b48ceeb041cee65a9b93b9b76235e475fa1a128f Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 16 Oct 2017 09:45:18 -0500
+Subject: [PATCH 2/2] Feature: crmd: default record-pending to TRUE
+
+---
+ crmd/lrm.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/crmd/lrm.c b/crmd/lrm.c
+index eb4e16e..36dc076 100644
+--- a/crmd/lrm.c
++++ b/crmd/lrm.c
+@@ -2061,25 +2061,22 @@ stop_recurring_actions(gpointer key, gpointer value, gpointer user_data)
+ static void
+ record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op)
+ {
++    const char *record_pending = NULL;
++
+     CRM_CHECK(node_name != NULL, return);
+     CRM_CHECK(rsc != NULL, return);
+     CRM_CHECK(op != NULL, return);
+ 
+     // Never record certain operation types as pending
+-    if (op->op_type == NULL
++    if ((op->op_type == NULL) || (op->params == NULL)
+         || !controld_action_is_recordable(op->op_type)) {
+         return;
+     }
+ 
+-    if (op->params == NULL) {
++    // defaults to true
++    record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING);
++    if (record_pending && !crm_is_true(record_pending)) {
+         return;
+-
+-    } else {
+-        const char *record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING);
+-
+-        if (record_pending == NULL || crm_is_true(record_pending) == FALSE) {
+-            return;
+-         }
+     }
+ 
+     op->call_id = -1;
+-- 
+1.8.3.1
+
diff --git a/SOURCES/lrmd-protocol-version.patch b/SOURCES/lrmd-protocol-version.patch
new file mode 100644
index 0000000..2a555e5
--- /dev/null
+++ b/SOURCES/lrmd-protocol-version.patch
@@ -0,0 +1,28 @@
+From 8c497bc794e1e6a3ed188a548da771d768cef8f1 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 26 Oct 2016 11:18:17 -0500
+Subject: [PATCH] Fix: lrmd: undo unnecessary LRMD protocol version change
+
+The change breaks rolling upgrades in a cluster with Pacemaker Remote nodes,
+and was never necessary. This introduces a divergence from upstream that
+will need to be reconciled in the future.
+---
+ include/crm/lrmd.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h
+index 446b39c..a099315 100644
+--- a/include/crm/lrmd.h
++++ b/include/crm/lrmd.h
+@@ -38,7 +38,7 @@ typedef struct lrmd_key_value_s {
+ /* This should be bumped every time there is an incompatible change that
+  * prevents older clients from connecting to this version of the server.
+  */
+-#define LRMD_PROTOCOL_VERSION "1.1"
++#define LRMD_PROTOCOL_VERSION "1.0"
+ 
+ /* This is the version that the client version will actually be compared
+  * against. This should be identical to LRMD_PROTOCOL_VERSION. However, we
+-- 
+1.8.3.1
+
diff --git a/SPECS/pacemaker.spec b/SPECS/pacemaker.spec
new file mode 100644
index 0000000..1bd6ace
--- /dev/null
+++ b/SPECS/pacemaker.spec
@@ -0,0 +1,2177 @@
+# Globals and defines to control package behavior (configure these as desired)
+
+## User and group to use for nonprivileged services
+%global uname hacluster
+%global gname haclient
+
+## Where to install Pacemaker documentation
+%global pcmk_docdir %{_docdir}/%{name}
+
+## GitHub entity that distributes source (for ease of using a fork)
+%global github_owner ClusterLabs
+
+## Upstream pacemaker version, and its package version (specversion
+## can be incremented to build packages reliably considered "newer"
+## than previously built packages with the same pcmkversion)
+%global pcmkversion 1.1.21
+%global specversion 2
+
+## Upstream commit (or git tag, such as "Pacemaker-" plus the
+## {pcmkversion} macro for an official release) to use for this package
+%global commit f14e36fd4336874705b34266c7cddbe12119106c
+## Since git v2.11, the extent of abbreviation is autoscaled by default
+## (used to be constant of 7), so we need to convey it for non-tags, too.
+%global commit_abbrev 7
+
+
+# Define globals for convenient use later
+
+## Workaround to use parentheses in other globals
+%global lparen (
+%global rparen )
+
+## Short version of git commit
+%define shortcommit %(c=%{commit}; case ${c} in
+                      Pacemaker-*%{rparen} echo ${c:10};;
+                      *%{rparen} echo ${c:0:%{commit_abbrev}};; esac)
+
+## Whether this is a tagged release
+%define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?)
+
+## Whether this is a release candidate (in case of a tagged release)
+%define pre_release %([ "%{tag_release}" -eq 0 ] || {
+                      case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;;
+                      esac; }; echo $?)
+
+## Turn off auto-compilation of python files outside site-packages directory,
+## so that the -libs-devel package is multilib-compliant (no *.py[co] files)
+%global __os_install_post %(echo '%{__os_install_post}' | {
+                            sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; })
+
+## Heuristic used to infer bleeding-edge deployments that are
+## less likely to have working versions of the documentation tools
+%define bleeding %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?)
+
+## Corosync version
+%define cs_version %(pkg-config corosync --modversion 2>/dev/null | awk -F . '{print $1}')
+
+## Where to install python site libraries (currently, this uses the unversioned
+## python_sitearch macro to get the default system python, but at some point,
+## we should explicitly choose python2_sitearch or python3_sitearch -- or both)
+%define py_site %{?python_sitearch}%{!?python_sitearch:%(
+  python -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)}
+
+## Whether this platform defaults to using CMAN
+%define cman_native (0%{?el6} || (0%{?fedora} > 0 && 0%{?fedora} < 17))
+
+## Whether this platform defaults to using systemd as an init system
+## (needs to be evaluated prior to BuildRequires being enumerated and
+## installed as it's intended to conditionally select some of these, and
+## for that there are only few indicators with varying reliability:
+## - presence of systemd-defined macros (when building in a full-fledged
+##   environment, which is not the case with ordinary mock-based builds)
+## - systemd-aware rpm as manifested with the presence of particular
+##   macro (rpm itself will trivially always be present when building)
+## - existence of /usr/lib/os-release file, which is something heavily
+##   propagated by systemd project
+## - when not good enough, there's always a possibility to check
+##   particular distro-specific macros (incl. version comparison)
+%define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \
+  } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \
+  } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?))
+
+# RHEL: harden the default GnuTLS cipher list
+%global gnutls_priorities NORMAL:-VERS-SSL3.0:-VERS-TLS1.0:-VERS-TLS1.1:-MD5:-3DES-CBC:-ARCFOUR-128:-ARCFOUR-40
+
+## Upstream commit to use for nagios-agents-metadata package
+%global nagios_hash 105ab8a
+
+
+# Definitions for backward compatibility with older RPM versions
+
+## Ensure the license macro behaves consistently (older RPM will otherwise
+## overwrite it once it encounters "License:"). Courtesy Jason Tibbitts:
+## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77
+%if !%{defined _licensedir}
+%define description %{lua:
+    rpm.define("license %doc")
+    print("%description")
+}
+%endif
+
+
+# Define conditionals so that "rpmbuild --with <feature>" and
+# "rpmbuild --without <feature>" can enable and disable specific features
+
+## Add option to enable support for stonith/external fencing agents
+%bcond_with stonithd
+
+## Add option to create binaries suitable for use with profiling tools
+%bcond_with profiling
+
+## Add option to create binaries with coverage analysis
+%bcond_with coverage
+
+## Add option to generate documentation (requires Publican, Asciidoc and Inkscape)
+%bcond_with doc
+
+## Add option to prefix package version with "0."
+## (so later "official" packages will be considered updates)
+%bcond_with pre_release
+
+## Add option to ship Upstart job files
+%bcond_with upstart_job
+
+## Add option to enable CMAN support
+%bcond_with cman
+
+## Add option to turn on SNMP / ESMTP support
+%bcond_with snmp
+%bcond_with esmtp
+
+## Add option to turn off hardening of libraries and daemon executables
+%bcond_without hardening
+
+
+# Keep sane profiling data if requested
+%if %{with profiling}
+
+## Disable -debuginfo package and stripping binaries/libraries
+%define debug_package %{nil}
+
+%endif
+
+
+# Define the release version
+# (do not look at externally enforced pre-release flag for tagged releases
+# as only -rc tags, captured with the second condition, implies that then)
+%if (!%{tag_release} && %{with pre_release}) || 0%{pre_release}
+%if 0%{pre_release}
+%define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3})
+%else
+%define pcmk_release 0.%{specversion}.%{shortcommit}.git
+%endif
+%else
+%if 0%{tag_release}
+%define pcmk_release %{specversion}
+%else
+# Never use the short commit in a RHEL release number
+%define pcmk_release %{specversion}
+%endif
+%endif
+
+Name:          pacemaker
+Summary:       Scalable High-Availability cluster resource manager
+Version:       %{pcmkversion}
+Release:       %{pcmk_release}%{?dist}
+%if %{defined _unitdir}
+License:       GPLv2+ and LGPLv2+
+%else
+# initscript is Revised BSD
+License:       GPLv2+ and LGPLv2+ and BSD
+%endif
+Url:           http://www.clusterlabs.org
+Group:         System Environment/Daemons
+
+# Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL:
+# https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz
+Source0:       https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz
+Source1:       nagios-agents-metadata-%{nagios_hash}.tar.gz
+
+# upstream commits
+Patch1:        01-rollup.patch
+
+# patches that aren't from upstream
+Patch100:      lrmd-protocol-version.patch
+Patch101:      2.0-record-pending-behavior.patch
+Patch102:      2.0-cleanup-behavior.patch
+
+BuildRoot:     %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
+AutoReqProv:   on
+Requires:      resource-agents
+Requires:      %{name}-libs%{?_isa} = %{version}-%{release}
+Requires:      %{name}-cluster-libs%{?_isa} = %{version}-%{release}
+Requires:      %{name}-cli = %{version}-%{release}
+Obsoletes:     rgmanager < 3.2.0
+Provides:      rgmanager >= 3.2.0
+Provides:      pcmk-cluster-manager
+
+%{?systemd_requires}
+
+ExclusiveArch: aarch64 i686 ppc64le s390x x86_64
+
+# Pacemaker targets compatibility with python 2.6+ and 3.2+
+Requires:      python >= 2.6
+BuildRequires: python-devel >= 2.6
+
+# Pacemaker requires a minimum libqb functionality
+Requires:      libqb > 0.17.0
+BuildRequires: libqb-devel > 0.17.0
+
+# Basics required for the build (even if usually satisfied through other BRs)
+BuildRequires: coreutils findutils grep sed
+
+# Required for core functionality
+BuildRequires: automake autoconf libtool pkgconfig libtool-ltdl-devel
+## version lower bound for: G_GNUC_INTERNAL
+BuildRequires: pkgconfig(glib-2.0) >= 2.6
+BuildRequires: libxml2-devel libxslt-devel libuuid-devel
+BuildRequires: bzip2-devel pam-devel
+
+# Required for agent_config.h which specifies the correct scratch directory
+BuildRequires: resource-agents
+
+# RH patches are created by git, so we need git to apply them
+BuildRequires: git
+
+# Enables optional functionality
+BuildRequires: ncurses-devel docbook-style-xsl
+BuildRequires: bison byacc flex help2man gnutls-devel pkgconfig(dbus-1)
+
+%if %{systemd_native}
+BuildRequires: pkgconfig(systemd)
+%endif
+
+%if %{with cman} && %{cman_native}
+BuildRequires: clusterlib-devel
+# pacemaker initscript: cman initscript, fence_tool (+ some soft-dependencies)
+# "post" scriptlet: ccs_update_schema
+Requires:      cman
+%endif
+
+Requires:      corosync
+BuildRequires: corosynclib-devel
+
+%if %{with stonithd}
+BuildRequires: cluster-glue-libs-devel
+%endif
+
+## (note: this has no avoiding effect when building through non-customized mock)
+%if !%{bleeding}
+%if %{with doc}
+BuildRequires: inkscape asciidoc publican
+%endif
+%endif
+
+%description
+Pacemaker is an advanced, scalable High-Availability cluster resource
+manager for Corosync, CMAN and/or Linux-HA.
+
+It supports more than 16 node clusters with significant capabilities
+for managing resources and dependencies.
+
+It will run scripts at initialization, when machines go up or down,
+when related resources fail and can be configured to periodically check
+resource health.
+
+Available rpmbuild rebuild options:
+  --with(out) : cman coverage doc stonithd hardening pre_release profiling
+
+%package cli
+License:       GPLv2+ and LGPLv2+
+Summary:       Command line tools for controlling Pacemaker clusters
+Group:         System Environment/Daemons
+Requires:      %{name}-libs%{?_isa} = %{version}-%{release}
+Requires:      perl-TimeDate
+
+%description cli
+Pacemaker is an advanced, scalable High-Availability cluster resource
+manager for Corosync, CMAN and/or Linux-HA.
+
+The %{name}-cli package contains command line tools that can be used
+to query and control the cluster from machines that may, or may not,
+be part of the cluster.
+
+%package -n %{name}-libs
+License:       GPLv2+ and LGPLv2+
+Summary:       Core Pacemaker libraries
+Group:         System Environment/Daemons
+Requires(pre): shadow-utils
+# sbd 1.4.0+ supports the libpe_status API for pe_working_set_t
+Conflicts:     sbd < 1.4.0
+
+%description -n %{name}-libs
+Pacemaker is an advanced, scalable High-Availability cluster resource
+manager for Corosync, CMAN and/or Linux-HA.
+
+The %{name}-libs package contains shared libraries needed for cluster
+nodes and those just running the CLI tools.
+
+%package -n %{name}-cluster-libs
+License:       GPLv2+ and LGPLv2+
+Summary:       Cluster Libraries used by Pacemaker
+Group:         System Environment/Daemons
+Requires:      %{name}-libs%{?_isa} = %{version}-%{release}
+
+%description -n %{name}-cluster-libs
+Pacemaker is an advanced, scalable High-Availability cluster resource
+manager for Corosync, CMAN and/or Linux-HA.
+
+The %{name}-cluster-libs package contains cluster-aware shared
+libraries needed for nodes that will form part of the cluster.
+
+%package remote
+%if %{defined _unitdir}
+License:       GPLv2+ and LGPLv2+
+%else
+# initscript is Revised BSD
+License:       GPLv2+ and LGPLv2+ and BSD
+%endif
+Summary:       Pacemaker remote daemon for non-cluster nodes
+Group:         System Environment/Daemons
+Requires:      %{name}-libs%{?_isa} = %{version}-%{release}
+Requires:      %{name}-cli = %{version}-%{release}
+Requires:      resource-agents
+Provides:      pcmk-cluster-manager
+
+# -remote can be fully independent of systemd
+%{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}}
+
+%description remote
+Pacemaker is an advanced, scalable High-Availability cluster resource
+manager for Corosync, CMAN and/or Linux-HA.
+
+The %{name}-remote package contains the Pacemaker Remote daemon
+which is capable of extending pacemaker functionality to remote
+nodes not running the full corosync/cluster stack.
+
+%package -n %{name}-libs-devel
+License:       GPLv2+ and LGPLv2+
+Summary:       Pacemaker development package
+Group:         Development/Libraries
+Requires:      %{name}-cts = %{version}-%{release}
+Requires:      %{name}-libs = %{version}-%{release}
+Requires:      %{name}-cluster-libs = %{version}-%{release}
+Requires:      libtool-ltdl-devel libqb-devel libuuid-devel
+Requires:      libxml2-devel libxslt-devel bzip2-devel glib2-devel
+Requires:      corosynclib-devel
+
+%description -n %{name}-libs-devel
+Pacemaker is an advanced, scalable High-Availability cluster resource
+manager for Corosync, CMAN and/or Linux-HA.
+
+The %{name}-libs-devel package contains headers and shared libraries
+for developing tools for Pacemaker.
+
+# NOTE: can be noarch if lrmd_test is moved to another subpackage
+%package       cts
+License:       GPLv2+ and LGPLv2+
+Summary:       Test framework for cluster-related technologies like Pacemaker
+Group:         System Environment/Daemons
+Requires:      python >= 2.6
+Requires:      %{name}-libs = %{version}-%{release}
+
+# systemd python bindings are separate package in some distros
+%if %{defined systemd_requires}
+
+%if 0%{?fedora} > 22
+Requires:      python2-systemd
+%else
+%if 0%{?fedora} > 20 || 0%{?rhel} > 6
+Requires:      systemd-python
+%endif
+%endif
+
+%endif
+
+%description   cts
+Test framework for cluster-related technologies like Pacemaker
+
+%package       doc
+License:       CC-BY-SA-4.0
+Summary:       Documentation for Pacemaker
+Group:         Documentation
+
+%description   doc
+Documentation for Pacemaker.
+
+Pacemaker is an advanced, scalable High-Availability cluster resource
+manager for Corosync, CMAN and/or Linux-HA.
+
+%package       nagios-plugins-metadata
+License:       GPLv3
+Summary:       Pacemaker Nagios Metadata
+Group:         System Environment/Daemons
+# NOTE below are the plugins this metadata uses.
+# These plugin packages are currently not requirements
+# for the nagios metadata because rhel does not ship these
+# plugins. This metadata is providing 3rd party support
+# for nagios. Users may install the plugins via 3rd party
+# rpm packages, or source. If rhel ships the nagios plugins
+# in the future, we should consider enabling the following
+# required fields.
+#Requires:      nagios-plugins-http
+#Requires:      nagios-plugins-ldap
+#Requires:      nagios-plugins-mysql
+#Requires:      nagios-plugins-pgsql
+#Requires:      nagios-plugins-tcp
+Requires:      pcmk-cluster-manager
+
+%description   nagios-plugins-metadata
+The metadata files required for Pacemaker to execute nagios-plugin-based
+monitor resources.
+
+%prep
+%autosetup -a 1 -n %{name}-%{commit} -S git_am -p 1
+
+# Force the local time
+#
+# 'git' sets the file date to the date of the last commit.
+# This can result in files having been created in the future
+# when building on machines in timezones 'behind' the one the
+# commit occurred in - which seriously confuses 'make'
+find . -exec touch \{\} \;
+
+%build
+
+# Early versions of autotools (e.g. RHEL <= 5) do not support --docdir
+export docdir=%{pcmk_docdir}
+
+export systemdunitdir=%{?_unitdir}%{!?_unitdir:no}
+
+# RHEL: enable notification-agent/notification-recipient,
+# and change concurrent-fencing default to true
+export CPPFLAGS="-DRHEL7_COMPAT -DDEFAULT_CONCURRENT_FENCING_TRUE"
+
+%if %{with hardening}
+# prefer distro-provided hardening flags in case they are defined
+# through _hardening_{c,ld}flags macros, configure script will
+# use its own defaults otherwise; if such hardenings are completely
+# undesired, rpmbuild using "--without hardening"
+# (or "--define '_without_hardening 1'")
+export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}"
+export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}"
+export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}"
+export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}"
+%endif
+
+./autogen.sh
+
+%{configure}                                       \
+        %{?with_profiling:   --with-profiling}     \
+        %{?with_coverage:    --with-coverage}      \
+        %{!?with_cman:       --without-cman}       \
+        %{!?with_snmp:       --without-snmp}       \
+        %{!?with_esmtp:      --without-esmtp}      \
+        --without-heartbeat                        \
+        %{!?with_doc:        --with-brand=}        \
+        %{!?with_hardening:  --disable-hardening}  \
+        %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \
+        --with-initdir=%{_initrddir}               \
+        --localstatedir=%{_var}                    \
+        --with-bug-url=https://bugzilla.redhat.com/                               \
+        --with-nagios                                                             \
+        --with-nagios-metadata-dir=%{_datadir}/pacemaker/nagios/plugins-metadata/ \
+        --with-nagios-plugin-dir=%{_libdir}/nagios/plugins/                       \
+        --with-version=%{version}-%{release}
+
+%if 0%{?suse_version} >= 1200
+# Fedora handles rpath removal automagically
+sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
+sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool
+%endif
+
+make %{_smp_mflags} V=1 all
+
+%check
+{ pengine/regression.sh --run one-or-more-unrunnable-instances \
+  && tools/regression.sh \
+  && touch .CHECKED
+} 2>&1 | sed 's/[fF]ail/faiil/g'  # prevent false positives in rpmlint
+[ -f .CHECKED ] && rm -f -- .CHECKED || false
+
+%install
+rm -rf %{buildroot}
+make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} V=1 install
+
+mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig
+install -m 644 mcp/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker
+install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon
+
+%if %{with upstart_job}
+mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init
+install -m 644 mcp/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf
+install -m 644 mcp/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf
+install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf
+%endif
+
+mkdir -p %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata
+for file in $(find nagios-agents-metadata-%{nagios_hash}/metadata -type f); do
+        install -m 644 $file %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata
+done
+
+%if %{defined _unitdir}
+mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name}
+%endif
+
+# Scripts that should be executable
+chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py
+
+# These are not actually scripts
+find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x
+
+# Don't package static libs
+find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f
+find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f
+
+# For now, don't package the servicelog-related binaries built only for
+# ppc64le when certain dependencies are installed. If they get more exercise by
+# advanced users, we can reconsider.
+rm -f %{buildroot}/%{_sbindir}/notifyServicelogEvent
+rm -f %{buildroot}/%{_sbindir}/ipmiservicelogd
+
+# Do not package these either
+rm -f %{buildroot}/%{_libdir}/service_crm.so
+rm -f %{buildroot}/%{_sbindir}/fence_legacy
+rm -f %{buildroot}/%{_mandir}/man8/fence_legacy.*
+find %{buildroot} -name '*o2cb*' -type f -print0 | xargs -0 rm -f
+
+# Don't ship init scripts for systemd based platforms
+%if %{defined _unitdir}
+rm -f %{buildroot}/%{_initrddir}/pacemaker
+rm -f %{buildroot}/%{_initrddir}/pacemaker_remote
+%endif
+
+# Don't ship fence_pcmk where it has no use
+%if %{without cman}
+rm -f %{buildroot}/%{_sbindir}/fence_pcmk
+%endif
+
+%if %{with coverage}
+GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov
+mkdir -p $GCOV_BASE
+find . -name '*.gcno' -type f | while read F ; do
+        D=`dirname $F`
+        mkdir -p ${GCOV_BASE}/$D
+        cp $F ${GCOV_BASE}/$D
+done
+%endif
+
+%clean
+rm -rf %{buildroot}
+
+%post
+%if %{defined _unitdir}
+%systemd_post pacemaker.service
+%else
+/sbin/chkconfig --add pacemaker || :
+%if %{with cman} && %{cman_native}
+# make fence_pcmk in cluster.conf valid instantly otherwise tools like ccs may
+# choke (until schema gets auto-regenerated on the next start of cluster),
+# per the protocol shared with other packages contributing to cluster.rng
+/usr/sbin/ccs_update_schema >/dev/null 2>&1 || :
+%endif
+%endif
+
+%preun
+%if %{defined _unitdir}
+%systemd_preun pacemaker.service
+%else
+/sbin/service pacemaker stop >/dev/null 2>&1 || :
+if [ $1 -eq 0 ]; then
+    # Package removal, not upgrade
+    /sbin/chkconfig --del pacemaker || :
+fi
+%endif
+
+%postun
+%if %{defined _unitdir}
+%systemd_postun_with_restart pacemaker.service
+%endif
+
+%pre remote
+%if %{defined _unitdir}
+# Stop the service before anything is touched, and remember to restart
+# it as one of the last actions (compared to using systemd_postun_with_restart,
+# this avoids suicide when sbd is in use)
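+# (the flag file is picked up by %%posttrans remote below, which restarts the
+# service and removes the flag)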
+systemctl --quiet is-active pacemaker_remote
+if [ $? -eq 0 ] ; then
+    mkdir -p %{_localstatedir}/lib/rpm-state/%{name}
+    touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
+    systemctl stop pacemaker_remote >/dev/null 2>&1
+else
+    rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
+fi
+%endif
+
+%post remote
+%if %{defined _unitdir}
+%systemd_post pacemaker_remote.service
+%else
+/sbin/chkconfig --add pacemaker_remote || :
+%endif
+
+%preun remote
+%if %{defined _unitdir}
+%systemd_preun pacemaker_remote.service
+%else
+/sbin/service pacemaker_remote stop >/dev/null 2>&1 || :
+if [ $1 -eq 0 ]; then
+    # Package removal, not upgrade
+    /sbin/chkconfig --del pacemaker_remote || :
+fi
+%endif
+
+%postun remote
+%if %{defined _unitdir}
+# The next line is a no-op because we already stopped the service earlier,
+# but we keep it so we can easily revert to the standard behavior in the
+# future if desired
+%systemd_postun_with_restart pacemaker_remote.service
+# Explicitly remove the restart flag file upon final package removal
+if [ $1 -eq 0 ] ; then
+    rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
+fi
+%endif
+
+%posttrans remote
+%if %{defined _unitdir}
+if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then
+    systemctl start pacemaker_remote >/dev/null 2>&1
+    rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
+fi
+%endif
+
+%post cli
+%if %{defined _unitdir}
+%systemd_post crm_mon.service
+%endif
+
+%preun cli
+%if %{defined _unitdir}
+%systemd_preun crm_mon.service
+%endif
+
+%postun cli
+%if %{defined _unitdir}
+%systemd_postun_with_restart crm_mon.service
+%endif
+
+%pre -n %{name}-libs
+
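+# 189 is the uid/gid reserved for hacluster/haclient in the distribution's
+# static id tables, keeping the ids consistent across cluster nodes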
+getent group %{gname} >/dev/null || groupadd -r %{gname} -g 189
+getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 189 -s /sbin/nologin -c "cluster user" %{uname}
+exit 0
+
+%post -n %{name}-libs -p /sbin/ldconfig
+
+%postun -n %{name}-libs -p /sbin/ldconfig
+
+%post -n %{name}-cluster-libs -p /sbin/ldconfig
+
+%postun -n %{name}-cluster-libs -p /sbin/ldconfig
+
+%files
+###########################################################
+%defattr(-,root,root)
+
+%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
+%{_sbindir}/pacemakerd
+
+%if %{defined _unitdir}
+%{_unitdir}/pacemaker.service
+%else
+%{_initrddir}/pacemaker
+%endif
+
+%exclude %{_libexecdir}/pacemaker/lrmd_test
+%exclude %{_sbindir}/pacemaker_remoted
+%{_libexecdir}/pacemaker/*
+
+%{_sbindir}/crm_attribute
+%{_sbindir}/crm_master
+%{_sbindir}/crm_node
+%if %{with cman}
+%{_sbindir}/fence_pcmk
+%endif
+%{_sbindir}/stonith_admin
+
+%doc %{_mandir}/man7/crmd.*
+%doc %{_mandir}/man7/pengine.*
+%doc %{_mandir}/man7/stonithd.*
+%if %{without cman} || !%{cman_native}
+%doc %{_mandir}/man7/ocf_pacemaker_controld.*
+%endif
+%doc %{_mandir}/man7/ocf_pacemaker_remote.*
+%doc %{_mandir}/man8/crm_attribute.*
+%doc %{_mandir}/man8/crm_node.*
+%doc %{_mandir}/man8/crm_master.*
+%if %{with cman}
+%doc %{_mandir}/man8/fence_pcmk.*
+%endif
+%doc %{_mandir}/man8/pacemakerd.*
+%doc %{_mandir}/man8/stonith_admin.*
+
+%doc %{_datadir}/pacemaker/alerts
+
+%license licenses/GPLv2
+%doc COPYING
+%doc ChangeLog
+
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine
+%if %{without cman} || !%{cman_native}
+/usr/lib/ocf/resource.d/pacemaker/controld
+%endif
+/usr/lib/ocf/resource.d/pacemaker/remote
+/usr/lib/ocf/resource.d/.isolation
+
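+# the lcrso plugin is only relevant for plugin-based corosync 1.x stacks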
+%if "%{?cs_version}" != "UNKNOWN"
+%if 0%{?cs_version} < 2
+%{_libexecdir}/lcrso/pacemaker.lcrso
+%endif
+%endif
+
+%if %{with upstart_job}
+%config(noreplace) %{_sysconfdir}/init/pacemaker.conf
+%config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf
+%endif
+
+%files cli
+%defattr(-,root,root)
+
+%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker
+%config(noreplace) %{_sysconfdir}/sysconfig/crm_mon
+
+%if %{defined _unitdir}
+%{_unitdir}/crm_mon.service
+%endif
+
+%if %{with upstart_job}
+%config(noreplace) %{_sysconfdir}/init/crm_mon.conf
+%endif
+
+%{_sbindir}/attrd_updater
+%{_sbindir}/cibadmin
+%{_sbindir}/crm_diff
+%{_sbindir}/crm_error
+%{_sbindir}/crm_failcount
+%{_sbindir}/crm_mon
+%{_sbindir}/crm_resource
+%{_sbindir}/crm_standby
+%{_sbindir}/crm_verify
+%{_sbindir}/crmadmin
+%{_sbindir}/iso8601
+%{_sbindir}/crm_shadow
+%{_sbindir}/crm_simulate
+%{_sbindir}/crm_report
+%{_sbindir}/crm_ticket
+%exclude %{_datadir}/pacemaker/alerts
+%exclude %{_datadir}/pacemaker/tests
+%exclude %{_datadir}/pacemaker/nagios
+%{_datadir}/pacemaker
+%{_datadir}/snmp/mibs/PCMK-MIB.txt
+
+%exclude /usr/lib/ocf/resource.d/pacemaker/controld
+%exclude /usr/lib/ocf/resource.d/pacemaker/remote
+
+%dir /usr/lib/ocf
+%dir /usr/lib/ocf/resource.d
+/usr/lib/ocf/resource.d/pacemaker
+
+%doc %{_mandir}/man7/*
+%exclude %{_mandir}/man7/crmd.*
+%exclude %{_mandir}/man7/pengine.*
+%exclude %{_mandir}/man7/stonithd.*
+%exclude %{_mandir}/man7/ocf_pacemaker_controld.*
+%exclude %{_mandir}/man7/ocf_pacemaker_remote.*
+%doc %{_mandir}/man8/*
+%exclude %{_mandir}/man8/crm_attribute.*
+%exclude %{_mandir}/man8/crm_node.*
+%exclude %{_mandir}/man8/crm_master.*
+%exclude %{_mandir}/man8/fence_pcmk.*
+%exclude %{_mandir}/man8/pacemakerd.*
+%exclude %{_mandir}/man8/pacemaker_remoted.*
+%exclude %{_mandir}/man8/stonith_admin.*
+
+%license licenses/GPLv2
+%doc COPYING
+%doc ChangeLog
+
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox
+%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores
+
+%files -n %{name}-libs
+%defattr(-,root,root)
+
+%{_libdir}/libcib.so.*
+%{_libdir}/liblrmd.so.*
+%{_libdir}/libcrmservice.so.*
+%{_libdir}/libcrmcommon.so.*
+%{_libdir}/libpe_status.so.*
+%{_libdir}/libpe_rules.so.*
+%{_libdir}/libpengine.so.*
+%{_libdir}/libstonithd.so.*
+%{_libdir}/libtransitioner.so.*
+%license licenses/LGPLv2.1
+%doc COPYING
+%doc ChangeLog
+
+%files -n %{name}-cluster-libs
+%defattr(-,root,root)
+%{_libdir}/libcrmcluster.so.*
+%license licenses/LGPLv2.1
+%doc COPYING
+%doc ChangeLog
+
+%files remote
+%defattr(-,root,root)
+
+%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
+%if %{defined _unitdir}
+# the state directory is shared between the subpackages;
+# let rpm take care of removing it once it is no longer
+# referenced and is empty
+%ghost %dir %{_localstatedir}/lib/rpm-state/%{name}
+%{_unitdir}/pacemaker_remote.service
+%else
+%{_initrddir}/pacemaker_remote
+%endif
+
+%{_sbindir}/pacemaker_remoted
+%{_mandir}/man8/pacemaker_remoted.*
+%license licenses/GPLv2
+%doc COPYING
+%doc ChangeLog
+
+%files doc
+%defattr(-,root,root)
+%doc %{pcmk_docdir}
+%license licenses/CC-BY-SA-4.0
+
+%files cts
+%defattr(-,root,root)
+%{py_site}/cts
+%{_datadir}/pacemaker/tests/cts
+%{_libexecdir}/pacemaker/lrmd_test
+%license licenses/GPLv2
+%doc COPYING
+%doc ChangeLog
+
+%files -n %{name}-libs-devel
+%defattr(-,root,root)
+%exclude %{_datadir}/pacemaker/tests/cts
+%{_datadir}/pacemaker/tests
+%{_includedir}/pacemaker
+%{_libdir}/*.so
+%if %{with coverage}
+%{_var}/lib/pacemaker/gcov
+%endif
+%{_libdir}/pkgconfig/*.pc
+%license licenses/LGPLv2.1
+%doc COPYING
+%doc ChangeLog
+
+%files nagios-plugins-metadata
+%defattr(-,root,root)
+%dir %{_datadir}/pacemaker/nagios/plugins-metadata
+%attr(0644,root,root) %{_datadir}/pacemaker/nagios/plugins-metadata/*
+
+%changelog
+* Tue Aug 27 2019 Ken Gaillot <kgaillot@redhat.com> - 1.1.21-2
+- Add latest upstream bug fixes to rebase roll-up patch
+- Resolves: rhbz#1731189
+
+* Tue Jul 30 2019 Ken Gaillot <kgaillot@redhat.com> - 1.1.21-1
+- Recover from quiesced DC disk
+- Avoid timeouts and excessive stonithd CPU usage at start-up in large clusters
+- Default serialized order constraints to symmetrical=false
+- Avoid fence loops due to incorrect Pacemaker Remote ordering
+- Default concurrent-fencing to true
+- Harden GnuTLS priorities
+- Rebase on upstream 1.1.21 final version
+- Resolves: rhbz#1596125
+- Resolves: rhbz#1625671
+- Resolves: rhbz#1672225
+- Resolves: rhbz#1704870
+- Resolves: rhbz#1710422
+- Resolves: rhbz#1727280
+- Resolves: rhbz#1731189
+
+* Fri May 24 2019 Ken Gaillot <kgaillot@redhat.com> - 1.1.20-5
+- Correct memory issue in fence agent output fix
+- Resolves: rhbz#1549366
+
+* Fri Apr 19 2019 Ken Gaillot <kgaillot@redhat.com> - 1.1.20-4
+- Update security patches
+- Resolves: rhbz#1694556
+- Resolves: rhbz#1694559
+- Resolves: rhbz#1694907
+
+* Thu Apr 4 2019 Ken Gaillot <kgaillot@redhat.com> - 1.1.20-3
+- Support more than 64KB of fence agent output
+- Avoid unnecessary recovery of group member
+- Improve IPC clients' authentication of servers (CVE-2018-16877)
+- Improve pacemakerd authentication of running subdaemons (CVE-2018-16878)
+- Fix use-after-free with potential information disclosure (CVE-2019-3885)
+- Resolves: rhbz#1549366
+- Resolves: rhbz#1609453
+- Resolves: rhbz#1694556
+- Resolves: rhbz#1694559
+- Resolves: rhbz#1694907
+
+* Thu Mar 21 2019 Ken Gaillot <kgaillot@redhat.com> - 1.1.20-2
+- Assume unprivileged ACL if unable to get user information from host
+- Delay 2 seconds before re-attempting a failed node attribute write
+- SNMP alert sample script now sends all OIDs with every alert
+- Recover dependent resources correctly with asymmetric ordering
+- Rebase on upstream 1.1.20 final version
+- Resolves: rhbz#1596125
+- Resolves: rhbz#1597695
+- Resolves: rhbz#1608979
+- Resolves: rhbz#1628966
+- Resolves: rhbz#1644864
+
+* Fri Feb 1 2019 Ken Gaillot <kgaillot@redhat.com> - 1.1.20-1
+- pcs status now shows when a standby node still has active resources
+- Allow clean-up of guest nodes and bundles without unmanaging first
+- pcs status now shows pending and failed fence actions by default
+- Improve pcs status display when disconnected from cluster
+- Ensure node attributes are recorded if attrd writer is shutting down
+- Synchronize fencing history across all nodes
+- Add stonith_admin option to clear fencing history
+- Don't schedule unneeded bundle actions when the connection is on a different node
+- Allow use of sbd in clusters with guest nodes and bundles
+- Schedule bundle clone notifications correctly when connection is moving
+- Rebase on upstream 1.1.20-rc1 version
+- Avoid unneeded resource restarts when remote connection fails to start
+- Allow crm_resource --move to work when a previous move had a lifetime
+- Wait for all replies when refreshing a resource
+- Don't schedule clone notifications for a stopped bundle
+- Allow option to crm_resource --clear to clear only expired constraints
+- Fix result reporting when cleanup is done while an operation is in-flight
+- Resolves: rhbz#1419548
+- Resolves: rhbz#1448467
+- Resolves: rhbz#1461964
+- Resolves: rhbz#1486869
+- Resolves: rhbz#1535221
+- Resolves: rhbz#1555938
+- Resolves: rhbz#1595422
+- Resolves: rhbz#1627948
+- Resolves: rhbz#1638593
+- Resolves: rhbz#1644076
+- Resolves: rhbz#1644864
+- Resolves: rhbz#1648507
+- Resolves: rhbz#1648620
+- Resolves: rhbz#1652053
+- Resolves: rhbz#1652752
+- Resolves: rhbz#1658650
+- Resolves: rhbz#1665343
+
+* Mon Sep 24 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.19-8
+- Ensure crm_resource --force-* commands get stderr messages
+- Resolves: rhbz#1628947
+
+* Tue Aug 14 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.19-7
+- Shut down corosync locally if fenced
+- Count start failure once
+- Resolves: rhbz#1448221
+- Resolves: rhbz#1549576
+
+* Tue Aug 7 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.19-6
+- Fix pre-release regressions in resource clean-up behavior
+- Resolves: rhbz#1612869
+
+* Mon Jul 30 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.19-5
+- Another fix for srpm build on ppc64le systems with OpenIPMI-devel
+- Resolves: rhbz#1478451
+
+* Wed Jul 25 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.19-4
+- Fix srpm build on ppc64le systems with libservicelog-devel, OpenIPMI-devel
+- Resolves: rhbz#1478451
+
+* Wed Jul 11 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.19-3
+- Rebase to upstream c3c624ea3d98a74a8a287671a156db126c99a7bb (1.1.19)
+- Resolves: rhbz#1562255
+
+* Thu Jul 5 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.19-2
+- Ensure resource agent meta-data actions can get local node name
+- Do not record pending clone notifications in CIB
+- Resolves: rhbz#1374175
+- Resolves: rhbz#1570130
+
+* Wed Jun 20 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.19-1
+- Rebase to upstream 29ac12ad21f73524f69c0580798e07cdf1fe3fa0 (1.1.19-rc1)
+- Allow crm_node to work on Pacemaker Remote nodes
+- Implement new --validate option to stonith_admin
+- Support .path, .mount, and .timer systemd unit files as resources
+- Handle INFINITY fail count correctly in crm_failcount
+- Resolves: rhbz#1374175
+- Resolves: rhbz#1434936
+- Resolves: rhbz#1562255
+- Resolves: rhbz#1590483
+- Resolves: rhbz#1591045
+
+* Fri Jun 1 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-13
+- Pull latest upstream 1.1 branch fixes
+- Resolves: rhbz#1478451
+- Resolves: rhbz#1501505
+- Resolves: rhbz#1514492
+- Resolves: rhbz#1550829
+- Resolves: rhbz#1564536
+- Resolves: rhbz#1576148
+- Resolves: rhbz#1577300
+
+* Tue Apr 17 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-12
+- Pull in latest fixes from upstream 1.1 branch
+- Build for aarch64
+- Resolves: rhbz#1422602
+- Resolves: rhbz#1463015
+- Resolves: rhbz#1469255
+- Resolves: rhbz#1519379
+- Resolves: rhbz#1543278
+- Resolves: rhbz#1545449
+- Resolves: rhbz#1550829
+- Resolves: rhbz#1561617
+- Resolves: rhbz#1562255
+- Resolves: rhbz#1565187
+- Resolves: rhbz#1568593
+- Resolves: rhbz#1570130
+
+* Fri Jan 26 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-11
+- Fix regression in crm_master
+- Resolves: rhbz#1539113
+
+* Wed Jan 24 2018 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-10
+- Always trigger transition when quorum changes
+- Match clone names correctly with crm_resource --cleanup
+- Fix pcs resource --wait timeout when bundles are used
+- Observe colocation constraints correctly with bundles in master role
+- Resolves: rhbz#1464068
+- Resolves: rhbz#1508350
+- Resolves: rhbz#1519812
+- Resolves: rhbz#1527072
+
+* Mon Dec 18 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-9
+- Fix small memory leak introduced by node attribute delay fix
+- Resolves: rhbz#1454960
+
+* Tue Dec 12 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-8
+- Regression fix for "pcs resource cleanup" was incomplete
+- Resolves: rhbz#1508350
+
+* Mon Dec 11 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-7
+- Avoid node attribute write delay when corosync.conf has only IP addresses
+- Fix regressions in "pcs resource cleanup" behavior
+- Restore ordering of unfencing before fence device starts
+- Ensure --wait options work when bundles are in use
+- Fix possible invalid transition with bundle ordering constraints
+- Resolves: rhbz#1454960
+- Resolves: rhbz#1508350
+- Resolves: rhbz#1517796
+- Resolves: rhbz#1519812
+- Resolves: rhbz#1522822
+
+* Wed Nov 15 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-6
+- Rebase to upstream 2b07d5c5a908998891c3317faa30328c108d3a91 (1.1.18)
+- If on-fail=ignore, migration-threshold should also be ignored
+- Resolves: rhbz#1474428
+- Resolves: rhbz#1507344
+
+* Fri Nov 3 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-5
+- Properly clean up primitive inside bundle
+- Scalability improvements
+- Resolves: rhbz#1499217
+- Resolves: rhbz#1508373
+
+* Fri Nov 3 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-4
+- Rebase to upstream 1a4ef7d180e77bcd6423f342d62e05e516c4e852 (1.1.18-rc4)
+- Resolves: rhbz#1381754
+- Resolves: rhbz#1474428
+- Resolves: rhbz#1499217
+- Resolves: rhbz#1508373
+
+* Tue Oct 24 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-3
+- Rebase to upstream 36d2962a8613322fc43d727d95720d61a47d0138 (1.1.18-rc3)
+- Resolves: rhbz#1474428
+
+* Mon Oct 16 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-2
+- Rebase to upstream 5cccc41c95d6288eab27d93901b650b071f976dc (1.1.18-rc2)
+- Default record-pending to true
+- Resolves: rhbz#1323546
+- Resolves: rhbz#1376556
+- Resolves: rhbz#1382364
+- Resolves: rhbz#1461976
+- Resolves: rhbz#1474428
+- Resolves: rhbz#1500509
+- Resolves: rhbz#1501903
+- Resolves: rhbz#1501924
+
+* Mon Oct 9 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.18-1
+- Rebase to upstream 1cb712c5369c98f03d42bcf8648cacd86a5f48f7 (1.1.18-rc1)
+- Resolves: rhbz#1298581
+- Resolves: rhbz#1394418
+- Resolves: rhbz#1427648
+- Resolves: rhbz#1454933
+- Resolves: rhbz#1454957
+- Resolves: rhbz#1454960
+- Resolves: rhbz#1462253
+- Resolves: rhbz#1464068
+- Resolves: rhbz#1465519
+- Resolves: rhbz#1470262
+- Resolves: rhbz#1471506
+- Resolves: rhbz#1474428
+- Resolves: rhbz#1474463
+- Resolves: rhbz#1482278
+- Resolves: rhbz#1489728
+- Resolves: rhbz#1489735
+
+* Tue Jun 20 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-12
+- Avoid unnecessary restarts when recovering remote connections
+- Resolves: rhbz#1448773
+
+* Fri Jun 9 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-11
+- Support bundle meta-attributes
+- Resolves: rhbz#1447903
+
+* Tue May 23 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-10
+- Fix issues when running bundles on Pacemaker Remote nodes
+- Reap orphaned processes when running Pacemaker Remote as pid 1
+- Order remote actions after remote connection recovery
+  (fixes regression in RHEL 7.3)
+- Avoid local resource manager daemon (lrmd) crash when an
+  in-flight systemd operation is cancelled
+- Resolves: rhbz#1432722
+- Resolves: rhbz#1441603
+- Resolves: rhbz#1448772
+- Resolves: rhbz#1451170
+
+* Tue May 9 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-9
+- Allow cleanup of guest nodes when guest is unmanaged
+- Allow bundles to run on Pacemaker Remote nodes
+- Handle slow IPC clients better
+- Update crmd throttle information when CPUs are hot-plugged in
+- Order pacemaker systemd unit after resource-agents-deps target
+- Resolves: rhbz#1303742
+- Resolves: rhbz#1432722
+- Resolves: rhbz#1435067
+- Resolves: rhbz#1444728
+- Resolves: rhbz#1446669
+
+* Tue Apr 18 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-8
+- Fix shell script syntax error introduced with URL patch
+- Resolves: rhbz#1410886
+
+* Tue Apr 18 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-7
+- Avoid fencing old DC if it is shutting down while another node is joining
+- Improve crmd's handling of repeated fencing failures
+- Correct behavior when guest created by bundle has a node attribute
+- Show Red Hat bugzilla URL rather than upstream when generating cluster report
+- Resolves: rhbz#1430112
+- Resolves: rhbz#1432722
+
+* Wed Apr 5 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-6
+- Allow container without IP to use underlying hostname
+- Resolves: rhbz#1432722
+
+* Tue Apr 4 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-5
+- Keep man pages compressed
+- Bugfixes for container bundles
+- Resolves: rhbz#1410886
+- Resolves: rhbz#1432722
+
+* Mon Apr 3 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-4
+- Add support for container bundles
+- Treat systemd reloading state as monitor success
+- Resolves: rhbz#1432722
+- Resolves: rhbz#1436696
+
+* Mon Mar 20 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-3
+- Avoid hang when shutting down unmanaged remote node connections
+- Get correct node name when crm_node or crm_attribute is run on remote node
+- Ignore action when configured as a stonith device parameter
+- Include recent upstream bug fixes
+- Resolves: rhbz#1388489
+- Resolves: rhbz#1410886
+- Resolves: rhbz#1417936
+- Resolves: rhbz#1421700
+
+* Thu Jan 19 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-2
+- Avoid grep crashes in crm_report when looking for system logs
+- Properly ignore version with crm_diff --no-version
+- Process guest node fencing properly
+- Ensure filename is valid before using
+- Build for ppc64le
+- Resolves: rhbz#1288261
+- Resolves: rhbz#1289662
+- Resolves: rhbz#1383462
+- Resolves: rhbz#1405635
+- Resolves: rhbz#1412309 
+
+* Thu Jan 12 2017 Ken Gaillot <kgaillot@redhat.com> - 1.1.16-1
+- Rebase to upstream 94ff4df51a55cc30d01843ea11b3292bac755432 (1.1.16)
+- Resolves: rhbz#1374777
+- Resolves: rhbz#1378817
+- Resolves: rhbz#1410886
+
+* Wed Oct 26 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-12
+- Preserve rolling upgrades involving Pacemaker Remote nodes
+- Resolves: rhbz#1388827
+
+* Fri Oct 21 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-11.1
+- Fix CVE-2016-7035
+- Resolves: rhbz#1374776
+
+* Thu Sep 22 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-11
+- Sanitize readable CIB output collected by crm_report
+- Document crm_report --sos-mode option
+- Speed up crm_report on Pacemaker Remote nodes
+- Avoid sbd fencing when upgrading pacemaker_remote package
+- Resolves: rhbz#1219188
+- Resolves: rhbz#1235434
+- Resolves: rhbz#1323544
+- Resolves: rhbz#1372009
+
+* Mon Aug 15 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-10
+- Only clear remote node operation history on startup
+- Resend a lost shutdown request
+- Correctly detect and report invalid configurations
+- Don't include manual page for resource agent that isn't included
+- Resolves: rhbz#1288929
+- Resolves: rhbz#1310486
+- Resolves: rhbz#1352039
+
+* Fri Aug 5 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-9
+- Make crm_mon XML schema handle multiple-active resources
+- Resolves: rhbz#1364500
+
+* Wed Aug 3 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-8
+- Quote timestamp-format correctly in alert_snmp.sh.sample
+- Unregister CIB callbacks correctly
+- Print resources section heading consistently in crm_mon output
+- Resolves: rhbz#773656
+- Resolves: rhbz#1361533
+
+* Tue Jul 26 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-7
+- Avoid null dereference
+- Resolves: rhbz#1290592
+
+* Tue Jul 26 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-6
+- Fix transition failure with start-then-stop order constraint + unfencing
+- Resolves: rhbz#1290592
+
+* Fri Jul 1 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-5
+- Update spec file for toolchain hardening
+- Resolves: rhbz#1242258
+
+* Tue Jun 28 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-4
+- Take advantage of toolchain hardening
+- Resolves: rhbz#1242258
+
+* Wed Jun 22 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-3
+- Rebase to upstream e174ec84857e087210b9dacee3318f8203176129 (1.1.15)
+- Resolves: rhbz#1304771
+  Resolves: rhbz#1303765
+  Resolves: rhbz#1327469
+  Resolves: rhbz#1337688
+  Resolves: rhbz#1345876
+  Resolves: rhbz#1346726
+
+* Fri Jun 10 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-2
+- Rebase to upstream 25920dbdbc7594fc944a963036996f724c63a8b8 (1.1.15-rc4)
+- Resolves: rhbz#1304771
+  Resolves: rhbz#773656
+  Resolves: rhbz#1240330
+  Resolves: rhbz#1281450
+  Resolves: rhbz#1286316
+  Resolves: rhbz#1287315
+  Resolves: rhbz#1323544
+
+* Tue May 31 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.15-1
+- Rebase to upstream 2c148ac30dfcc2cfb91dc367ed469b6f227a8abc (1.1.15-rc3+)
+- Resolves: rhbz#1304771
+  Resolves: rhbz#1040685
+  Resolves: rhbz#1219188
+  Resolves: rhbz#1235434
+  Resolves: rhbz#1268313
+  Resolves: rhbz#1284069
+  Resolves: rhbz#1287868
+  Resolves: rhbz#1288929
+  Resolves: rhbz#1312094
+  Resolves: rhbz#1314157
+  Resolves: rhbz#1321711
+  Resolves: rhbz#1338623
+
+* Thu Feb 18 2016 Ken Gaillot <kgaillot@redhat.com> - 1.1.14-11
+- Rebase to upstream 2cccd43d6b7f2525d406251e14ef37626e29c51f (1.1.14+)
+- Resolves: rhbz#1304771
+  Resolves: rhbz#1207388
+  Resolves: rhbz#1240330
+  Resolves: rhbz#1281450
+  Resolves: rhbz#1284069
+  Resolves: rhbz#1286316
+  Resolves: rhbz#1287315
+  Resolves: rhbz#1287868
+  Resolves: rhbz#1288929
+  Resolves: rhbz#1303765
+- This also updates the packaging to follow upstream more closely,
+  most importantly moving some files from the pacemaker package to
+  pacemaker-cli (including XML schemas, SNMP MIB, attrd_updater command,
+  most ocf:pacemaker resource agents, and related man pages),
+  and deploying /etc/sysconfig/crm_mon.
+
+* Thu Oct 08 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-10
+- More improvements when updating and deleting meta attributes
+- Resolves: rhbz#1267265
+
+* Mon Oct 05 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-9
+- Fix regression when updating child meta attributes
+- Resolves: rhbz#1267265
+
+* Wed Sep 16 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-8
+- Fix regression when setting attributes for remote nodes 
+- Resolves: rhbz#1206647
+
+* Thu Sep 10 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-7
+- Additional upstream patches
+- Resolves: rhbz#1234680
+
+* Wed Jul 22 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-6
+- Correctly apply and build patches
+- Resolves: rhbz#1234680
+
+* Wed Jul 22 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-5
+- Sync with upstream 63f8e9a
+- Resolves: rhbz#1234680
+
+* Mon Jul 20 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-4
+- Sync with upstream 63f8e9a
+- Resolves: rhbz#1234680
+
+* Fri Jun 26 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-3
+- New upstream tarball 44eb2ddf8d4f8fc05256aae2abc9fbf3ae4d1fbc
+- Resolves: rhbz#1234680
+
+* Thu Jun 11 2015 David Vossel <dvossel@redhat.com> - 1.1.13-2
+- Adds nagios metadata.
+
+  Resolves: rhbz#1203053
+
+* Tue May 12 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.13-0.1
+- New upstream tarball 8ae45302394b039fb098e150f156df29fc0cb576
+
+* Wed Mar 18 2015 David Vossel <dvossel@redhat.com> - 1.1.12-25
+- Convince systemd to shut down dbus after pacemaker.
+
+  Resolves: rhbz#1198886
+
+* Wed Mar 18 2015 David Vossel <dvossel@redhat.com> - 1.1.12-23
+- Ensure that when B is colocated with A, B cannot run if A cannot run.
+
+  Resolves: rhbz#1194475
+
+* Thu Jan 15 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-22
+- Fix segfault encountered with orphaned remote node connections
+
+  Resolves: rhbz#1176210
+
+* Thu Jan 15 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-21
+- Fix use-after-free in CLI tool when restarting a resource 
+
+* Tue Jan 13 2015 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-20
+- Expose the -N/--node option for attrd_updater to allow attributes to
+  be set for other nodes
+
+* Sun Jan 11 2015 David Vossel <dvossel@redhat.com> - 1.1.12-19
+- Imply stop on actions within containers during host fencing
+- acl: correctly implement the reference acl directive
+
+  Resolves: rhbz#1117341
+
+* Tue Jan 6 2015 David Vossel <dvossel@redhat.com> - 1.1.12-18
+- clone order constraint require-all option.
+- fix memory leaks in crmd and pacemakerd
+
+  Resolves: rhbz#1176210
+
+* Tue Dec 16 2014 David Vossel <dvossel@redhat.com> - 1.1.12-15
+- Include ipc and pacemaker remote related upstream fixes.
+
+* Wed Nov 26 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-13
+- Update patch level to upstream a433de6
+- Ensure we wait for long running systemd stop operations to complete
+  Resolves: rhbz#1165423
+
+* Tue Nov 18 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-11
+- Update patch level to upstream 7dd9022
+- Ensure all internal caches are updated when nodes are removed from the cluster
+  Resolves: rhbz#1162727
+ 
+* Wed Nov 05 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-10
+- Update patch level to upstream 98b6688
+- Support an intelligent resource restart operation
+- Exclusive discovery implies running the resource is only possible on the listed nodes
+
+* Wed Nov 05 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-9
+- Update patch level to upstream fb94901
+- Prevent blocking by performing systemd reloads asynchronously
+
+* Tue Oct 28 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-8
+- Repair the ability to start when sbd is not enabled
+
+* Mon Oct 27 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-7
+- Update patch level to upstream afa0f33
+  - Resolve coverity defects
+
+* Fri Oct 24 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-5
+- Update patch level to upstream 031e46c
+  - Prevent glib assert triggered by timers being removed from mainloop more than once 
+  - Allow rsc discovery to be disabled in certain situations
+  - Allow remote-nodes to be placed in maintenance mode
+  - Improved sbd integration
+
+* Thu Oct 16 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-4
+- Add install dependency on sbd
+
+* Wed Oct 01 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-3
+- Update patch level to upstream be1e835
+    Resolves: rhbz#1147989
+
+* Fri Sep 19 2014 Fabio M. Di Nitto <fdinitto@redhat.com> - 1.1.12-2
+- Enable build on s390x
+    Resolves: rhbz#1140917
+
+* Mon Sep 08 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.12-1
+- Rebase to upstream a14efad51ca8f1e3742fd8520e051cd7a0864f04 (1.1.12+)
+    Resolves: rhbz#1059626
+
+* Fri Jul 04 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-32
+
+- Fix: lrmd: Handle systemd reporting 'done' before a resource is actually stopped
+    Resolves: rhbz#1111747
+
+* Thu Apr 17 2014 David Vossel <dvossel@redhat.com> - 1.1.10-31
+
+- fencing: Fence using all required devices
+- fencing: Execute all required fencing devices regardless of what topology level they are at
+- fencing: default to 'off' when agent does not advertise 'reboot' in metadata
+    Resolves: rhbz#1078078
+
+* Mon Apr 14 2014 Andrew Beekhof <abeekhof@redhat.com> 1.1.10-30
+
+- crmd: Do not erase the status section for unfenced nodes
+- crmd: Correctly react to successful unfencing operations
+- crmd: Report unsuccessful unfencing operations
+- crmd: Do not overwrite existing node state when fencing completes
+- fencing: Correctly record which peer performed the fencing operation
+- fencing: Automatically switch from 'list' to 'status' to 'static-list' if those actions are not advertised in the metadata
+- fencing: Filter self-fencing at the peers to allow unfencing to work correctly
+- pengine: Automatically re-unfence a node if the fencing device definition changes
+- pengine: Fencing devices default to only requiring quorum in order to start
+- pengine: Delay unfencing until after we know the state of all resources that require unfencing
+- pengine: Ensure unfencing occurs before fencing devices are (re-)probed
+- pengine: Ensure unfencing only happens once, even if the transition is interrupted
+- pengine: Do not unfence nodes that are offline, unclean or shutting down
+- pengine: Unfencing is based on device probes, there is no need to unfence when normal resources are found active
+- logging: daemons always get a log file, unless explicitly configured to 'none'
+- lrmd: Expose logging variables expected by OCF agents
+- crm_report: Suppress logging errors after the target directory has been compressed
+- crm_resource: Wait for the correct number of replies when cleaning up resources
+    Resolves: rhbz#1078078
+
+* Tue Mar 25 2014 David Vossel <dvossel@redhat.com> - 1.1.10-29
+
+- Low: controld: Remove '-q 0' from default dlm_controld arguments
+    Resolves: rhbz#1064519
+
+* Tue Mar 25 2014 David Vossel <dvossel@redhat.com> - 1.1.10-28
+
+- pengine: fixes invalid transition caused by clones with more than 10 instances
+    Resolves: rhbz#1078504
+
+* Fri Feb 28 2014 Andrew Beekhof <beekhof@redhat.com> - 1.1.10-27
+
+- crm_resource: Prevent use-of-NULL
+- systemd: Prevent use-of-NULL when determining if an agent exists
+- Fencing: Remove shadow definition and use of variable 'progress'
+    Resolves: rhbz#1070916
+
+* Thu Feb 27 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-26
+
+- Run automated regression tests after every build
+- Fencing: Send details of stonith_api_time() and stonith_api_kick() to syslog
+- Fencing: Pass the correct options when looking up the history by node name
+- Fencing: stonith_api_time_helper now returns when the most recent fencing operation completed
+- crm_report: Additional dlm detail if dlm_controld is running
+- crmd: Gracefully handle actions that cannot be initiated
+- pengine: Gracefully handle bad values for XML_ATTR_TRANSITION_MAGIC
+    Resolves: rhbz#1070916
+
+* Tue Feb 25 2014 David Vossel <dvossel@redhat.com> - 1.1.10-25
+
+- pengine: cl#5187 - Prevent resources in an anti-colocation from even temporarily running on the same node
+    Resolves: rhbz#1069284
+
+* Thu Feb 20 2014 David Vossel <dvossel@redhat.com> - 1.1.10-24
+
+- controld: handling startup fencing within the controld agent, not the dlm
+    Resolves: rhbz#1064519
+- controld: Do not consider the dlm up until the address list is present
+    Resolves: rhbz#1067536
+
+* Wed Feb 12 2014 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-23
+
+- mcp: Tell systemd not to respawn us if we return 100
+- services: Detect missing agents and permission errors before forking
+- Use native DBus library for systemd support to avoid problematic use of threads
+    Resolves: rhbz#720543 (aka. 1057697)
+
+* Fri Dec 27 2013 Daniel Mach <dmach@redhat.com> - 1.1.10-22
+- Mass rebuild 2013-12-27
+
+* Wed Dec 04 2013 David Vossel <dvossel@redhat.com> - 1.1.10-21
+
+- Fix: Removes unnecessary newlines in crm_resource -O output
+    Resolves: rhbz#720543
+
+* Thu Nov 14 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-20
+
+- Fix: tools: Fixes formatting of remote-nodes in crm_mon and crm_simulate
+- Fix: Corosync: Attempt to retrieve a peers node name if it is not already known
+    Resolves: rhbz#720543
+
+* Thu Nov 14 2013 David Vossel <dvossel@redhat.com> - 1.1.10-19
+- Fix: controld: Use the correct variant of dlm_controld for
+  corosync-2 clusters
+
+    Resolves: rhbz#1028627
+
+* Thu Nov 07 2013 David Vossel <dvossel@redhat.com> - 1.1.10-18
+
+- High: remote: Add support for ipv6 into pacemaker_remote daemon
+    Resolves: rhbz#720543
+
+* Wed Nov 06 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-17
+
+    Resolves: rhbz#720543
+
+- Fix: core: Do not enable blackbox for cli tools
+- Fix: Command-line tools should stop after an assertion failure
+- Fix: crmd: Don't add node_state to cib if we have not seen or fenced this node yet
+- Fix: crmd: Correctly update expected state when the previous DC shuts down
+- Fix: crmd: Cache rsc_info retrieved from lrmd and pacemaker_remoted
+- Fix: crmd: Pad internal lrmd rsc_info and metadata retrieval timeout
+- Fix: crm_attribute: Detect orphaned remote-nodes when setting attributes
+- Fix: crm_mon: Prevent use-of-NULL when ping resources do not define a host list
+- Fix: crm_report: Record the output of the collector
+- Fix: crm_report: Do not print garbage when collecting from the local node
+- Fix: crm_resource: Wait for all replies when cleaning up resources
+- Fix: fencing: Do not broadcast suicide if the on action is being executed
+- Fix: fencing: Allow fencing for node after topology entries are deleted
+- Fix: fencing: Deep copy current topology level list on remote op
+- Fix: lrmd: Correctly cancel monitor actions for lsb/systemd/service resources on cleaning up
+- Fix: pengine: Don't prevent clones from running due to dependent resources
+- Fix: pengine: Probe containers not expected to be up
+- Fix: ipc: Raise the default buffer size to 128k
+- Fix: ipc: Use the higher of the configured buffer size or the default
+- Fix: iso8601: Prevent dates from jumping backwards a day in some timezones
+- Fix: remote: Properly version the remote connection protocol
+- Fix: remote: Handle endian changes between client and server and improve forward compatibility
+    Resolves: rhbz#720543
+
+* Mon Oct 07 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-16
+
+- Remove unsupported resource agent
+- Log: crmd: Supply arguments in the correct order
+- Fix: crm_report: Correctly redirect error message to /dev/null
+- Fix: Bug rhbz#1011618 - Consistently use 'Slave' as the role for unpromoted master/slave resources
+- Fix: pengine: Location constraints with role=Started should prevent masters from running at all
+- Fix: crm_resource: Observe --master modifier for --move
+- Provide a meaningful error if --master is used for primitives and groups
+- Fix: Fencing: Observe pcmk_host_list during automatic unfencing
+    Resolves: rhbz#996576
+
+* Fri Sep 27 2013 David Vossel  <dvossel@redhat.com> - 1.1.10-15
+  + Fix: crmd: Allow transient attributes to be set on remote-nodes.
+  + Fix: pengine: Handle orphaned remote-nodes properly
+  + Low: cts: Add RemoteLXC regression test.
+
+  Resolves: rhbz#1006465
+  Resolves: rhbz#1006471
+
+* Fri Aug 23 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-14
+  + Fix: xml: Location constraints are allowed to specify a role
+  + Bug rhbz#902407 - crm_resource: Handle --ban for master/slave resources as advertised
+    Resolves: rhbz#902407
+
+* Wed Aug 14 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-13
+  + Fencing: Support agents that need the host to be unfenced at startup
+    Resolves: rhbz#996576
+  + crm_report: Collect corosync quorum data
+    Resolves: rhbz#839342
+
+* Thu Aug 08 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-12
+- Regenerate patches to have meaningful names
+
+* Thu Aug 08 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-11
+  + Fix: systemd: Prevent glib assertion - only call g_error_free() with non-NULL arguments
+  + Fix: systemd: Prevent additional assertions in g_error_free
+  + Fix: logging: glib CRIT messages should not produce core files by default
+  + Doc: controld: Update the description
+  + Fix: pengine: Correctly account for the location preferences of things colocated with a group
+  + Fix: cib: Correctly log short-form xml diffs
+  + Fix: crmd: Correctly update the history cache when recurring ops change their return code
+  + Log: pengine: Better indicate when a resource has failed
+  + Log: crm_mon: Unmunge the output for failed operations
+
+* Fri Aug 02 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-10
+  + Fix: pengine: Do not re-allocate clone instances that are blocked in the Stopped state
+  + Fix: pengine: Do not allow colocation with blocked clone instances
+
+* Thu Aug 01 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-9
+  + Fix: crmd: Prevent crash by passing log arguments in the correct order
+
+* Thu Aug 01 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-8
+  + Fix: pengine: Do not restart resources that depend on unmanaged resources
+
+* Thu Aug 01 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-7
+  + Fix: crmd: Prevent recurring monitors being cancelled due to notify operations
+
+* Fri Jul 26 2013 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.10-6
+- Update source tarball to revision: 368c726 (Pacemaker-1.1.10-rc7)
+- Changesets: 18
+- Diff:       9 files changed, 245 insertions(+), 170 deletions(-)
+
+- Features added since Pacemaker-1.1.10-rc7
+  + crm_resource: Allow options to be set recursively
+
+- Changes since Pacemaker-1.1.10-rc7
+  + Bug cl#5161 - crmd: Prevent memory leak in operation cache
+  + cib: Correctly read back archived configurations if the primary is corrupted
+
+* Mon Jul 22 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-5
+- Streamline spec file
+
+- Upstream patch for:
+  + cman: Only build migration tools for targets that may use them
+  + cib: Ensure we set up hacluster's groups in stand-alone mode
+
+- Update for new upstream tarball: Pacemaker-1.1.10-rc7
+
+  + Bug cl#5157 - Allow migration in the absence of some colocation constraints
+  + Bug cl#5168 - Prevent clones from being bounced around the cluster due to location constraints
+  + Bug cl#5170 - Correctly support on-fail=block for clones
+  + crmd: CID#1036761 Dereference null return value
+  + crmd: cl#5164 - Fixes crmd crash when using pacemaker-remote
+  + crmd: Ensure operations for cleaned up resources don't block recovery
+  + crmd: Prevent messages for remote crmd clients from being relayed to wrong daemons
+  + crmd: Properly handle recurring monitor operations for remote-node agent
+  + fencing: Correctly detect existing device entries when registering a new one
+  + logging: If SIGTRAP is sent before tracing is turned on, turn it on
+  + lrmd: Prevent use-of-NULL in client library
+  + pengine: cl#5128 - Support maintenance mode for a single node
+  + pengine: cl#5164 - Pengine segfault when calculating transition with remote-nodes.
+  + pengine: Do the right thing when admins specify the internal resource instead of the clone
+  + systemd: Turn off auto-respawning of systemd services when the cluster starts them
+
+* Wed Jul 10 2013 David Vossel <dvossel@redhat.com> - 1.1.10-4
+- Fixes crmd crash when using pacemaker_remote.
+
+* Mon Jun 17 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-3
+- Update to upstream 838e41e
+
+  + Feature: pengine: Allow active nodes in our current membership to be fenced without quorum
+  + Fix: attrd: Fixes deleted attributes during dc election
+  + Fix: corosync: Fall back to uname for local nodes
+  + Fix: crm_report: Find logs in compressed files
+  + Fix: pengine: If fencing is unavailable or disabled, block further recovery for resources that fail to stop
+  + Fix: systemd: Ensure we get shut down correctly by systemd
+
+* Sun Jun 09 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-2
+- Update for new upstream tarball: Pacemaker-1.1.10-rc4
+
+- Features in Pacemaker-1.1.10-rc4:
+  + PE: Display a list of nodes on which stopped anonymous clones are not active instead of meaningless clone IDs
+  + crm_error: Add the ability to list and print error symbols
+  + crm_resource: Implement --ban for moving resources away from nodes and --clear (replaces --unmove)
+  + crm_resource: Support OCF tracing when using --force-(check|start|stop)
+
+- Changes since Pacemaker-1.1.10-rc1
+
+  + Bug cl#5133 - pengine: Correctly observe on-fail=block for failed demote operation
+  + Bug cl#5152 - Correctly clean up fenced nodes during membership changes
+  + Bug cl#5153 - Correctly display clone failcounts in crm_mon
+  + Bug cl#5154 - Do not expire failures when on-fail=block is present
+  + Bug pengine: cl#5155 - Block the stop of resources if any depending resource is unmanaged
+  + crm_report: Correctly collect logs when 'uname -n' reports fully qualified names
+  + Check for and replace non-printing characters with their octal equivalent while exporting xml text
+  + Convert all exit codes to positive errno values
+  + Core: Ensure the blackbox is saved on abnormal program termination
+  + corosync: Detect the loss of members for which we only know the nodeid
+  + corosync: Nodes that can persist in sending CPG messages must be alive after all
+  + crmd: Do not get stuck in S_POLICY_ENGINE if a node we couldn't fence returns
+  + crmd: Ensure all membership operations can complete while trying to cancel a transition
+  + crmd: Everyone who gets a fencing notification should mark the node as down
+  + crmd: Initiate node shutdown if another node claims to have successfully fenced us
+  + crm_resource: Gracefully fail when --force-* is attempted for stonith resources
+  + fencing: Restore the ability to manually confirm that fencing completed
+  + pengine: Correctly handle resources that recover before we operate on them
+  + pengine: Ensure per-node resource parameters are used during probes
+  + pengine: Implement the rest of get_timet_now() and rename to get_effective_time
+  + pengine: Mark unrunnable stop actions as "blocked"
+  + pengine: Re-initiate active recurring monitors that previously failed but have timed out
+  + xml: Restore the ability to embed comments in the cib
+
+* Wed Apr 17 2013 Andrew Beekhof <abeekhof@redhat.com> - 1.1.10-1
+- Update for new upstream tarball: Pacemaker-1.1.10-rc1
+- Features added since Pacemaker-1.1.8
+  + Performance enhancements for supporting 16 node clusters
+  + corosync: Use queues to avoid blocking when sending CPG messages
+  + ipc: Compress messages that exceed the configured IPC message limit
+  + ipc: Use queues to prevent slow clients from blocking the server
+  + ipc: Use shared memory by default
+  + lrmd: Support nagios remote monitoring
+  + lrmd: Pacemaker Remote Daemon for extending pacemaker functionality outside the corosync cluster.
+  + pengine: Check for master/slave resources that are not OCF agents
+  + pengine: Support a 'requires' resource meta-attribute for controlling whether it needs quorum, fencing or nothing
+  + pengine: Support for resource containers
+  + pengine: Support resources that require unfencing before start
+
+- Changes since Pacemaker-1.1.8
+  + attrd: Correctly handle deletion of non-existent attributes
+  + Bug cl#5135 - Improved detection of the active cluster type
+  + Bug rhbz#913093 - Use crm_node instead of uname
+  + cib: Prevent ordering changes when applying xml diffs
+  + cib: Remove text nodes from cib replace operations
+  + crmd: Prevent election storms caused by getrusage() values being too close
+  + date/time: Bug cl#5118 - Correctly convert seconds-since-epoch to the current time
+  + fencing: Attempt to provide more information than just 'generic error' for failed actions
+  + fencing: Correctly record completed but previously unknown fencing operations
+  + fencing: Correctly terminate when all device options have been exhausted
+  + fencing: cov#739453 - String not null terminated
+  + fencing: Do not merge new fencing requests with stale ones from dead nodes
+  + fencing: Do not start fencing until the entire device topology is found or query results time out.
+  + fencing: Do not wait for the query timeout if all replies have arrived
+  + fencing: Fix passing of parameters from CMAN containing '='
+  + fencing: Fix non-comparison when sorting devices by priority
+  + fencing: On failure, only try a topology device once from the remote level.
+  + fencing: Only try peers for non-topology based operations once
+  + fencing: Retry stonith device for duration of action's timeout period.
+  + ipc: Bug cl#5110 - Prevent 100% CPU usage when looking for synchronous replies
+  + mcp: Re-attach to existing pacemaker components when mcp fails
+  + pengine: Any location constraint for the slave role applies to all roles
+  + pengine: Bug cl#5101 - Ensure stop order is preserved for partially active groups
+  + pengine: Bug cl#5140 - Allow set members to be stopped when the subsequent set has require-all=false
+  + pengine: Bug cl#5143 - Prevent shuffling of anonymous master/slave instances
+  + pengine: Bug rhbz#880249 - Ensure orphan masters are demoted before being stopped
+  + pengine: Bug rhbz#880249 - Teach the PE how to recover masters into primitives
+  + pengine: cl#5025 - Automatically clear failcount for start/monitor failures after resource parameters change
+  + pengine: cl#5099 - Probe operation uses the timeout value from the minimum interval monitor by default (#bnc776386)
+  + pengine: cl#5111 - When clone/master child rsc has on-fail=stop, ensure all children stop on failure.
+  + pengine: cl#5142 - Do not delete orphaned children of an anonymous clone
+  + pengine: Correctly unpack active anonymous clones
+  + pengine: Ensure previous migrations are closed out before attempting another one
+  + pengine: rhbz#902459 - Remove rsc node status for orphan resources
+  + Replace the use of the insecure mktemp(3) with mkstemp(3)
+
+* Thu Apr 04 2013 David Vossel <dvossel@redhat.com> - 1.1.8-6
+  Fixes deprecated use of gnutls 3.1
+
+* Thu Apr 04 2013 David Vossel <dvossel@redhat.com> - 1.1.8-5
+  Rebuilt for gnutls 3.1
+
+* Thu Oct 25 2012 Andrew Beekhof <abeekhof@redhat.com> - 1.1.8-4
+- Update for new upstream tarball: 5db5f53
+
+  + High: mcp: Re-attach to existing pacemaker components when pacemakerd fails
+  + High: pengine: cl#5111 - When clone/master child rsc has on-fail=stop, ensure all children stop on failure.
+  + High: Replace the use of the insecure mktemp(3) with mkstemp(3)
+  + High: Core: Correctly process XML diff's involving element removal
+  + High: PE: Correctly unpack active anonymous clones
+  + High: PE: Fix clone_zero() and clone_strip() for single character resource names
+  + High: IPC: Bug cl#5110 - Prevent 100% CPU usage when looking for synchronous replies
+  + High: PE: Bug cl#5101 - Ensure stop order is preserved for partially active groups
+  + High: fencing: On failure, only try a topology device once from the remote level.
+  + High: fencing: Retry stonith device for duration of action's timeout period.
+  + High: PE: Fix memory leak on processing message (bnc#780224)
+  + High: fencing: Support 'on_target' option in fencing device metadata for forcing unfence on target node
+  + High: PE: Support resources that require unfencing before start
+  + High: PE: Support a 'requires' resource meta-attribute for controlling whether it needs quorum, fencing or nothing
+  + High: mcp: Only define HA_DEBUGLOG to avoid agent calls to ocf_log printing everything twice
+  + High: fencing: Do not start fencing until the entire device topology is found or query results time out.
+  + High: Cluster: Allow cman and corosync 2.0 nodes to use a name other than uname()
+
+* Fri Sep 21 2012 Andrew Beekhof <andrew@beekhof.net> 1.1.8-3
+- Only build for i386 and x86_64 as directed
+
+* Fri Sep 21 2012 Andrew Beekhof <andrew@beekhof.net> 1.1.8-1
+- Rebuild for upstream 1.1.8 release
+- Documentation disabled pending a functional publican/ImageMagick combination
+
+- Statistics:
+  Changesets: 1019
+  Diff:       2107 files changed, 117258 insertions(+), 73606 deletions(-)
+
+- See included ChangeLog file or https://raw.github.com/ClusterLabs/pacemaker/master/ChangeLog for full details
+
+  + New IPC implementation from libqb
+  + New logging implementation from libqb
+  + Quieter - info, debug and trace logs are no longer sent to syslog
+  + Dropped dependency on cluster-glue
+  + Config and core directories no longer located in heartbeat directories
+  + Support for managing systemd services
+  + Rewritten local resource management daemon
+  + Version bumps for every shared library due to API cleanups
+  + Removes crm shell; install/use the pcs shell and GUI instead
+
+* Fri Jul 20 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 1.1.7-2.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Wed Mar 28 2012 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.7-2
+- Reinstate the ghost directive for /var/run/crm
+
+* Wed Mar 28 2012 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.7-1
+- Update source tarball to upstream release: Pacemaker-1.1.7
+- See included ChangeLog file or https://raw.github.com/ClusterLabs/pacemaker/master/ChangeLog for details
+
+* Thu Feb 16 2012 Andrew Beekhof <andrew@beekhof.net> 1.1.7-0.3-7742926.git
+- New upstream tarball: 7742926
+- Additional Provides and Obsoletes directives to enable upgrading from heartbeat
+- Rebuild now that the Corosync CFG API has been removed
+
+* Thu Feb 02 2012 Andrew Beekhof <andrew@beekhof.net> 1.1.7-0.2-bc7c125.git
+- Additional Provides and Obsoletes directives to enable upgrading from rgmanager
+
+* Thu Feb 02 2012 Andrew Beekhof <andrew@beekhof.net> 1.1.7-0.1-bc7c125.git
+- New upstream tarball: bc7c125
+- Pre-release 1.1.7 build to deal with the removal of cman and support for corosync plugins
+- Add libqb as a dependency
+
+* Fri Jan 13 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 1.1.6-3.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild
+
+* Mon Sep 26 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.6-3
+- New upstream tarball: 89678d4
+- Move man pages to the correct subpackages
+
+* Mon Sep 26 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.6-2
+- Do not build in support for heartbeat, snmp, esmtp by default
+- Create a package for cluster-unaware libraries to minimize our
+  footprint on non-cluster nodes
+- Better package descriptions
+
+* Wed Sep 07 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.6-1
+- Upstream release of 1.1.6
+- See included ChangeLog file or http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details
+
+- Disabled eSMTP and SNMP support.  Painful to configure and rarely used.
+- Created cli sub-package for non-cluster usage
+
+* Thu Jul 21 2011 Petr Sabata <contyk@redhat.com> - 1.1.5-3.2
+- Perl mass rebuild
+
+* Wed Jul 20 2011 Petr Sabata <contyk@redhat.com> - 1.1.5-3.1
+- Perl mass rebuild
+
+* Mon Jul 11 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.5-3
+- Rebuild for new snmp .so
+
+* Fri Jun 17 2011 Marcela Mašláňová <mmaslano@redhat.com> - 1.1.5-2.2
+- Perl mass rebuild
+
+* Fri Jun 10 2011 Marcela Mašláňová <mmaslano@redhat.com> - 1.1.5-2.1
+- Perl 5.14 mass rebuild
+
+* Wed Apr 27 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.5-2
+- Mark /var/run directories with ghost directive
+  Resolves: rhbz#656654
+
+* Wed Apr 27 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.5-1
+- New upstream release plus patches for CMAN integration
+
+* Tue Feb 08 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 1.1.4-5.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Tue Jan 11 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.4-5
+- Re-enable corosync and heartbeat support with correct bcond variable
+  usage
+
+* Wed Dec  8 2010 Fabio M. Di Nitto <fdinitto@redhat.com> 1.1.4-4
+- Temporary drop publican doc build
+
+* Wed Dec  8 2010 Fabio M. Di Nitto <fdinitto@redhat.com> 1.1.4-3
+- Fix publican build on x86
+
+* Wed Dec  8 2010 Fabio M. Di Nitto <fdinitto@redhat.com> 1.1.4-2
+- Drop double source entry and 22Mb from the srpm
+
+* Mon Nov 15 2010 Andrew Beekhof <andrew@beekhof.net> 1.1.4-1
+- Upstream release of 1.1.4
+- See included ChangeLog file or http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details
+
+* Wed Sep 29 2010 jkeating - 1.1.3-1.1
+- Rebuilt for gcc bug 634757
+
+* Tue Sep 21 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.3-1
+- Upstream release of 1.1.3
+  + High: crmd: Use the correct define/size for lrm resource IDs
+  + High: crmd: Bug lf#2458 - Ensure stop actions always have the relevant resource attributes
+  + High: crmd: Ensure we activate the DC timer if we detect an alternate DC
+  + High: mcp: Correctly initialize the string containing the list of active daemons
+  + High: mcp: Fix the expansion of the pid file in the init script
+  + High: mcp: Tell chkconfig we need to shut down early on
+  + High: PE: Bug lf#2476 - Repair on-fail=block for groups and primitive resources
+  + High: PE: Do not demote resources because something that requires it can't run
+  + High: PE: Rewrite the ordering constraint logic for simplicity, clarity and maintainability
+  + High: PE: Wait until stonith is available, don't fall back to shutdown for nodes requesting termination
+  + High: PE: Prevent segfault by ensuring the arguments to do_calculations() are initialized
+  + High: stonith: Bug lf#2461 - Prevent segfault by not looking up operations if the hashtable hasn't been initialized yet
+  + High: Stonith: Bug lf#2473 - Ensure stonith operations complete within the timeout and are terminated if they run too long
+  + High: stonith: Bug lf#2473 - Gracefully handle remote operations that arrive late (after we've done notifications)
+  + High: stonith: Bug lf#2473 - Add the timeout at the top level where the daemon is looking for it
+  + High: stonith: Bug lf#2473 - Ensure timeouts are included for fencing operations
+  + High: Stonith: Use the timeout specified by the user
+  + High: Tools: Bug lf#2456 - Fix assertion failure in crm_resource
+
+* Mon Jul 26 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.3-0.1-b3cb4f4a30ae.hg
+- Pre-release version of 1.1.3
+  + High: ais: Bug lf#2401 - Improved processing when the peer crmd processes join/leave
+  + High: ais: fix list of active processes sent to clients (bnc#603685)
+  + High: ais: Move the code for finding uid before the fork so that the child does no logging
+  + High: ais: Resolve coverity CONSTANT_EXPRESSION_RESULT defects
+  + High: cib: Also free query result for xpath operations that return more than one hit
+  + High: cib: Attempt to resolve memory corruption when forking a child to write the cib to disk
+  + High: cib: Correctly free memory when writing out the cib to disk
+  + High: cib: Fix the application of unversioned diffs
+  + High: cib: Remove old developmental error logging
+  + High: cib: Restructure the 'valid peer' check for deciding which instructions to ignore
+  + High: Core: Bug lf#2401 - Backed out changeset 6e6980376f01
+  + High: Core: Correctly unpack HA_Messages containing multiple entries with the same name
+  + High: Core: crm_count_member() should only track nodes that have the full stack up
+  + High: Core: New developmental logging system inspired by the kernel and a PoC from Lars Ellenberg
+  + High: crmd: All nodes should see status updates, not just the DC
+  + High: crmd: Allow non-DC nodes to clear failcounts too
+  + High: crmd: Base DC election on process relative uptime
+  + High: crmd: Bug lf#2439 - cancel_op() can also return HA_RSCBUSY
+  + High: crmd: Bug lf#2439 - Handle asynchronous notification of resource deletion events
+  + High: crmd: Fix assertion failure when performing async resource failures
+  + High: crmd: Fix handling of async resource deletion results
+  + High: crmd: Include the action for crm graph operations
+  + High: crmd: Make sure the membership cache is accurate after a successful fencing operation
+  + High: crmd: Make sure we always poke the FSA after a transition to clear any TE_HALT actions
+  + High: crmd: Offer crm-level membership once the peer starts the crmd process
+  + High: crmd: Only need to request quorum update for plugin based clusters
+  + High: crmd: Prevent everyone from losing DC elections by correctly initializing all relevant variables
+  + High: crmd: Prevent segmentation fault
+  + High: crmd: several fixes for async resource delete
+  + High: mcp: Add missing headers when built without heartbeat support
+  + High: mcp: New master control process for (re)spawning pacemaker daemons
+  + High: PE: Avoid creating invalid ordering constraints for probes that are not needed
+  + High: PE: Bug lf#1959 - Failed unmanaged resources should not prevent other services from shutting down
+  + High: PE: Bug lf#2422 - Ordering dependencies on partially active groups not observed properly
+  + High: PE: Bug lf#2424 - Use notify operation definition if it exists in the configuration
+  + High: PE: Bug lf#2433 - No services should be stopped until probes finish
+  + High: PE: Bug lf#2453 - Enforce clone ordering in the absence of colocation constraints
+  + High: PE: Correctly detect when there is a real failcount that expired and needs to be cleared
+  + High: PE: Correctly handle pseudo action creation
+  + High: PE: Correctly order clone startup after group/clone start
+  + High: PE: Fix colocation for interleaved clones
+  + High: PE: Fix colocation with partially active groups
+  + High: PE: Fix potential use-after-free defect from coverity
+  + High: PE: Fix previous merge
+  + High: PE: Fix use-after-free in order_actions() reported by valgrind
+  + High: PE: Prevent endless loop when looking for operation definitions in the configuration
+  + High: Resolve coverity RESOURCE_LEAK defects
+  + High: Shell: Complete the transition to using crm_attribute instead of crm_failcount and crm_standby
+  + High: stonith: Advertise stonith-ng options in the metadata
+  + High: stonith: Correctly parse pcmk_host_list parameters that appear on a single line
+  + High: stonith: Map poweron/poweroff back to on/off expected by the stonith tool from cluster-glue
+  + High: stonith: pass the configuration to the stonith program via environment variables (bnc#620781)
+  + High: Support starting plugin-based Pacemaker clusters with the MCP as well
+  + High: tools: crm_report - corosync.conf won't necessarily contain the text 'pacemaker' anymore
+  + High: tools: crm_simulate - Resolve coverity USE_AFTER_FREE defect
+  + High: Tools: Drop the 'pingd' daemon and resource agent in favor of ocf:pacemaker:ping
+  + High: Tools: Fix recently introduced use-of-NULL
+  + High: Tools: Fix use-after-free defect from coverity
+
+* Wed Jul 21 2010 David Malcolm <dmalcolm@redhat.com> - 1.1.2-5.1
+- Rebuilt for https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild
+
+* Fri Jul  9 2010 Dan Horák <dan[at]danny.cz> - 1.1.2-5
+- re-enable AIS cluster on s390(x)
+
+* Fri Jul  9 2010 Dan Horák <dan[at]danny.cz> - 1.1.2-4
+- AIS cluster not available on s390(x)
+
+* Mon Jun 21 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.2-3
+- publican is only available as a dependency on i386/x86_64 machines
+
+* Fri Jun 11 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.2-2
+- Resolves rhbz#602239 - Added patch to documentation so that it passes validation
+- High: Core: Bug lf#2401 - Backed out changeset 6e6980376f01
+
+* Tue Jun 01 2010 Marcela Maslanova <mmaslano@redhat.com> - 1.1.2-1.1
+- Mass rebuild with perl-5.12.0
+
+* Wed May 12 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.2-1
+- Update the tarball from the upstream 1.1.2 release
+  + High: ais: Bug lf#2340 - Force rogue child processes to terminate after waiting 2.5 minutes
+  + High: ais: Bug lf#2359 - Default expected votes to 2 inside Corosync/OpenAIS plugin
+  + High: ais: Bug lf#2359 - expected-quorum-votes not correctly updated after membership change
+  + High: ais: Bug rhbz#525552 - Move non-threadsafe calls to setenv() to after the fork()
+  + High: ais: Do not count votes from offline nodes and calculate current votes before sending quorum data
+  + High: ais: Ensure the list of active processes sent to clients is always up-to-date
+  + High: ais: Fix previous commit, actually return a result in get_process_list()
+  + High: ais: Fix two more uses of getpwnam() in non-thread-safe locations
+  + High: ais: Look for the correct conf variable for turning on file logging
+  + High: ais: Need to find a better and thread-safe way to set core_uses_pid. Disable for now.
+  + High: ais: Use the threadsafe version of getpwnam
+  + High: Core: Bug lf#2414 - Prevent use-after-free reported by valgrind when doing xpath based deletions
+  + High: Core: Bump the feature set due to the new failcount expiry feature
+  + High: Core: Fix memory leak in replace_xml_child() reported by valgrind
+  + High: Core: fix memory leaks exposed by valgrind
+  + High: crmd: Bug 2401 - Improved detection of partially active peers
+  + High: crmd: Bug bnc#578644 - Improve handling of cancelled operations caused by resource cleanup
+  + High: crmd: Bug lf#2379 - Ensure the cluster terminates when the PE is not available
+  + High: crmd: Bug lf#2414 - Prevent use-after-free of the PE connection after it dies
+  + High: crmd: Bug lf#2414 - Prevent use-after-free of the stonith-ng connection
+  + High: crmd: Do not allow the target_rc to be misused by resource agents
+  + High: crmd: Do not ignore action timeouts based on FSA state
+  + High: crmd: Ensure we don't get stuck in S_PENDING if we lose an election to someone that never talks to us again
+  + High: crmd: Fix memory leaks exposed by valgrind
+  + High: crmd: Remove race condition that could lead to multiple instances of a clone being active on a machine
+  + High: crmd: Send erase_status_tag() calls to the local CIB when the DC is fenced, since there is no DC to accept them
+  + High: crmd: Use global fencing notifications to prevent secondary fencing operations of the DC
+  + High: fencing: Account for stonith_get_info() always returning a pointer to the same static buffer
+  + High: PE: Allow startup probes to be disabled - their calculation is a major bottleneck for very large clusters
+  + High: PE: Bug lf#2317 - Avoid needless restart of primitive depending on a clone
+  + High: PE: Bug lf#2358 - Fix master-master anti-colocation
+  + High: PE: Bug lf#2361 - Ensure clones observe mandatory ordering constraints if the LHS is unrunnable
+  + High: PE: Bug lf#2383 - Combine failcounts for all instances of an anonymous clone on a host
+  + High: PE: Bug lf#2384 - Fix intra-set colocation and ordering
+  + High: PE: Bug lf#2403 - Enforce mandatory promotion (colocation) constraints
+  + High: PE: Bug lf#2412 - Correctly locate clone instances by their prefix
+  + High: PE: Correctly implement optional colocation between primitives and clone resources
+  + High: PE: Do not be so quick to pull the trigger on nodes that are coming up
+  + High: PE: Fix memory leaks exposed by valgrind
+  + High: PE: Fix memory leaks reported by valgrind
+  + High: PE: Repair handling of unordered groups in RHS ordering constraints
+  + High: PE: Rewrite native_merge_weights() to avoid a use-after-free
+  + High: PE: Suppress duplicate ordering constraints to achieve orders of magnitude speed increases for large clusters
+  + High: Shell: add support for xml in cli
+  + High: Shell: always reload status if working with the cluster (bnc#590035)
+  + High: Shell: check timeouts also against the default-action-timeout property
+  + High: Shell: Default to using the status section from the live CIB (bnc#592762)
+  + High: Shell: edit multiple meta_attributes sets in resource management (lf#2315)
+  + High: Shell: enable comments (lf#2221)
+  + High: Shell: implement new cibstatus interface and commands (bnc#580492)
+  + High: Shell: improve configure commit (lf#2336)
+  + High: Shell: new cibstatus import command (bnc#585471)
+  + High: Shell: new configure filter command
+  + High: Shell: restore error reporting in options
+  + High: Shell: split shell into modules
+  + High: Shell: support for the utilization element (old patch for the new structure)
+  + High: Shell: update previous node lookup procedure to include the id where necessary
+  + High: Tools: crm_mon - fix memory leaks exposed by valgrind
+
+* Thu Feb 11 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.1-0.1-60b7753f7310.hg
+- Update the tarball from upstream to version 60b7753f7310
+  + First public release of the 1.1 series
+
+* Wed Dec 9 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-5
+- Include patch of changeset 66b7bfd467f3:
+  Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf
+
+* Thu Oct 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-4
+- Include the fixes from CoroSync integration testing
+- Move the resource templates - they are not documentation
+- Ensure documentation is placed in a standard location
+- Exclude documentation that is included elsewhere in the package
+
+- Update the tarball from upstream to version ee19d8e83c2a
+  + High: cib: Correctly clean up when both plaintext and tls remote ports are requested
+  + High: PE: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisons
+  + High: PE: Bug lf#2197 - Allow master instances placement to be influenced by colocation constraints
+  + High: PE: Make sure promote/demote pseudo actions are created correctly
+  + High: PE: Prevent target-role from promoting more than master-max instances
+  + High: ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage
+  + High: ais: Prevent deadlock - don't try to release IPC message if the connection failed
+  + High: cib: For validation errors, send back the full CIB so the client can display the errors
+  + High: cib: Prevent use-after-free for remote plaintext connections
+  + High: crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat
+  + High: Core: Bug lf#2169 - Allow dtd/schema validation to be disabled
+  + High: PE: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change
+  + High: PE: Bug lf#2170 - stop-all-resources option had no effect
+  + High: PE: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which cannot run
+  + High: PE: Disable resource management if stonith-enabled=true and no stonith resources are defined
+  + High: PE: Do not include master score if it would prevent allocation
+  + High: ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms)
+  + High: ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync
+  + High: ais: Gracefully handle changes to the AIS nodeid
+  + High: crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE
+  + High: crmd: Prevent use-after-free with LOG_DEBUG_3
+  + Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672)
+  + Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm
+  + Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild
+  + Medium: PE: Bug lf#2178 - Indicate unmanaged clones
+  + Medium: PE: Bug lf#2180 - Include node information for all failed ops
+  + Medium: PE: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint
+  + Medium: PE: Correctly log resources that would like to start but cannot
+  + Medium: PE: Stop ptest from logging to syslog
+  + Medium: ais: Include version details in plugin name
+  + Medium: crmd: Requery the resource metadata after every start operation
+
+* Fri Oct  9 2009 Fabio M. Di Nitto <fdinitto@redhat.com> - 1.0.5-3
+- rebuilt with new net-snmp
+
+* Fri Aug 21 2009 Tomas Mraz <tmraz@redhat.com> - 1.0.5-2.1
+- rebuilt with new openssl
+
+* Wed Aug 19 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-2
+- Add versioned perl dependency as specified by
+    https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl
+- No longer remove RPATH data, it prevents us finding libperl.so and no other
+  libraries were being hardcoded
+- Compile in support for heartbeat
+- Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements
+  depending on which stacks are supported
+
+* Mon Aug 17 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-1
+- Add dependency on resource-agents
+- Use the version of the configure macro that supplies --prefix, --libdir, etc
+- Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final)
+  + High: Tools: crm_resource - Advertise --move instead of --migrate
+  + Medium: Extra: New node connectivity RA that uses system ping and attrd_updater
+  + Medium: crmd: Note that dc-deadtime can be used to mask the brokenness of some switches
+
+* Tue Aug 11 2009 Ville Skyttä <ville.skytta@iki.fi> - 1.0.5-0.7.c9120a53a6ae.hg
+- Use bzipped upstream tarball.
+
+* Wed Jul  29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.6.c9120a53a6ae.hg
+- Add back missing build auto* dependencies
+- Minor cleanups to the install directive
+
+* Tue Jul  28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.5.c9120a53a6ae.hg
+- Add a leading zero to the revision when alphatag is used
+
+* Tue Jul  28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.4.c9120a53a6ae.hg
+- Incorporate the feedback from the cluster-glue review
+- Realistically, the version is a 1.0.5 pre-release
+- Use the global directive instead of define for variables
+- Use the haclient/hacluster group/user instead of daemon
+- Use the _configure macro
+- Fix install dependencies
+
+* Fri Jul  24 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-3
+- Include an AUTHORS and license file in each package
+- Change the library package name to pacemaker-libs to be more
+  Fedora compliant
+- Remove execute permissions from xml related files
+- Reference the new cluster-glue devel package name
+- Update the tarball from upstream to version c9120a53a6ae
+  + High: PE: Only prevent migration if the clone dependency is stopping/starting on the target node
+  + High: PE: Bug 2160 - Don't shuffle clones due to colocation
+  + High: PE: New implementation of the resource migration (not stop/start) logic
+  + Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options
+  + Medium: PE: Prevent use-of-NULL in find_first_action()
+  + Low: Build: Include licensing files
+
+* Tue Jul 14 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-2
+- Reference authors from the project AUTHORS file instead of listing in description
+- Change Source0 to reference the project's Mercurial repo
+- Cleaned up the summaries and descriptions
+- Incorporate the results of Fedora package self-review
+
+* Tue Jul 14 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-1
+- Initial checkin