From 26c59fb7d128f83d0b4d35ae9b9d088359103d31 Mon Sep 17 00:00:00 2001
From: Andrew Beekhof <andrew@beekhof.net>
Date: Wed, 12 Apr 2017 20:07:56 +1000
Subject: [PATCH 1/2] PE: Remote: Allow remote nodes that start containers with
 pacemaker remote inside

---
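Notes:

    Without an explicit ip-range-start, the connection resource generated for
    a container bundle used to hard-code addr=localhost, which only works when
    the container runs on the same host as its connection resource. This patch
    stores the magic value "#uname" instead: fix_remote_addr() recognises an
    ocf:pacemaker:remote resource whose addr parameter is "#uname", and both
    container_expand() and the operation-digest code then substitute the uname
    of the node the docker resource was actually allocated to. In addition,
    unpack_status() now iterates unpack_node_loop() until it converges, so the
    state of a remote connection hosted on another remote node can be resolved
    in dependency order.

    Roughly, the generated connection resource carries an nvpair like the
    following before allocation (resource and nvpair ids are illustrative):

        <primitive id="frontend-0" class="ocf" provider="pacemaker" type="remote">
          <instance_attributes id="frontend-0-params">
            <nvpair id="frontend-0-addr" name="addr" value="#uname"/>
          </instance_attributes>
        </primitive>

    Once the docker resource is placed on, say, node1, the placeholder is
    rewritten so the connection dials node1 instead of localhost.
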
 include/crm/pengine/internal.h     |   2 +-
 include/crm/pengine/status.h       |   1 +
 lib/pengine/container.c            |   3 +-
 lib/pengine/unpack.c               | 275 ++++++++++++++++++-------------------
 lib/pengine/utils.c                |  39 ++++++
 pengine/container.c                |   8 ++
 pengine/test10/bug-cl-5247.summary |   4 +-
 7 files changed, 190 insertions(+), 142 deletions(-)

diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 0da02cc..1b6afd1 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -291,6 +291,6 @@ node_t *pe_create_node(const char *id, const char *uname, const char *type,
 bool remote_id_conflict(const char *remote_name, pe_working_set_t *data);
 void common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data);
 resource_t *find_container_child(const char *stem, resource_t * rsc, node_t *node);
-
+bool fix_remote_addr(resource_t * rsc);
 
 #endif
diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index 00b20ab..4cc3919 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -142,6 +142,7 @@ struct node_shared_s {
     gboolean shutdown;
     gboolean expected_up;
     gboolean is_dc;
+    gboolean unpacked;
 
     int num_resources;
     GListPtr running_rsc;       /* resource_t* */
diff --git a/lib/pengine/container.c b/lib/pengine/container.c
index 127b144..d06997a 100644
--- a/lib/pengine/container.c
+++ b/lib/pengine/container.c
@@ -368,7 +368,8 @@ create_remote_resource(
         if(tuple->ipaddr) {
             create_nvp(xml_obj, "addr", tuple->ipaddr);
         } else {
-            create_nvp(xml_obj, "addr", "localhost");
+            // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
+            create_nvp(xml_obj, "addr", "#uname");
         }
 
         if(data->control_port) {
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 6aa51dd..29a1013 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1017,6 +1017,133 @@ get_ticket_state_legacy(gpointer key, gpointer value, gpointer user_data)
     }
 }
 
+static void
+unpack_handle_remote_attrs(node_t *this_node, xmlNode *state, pe_working_set_t * data_set)
+{
+    const char *resource_discovery_enabled = NULL;
+    xmlNode *attrs = NULL;
+    resource_t *rsc = NULL;
+    const char *shutdown = NULL;
+
+    if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
+        return;
+    }
+
+    if ((this_node == NULL) || (is_remote_node(this_node) == FALSE)) {
+        return;
+    }
+    crm_trace("Processing remote node id=%s, uname=%s", this_node->details->id, this_node->details->uname);
+
+    this_node->details->remote_maintenance =
+        crm_atoi(crm_element_value(state, XML_NODE_IS_MAINTENANCE), "0");
+
+    rsc = this_node->details->remote_rsc;
+    if (this_node->details->remote_requires_reset == FALSE) {
+        this_node->details->unclean = FALSE;
+        this_node->details->unseen = FALSE;
+    }
+    attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
+    add_node_attrs(attrs, this_node, TRUE, data_set);
+
+    shutdown = g_hash_table_lookup(this_node->details->attrs, XML_CIB_ATTR_SHUTDOWN);
+    if (shutdown != NULL && safe_str_neq("0", shutdown)) {
+        crm_info("Node %s is shutting down", this_node->details->uname);
+        this_node->details->shutdown = TRUE;
+        if (rsc) {
+            rsc->next_role = RSC_ROLE_STOPPED;
+        }
+    }
+ 
+    if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "standby"))) {
+        crm_info("Node %s is in standby-mode", this_node->details->uname);
+        this_node->details->standby = TRUE;
+    }
+
+    if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "maintenance")) ||
+        (rsc && !is_set(rsc->flags, pe_rsc_managed))) {
+        crm_info("Node %s is in maintenance-mode", this_node->details->uname);
+        this_node->details->maintenance = TRUE;
+    }
+
+    resource_discovery_enabled = g_hash_table_lookup(this_node->details->attrs, XML_NODE_ATTR_RSC_DISCOVERY);
+    if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
+        if (is_baremetal_remote_node(this_node) && is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
+            crm_warn("ignoring %s attribute on baremetal remote node %s, disabling resource discovery requires stonith to be enabled.",
+                     XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
+        } else {
+            /* If we get here, this is either a baremetal node with fencing
+             * enabled, or a container node, where fencing is irrelevant:
+             * container nodes are 'fenced' by recovering the container
+             * resource regardless of whether fencing is enabled. */
+            crm_info("Node %s has resource discovery disabled", this_node->details->uname);
+            this_node->details->rsc_discovery_enabled = FALSE;
+        }
+    }
+}
+
+static bool
+unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set)
+{
+    bool changed = false;
+    xmlNode *lrm_rsc = NULL;
+
+    for (xmlNode *state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) {
+        const char *id = NULL;
+        const char *uname = NULL;
+        node_t *this_node = NULL;
+        bool process = FALSE;
+
+        if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
+            continue;
+        }
+
+        id = crm_element_value(state, XML_ATTR_ID);
+        uname = crm_element_value(state, XML_ATTR_UNAME);
+        this_node = pe_find_node_any(data_set->nodes, id, uname);
+
+        if (this_node == NULL) {
+            crm_info("Node %s is unknown", id);
+            continue;
+
+        } else if (this_node->details->unpacked) {
+            crm_info("Node %s is already processed", id);
+            continue;
+
+        } else if (is_remote_node(this_node) == FALSE && is_set(data_set->flags, pe_flag_stonith_enabled)) {
+            // A redundant test, but preserves the order for regression tests
+            process = TRUE;
+
+        } else if (is_remote_node(this_node)) {
+            resource_t *rsc = this_node->details->remote_rsc;
+
+            if (fence || (rsc && rsc->role == RSC_ROLE_STARTED)) {
+                determine_remote_online_status(data_set, this_node);
+                unpack_handle_remote_attrs(this_node, state, data_set);
+                process = TRUE;
+            }
+
+        } else if (this_node->details->online) {
+            process = TRUE;
+
+        } else if (fence) {
+            process = TRUE;
+        }
+
+        if(process) {
+            crm_trace("Processing lrm resource entries on %shealthy%s node: %s",
+                      fence?"un":"", is_remote_node(this_node)?" remote":"",
+                      this_node->details->uname);
+            changed = TRUE;
+            this_node->details->unpacked = TRUE;
+
+            lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE);
+            lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
+            unpack_lrm_resources(this_node, lrm_rsc, data_set);
+        }
+    }
+    return changed;
+}
+
 /* remove nodes that are down, stopping */
 /* create +ve rsc_to_node constraints between resources and the nodes they are running on */
 /* anything else? */
@@ -1027,7 +1154,6 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set)
     const char *uname = NULL;
 
     xmlNode *state = NULL;
-    xmlNode *lrm_rsc = NULL;
     node_t *this_node = NULL;
 
     crm_trace("Beginning unpack");
@@ -1125,152 +1251,25 @@ unpack_status(xmlNode * status, pe_working_set_t * data_set)
         }
     }
 
-    /* Now that we know all node states, we can safely handle migration ops */
-    for (state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) {
-        if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
-            continue;
-        }
-
-        id = crm_element_value(state, XML_ATTR_ID);
-        uname = crm_element_value(state, XML_ATTR_UNAME);
-        this_node = pe_find_node_any(data_set->nodes, id, uname);
-
-        if (this_node == NULL) {
-            crm_info("Node %s is unknown", id);
-            continue;
-
-        } else if (is_remote_node(this_node)) {
-
-            /* online status of remote node can not be determined until all other
-             * resource status is unpacked. */
-            continue;
-        } else if (this_node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
-            crm_trace("Processing lrm resource entries on healthy node: %s",
-                      this_node->details->uname);
-            lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE);
-            lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
-            unpack_lrm_resources(this_node, lrm_rsc, data_set);
-        }
-    }
-
-    /* now that the rest of the cluster's status is determined
-     * calculate remote-nodes */
-    unpack_remote_status(status, data_set);
 
-    return TRUE;
-}
-
-gboolean
-unpack_remote_status(xmlNode * status, pe_working_set_t * data_set)
-{
-    const char *id = NULL;
-    const char *uname = NULL;
-    const char *shutdown = NULL;
-    resource_t *rsc = NULL;
-
-    GListPtr gIter = NULL;
-
-    xmlNode *state = NULL;
-    xmlNode *lrm_rsc = NULL;
-    node_t *this_node = NULL;
-
-    if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) {
-        crm_trace("no remote nodes to unpack");
-        return TRUE;
+    while(unpack_node_loop(status, FALSE, data_set)) {
+        crm_trace("Start another loop");
     }
 
-    /* get online status */
-    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
-        this_node = gIter->data;
+    // Now catch any nodes we didn't see
+    unpack_node_loop(status, is_set(data_set->flags, pe_flag_stonith_enabled), data_set);
 
-        if ((this_node == NULL) || (is_remote_node(this_node) == FALSE)) {
-            continue;
-        }
-        determine_remote_online_status(data_set, this_node);
-    }
+    for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+        node_t *this_node = gIter->data;
 
-    /* process attributes */
-    for (state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) {
-        const char *resource_discovery_enabled = NULL;
-        xmlNode *attrs = NULL;
-        if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
-            continue;
-        }
-
-        id = crm_element_value(state, XML_ATTR_ID);
-        uname = crm_element_value(state, XML_ATTR_UNAME);
-        this_node = pe_find_node_any(data_set->nodes, id, uname);
-
-        if ((this_node == NULL) || (is_remote_node(this_node) == FALSE)) {
+        if (this_node == NULL) {
             continue;
-        }
-        crm_trace("Processing remote node id=%s, uname=%s", id, uname);
-
-        this_node->details->remote_maintenance =
-            crm_atoi(crm_element_value(state, XML_NODE_IS_MAINTENANCE), "0");
-
-        rsc = this_node->details->remote_rsc;
-        if (this_node->details->remote_requires_reset == FALSE) {
-            this_node->details->unclean = FALSE;
-            this_node->details->unseen = FALSE;
-        }
-        attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
-        add_node_attrs(attrs, this_node, TRUE, data_set);
-
-        shutdown = g_hash_table_lookup(this_node->details->attrs, XML_CIB_ATTR_SHUTDOWN);
-        if (shutdown != NULL && safe_str_neq("0", shutdown)) {
-            crm_info("Node %s is shutting down", this_node->details->uname);
-            this_node->details->shutdown = TRUE;
-            if (rsc) {
-                rsc->next_role = RSC_ROLE_STOPPED;
-            }
-        }
- 
-        if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "standby"))) {
-            crm_info("Node %s is in standby-mode", this_node->details->uname);
-            this_node->details->standby = TRUE;
-        }
-
-        if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "maintenance")) ||
-            (rsc && !is_set(rsc->flags, pe_rsc_managed))) {
-            crm_info("Node %s is in maintenance-mode", this_node->details->uname);
-            this_node->details->maintenance = TRUE;
-        }
-
-        resource_discovery_enabled = g_hash_table_lookup(this_node->details->attrs, XML_NODE_ATTR_RSC_DISCOVERY);
-        if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
-            if (is_baremetal_remote_node(this_node) && is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
-                crm_warn("ignoring %s attribute on baremetal remote node %s, disabling resource discovery requires stonith to be enabled.",
-                    XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
-            } else {
-                /* if we're here, this is either a baremetal node and fencing is enabled,
-                 * or this is a container node which we don't care if fencing is enabled 
-                 * or not on. container nodes are 'fenced' by recovering the container resource
-                 * regardless of whether fencing is enabled. */
-                crm_info("Node %s has resource discovery disabled", this_node->details->uname);
-                this_node->details->rsc_discovery_enabled = FALSE;
-            }
-        }
-    }
-
-    /* process node rsc status */
-    for (state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) {
-        if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) {
+        } else if(is_remote_node(this_node) == FALSE) {
             continue;
-        }
-
-        id = crm_element_value(state, XML_ATTR_ID);
-        uname = crm_element_value(state, XML_ATTR_UNAME);
-        this_node = pe_find_node_any(data_set->nodes, id, uname);
-
-        if ((this_node == NULL) || (is_remote_node(this_node) == FALSE)) {
+        } else if(this_node->details->unpacked) {
             continue;
         }
-        crm_trace("Processing lrm resource entries on healthy remote node: %s",
-                  this_node->details->uname);
-        lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE);
-        lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
-        unpack_lrm_resources(this_node, lrm_rsc, data_set);
+        determine_remote_online_status(data_set, this_node);
     }
 
     return TRUE;
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index bff5a4c..177fb84 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1675,6 +1675,38 @@ filter_parameters(xmlNode * param_set, const char *param_string, bool need_prese
     }
 }
 
+bool fix_remote_addr(resource_t * rsc)
+{
+    const char *name;
+    const char *value;
+    const char *attr_list[] = {
+        XML_ATTR_TYPE,
+        XML_AGENT_ATTR_CLASS,
+        XML_AGENT_ATTR_PROVIDER
+    };
+    const char *value_list[] = {
+        "remote",
+        "ocf",
+        "pacemaker"
+    };
+
+    name = "addr";
+    value = rsc? g_hash_table_lookup(rsc->parameters, name) : NULL;   /* rsc may be NULL */
+    if (safe_str_eq(value, "#uname") == FALSE) {
+        return FALSE;
+    }
+
+    for (int lpc = 0; rsc && lpc < DIMOF(attr_list); lpc++) {
+        name = attr_list[lpc];
+        value = crm_element_value(rsc->xml, attr_list[lpc]);
+        if (safe_str_eq(value, value_list[lpc]) == FALSE) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
 op_digest_cache_t *
 rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
                       pe_working_set_t * data_set)
@@ -1724,6 +1756,13 @@ rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
                                              g_hash_destroy_str, g_hash_destroy_str);
     get_rsc_attributes(local_rsc_params, rsc, node, data_set);
     data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
+
+    if(fix_remote_addr(rsc) && node) {
+        // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
+        crm_xml_add(data->params_all, "addr", node->details->uname);
+        crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname);
+    }
+
     g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
     g_hash_table_foreach(action->extra, hash2field, data->params_all);
     g_hash_table_foreach(rsc->parameters, hash2field, data->params_all);
diff --git a/pengine/container.c b/pengine/container.c
index 3da19aa..8c70f54 100644
--- a/pengine/container.c
+++ b/pengine/container.c
@@ -263,7 +263,15 @@ container_expand(resource_t * rsc, pe_working_set_t * data_set)
     for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
 
+
         CRM_ASSERT(tuple);
+        if(fix_remote_addr(tuple->remote) && tuple->docker->allocated_to) {
+            // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
+            xmlNode *nvpair = get_xpath_object("//nvpair[@name='addr']", tuple->remote->xml, LOG_ERR);
+
+            g_hash_table_replace(tuple->remote->parameters, strdup("addr"), strdup(tuple->docker->allocated_to->details->uname));
+            crm_xml_add(nvpair, "value", tuple->docker->allocated_to->details->uname);
+        }
         if(tuple->ip) {
             tuple->ip->cmds->expand(tuple->ip, data_set);
         }
diff --git a/pengine/test10/bug-cl-5247.summary b/pengine/test10/bug-cl-5247.summary
index 09dc301..91ed8db 100644
--- a/pengine/test10/bug-cl-5247.summary
+++ b/pengine/test10/bug-cl-5247.summary
@@ -90,8 +90,8 @@ Containers: [ pgsr01:prmDB1 ]
  Resource Group: grpStonith2
      prmStonith2-2	(stonith:external/ipmi):	Started bl460g8n3
  Resource Group: master-group
-     vip-master	(ocf::heartbeat:Dummy):	FAILED[ pgsr02 pgsr01 ]
-     vip-rep	(ocf::heartbeat:Dummy):	FAILED[ pgsr02 pgsr01 ]
+     vip-master	(ocf::heartbeat:Dummy):	FAILED[ pgsr01 pgsr02 ]
+     vip-rep	(ocf::heartbeat:Dummy):	FAILED[ pgsr01 pgsr02 ]
  Master/Slave Set: msPostgresql [pgsql]
      Masters: [ pgsr01 ]
      Stopped: [ bl460g8n3 bl460g8n4 ]
-- 
1.8.3.1


From 8abdd82ba85ee384ab78ce1db617f51b692e9df6 Mon Sep 17 00:00:00 2001
From: Andrew Beekhof <andrew@beekhof.net>
Date: Wed, 19 Apr 2017 12:55:08 +1000
Subject: [PATCH 2/2] lrmd: Have pacemaker-remote reap zombies if it is running
 as pid 1

---
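Notes:

    When pacemaker_remoted is the first process in a container it runs as
    pid 1 and therefore inherits every orphaned process in the container;
    unless something calls waitpid() on them they linger as zombies.
    spawn_pidone() below handles this: if getpid() == 1 it forks, lets the
    child carry on as the regular daemon, and turns the parent into a minimal
    init that blocks all signals, renames itself "pcmk-init" in ps output
    (guarded by the new __progname configure check), reaps children on
    SIGCHLD, and exits with the daemon's own exit status once the daemon
    terminates.

    A distilled, standalone sketch of the same reap-as-pid-1 pattern
    (illustrative only; the daemon path is hypothetical):

        #include <signal.h>
        #include <stdlib.h>
        #include <sys/types.h>
        #include <sys/wait.h>
        #include <unistd.h>

        int main(void)
        {
            sigset_t set;
            pid_t daemon_pid;

            if (getpid() != 1) {
                return 0;               /* nothing to do outside a container */
            }

            sigfillset(&set);
            sigprocmask(SIG_BLOCK, &set, NULL); /* deliver signals via sigwait() */

            daemon_pid = fork();
            if (daemon_pid == 0) {
                sigprocmask(SIG_UNBLOCK, &set, NULL);
                setsid();
                /* child: become the real workload */
                execl("/usr/sbin/pacemaker_remoted", "pacemaker_remoted", NULL);
                _exit(1);
            }

            for (;;) {
                int sig = 0, status = 0;
                pid_t pid;

                sigwait(&set, &sig);
                if (sig == SIGCHLD) {
                    while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
                        if (pid == daemon_pid) {
                            /* propagate the daemon's exit code */
                            exit(WIFEXITED(status) ? WEXITSTATUS(status) : 1);
                        }
                    }
                } else if (sig == SIGINT || sig == SIGTERM) {
                    exit(0);
                }
            }
        }

    This mirrors the spawn_pidone()/sigreap() pair added below, minus the
    __progname-based renaming in ps output.
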
 configure.ac |  10 +++++
 lrmd/main.c  | 122 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 131 insertions(+), 1 deletion(-)

diff --git a/configure.ac b/configure.ac
index ee98f9b..5e08e7b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -823,6 +823,16 @@ if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then
    AC_MSG_ERROR(The libxslt developement headers were not found)
 fi
 
+AC_CACHE_CHECK(whether __progname and __progname_full are available,
+                pf_cv_var_progname,
+                AC_TRY_LINK([extern char *__progname, *__progname_full;],
+                    [__progname = "foo"; __progname_full = "foo bar";],
+                    pf_cv_var_progname="yes", pf_cv_var_progname="no"))
+
+if test "$pf_cv_var_progname" = "yes"; then
+    AC_DEFINE(HAVE___PROGNAME,1,[ ])
+fi
+
 dnl ========================================================================
 dnl Structures
 dnl ========================================================================
diff --git a/lrmd/main.c b/lrmd/main.c
index ca8cdf2..412ce24 100644
--- a/lrmd/main.c
+++ b/lrmd/main.c
@@ -21,6 +21,11 @@
 
 #include <glib.h>
 #include <unistd.h>
+#include <signal.h>
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/prctl.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
@@ -391,6 +396,119 @@ void handle_shutdown_nack()
     crm_debug("Ignoring unexpected shutdown nack");
 }
 
+
+static pid_t main_pid = 0;
+static void
+sigdone(void)
+{
+    exit(0);
+}
+
+static void
+sigreap(void)
+{
+    pid_t pid = 0;
+    int status;
+    do {
+        /*
+         * Opinions seem to differ as to what to put here:
+         *  -1, any child process
+         *  0,  any child process whose process group ID is equal to that of the calling process
+         */
+        pid = waitpid(-1, &status, WNOHANG);
+        if(pid == main_pid) {
+            /* Exit when pacemaker-remote exits and use the same return code */
+            if (WIFEXITED(status)) {
+                exit(WEXITSTATUS(status));
+            }
+            exit(1);
+        }
+
+    } while (pid > 0);
+}
+
+static struct {
+    int sig;
+    void (*handler)(void);
+} sigmap[] = {
+    { SIGCHLD, sigreap },
+    { SIGINT,  sigdone },
+};
+
+static void spawn_pidone(int argc, char **argv, char **envp)
+{
+    sigset_t set;
+
+    if (getpid() != 1) {
+        return;
+    }
+
+    sigfillset(&set);
+    sigprocmask(SIG_BLOCK, &set, 0);
+
+    main_pid = fork();
+    switch (main_pid) {
+	case 0:
+            sigprocmask(SIG_UNBLOCK, &set, NULL);
+            setsid();
+            setpgid(0, 0);
+
+            /* Child remains as pacemaker_remoted */
+            return;
+	case -1:
+            perror("fork");
+    }
+
+    /* Parent becomes the reaper of zombie processes */
+    /* Safe to initialize logging now if needed */
+
+#ifdef HAVE___PROGNAME
+    /* Differentiate ourselves in the 'ps' output */
+    {
+        char *p;
+        int i, maxlen;
+        char *LastArgv = NULL;
+        const char *name = "pcmk-init";
+
+        for (i = 0; i < argc; i++) {
+            if (!i || (LastArgv + 1 == argv[i])) {
+                LastArgv = argv[i] + strlen(argv[i]);
+            }
+        }
+        for (i = 0; envp[i] != NULL; i++) {
+            if ((LastArgv + 1) == envp[i]) {
+                LastArgv = envp[i] + strlen(envp[i]);
+            }
+        }
+
+        maxlen = (LastArgv - argv[0]) - 2;
+
+        i = strlen(name);
+        /* We can overwrite individual argv[] arguments */
+        snprintf(argv[0], maxlen, "%s", name);
+
+        /* Now zero out everything else */
+        p = &argv[0][i];
+        while(p < LastArgv)
+            *p++ = '\0';
+        argv[1] = NULL;
+    }
+#endif /* HAVE___PROGNAME */
+
+    while (1) {
+        int sig;
+        size_t i;
+
+        sigwait(&set, &sig);
+        for (i = 0; i < DIMOF(sigmap); i++) {
+            if (sigmap[i].sig == sig) {
+                sigmap[i].handler();
+                break;
+            }
+        }
+    }
+}
+
 /* *INDENT-OFF* */
 static struct crm_option long_options[] = {
     /* Top-level Options */
@@ -410,12 +528,14 @@ static struct crm_option long_options[] = {
 /* *INDENT-ON* */
 
 int
-main(int argc, char **argv)
+main(int argc, char **argv, char **envp)
 {
     int flag = 0;
     int index = 0;
     const char *option = NULL;
 
+    /* If necessary, create PID1 now before any FDs are opened */
+    spawn_pidone(argc, argv, envp);
 
 #ifndef ENABLE_PCMK_REMOTE
     crm_log_preinit("lrmd", argc, argv);
-- 
1.8.3.1