SOURCES/005-glib-priorities.patch

From 65170ffd5fa10cbda176b3f88e817d534b6331d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
Date: Wed, 29 Aug 2018 15:49:58 +0200
Subject: [PATCH 1/2] Low: mainloop: make it possible to specify server's
 priority in mainloop

---
 include/crm/common/mainloop.h | 24 +++++++++++++
 lib/common/mainloop.c         | 82 +++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 103 insertions(+), 3 deletions(-)

diff --git a/include/crm/common/mainloop.h b/include/crm/common/mainloop.h
index 85da1cd..2cfb63e 100644
--- a/include/crm/common/mainloop.h
+++ b/include/crm/common/mainloop.h
@@ -79,6 +79,30 @@ struct ipc_client_callbacks {
 qb_ipcs_service_t *mainloop_add_ipc_server(const char *name, enum qb_ipc_type type,
                                            struct qb_ipcs_service_handlers *callbacks);
 
+/*!
+ * \brief Start a server-side API end-point, hooked into the internal event loop
+ *
+ * \param[in] name       name of the IPC end-point ("address" for the client)
+ * \param[in] type       selects libqb's IPC back-end (or use #QB_IPC_NATIVE)
+ * \param[in] callbacks  defines libqb's IPC service-level handlers
+ * \param[in] prio       priority relative to other events handled in the
+ *                       abstract handling loop; use #QB_LOOP_MED when unsure
+ *
+ * \return libqb's opaque handle to the created service abstraction
+ *
+ * \note For backward compatibility, do not use this function when you keep
+ *       \p prio at #QB_LOOP_MED; stick with #mainloop_add_ipc_server (which
+ *       has exactly those semantics) instead, because once you link against
+ *       this new symbol, you can no longer freely downgrade the library.
+ *
+ * \note The intended effect is only fully achieved when run-time linked
+ *       against a patched libqb: https://github.com/ClusterLabs/libqb/pull/352
+ */
+qb_ipcs_service_t *mainloop_add_ipc_server_with_prio(const char *name,
+                                                    enum qb_ipc_type type,
+                                                    struct qb_ipcs_service_handlers *callbacks,
+                                                    enum qb_loop_priority prio);
+
 void mainloop_del_ipc_server(qb_ipcs_service_t * server);
 
 mainloop_io_t *mainloop_add_ipc_client(const char *name, int priority, size_t max_size,
diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c
index 18f7014..17e69f0 100644
--- a/lib/common/mainloop.c
+++ b/lib/common/mainloop.c
@@ -509,6 +509,65 @@ gio_poll_destroy(gpointer data)
     }
 }
 
+/*!
+ * \internal
+ * \brief Convert libqb's poll priority into GLib's one
+ *
+ * \param[in] prio  libqb's poll priority (#QB_LOOP_MED assumed as fallback)
+ *
+ * \return  best matching GLib's priority
+ */
+static gint
+conv_prio_libqb2glib(enum qb_loop_priority prio)
+{
+    gint ret = G_PRIORITY_DEFAULT;
+    switch (prio) {
+        case QB_LOOP_LOW:
+            ret = G_PRIORITY_LOW;
+            break;
+        case QB_LOOP_HIGH:
+            ret = G_PRIORITY_HIGH;
+            break;
+        default:
+            crm_trace("Invalid libqb's loop priority %d, assuming QB_LOOP_MED",
+                      prio);
+            /* fall-through */
+        case QB_LOOP_MED:
+            break;
+    }
+    return ret;
+}
+
+/*!
+ * \internal
+ * \brief Convert libqb's poll priority to rate limiting spec
+ *
+ * \param[in] prio  libqb's poll priority (#QB_LOOP_MED assumed as fallback)
+ *
+ * \return  best matching rate limiting spec
+ */
+static enum qb_ipcs_rate_limit
+conv_libqb_prio2ratelimit(enum qb_loop_priority prio)
+{
+    /* this is an inversion of what libqb's qb_ipcs_request_rate_limit does */
+    enum qb_ipcs_rate_limit ret = QB_IPCS_RATE_NORMAL;
+    switch (prio) {
+        case QB_LOOP_LOW:
+            ret = QB_IPCS_RATE_SLOW;
+            break;
+        case QB_LOOP_HIGH:
+            ret = QB_IPCS_RATE_FAST;
+            break;
+        default:
+            crm_trace("Invalid libqb's loop priority %d, assuming QB_LOOP_MED",
+                      prio);
+            /* fall-through */
+        case QB_LOOP_MED:
+            break;
+    }
+    return ret;
+}
+
 static int32_t
 gio_poll_dispatch_update(enum qb_loop_priority p, int32_t fd, int32_t evts,
                          void *data, qb_ipcs_dispatch_fn_t fn, int32_t add)
@@ -555,8 +614,8 @@ gio_poll_dispatch_update(enum qb_loop_priority p, int32_t fd, int32_t evts,
     adaptor->p = p;
     adaptor->is_used++;
     adaptor->source =
-        g_io_add_watch_full(channel, G_PRIORITY_DEFAULT, evts, gio_read_socket, adaptor,
-                            gio_poll_destroy);
+        g_io_add_watch_full(channel, conv_prio_libqb2glib(p), evts,
+                            gio_read_socket, adaptor, gio_poll_destroy);
 
     /* Now that mainloop now holds a reference to channel,
      * thanks to g_io_add_watch_full(), drop ours from g_io_channel_unix_new().
@@ -640,7 +699,15 @@ pick_ipc_type(enum qb_ipc_type requested)
 
 qb_ipcs_service_t *
 mainloop_add_ipc_server(const char *name, enum qb_ipc_type type,
-                        struct qb_ipcs_service_handlers * callbacks)
+                        struct qb_ipcs_service_handlers *callbacks)
+{
+    return mainloop_add_ipc_server_with_prio(name, type, callbacks, QB_LOOP_MED);
+}
+
+qb_ipcs_service_t *
+mainloop_add_ipc_server_with_prio(const char *name, enum qb_ipc_type type,
+                                  struct qb_ipcs_service_handlers *callbacks,
+                                  enum qb_loop_priority prio)
 {
     int rc = 0;
     qb_ipcs_service_t *server = NULL;
@@ -652,6 +719,15 @@ mainloop_add_ipc_server(const char *name, enum qb_ipc_type type,
     crm_client_init();
     server = qb_ipcs_create(name, 0, pick_ipc_type(type), callbacks);
 
+    if (server == NULL) {
+        crm_err("Could not create %s IPC server: %s (%d)", name, pcmk_strerror(rc), rc);
+        return NULL;
+    }
+
+    if (prio != QB_LOOP_MED) {
+        qb_ipcs_request_rate_limit(server, conv_libqb_prio2ratelimit(prio));
+    }
+
 #ifdef HAVE_IPCS_GET_BUFFER_SIZE
     /* All clients should use at least ipc_buffer_max as their buffer size */
     qb_ipcs_enforce_buffer_size(server, crm_ipc_default_buffer_size());
-- 
1.8.3.1

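For illustration only (not part of the patch above): a minimal sketch of how a daemon could register its IPC end-point at elevated priority through the new mainloop_add_ipc_server_with_prio() symbol. The "my-daemon" end-point name, the handler functions and the my_daemon_ipc_init() wrapper are hypothetical; the libqb handler slots (connection_accept, msg_process) and the pacemaker/libqb calls themselves are existing API.

#include <sys/types.h>
#include <qb/qbipcs.h>
#include <crm/crm.h>
#include <crm/common/mainloop.h>

/* hypothetical handlers: accept every connection, ignore request payloads */
static int32_t
my_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return 0;                   /* 0 = accept the connection */
}

static int32_t
my_ipc_dispatch(qb_ipcs_connection_t *c, void *data, size_t size)
{
    return 0;                   /* a real daemon would handle the request here */
}

static struct qb_ipcs_service_handlers my_ipc_callbacks = {
    .connection_accept = my_ipc_accept,
    .msg_process       = my_ipc_dispatch,
};

void
my_daemon_ipc_init(qb_ipcs_service_t **ipcs)
{
    /* QB_LOOP_HIGH maps to G_PRIORITY_HIGH (see conv_prio_libqb2glib above),
     * so this server's socket events win over G_PRIORITY_DEFAULT sources;
     * with QB_LOOP_MED this call behaves exactly like mainloop_add_ipc_server() */
    *ipcs = mainloop_add_ipc_server_with_prio("my-daemon", QB_IPC_NATIVE,
                                              &my_ipc_callbacks, QB_LOOP_HIGH);
    if (*ipcs == NULL) {
        crm_err("Could not create my-daemon IPC server");
    }
}
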
From 3401f25994e8cc059898550082f9b75f2d07f103 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= <jpokorny@redhat.com>
Date: Wed, 29 Aug 2018 15:50:57 +0200
Subject: [PATCH 2/2] High: stonith-ng's function cannot be blocked with CIB
 updates forever

In high-load (or high-rate-config-change) scenarios, pacemaker-fenced
could effectively be denied service when flooded with CIB update
notifications.  Mitigate that by elevating the priority of the server's
own listening interface in the mainloop.  At worst, it will fence with
a slightly outdated configuration, which appears less harmful than not
carrying out the fencing at all.  Other daemons might deserve the same
treatment eventually.

Prerequisites:
- https://github.com/ClusterLabs/libqb/pull/352
  (libqb used to contain a bug whereby one particular step in the
  procedure for accepting an initial client connection at the server
  was carried out at a hard-coded -- and hence possibly lower than
  competing events' -- priority, which backfires exactly in this case
  once the pacemaker part is fixed, i.e. once the priority of fenced's
  API end-point is elevated so that it won't get consistently
  overridden by a non-socket-based event source/trigger)

How to verify:
- mocked/based -N (see commit adding that module to mocked based daemon)
---
 lib/common/utils.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/common/utils.c b/lib/common/utils.c
index 758eb1b..d1c3e26 100644
--- a/lib/common/utils.c
+++ b/lib/common/utils.c
@@ -1031,7 +1031,8 @@ attrd_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers
 void
 stonith_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb)
 {
-    *ipcs = mainloop_add_ipc_server("stonith-ng", QB_IPC_NATIVE, cb);
+    *ipcs = mainloop_add_ipc_server_with_prio("stonith-ng", QB_IPC_NATIVE, cb,
+                                              QB_LOOP_HIGH);
 
     if (*ipcs == NULL) {
         crm_err("Failed to create fencer: exiting and inhibiting respawn.");
-- 
1.8.3.1
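
To make the reasoning above concrete, here is a minimal, self-contained GLib program (an illustration written for this write-up, not taken from either patch; only standard GLib calls are used, the high_prio_work/default_prio_work names are made up). It demonstrates the mechanism the fix relies on: as long as a G_PRIORITY_HIGH source has work pending, G_PRIORITY_DEFAULT sources are not dispatched at all, which is why moving fenced's IPC end-point to QB_LOOP_HIGH keeps it serviceable while default-priority CIB notifications keep arriving.

#include <glib.h>
#include <stdio.h>

static GMainLoop *loop = NULL;
static int high_remaining = 3;

static gboolean
high_prio_work(gpointer data)
{
    /* stands in for fenced's elevated IPC socket: stays ready until done */
    printf("high-priority source dispatched, %d round(s) left\n", --high_remaining);
    return high_remaining > 0;
}

static gboolean
default_prio_work(gpointer data)
{
    /* stands in for a CIB-notification-like source: only runs once no
     * higher-priority source is ready any more */
    printf("default-priority source finally dispatched\n");
    g_main_loop_quit(loop);
    return FALSE;
}

int
main(void)
{
    loop = g_main_loop_new(NULL, FALSE);

    /* same mapping as conv_prio_libqb2glib():
     * QB_LOOP_HIGH -> G_PRIORITY_HIGH, QB_LOOP_MED -> G_PRIORITY_DEFAULT */
    g_idle_add_full(G_PRIORITY_DEFAULT, default_prio_work, NULL, NULL);
    g_idle_add_full(G_PRIORITY_HIGH, high_prio_work, NULL, NULL);

    g_main_loop_run(loop);
    g_main_loop_unref(loop);
    return 0;
}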