SOURCES/0075-journal-limit-the-number-of-entries-in-the-cache-bas.patch

From de72fa6b0582b95216215cc1400412fe91bc8ba3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
Date: Tue, 22 Jan 2019 16:12:52 +0100
Subject: [PATCH] journal: limit the number of entries in the cache based on
 available memory

This is far from perfect, but should give mostly reasonable values. My
assumption is that if somebody has a few hundred MB of memory, they are
unlikely to have thousands of processes logging. A hundred would already be a
lot. So let's scale the cache size proportionally to the total memory size,
with clamping on both ends.

The formula gives 64 cache entries for each GB of RAM.

(cherry-picked from commit b12a480829c5ca8f4d4fa9cde8716b5f2f12a3ad)

Related: #1664976
---
 src/journal/journald-context.c | 35 +++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

diff --git a/src/journal/journald-context.c b/src/journal/journald-context.c
index ce07de1bfb..0f0dc1de4d 100644
--- a/src/journal/journald-context.c
+++ b/src/journal/journald-context.c
@@ -14,6 +14,7 @@
 #include "journal-util.h"
 #include "journald-context.h"
 #include "process-util.h"
+#include "procfs-util.h"
 #include "string-util.h"
 #include "syslog-util.h"
 #include "unaligned.h"
@@ -58,7 +59,37 @@
 /* Keep at most 16K entries in the cache. (Note though that this limit may be violated if enough streams pin entries in
  * the cache, in which case we *do* permit this limit to be breached. That's safe however, as the number of stream
  * clients itself is limited.) */
-#define CACHE_MAX (16*1024)
+#define CACHE_MAX_FALLBACK 128U
+#define CACHE_MAX_MAX (16*1024U)
+#define CACHE_MAX_MIN 64U
+
+static size_t cache_max(void) {
+        static size_t cached = -1;
+
+        if (cached == (size_t) -1) {
+                uint64_t mem_total;
+                int r;
+
+                r = procfs_memory_get(&mem_total, NULL);
+                if (r < 0) {
+                        log_warning_errno(r, "Cannot query /proc/meminfo for MemTotal: %m");
+                        cached = CACHE_MAX_FALLBACK;
+                } else {
+                        /* Cache entries are usually a few kB, but the process cmdline is controlled by the
+                         * user and can be up to _SC_ARG_MAX, usually 2MB. Let's say that approximately up to
+                         * 1/8th of memory may be used by the cache.
+                         *
+                         * In the common case, this formula gives 64 cache entries for each GB of RAM.
+                         */
+                        long l = sysconf(_SC_ARG_MAX);
+                        assert(l > 0);
+
+                        cached = CLAMP(mem_total / 8 / (uint64_t) l, CACHE_MAX_MIN, CACHE_MAX_MAX);
+                }
+        }
+
+        return cached;
+}
 
 static int client_context_compare(const void *a, const void *b) {
         const ClientContext *x = a, *y = b;
@@ -587,7 +618,7 @@ static int client_context_get_internal(
                 return 0;
         }
 
-        client_context_try_shrink_to(s, CACHE_MAX-1);
+        client_context_try_shrink_to(s, cache_max()-1);
 
         r = client_context_new(s, pid, &c);
         if (r < 0)
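
As a sanity check on the arithmetic above, the formula can be exercised outside of journald. The sketch below is not part of the patch: it substitutes sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGE_SIZE) for the internal procfs_memory_get() helper and redefines CLAMP locally (journald takes it from macro.h), but it computes the same clamped value, so it illustrates the 64-entries-per-GB behaviour and the clamping at CACHE_MAX_MIN and CACHE_MAX_MAX.

/* Standalone sketch of the cache_max() formula from the patch above.
 * Assumptions: total physical pages stand in for /proc/meminfo MemTotal,
 * and CLAMP is redefined locally. Not part of the patch. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define CACHE_MAX_MAX (16*1024U)
#define CACHE_MAX_MIN 64U

#define CLAMP(x, low, high) ((x) < (low) ? (low) : (x) > (high) ? (high) : (x))

static uint64_t cache_max_for(uint64_t mem_total, uint64_t arg_max) {
        /* Up to 1/8th of RAM, divided by the worst-case size of one cache
         * entry (a command line can be up to ARG_MAX bytes). */
        return CLAMP(mem_total / 8 / arg_max, CACHE_MAX_MIN, CACHE_MAX_MAX);
}

int main(void) {
        uint64_t arg_max = (uint64_t) sysconf(_SC_ARG_MAX);  /* usually 2 MB on Linux */
        uint64_t gib = UINT64_C(1024) * 1024 * 1024;

        /* Worked examples: 256 MB clamps up to the minimum, 1 GB gives 64
         * entries (the "64 per GB" from the commit message), and 256 GB
         * reaches the 16K ceiling. */
        uint64_t sizes[] = { 256 * 1024 * 1024ULL, 1 * gib, 4 * gib, 256 * gib };
        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("MemTotal %6" PRIu64 " MB -> cache_max %" PRIu64 "\n",
                       sizes[i] / (1024 * 1024), cache_max_for(sizes[i], arg_max));

        /* And for this machine, using total physical pages as a stand-in
         * for procfs_memory_get(). */
        uint64_t mem_total = (uint64_t) sysconf(_SC_PHYS_PAGES) * (uint64_t) sysconf(_SC_PAGE_SIZE);
        printf("this machine: %" PRIu64 " MB -> cache_max %" PRIu64 "\n",
               mem_total / (1024 * 1024), cache_max_for(mem_total, arg_max));
        return 0;
}

Compiled with, say, cc -o cache-max-demo cache-max-demo.c, the four hypothetical sizes print 64, 64, 256 and 16384 respectively, matching the clamping described in the commit message.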