From 3cc901acea41632df0c342639c4292c10bd90964 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Tue, 18 Dec 2018 15:39:14 +0530
Subject: [PATCH 487/493] mem-pool: add tracking of mem_pool that requested the
allocation

This renames the current 'struct mem_pool' to 'struct mem_pool_shared'.
The mem_pool_shared structures are allocated globally and are not
specific to particular objects.

A new 'struct mem_pool' gets allocated when mem_pool_new() is called. It
points to the mem_pool_shared that handles the actual allocation
requests. The 'struct mem_pool' is only used to account for the objects
that the caller requested and freed (see the sketch below).

All of these changes will be used to track the memory pools that a
glusterfs_ctx_t is consuming, so that statedumps can be generated per
context.
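
For illustration only (not part of the change itself), a minimal
caller-side sketch built from the functions touched by this patch;
'struct data' and example_usage() are made-up names:

    /* Sketch: the caller now gets a small per-caller accounting object
     * ('struct mem_pool') while the allocations themselves are served
     * by the global 'struct mem_pool_shared' it points to. */
    #include "mem-pool.h"

    struct data {
            int id;
    };

    static void
    example_usage (void)
    {
            /* selects a shared pool by size and allocates the new
             * accounting structure */
            struct mem_pool *pool = mem_pool_new_fn (sizeof (struct data),
                                                     64, "data");
            if (!pool)
                    return;

            /* zeroed object, served from the shared pool */
            struct data *d = mem_get0 (pool);
            if (d)
                    mem_put (d);   /* hands the object back to the pool */

            /* frees only the accounting structure; the shared pool and
             * its cached objects stay around */
            mem_pool_destroy (pool);
    }
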
> Updates: #307
> Change-Id: I6355d3f0251c928e0bbfc71be3431307c6f3a3da
> Signed-off-by: Niels de Vos <ndevos@redhat.com>
> Reviewed-on: https://review.gluster.org/18073
> Smoke: Gluster Build System <jenkins@build.gluster.org>
> CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
> Reviewed-by: Amar Tumballi <amarts@redhat.com>
> Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
> (cherry picked from commit 2645e730b79b44fc035170657e43bb52f3e855c5)
Change-Id: I6cce6284e4553c6ca59a90ad124c23c950db3148
BUG: 1648893
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Change-Id: I363d71152b1dd17eca53d9c327fcdf2f26c0fb61
Reviewed-on: https://code.engineering.redhat.com/gerrit/158930
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/mem-pool.c | 69 +++++++++++++++++++++++++++-----------------
libglusterfs/src/mem-pool.h | 20 +++++++++++--
libglusterfs/src/mem-types.h | 2 --
3 files changed, 60 insertions(+), 31 deletions(-)
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index 8ff261c..a8a9347 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -14,15 +14,6 @@
#include <stdlib.h>
#include <stdarg.h>
-#define GF_MEM_POOL_LIST_BOUNDARY (sizeof(struct list_head))
-#define GF_MEM_POOL_PTR (sizeof(struct mem_pool*))
-#define GF_MEM_POOL_PAD_BOUNDARY (GF_MEM_POOL_LIST_BOUNDARY + GF_MEM_POOL_PTR + sizeof(int))
-#define mem_pool_chunkhead2ptr(head) ((head) + GF_MEM_POOL_PAD_BOUNDARY)
-#define mem_pool_ptr2chunkhead(ptr) ((ptr) - GF_MEM_POOL_PAD_BOUNDARY)
-#define is_mem_chunk_in_use(ptr) (*ptr == 1)
-#define mem_pool_from_ptr(ptr) ((ptr) + GF_MEM_POOL_LIST_BOUNDARY)
-
-#define GLUSTERFS_ENV_MEM_ACCT_STR "GLUSTERFS_DISABLE_MEM_ACCT"
#include "unittest/unittest.h"
#include "libglusterfs-messages.h"
@@ -380,7 +371,7 @@ static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head pool_threads;
static pthread_mutex_t pool_free_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head pool_free_threads;
-static struct mem_pool pools[NPOOLS];
+static struct mem_pool_shared pools[NPOOLS];
static size_t pool_list_size;
#if !defined(GF_DISABLE_MEMPOOL)
@@ -689,6 +680,8 @@ mem_pool_new_fn (unsigned long sizeof_type,
unsigned long count, char *name)
{
unsigned int i;
+ struct mem_pool *new = NULL;
+ struct mem_pool_shared *pool = NULL;
if (!sizeof_type) {
gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
@@ -698,13 +691,27 @@ mem_pool_new_fn (unsigned long sizeof_type,
for (i = 0; i < NPOOLS; ++i) {
if (sizeof_type <= AVAILABLE_SIZE(pools[i].power_of_two)) {
- return &pools[i];
+ pool = &pools[i];
+ break;
}
}
- gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
- LG_MSG_INVALID_ARG, "invalid argument");
- return NULL;
+ if (!pool) {
+ gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
+ LG_MSG_INVALID_ARG, "invalid argument");
+ return NULL;
+ }
+
+ new = GF_CALLOC (sizeof (struct mem_pool), 1, gf_common_mt_mem_pool);
+ if (!new)
+ return NULL;
+
+ new->sizeof_type = sizeof_type;
+ new->count = count;
+ new->name = name;
+ new->pool = pool;
+
+ return new;
}
void*
@@ -721,7 +728,7 @@ mem_get0 (struct mem_pool *mem_pool)
ptr = mem_get(mem_pool);
if (ptr) {
- memset (ptr, 0, AVAILABLE_SIZE(mem_pool->power_of_two));
+ memset (ptr, 0, AVAILABLE_SIZE(mem_pool->pool->power_of_two));
}
return ptr;
@@ -784,7 +791,7 @@ mem_get_from_pool (struct mem_pool *mem_pool)
return NULL;
}
- pt_pool = &pool_list->pools[mem_pool->power_of_two-POOL_SMALLEST];
+ pt_pool = &pool_list->pools[mem_pool->pool->power_of_two-POOL_SMALLEST];
(void) pthread_spin_lock (&pool_list->lock);
@@ -802,7 +809,7 @@ mem_get_from_pool (struct mem_pool *mem_pool)
} else {
(void) pthread_spin_unlock (&pool_list->lock);
GF_ATOMIC_INC (pt_pool->parent->allocs_stdc);
- retval = malloc (1 << mem_pool->power_of_two);
+ retval = malloc (1 << mem_pool->pool->power_of_two);
}
}
@@ -810,7 +817,7 @@ mem_get_from_pool (struct mem_pool *mem_pool)
retval->magic = GF_MEM_HEADER_MAGIC;
retval->next = NULL;
retval->pool_list = pool_list;
- retval->power_of_two = mem_pool->power_of_two;
+ retval->power_of_two = mem_pool->pool->power_of_two;
}
return retval;
@@ -821,9 +828,10 @@ void *
mem_get (struct mem_pool *mem_pool)
{
#if defined(GF_DISABLE_MEMPOOL)
- return GF_CALLOC (1, AVAILABLE_SIZE (mem_pool->power_of_two),
+ return GF_CALLOC (1, AVAILABLE_SIZE (mem_pool->pool->power_of_two),
gf_common_mt_mem_pool);
#else
+ per_thread_pool_list_t *pool_list;
pooled_obj_hdr_t *retval;
if (!mem_pool) {
@@ -832,11 +840,22 @@ mem_get (struct mem_pool *mem_pool)
return NULL;
}
+ pool_list = mem_get_pool_list ();
+ if (!pool_list || pool_list->poison) {
+ return NULL;
+ }
+
retval = mem_get_from_pool (mem_pool);
+
if (!retval) {
return NULL;
}
+ retval->magic = GF_MEM_HEADER_MAGIC;
+ retval->pool = mem_pool;
+ retval->pool_list = pool_list;
+ retval->power_of_two = mem_pool->pool->power_of_two;
+
return retval + 1;
#endif /* GF_DISABLE_MEMPOOL */
}
@@ -886,14 +905,12 @@ mem_put (void *ptr)
void
mem_pool_destroy (struct mem_pool *pool)
{
- if (!pool)
- return;
+ GF_FREE (pool);
/*
- * Pools are now permanent, so this does nothing. Yes, this means we
- * can keep allocating from a pool after calling mem_destroy on it, but
- * that's kind of OK. All of the objects *in* the pool will eventually
- * be freed via the pool-sweeper thread, and this way we don't have to
- * add a lot of reference-counting complexity.
+ * Pools are now permanent, so the mem_pool->pool is kept around. All
+ * of the objects *in* the pool will eventually be freed via the
+ * pool-sweeper thread, and this way we don't have to add a lot of
+ * reference-counting complexity.
*/
}
diff --git a/libglusterfs/src/mem-pool.h b/libglusterfs/src/mem-pool.h
index dfe1f9a..057d957 100644
--- a/libglusterfs/src/mem-pool.h
+++ b/libglusterfs/src/mem-pool.h
@@ -204,18 +204,31 @@ out:
return dup_mem;
}
+/* kind of 'header' for the actual mem_pool_shared structure, this might make
+ * it possible to dump some more details in a statedump */
+struct mem_pool {
+ unsigned long sizeof_type;
+ unsigned long count;
+ char *name;
+
+ struct mem_pool_shared *pool;
+};
+
typedef struct pooled_obj_hdr {
unsigned long magic;
struct pooled_obj_hdr *next;
struct per_thread_pool_list *pool_list;
unsigned int power_of_two;
+
+ /* track the pool that was used to request this object */
+ struct mem_pool *pool;
} pooled_obj_hdr_t;
#define AVAILABLE_SIZE(p2) ((1 << (p2)) - sizeof(pooled_obj_hdr_t))
typedef struct per_thread_pool {
- /* This never changes, so doesn't need a lock. */
- struct mem_pool *parent;
+ /* the pool that was used to request this allocation */
+ struct mem_pool_shared *parent;
/* Everything else is protected by our own lock. */
pooled_obj_hdr_t *hot_list;
pooled_obj_hdr_t *cold_list;
@@ -243,7 +256,8 @@ typedef struct per_thread_pool_list {
per_thread_pool_t pools[1];
} per_thread_pool_list_t;
-struct mem_pool {
+/* actual pool structure, shared between different mem_pools */
+struct mem_pool_shared {
unsigned int power_of_two;
/*
* Updates to these are *not* protected by a global lock, so races
diff --git a/libglusterfs/src/mem-types.h b/libglusterfs/src/mem-types.h
index 85cb5d2..64d0e90 100644
--- a/libglusterfs/src/mem-types.h
+++ b/libglusterfs/src/mem-types.h
@@ -61,9 +61,7 @@ enum gf_common_mem_types_ {
gf_common_mt_char,
gf_common_mt_rbthash_table_t,
gf_common_mt_rbthash_bucket,
-#if defined(GF_DISABLE_MEMPOOL)
gf_common_mt_mem_pool,
-#endif
gf_common_mt_long,
gf_common_mt_rpcsvc_auth_list,
gf_common_mt_rpcsvc_t,
--
1.8.3.1