SOURCES/0014-Ticket-49873-Contention-on-virtual-attribute-lookup.patch

From 82b21b4b939acd4dfac8c061bf19ad2494680485 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Tue, 15 Jan 2019 11:13:42 +0100
Subject: [PATCH] Ticket 49873 - Contention on virtual attribute lookup

Bug Description:
    During lookup of the virtual attribute table (filter evaluation and returned attributes),
    the lock is acquired in read many times; for example, it is acquired for each targetfilter
    aci and for each evaluated entry.
    Unfortunately the RW lock is expensive and shows up frequently in pstacks.
    The lock exists because the table can be updated, but updates are very rare (addition of a
    new service provider), so general processing is slowed down for exceptional events.

Fix Description:
    The fix is to acquire/release the read lock at the operation level and keep a per-thread
    counter, so that later lookups within the same operation only check that counter.

    SSL initialization does internal searches that access vattr_global_lock, so
    vattr_global_lock_create needs to be called before slapd_do_all_nss_ssl_init.

    Also, 'main' may or may not fork; the initialization of the thread-private variable is
    done in either the child or the parent, depending on whether main forks.

    The leak is fixed by using a destructor callback for the private variable and by calling
    PR_SetThreadPrivate only if there is no private variable yet.

    This patch is the merge of the four 49873 patches done in master.

https://pagure.io/389-ds-base/issue/49873

Reviewed by: Ludwig Krispenz, William Brown, Simon Pichugin (thanks !!)

Platforms tested: F28

Flag Day: no

Doc impact: no
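
To make the locking scheme concrete: each thread keeps a private counter of how many
times it currently holds the map lock in read, so only the outermost acquire takes the
RW lock and only the matching outermost release drops it. Below is a minimal standalone
sketch of that pattern using plain pthreads rather than the NSPR/slapi wrappers; all
names (map_lock, rd_depth_key, map_rdlock, ...) are illustrative, not the patch code
(the real implementation is in vattr.c further down).

/*
 * Minimal sketch of the per-thread recursive read-lock pattern, using plain
 * pthreads instead of the NSPR/slapi wrappers; all names are illustrative.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_key_t rd_depth_key; /* per-thread read-acquire depth */

/* Destructor: frees the per-thread counter when the thread exits. */
static void
rd_depth_free(void *p)
{
    free(p);
}

/* Call once in the process that will serve requests. */
static void
map_lock_tls_init(void)
{
    (void)pthread_key_create(&rd_depth_key, rd_depth_free);
}

/* Return the calling thread's counter, allocating it on first use. */
static int *
rd_depth(void)
{
    int *d = pthread_getspecific(rd_depth_key);
    if (d == NULL) {
        d = calloc(1, sizeof(*d)); /* starts at 0: lock not held */
        pthread_setspecific(rd_depth_key, d);
    }
    return d;
}

static void
map_rdlock(void)
{
    int *d = rd_depth();
    if (*d == 0) {
        pthread_rwlock_rdlock(&map_lock); /* only the outermost call locks */
    }
    (*d)++;
}

static void
map_rd_unlock(void)
{
    int *d = rd_depth();
    if (*d > 0 && --(*d) == 0) {
        pthread_rwlock_unlock(&map_lock); /* only the last release unlocks */
    }
}

int
main(void)
{
    map_lock_tls_init();
    map_rdlock();    /* takes the rwlock */
    map_rdlock();    /* nested: only bumps the counter */
    map_rd_unlock();
    map_rd_unlock(); /* outermost release drops the rwlock */
    return 0;
}
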
---
 ldap/servers/slapd/detach.c     |   9 ++
 ldap/servers/slapd/opshared.c   |   6 ++
 ldap/servers/slapd/proto-slap.h |   5 +
 ldap/servers/slapd/vattr.c      | 164 ++++++++++++++++++++++++++++----
 4 files changed, 167 insertions(+), 17 deletions(-)

diff --git a/ldap/servers/slapd/detach.c b/ldap/servers/slapd/detach.c
index 681e6a701..d5c95a04f 100644
--- a/ldap/servers/slapd/detach.c
+++ b/ldap/servers/slapd/detach.c
@@ -144,6 +144,10 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t *
             }
             break;
         }
+        /* The thread private counter needs to be allocated after the fork;
+         * it is not inherited from the parent process.
+         */
+        vattr_global_lock_create();
 
         /* call this right after the fork, but before closing stdin */
         if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) {
@@ -174,6 +178,11 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t *
 
         g_set_detached(1);
     } else { /* not detaching - call nss/ssl init */
+        /* The thread private counter needs to be allocated after the fork;
+         * it is not inherited from the parent process.
+         */
+        vattr_global_lock_create();
+
         if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) {
             return 1;
         }
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 50b7ae8f6..cf6cdff01 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -244,6 +244,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
     int pr_idx = -1;
     Slapi_DN *orig_sdn = NULL;
     int free_sdn = 0;
+    PRBool vattr_lock_acquired = PR_FALSE;
 
     be_list[0] = NULL;
     referral_list[0] = NULL;
@@ -511,6 +512,8 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
     }
 
     slapi_pblock_set(pb, SLAPI_BACKEND_COUNT, &index);
+    vattr_rdlock();
+    vattr_lock_acquired = PR_TRUE;
 
     if (be) {
         slapi_pblock_set(pb, SLAPI_BACKEND, be);
@@ -969,6 +972,9 @@ free_and_return:
     } else if (be_single) {
         slapi_be_Unlock(be_single);
    }
+    if (vattr_lock_acquired) {
+        vattr_rd_unlock();
+    }
 
 free_and_return_nolock:
     slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc);
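
The vattr_lock_acquired flag above is the usual guard-flag idiom for a function with
several exit labels: record whether the lock was actually taken, and unlock only on the
paths reached after the acquire (the free_and_return_nolock path skips it). A minimal
standalone sketch of that idiom follows; the names are hypothetical, not code from this
patch.

/* Guard-flag idiom: unlock only if this invocation actually locked.
 * Hypothetical names; not code from this patch. */
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static int
do_search(int params_ok, int backend_ok)
{
    int rc = -1;
    bool lock_acquired = false;

    if (!params_ok) {
        goto free_and_return_nolock; /* bail out before the lock is taken */
    }

    pthread_rwlock_rdlock(&table_lock);
    lock_acquired = true;

    if (!backend_ok) {
        goto free_and_return; /* bail out after the lock is taken */
    }
    rc = 0; /* ... normal processing ... */

free_and_return:
    if (lock_acquired) {
        pthread_rwlock_unlock(&table_lock);
    }
free_and_return_nolock:
    return rc;
}

int
main(void)
{
    return do_search(1, 1);
}
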
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 7a429b238..79017e68d 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1409,6 +1409,11 @@ void subentry_create_filter(Slapi_Filter **filter);
  * vattr.c
  */
 void vattr_init(void);
+void vattr_global_lock_create(void);
+void vattr_rdlock();
+void vattr_rd_unlock();
+void vattr_wrlock();
+void vattr_wr_unlock();
 void vattr_cleanup(void);
 
 /*
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index f7c473ab1..852a887ce 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -102,6 +102,16 @@ int vattr_basic_sp_init();
 
 void **statechange_api;
 
+struct _vattr_map
+{
+    Slapi_RWLock *lock;
+    PLHashTable *hashtable; /* Hash table */
+};
+typedef struct _vattr_map vattr_map;
+
+static vattr_map *the_map = NULL;
+static PRUintn thread_private_global_vattr_lock;
+
 /* Housekeeping Functions, called by server startup/shutdown code */
 
 /* Called on server startup, init all structures etc */
@@ -115,7 +125,136 @@ vattr_init()
     vattr_basic_sp_init();
 #endif
 }
+/*
+ * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PR_NewThreadPrivateIndex
+ * The destructor is called each time:
+ *  - PR_SetThreadPrivate is called with a non-NULL private value
+ *  - on thread exit
+ */
+static void
+vattr_global_lock_free(void *ptr)
+{
+    int *nb_acquired = ptr;
+    if (nb_acquired) {
+        slapi_ch_free((void **)&nb_acquired);
+    }
+}
+/* Create a private variable for each individual thread of the current process */
+void
+vattr_global_lock_create()
+{
+    if (PR_NewThreadPrivateIndex(&thread_private_global_vattr_lock, vattr_global_lock_free) != PR_SUCCESS) {
+        slapi_log_err(SLAPI_LOG_ALERT,
+              "vattr_global_lock_create", "Failure to create global lock for virtual attribute !\n");
+        PR_ASSERT(0);
+    }
+}
+static int
+global_vattr_lock_get_acquired_count()
+{
+    int *nb_acquired;
+    nb_acquired = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock);
+    if (nb_acquired == NULL) {
+        /* if it was not initialized, set it to zero */
+        nb_acquired = (int *) slapi_ch_calloc(1, sizeof(int));
+        PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) nb_acquired);
+    }
+    return *nb_acquired;
+}
+static void
+global_vattr_lock_set_acquired_count(int nb_acquired)
+{
+    int *val;
+    val = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock);
+    if (val == NULL) {
+        /* if it was not initialized, set it to zero */
+        val = (int *) slapi_ch_calloc(1, sizeof(int));
+        PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) val);
+    }
+    *val = nb_acquired;
+}
+/* The map lock can be acquired recursively, so only the first rdlock
+ * will acquire the lock.
+ * An optimization acquires it at a high level (op_shared_search), so that
+ * later calls during the operation processing just increase/decrease a counter.
+ */
+void
+vattr_rdlock()
+{
+    int nb_acquire = global_vattr_lock_get_acquired_count();
+
+    if (nb_acquire == 0) {
+        /* The lock was not held, just acquire it */
+        slapi_rwlock_rdlock(the_map->lock);
+    }
+    nb_acquire++;
+    global_vattr_lock_set_acquired_count(nb_acquire);
+
+}
+/* The map lock can be acquired recursively, so only the last unlock
+ * will release the lock.
+ * An optimization acquires it at a high level (op_shared_search), so that
+ * later calls during the operation processing just increase/decrease a counter.
+ */
+void
+vattr_rd_unlock()
+{
+    int nb_acquire = global_vattr_lock_get_acquired_count();
 
+    if (nb_acquire >= 1) {
+        nb_acquire--;
+        if (nb_acquire == 0) {
+            slapi_rwlock_unlock(the_map->lock);
+        }
+        global_vattr_lock_set_acquired_count(nb_acquire);
+    } else {
+        /* this is likely the consequence of a lock acquired in read during an
+         * internal search whose callback updated the map, releasing the read
+         * lock and re-acquiring it in write.
+         * After that update the lock was no longer held, so when completing
+         * the internal search this global read unlock has nothing left to do
+         */
+        slapi_log_err(SLAPI_LOG_DEBUG,
+          "vattr_rd_unlock", "vattr lock no longer acquired in read.\n");
+    }
+}
+
+/* The map lock is acquired in write (updating the map).
+ * There is a possibility that the lock is acquired in write while it is already
+ * held in read by this thread (internal search with an updating callback).
+ * In such a situation, we must abandon the global read lock and acquire it in write
+ */
+void
+vattr_wrlock()
+{
+    int nb_read_acquire = global_vattr_lock_get_acquired_count();
+
+    if (nb_read_acquire) {
+        /* The lock was acquired in read but we need it in write:
+         * release it and set the global vattr_lock counter to 0
+         */
+        slapi_rwlock_unlock(the_map->lock);
+        global_vattr_lock_set_acquired_count(0);
+    }
+    slapi_rwlock_wrlock(the_map->lock);
+}
+/* The map lock is released from a write (updating the map)
+ */
+void
+vattr_wr_unlock()
+{
+    int nb_read_acquire = global_vattr_lock_get_acquired_count();
+
+    if (nb_read_acquire) {
+        /* The lock being acquired in write, the thread-private counter
+         * (that counts the number of times it was acquired in read) should be 0
+         */
+        slapi_log_err(SLAPI_LOG_INFO,
+          "vattr_unlock", "The lock was acquired in write. We should not be here\n");
+        PR_ASSERT(nb_read_acquire == 0);
+    }
+    slapi_rwlock_unlock(the_map->lock);
+}
 /* Called on server shutdown, free all structures, inform service providers that we're going down etc */
 void
 vattr_cleanup()
@@ -1811,15 +1950,6 @@ typedef struct _vattr_map_entry vattr_map_entry;
 
 vattr_map_entry test_entry = {NULL};
 
-struct _vattr_map
-{
-    Slapi_RWLock *lock;
-    PLHashTable *hashtable; /* Hash table */
-};
-typedef struct _vattr_map vattr_map;
-
-static vattr_map *the_map = NULL;
-
 static PRIntn
 vattr_hash_compare_keys(const void *v1, const void *v2)
 {
@@ -1939,11 +2069,11 @@ vattr_map_lookup(const char *type_to_find, vattr_map_entry **result)
     }
 
     /* Get the reader lock */
-    slapi_rwlock_rdlock(the_map->lock);
+    vattr_rdlock();
     *result = (vattr_map_entry *)PL_HashTableLookupConst(the_map->hashtable,
                                                          (void *)basetype);
     /* Release ze lock */
-    slapi_rwlock_unlock(the_map->lock);
+    vattr_rd_unlock();
 
     if (tmp) {
         slapi_ch_free_string(&tmp);
@@ -1962,13 +2092,13 @@ vattr_map_insert(vattr_map_entry *vae)
 {
     PR_ASSERT(the_map);
     /* Get the writer lock */
-    slapi_rwlock_wrlock(the_map->lock);
+    vattr_wrlock();
     /* Insert the thing */
     /* It's illegal to call this function if the entry is already there */
     PR_ASSERT(NULL == PL_HashTableLookupConst(the_map->hashtable, (void *)vae->type_name));
     PL_HashTableAdd(the_map->hashtable, (void *)vae->type_name, (void *)vae);
     /* Unlock and we're done */
-    slapi_rwlock_unlock(the_map->lock);
+    vattr_wr_unlock();
     return 0;
 }
 
@@ -2105,13 +2235,13 @@ schema_changed_callback(Slapi_Entry *e __attribute__((unused)),
                         void *caller_data __attribute__((unused)))
 {
     /* Get the writer lock */
-    slapi_rwlock_wrlock(the_map->lock);
+    vattr_wrlock();
 
     /* go through the list */
     PL_HashTableEnumerateEntries(the_map->hashtable, vattr_map_entry_rebuild_schema, 0);
 
     /* Unlock and we're done */
-    slapi_rwlock_unlock(the_map->lock);
+    vattr_wr_unlock();
 }
 
 
@@ -2131,7 +2261,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
                 objAttrValue *obj;
 
                 if (0 == vattr_map_lookup(type, &map_entry)) {
-                    slapi_rwlock_rdlock(the_map->lock);
+                    vattr_rdlock();
 
                     obj = map_entry->objectclasses;
 
@@ -2148,7 +2278,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
                         obj = obj->pNext;
                     }
 
-                    slapi_rwlock_unlock(the_map->lock);
+                    vattr_rd_unlock();
                 }
 
                 slapi_valueset_free(vs);
-- 
2.17.2