From 6a1af7917a86c0b7218d876852cd9c0d5027edeb Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 29 Apr 2021 09:29:44 +0200
Subject: [PATCH 2/2] Issue 4667 - incorrect accounting of readers in vattr
rwlock (#4732)
Bug description:
The fix #2932 (Contention on virtual attribute lookup) reduced
contention on vattr acquiring vattr lock at the operation
level rather than at the attribute level (filter and
returned attr).
The fix #2932 is invalid: it can lead to a deadlock scenario
(3 threads). A vattr writer (new cos/schema) blocks
an update thread that holds DB pages and later needs vattr.
Then if a reader (holding vattr) blocks vattr writer and later
needs the same DB pages, there is a deadlock.
The decisions are:
- revert #2932 (this issue)
- Skip contention if deployment has no vattr #4678
- reduce contention with new approaches
(COW and/or cache vattr struct in each thread)
no issue opened
Fix description:
The fix reverts #2932
relates: https://github.com/389ds/389-ds-base/issues/4667
Reviewed by: William Brown, Simon Pichugin
Platforms tested: F33
---
ldap/servers/slapd/detach.c | 8 --
ldap/servers/slapd/opshared.c | 6 --
ldap/servers/slapd/proto-slap.h | 5 --
ldap/servers/slapd/vattr.c | 147 ++------------------------------
4 files changed, 8 insertions(+), 158 deletions(-)
diff --git a/ldap/servers/slapd/detach.c b/ldap/servers/slapd/detach.c
index d5c95a04f..8270b83c5 100644
--- a/ldap/servers/slapd/detach.c
+++ b/ldap/servers/slapd/detach.c
@@ -144,10 +144,6 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t *
}
break;
}
- /* The thread private counter needs to be allocated after the fork
- * it is not inherited from parent process
- */
- vattr_global_lock_create();
/* call this right after the fork, but before closing stdin */
if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) {
@@ -178,10 +174,6 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t *
g_set_detached(1);
} else { /* not detaching - call nss/ssl init */
- /* The thread private counter needs to be allocated after the fork
- * it is not inherited from parent process
- */
- vattr_global_lock_create();
if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) {
return 1;
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 05b9a1553..a1630e134 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -266,7 +266,6 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int pr_idx = -1;
Slapi_DN *orig_sdn = NULL;
int free_sdn = 0;
- PRBool vattr_lock_acquired = PR_FALSE;
be_list[0] = NULL;
referral_list[0] = NULL;
@@ -538,8 +537,6 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
slapi_pblock_set(pb, SLAPI_BACKEND_COUNT, &index);
- vattr_rdlock();
- vattr_lock_acquired = PR_TRUE;
if (be) {
slapi_pblock_set(pb, SLAPI_BACKEND, be);
@@ -1007,9 +1004,6 @@ free_and_return:
} else if (be_single) {
slapi_be_Unlock(be_single);
}
- if (vattr_lock_acquired) {
- vattr_rd_unlock();
- }
free_and_return_nolock:
slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc);
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 13b41ffa4..50f1ff75a 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1415,11 +1415,6 @@ void subentry_create_filter(Slapi_Filter **filter);
* vattr.c
*/
void vattr_init(void);
-void vattr_global_lock_create(void);
-void vattr_rdlock();
-void vattr_rd_unlock();
-void vattr_wrlock();
-void vattr_wr_unlock();
void vattr_cleanup(void);
/*
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index eef444270..b9a4ee965 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -110,7 +110,6 @@ struct _vattr_map
typedef struct _vattr_map vattr_map;
static vattr_map *the_map = NULL;
-static PRUintn thread_private_global_vattr_lock;
/* Housekeeping Functions, called by server startup/shutdown code */
@@ -125,136 +124,6 @@ vattr_init()
vattr_basic_sp_init();
#endif
}
-/*
- * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PR_NewThreadPrivateIndex
- * It is called each time:
- * - PR_SetThreadPrivate is call with a not NULL private value
- * - on thread exit
- */
-static void
-vattr_global_lock_free(void *ptr)
-{
- int *nb_acquired = ptr;
- if (nb_acquired) {
- slapi_ch_free((void **)&nb_acquired);
- }
-}
-/* Create a private variable for each individual thread of the current process */
-void
-vattr_global_lock_create()
-{
- if (PR_NewThreadPrivateIndex(&thread_private_global_vattr_lock, vattr_global_lock_free) != PR_SUCCESS) {
- slapi_log_err(SLAPI_LOG_ALERT,
- "vattr_global_lock_create", "Failure to create global lock for virtual attribute !\n");
- PR_ASSERT(0);
- }
-}
-static int
-global_vattr_lock_get_acquired_count()
-{
- int *nb_acquired;
- nb_acquired = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock);
- if (nb_acquired == NULL) {
- /* if it was not initialized set it to zero */
- nb_acquired = (int *) slapi_ch_calloc(1, sizeof(int));
- PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) nb_acquired);
- }
- return *nb_acquired;
-}
-static void
-global_vattr_lock_set_acquired_count(int nb_acquired)
-{
- int *val;
- val = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock);
- if (val == NULL) {
- /* if it was not initialized set it to zero */
- val = (int *) slapi_ch_calloc(1, sizeof(int));
- PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) val);
- }
- *val = nb_acquired;
-}
-/* The map lock can be acquired recursively. So only the first rdlock
- * will acquire the lock.
- * A optimization acquires it at high level (op_shared_search), so that
- * later calls during the operation processing will just increase/decrease a counter.
- */
-void
-vattr_rdlock()
-{
- int nb_acquire = global_vattr_lock_get_acquired_count();
-
- if (nb_acquire == 0) {
- /* The lock was not held just acquire it */
- slapi_rwlock_rdlock(the_map->lock);
- }
- nb_acquire++;
- global_vattr_lock_set_acquired_count(nb_acquire);
-
-}
-/* The map lock can be acquired recursively. So only the last unlock
- * will release the lock.
- * A optimization acquires it at high level (op_shared_search), so that
- * later calls during the operation processing will just increase/decrease a counter.
- */
-void
-vattr_rd_unlock()
-{
- int nb_acquire = global_vattr_lock_get_acquired_count();
-
- if (nb_acquire >= 1) {
- nb_acquire--;
- if (nb_acquire == 0) {
- slapi_rwlock_unlock(the_map->lock);
- }
- global_vattr_lock_set_acquired_count(nb_acquire);
- } else {
- /* this is likely the consequence of lock acquire in read during an internal search
- * but the search callback updated the map and release the readlock and acquired
- * it in write.
- * So after the update the lock was no longer held but when completing the internal
- * search we release the global read lock, that now has nothing to do
- */
- slapi_log_err(SLAPI_LOG_DEBUG,
- "vattr_rd_unlock", "vattr lock no longer acquired in read.\n");
- }
-}
-
-/* The map lock is acquired in write (updating the map)
- * It exists a possibility that lock is acquired in write while it is already
- * hold in read by this thread (internal search with updating callback)
- * In such situation, the we must abandon the read global lock and acquire in write
- */
-void
-vattr_wrlock()
-{
- int nb_read_acquire = global_vattr_lock_get_acquired_count();
-
- if (nb_read_acquire) {
- /* The lock was acquired in read but we need it in write
- * release it and set the global vattr_lock counter to 0
- */
- slapi_rwlock_unlock(the_map->lock);
- global_vattr_lock_set_acquired_count(0);
- }
- slapi_rwlock_wrlock(the_map->lock);
-}
-/* The map lock is release from a write write (updating the map)
- */
-void
-vattr_wr_unlock()
-{
- int nb_read_acquire = global_vattr_lock_get_acquired_count();
-
- if (nb_read_acquire) {
- /* The lock being acquired in write, the private thread counter
- * (that count the number of time it was acquired in read) should be 0
- */
- slapi_log_err(SLAPI_LOG_INFO,
- "vattr_unlock", "The lock was acquired in write. We should not be here\n");
- PR_ASSERT(nb_read_acquire == 0);
- }
- slapi_rwlock_unlock(the_map->lock);
-}
/* Called on server shutdown, free all structures, inform service providers that we're going down etc */
void
vattr_cleanup()
@@ -2069,11 +1938,11 @@ vattr_map_lookup(const char *type_to_find, vattr_map_entry **result)
}
/* Get the reader lock */
- vattr_rdlock();
+ slapi_rwlock_rdlock(the_map->lock);
*result = (vattr_map_entry *)PL_HashTableLookupConst(the_map->hashtable,
(void *)basetype);
/* Release ze lock */
- vattr_rd_unlock();
+ slapi_rwlock_unlock(the_map->lock);
if (tmp) {
slapi_ch_free_string(&tmp);
@@ -2092,13 +1961,13 @@ vattr_map_insert(vattr_map_entry *vae)
{
PR_ASSERT(the_map);
/* Get the writer lock */
- vattr_wrlock();
+ slapi_rwlock_wrlock(the_map->lock);
/* Insert the thing */
/* It's illegal to call this function if the entry is already there */
PR_ASSERT(NULL == PL_HashTableLookupConst(the_map->hashtable, (void *)vae->type_name));
PL_HashTableAdd(the_map->hashtable, (void *)vae->type_name, (void *)vae);
/* Unlock and we're done */
- vattr_wr_unlock();
+ slapi_rwlock_unlock(the_map->lock);
return 0;
}
@@ -2235,13 +2104,13 @@ schema_changed_callback(Slapi_Entry *e __attribute__((unused)),
void *caller_data __attribute__((unused)))
{
/* Get the writer lock */
- vattr_wrlock();
+ slapi_rwlock_wrlock(the_map->lock);
/* go through the list */
PL_HashTableEnumerateEntries(the_map->hashtable, vattr_map_entry_rebuild_schema, 0);
/* Unlock and we're done */
- vattr_wr_unlock();
+ slapi_rwlock_unlock(the_map->lock);
}
@@ -2261,7 +2130,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
objAttrValue *obj;
if (0 == vattr_map_lookup(type, &map_entry)) {
- vattr_rdlock();
+ slapi_rwlock_rdlock(the_map->lock);
obj = map_entry->objectclasses;
@@ -2278,7 +2147,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
obj = obj->pNext;
}
- vattr_rd_unlock();
+ slapi_rwlock_unlock(the_map->lock);
}
slapi_valueset_free(vs);
--
2.31.1