diff --git a/.389-ds-base.metadata b/.389-ds-base.metadata new file mode 100644 index 0000000..3a558a8 --- /dev/null +++ b/.389-ds-base.metadata @@ -0,0 +1 @@ +debdbca81fda1651bf73e504ca8bc8c1b48a3b59 SOURCES/389-ds-base-1.3.6.1.tar.bz2 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a990b51 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/389-ds-base-1.3.6.1.tar.bz2 diff --git a/README.md b/README.md deleted file mode 100644 index 0e7897f..0000000 --- a/README.md +++ /dev/null @@ -1,5 +0,0 @@ -The master branch has no content - -Look at the c7 branch if you are working with CentOS-7, or the c4/c5/c6 branch for CentOS-4, 5 or 6 - -If you find this file in a distro specific branch, it means that no content has been checked in yet diff --git a/SOURCES/0000-Ticket-49164-Change-NS-to-acq-rel-semantics-for-atom.patch b/SOURCES/0000-Ticket-49164-Change-NS-to-acq-rel-semantics-for-atom.patch new file mode 100644 index 0000000..a779d35 --- /dev/null +++ b/SOURCES/0000-Ticket-49164-Change-NS-to-acq-rel-semantics-for-atom.patch @@ -0,0 +1,67 @@ +From 70230bf894d9c0150dca8dc6fccc2712187f7b86 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Mon, 13 Mar 2017 13:29:43 +1000 +Subject: [PATCH 1/5] Ticket 49164 - Change NS to acq-rel semantics for atomics + +Bug Description: We were using seq_cst to guarantee our operations +as a poc. Changing to acq/rel allows us the same guarantees, but +with less overheads. + +Fix Description: Change the barrier type. + +https://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync + +https://pagure.io/389-ds-base/issue/49164 + +Author: wibrown + +Review by: mreynolds (Thanks!) + +(cherry picked from commit b1b0574d2cdb012ab206999ed51f08d3340386ce) +--- + src/nunc-stans/ns/ns_thrpool.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/src/nunc-stans/ns/ns_thrpool.c b/src/nunc-stans/ns/ns_thrpool.c +index 744749b..a867b39 100644 +--- a/src/nunc-stans/ns/ns_thrpool.c ++++ b/src/nunc-stans/ns/ns_thrpool.c +@@ -167,7 +167,7 @@ ns_thrpool_is_shutdown(struct ns_thrpool_t *tp) + { + /* We need to barrier this somehow? */ + int32_t result = 0; +- __atomic_load(&(tp->shutdown), &result, __ATOMIC_SEQ_CST); ++ __atomic_load(&(tp->shutdown), &result, __ATOMIC_ACQUIRE); + return result; + } + +@@ -176,7 +176,7 @@ ns_thrpool_is_event_shutdown(struct ns_thrpool_t *tp) + { + /* We need to barrier this somehow? */ + int32_t result = 0; +- __atomic_load(&(tp->shutdown_event_loop), &result, __ATOMIC_SEQ_CST); ++ __atomic_load(&(tp->shutdown_event_loop), &result, __ATOMIC_ACQUIRE); + return result; + } + +@@ -1402,7 +1402,7 @@ ns_thrpool_destroy(struct ns_thrpool_t *tp) + #endif + if (tp) { + /* Set the flag to shutdown the event loop. */ +- __atomic_add_fetch(&(tp->shutdown_event_loop), 1, __ATOMIC_SEQ_CST); ++ __atomic_add_fetch(&(tp->shutdown_event_loop), 1, __ATOMIC_RELEASE); + + /* Finish the event queue wakeup job. This has the + * side effect of waking up the event loop thread, which +@@ -1491,7 +1491,7 @@ ns_thrpool_shutdown(struct ns_thrpool_t *tp) + } + /* Set the shutdown flag. This will cause the worker + * threads to exit after they finish all remaining work. */ +- __atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_SEQ_CST); ++ __atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_RELEASE); + + /* Wake up the idle worker threads so they can exit. 
*/ + pthread_mutex_lock(&(tp->work_q_lock)); +-- +2.9.3 + diff --git a/SOURCES/0001-Issue-49170-sync-plugin-thread-count-not-handled-cor.patch b/SOURCES/0001-Issue-49170-sync-plugin-thread-count-not-handled-cor.patch new file mode 100644 index 0000000..28d65ec --- /dev/null +++ b/SOURCES/0001-Issue-49170-sync-plugin-thread-count-not-handled-cor.patch @@ -0,0 +1,71 @@ +From a00917eec0bcef75180eaf4dd9b360519b9e2644 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 14 Mar 2017 14:35:05 -0400 +Subject: [PATCH 2/5] Issue 49170 - sync plugin thread count not handled + correctly + +Bug Description: If sync repl connections get aborted the thread_count is + not properly updated which leads to the server hanging + on shutdown. + +Fix Description: When connections get aborted we still need to shutdown + the result thread cleanly: remove the req, update thread + count, etc. + +https://pagure.io/389-ds-base/issue/49170 + +Reviewed by: nhosoi(Thanks!) + +(cherry picked from commit 770fcf4349ccf9e07ff0e1cf0d6991927ec9ba75) +--- + ldap/servers/plugins/sync/sync_persist.c | 17 ++++++++--------- + 1 file changed, 8 insertions(+), 9 deletions(-) + +diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c +index d0c8da2..667a529 100644 +--- a/ldap/servers/plugins/sync/sync_persist.c ++++ b/ldap/servers/plugins/sync/sync_persist.c +@@ -548,16 +548,16 @@ sync_send_results( void *arg ) + slapi_pblock_get(req->req_pblock, SLAPI_CONNECTION, &conn); + if (NULL == conn) { + slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, +- "sync_send_results - conn=%" NSPRIu64 " op=%d Null connection - aborted\n", +- connid, opid); +- return; ++ "sync_send_results - conn=%" NSPRIu64 " op=%d Null connection - aborted\n", ++ connid, opid); ++ goto done; + } + conn_acq_flag = sync_acquire_connection (conn); + if (conn_acq_flag) { + slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, +- "sync_send_results - conn=%" NSPRIu64 " op=%d Could not acquire the connection - aborted\n", +- connid, opid); +- return; ++ "sync_send_results - conn=%" NSPRIu64 " op=%d Could not acquire the connection - aborted\n", ++ connid, opid); ++ goto done; + } + + PR_Lock( sync_request_list->sync_req_cvarlock ); +@@ -658,15 +658,14 @@ sync_send_results( void *arg ) + } + } + PR_Unlock( sync_request_list->sync_req_cvarlock ); +- sync_remove_request( req ); + + /* indicate the end of search */ +- + sync_release_connection(req->req_pblock, conn, op, conn_acq_flag == 0); + ++done: ++ sync_remove_request( req ); + PR_DestroyLock ( req->req_lock ); + req->req_lock = NULL; +- + slapi_ch_free((void **) &req->req_pblock ); + slapi_ch_free((void **) &req->req_orig_base ); + slapi_filter_free(req->req_filter, 1); +-- +2.9.3 + diff --git a/SOURCES/0002-Ticket-49165-pw_verify-did-not-handle-external-auth.patch b/SOURCES/0002-Ticket-49165-pw_verify-did-not-handle-external-auth.patch new file mode 100644 index 0000000..bbc9972 --- /dev/null +++ b/SOURCES/0002-Ticket-49165-pw_verify-did-not-handle-external-auth.patch @@ -0,0 +1,174 @@ +From 22f1ff8d87a7daf9fbbe2ddfbd195a6bfdae1cd6 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Tue, 14 Mar 2017 14:01:33 +1000 +Subject: [PATCH 3/5] Ticket 49165 pw_verify did not handle external auth + +Bug Description: During the change to improve sasl and simple bind, +we externalised the backend selection outside of do_bind. In an +auto_bind scenario however, this mean the be was null, causing the +dn to always be invalidated. 
+ +Fix Description: Add a pw_validate_be_dn function, that correctly +checks if we are anonymous, a real be dn, or rootdn. This then allows +the correct authentication of autobinds. + +https://pagure.io/389-ds-base/issue/49165 + +Author: wibrown + +Review by: mreynolds (Thanks!) + +(cherry picked from commit 8dbfff1ff4152afb018490886a612c448ea2a1b0) +--- + ldap/servers/slapd/bind.c | 9 +++++-- + ldap/servers/slapd/dn.c | 5 ++++ + ldap/servers/slapd/pw_verify.c | 57 +++++++++++++++++++++++++++++++++++++-- + ldap/servers/slapd/pw_verify.h | 1 + + ldap/servers/slapd/slapi-plugin.h | 9 +++++++ + 5 files changed, 77 insertions(+), 4 deletions(-) + +diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c +index b4bb363..5c4fada 100644 +--- a/ldap/servers/slapd/bind.c ++++ b/ldap/servers/slapd/bind.c +@@ -656,7 +656,12 @@ do_bind( Slapi_PBlock *pb ) + /* We could be serving multiple database backends. Select the appropriate one */ + /* pw_verify_be_dn will select the backend we need for us. */ + +- rc = pw_verify_be_dn(pb, &referral); ++ if (auto_bind) { ++ /* We have no password material. We should just check who we are binding as. */ ++ rc = pw_validate_be_dn(pb, &referral); ++ } else { ++ rc = pw_verify_be_dn(pb, &referral); ++ } + + if (rc == SLAPI_BIND_NO_BACKEND) { + send_nobackend_ldap_result( pb ); +@@ -715,7 +720,7 @@ do_bind( Slapi_PBlock *pb ) + * + */ + slapi_pblock_get(pb, SLAPI_BACKEND, &be); +- if (!slapi_be_is_flag_set(be, SLAPI_BE_FLAG_REMOTE_DATA)) { ++ if (!isroot && !slapi_be_is_flag_set(be, SLAPI_BE_FLAG_REMOTE_DATA)) { + bind_target_entry = get_entry(pb, slapi_sdn_get_ndn(sdn)); + myrc = slapi_check_account_lock(pb, bind_target_entry, pw_response_requested, 1, 1); + if (1 == myrc) { /* account is locked */ +diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c +index d043f2a..fa3909f 100644 +--- a/ldap/servers/slapd/dn.c ++++ b/ldap/servers/slapd/dn.c +@@ -1738,6 +1738,11 @@ slapi_dn_isroot( const char *dn ) + return( rc ); + } + ++int32_t ++slapi_sdn_isroot(const Slapi_DN *sdn) { ++ return slapi_dn_isroot(slapi_sdn_get_ndn(sdn)); ++} ++ + int + slapi_is_rootdse( const char *dn ) + { +diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c +index 93e5ff3..529bb83 100644 +--- a/ldap/servers/slapd/pw_verify.c ++++ b/ldap/servers/slapd/pw_verify.c +@@ -88,8 +88,61 @@ pw_verify_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + return rc; + } + ++/* ++ * Resolve the dn we have been requested to bind with and verify it's ++ * valid, and has a backend. ++ * ++ * We are checking: ++ * * is this anonymous? ++ * * is this the rootdn? ++ * * is this a real dn, which associates to a real backend. ++ * ++ * This is used in SASL autobinds, so we need to handle this validation. ++ */ ++ + int +-pw_verify_dn() ++pw_validate_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + { +- return LDAP_OPERATIONS_ERROR; ++ int rc = 0; ++ Slapi_Backend *be = NULL; ++ Slapi_DN *pb_sdn; ++ struct berval *cred; ++ ber_tag_t method; ++ ++ ++ slapi_pblock_get(pb, SLAPI_BIND_TARGET_SDN, &pb_sdn); ++ slapi_pblock_get(pb, SLAPI_BIND_CREDENTIALS, &cred); ++ slapi_pblock_get(pb, SLAPI_BIND_METHOD, &method); ++ ++ if (pb_sdn != NULL || cred != NULL) { ++ return LDAP_OPERATIONS_ERROR; ++ } ++ ++ if (*referral) { ++ return SLAPI_BIND_REFERRAL; ++ } ++ ++ /* We need a slapi_sdn_isanon? 
*/ ++ if (method == LDAP_AUTH_SIMPLE && cred->bv_len == 0) { ++ return SLAPI_BIND_ANONYMOUS; ++ } ++ ++ if (slapi_sdn_isroot(pb_sdn)) { ++ /* This is a real identity */ ++ return SLAPI_BIND_SUCCESS; ++ } ++ ++ if (slapi_mapping_tree_select(pb, &be, referral, NULL, 0) != LDAP_SUCCESS) { ++ return SLAPI_BIND_NO_BACKEND; ++ } ++ slapi_be_Unlock(be); ++ ++ slapi_pblock_set(pb, SLAPI_BACKEND, be); ++ slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database); ++ /* Make sure the result handlers are setup */ ++ set_db_default_result_handlers(pb); ++ ++ /* The backend associated with this identity is real. */ ++ ++ return SLAPI_BIND_SUCCESS; + } +diff --git a/ldap/servers/slapd/pw_verify.h b/ldap/servers/slapd/pw_verify.h +index fc34fd1..5137027 100644 +--- a/ldap/servers/slapd/pw_verify.h ++++ b/ldap/servers/slapd/pw_verify.h +@@ -11,5 +11,6 @@ + + int pw_verify_root_dn(const char *dn, const Slapi_Value *cred); + int pw_verify_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral); ++int pw_validate_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral); + + #endif /* _SLAPD_PW_VERIFY_H_ */ +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index b223f65..1bd8fc8 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -3800,6 +3800,15 @@ int slapi_dn_isparent( const char *parentdn, const char *childdn ); + int slapi_dn_isroot( const char *dn ); + + /** ++ * Determines if an SDN is the root DN. ++ * ++ * \param sdn The DN to check ++ * \return \c 1 if the DN is the root DN. ++ * \return \c 0 if the DN is not the root DN. ++ */ ++int32_t slapi_sdn_isroot( const Slapi_DN *sdn ); ++ ++/** + * Checks if a DN is the backend suffix. + * + * \param pb A parameter block with the backend set. +-- +2.9.3 + diff --git a/SOURCES/0003-Issue-49169-Fix-covscan-errors.patch b/SOURCES/0003-Issue-49169-Fix-covscan-errors.patch new file mode 100644 index 0000000..d989fed --- /dev/null +++ b/SOURCES/0003-Issue-49169-Fix-covscan-errors.patch @@ -0,0 +1,266 @@ +From 97f09918ef370c3be5aa64dcfeb3bb21e762f90d Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 14 Mar 2017 20:23:07 -0400 +Subject: [PATCH 4/5] Issue 49169 - Fix covscan errors + +src/libsds/bpt/map.c - resource leak +ldap/servers/slapd/vattr.c - resource leak +ldap/servers/slapd/task.c: resource leaks +ldap/servers/slapd/str2filter.c - resource leak +ldap/servers/slapd/pw.c - resource leak +ldap/servers/slapd/back-ldbm/import-threads.c - resource leak +ldap/servers/plugins/uiduniq/uid.c:536 - resource leak +ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c:164 - resource leak +ldap/servers/plugins/linkedattrs/linked_attrs.c:1672 - resource leak +ldap/servers/plugins/addn/addn.c:419 +ldap/servers/slapd/ssl.c - dead code +ldap/servers/slapd/index_subsystem.c - null dereference + +https://pagure.io/389-ds-base/issue/49169 + +Reviewed by: nkinder & wibrown(Thanks!!) 
+ +(cherry picked from commit c75126be1edece121826e336141f9b0b9c0bddfd) +--- + ldap/servers/plugins/addn/addn.c | 4 +++- + ldap/servers/plugins/linkedattrs/linked_attrs.c | 2 ++ + ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 1 + + ldap/servers/plugins/uiduniq/uid.c | 6 +++++- + ldap/servers/slapd/back-ldbm/import-threads.c | 1 + + ldap/servers/slapd/index_subsystem.c | 27 +++++++++++++------------ + ldap/servers/slapd/pw.c | 1 + + ldap/servers/slapd/pw_verify.c | 1 - + ldap/servers/slapd/ssl.c | 8 +++----- + ldap/servers/slapd/str2filter.c | 1 + + ldap/servers/slapd/task.c | 3 +-- + ldap/servers/slapd/vattr.c | 6 +++--- + src/libsds/sds/bpt/map.c | 1 + + 13 files changed, 36 insertions(+), 26 deletions(-) + +diff --git a/ldap/servers/plugins/addn/addn.c b/ldap/servers/plugins/addn/addn.c +index 3abc112..6ba7833 100644 +--- a/ldap/servers/plugins/addn/addn.c ++++ b/ldap/servers/plugins/addn/addn.c +@@ -415,7 +415,9 @@ addn_start(Slapi_PBlock *pb) + domain = slapi_entry_attr_get_charptr(plugin_entry, "addn_default_domain"); + + if (domain == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, plugin_name, "addn_start: CRITICAL: No default domain in configuration, you must set addn_default_domain!\n"); ++ slapi_log_err(SLAPI_LOG_ERR, plugin_name, ++ "addn_start: CRITICAL: No default domain in configuration, you must set addn_default_domain!\n"); ++ slapi_ch_free((void**)&config); + return SLAPI_PLUGIN_FAILURE; + } + +diff --git a/ldap/servers/plugins/linkedattrs/linked_attrs.c b/ldap/servers/plugins/linkedattrs/linked_attrs.c +index b5adb21..d046542 100644 +--- a/ldap/servers/plugins/linkedattrs/linked_attrs.c ++++ b/ldap/servers/plugins/linkedattrs/linked_attrs.c +@@ -1669,6 +1669,8 @@ linked_attrs_mod_post_op(Slapi_PBlock *pb) + /* Bail out if the plug-in close function was just called. 
*/ + if (!slapi_plugin_running(pb)) { + linked_attrs_unlock(); ++ slapi_mod_free(&next_mod); ++ slapi_mods_free(&smods); + return SLAPI_PLUGIN_SUCCESS; + } + +diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c +index 1b3e555..b228700 100644 +--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c ++++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c +@@ -161,6 +161,7 @@ pbkdf2_sha256_pw_enc(const char *pwd) + */ + if ( pbkdf2_sha256_hash(hash + PBKDF2_ITERATIONS_LENGTH + PBKDF2_SALT_LENGTH, PBKDF2_HASH_LENGTH, &passItem, &saltItem, PBKDF2_ITERATIONS) != SECSuccess ) { + slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Could not generate pbkdf2_sha256_hash!\n"); ++ slapi_ch_free_string(&enc); + return NULL; + } + +diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c +index ae9320e..46554b2 100644 +--- a/ldap/servers/plugins/uiduniq/uid.c ++++ b/ldap/servers/plugins/uiduniq/uid.c +@@ -533,7 +533,11 @@ create_filter(const char **attributes, const struct berval *value, const char *r + + /* Place value in filter */ + if (ldap_quote_filter_value(value->bv_val, value->bv_len, +- fp, max-fp, &valueLen)) { slapi_ch_free((void**)&filter); return 0; } ++ fp, max-fp, &valueLen)) { ++ slapi_ch_free((void**)&filter); ++ slapi_ch_free((void**)&attrLen); ++ return 0; ++ } + fp += valueLen; + + strcpy(fp, ")"); +diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c +index 5b81427..087103b 100644 +--- a/ldap/servers/slapd/back-ldbm/import-threads.c ++++ b/ldap/servers/slapd/back-ldbm/import-threads.c +@@ -1647,6 +1647,7 @@ upgradedn_producer(void *param) + } + e = slapi_str2entry_ext(normdn, NULL, data.dptr, + SLAPI_STR2ENTRY_USE_OBSOLETE_DNFORMAT); ++ slapi_ch_free_string(&rdn); + } + } else { + e = +diff --git a/ldap/servers/slapd/index_subsystem.c b/ldap/servers/slapd/index_subsystem.c +index 57d4f58..8f9fe6d 100644 +--- a/ldap/servers/slapd/index_subsystem.c ++++ b/ldap/servers/slapd/index_subsystem.c +@@ -185,27 +185,28 @@ static int index_subsys_index_matches_filter(indexEntry *index, Slapi_Filter *f) + */ + int index_subsys_assign_filter_decoders(Slapi_PBlock *pb) + { +- int rc; ++ int rc = 0; + Slapi_Filter *f; + char *subsystem = "index_subsys_assign_filter_decoders"; + char logbuf[ 1024 ]; + + /* extract the filter */ + slapi_pblock_get(pb, SLAPI_SEARCH_FILTER, &f); ++ if (f) { ++ if ( loglevel_is_set( LDAP_DEBUG_FILTER )) { ++ logbuf[0] = '\0'; ++ slapi_log_err(SLAPI_LOG_DEBUG, subsystem, "before: %s\n", ++ slapi_filter_to_string(f, logbuf, sizeof(logbuf))); ++ } + +- if ( loglevel_is_set( LDAP_DEBUG_FILTER ) && NULL != f ) { +- logbuf[0] = '\0'; +- slapi_log_err(SLAPI_LOG_DEBUG, subsystem, "before: %s\n", +- slapi_filter_to_string(f, logbuf, sizeof(logbuf))); +- } +- +- /* find decoders */ +- rc = index_subsys_assign_decoders(f); ++ /* find decoders */ ++ rc = index_subsys_assign_decoders(f); + +- if ( loglevel_is_set( LDAP_DEBUG_FILTER ) && NULL != f ) { +- logbuf[0] = '\0'; +- slapi_log_err(SLAPI_LOG_DEBUG, subsystem, " after: %s\n", +- slapi_filter_to_string(f, logbuf, sizeof(logbuf))); ++ if ( loglevel_is_set( LDAP_DEBUG_FILTER )) { ++ logbuf[0] = '\0'; ++ slapi_log_err(SLAPI_LOG_DEBUG, subsystem, " after: %s\n", ++ slapi_filter_to_string(f, logbuf, sizeof(logbuf))); ++ } + } + + return rc; +diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c +index 215c9eb..378d148 100644 +--- a/ldap/servers/slapd/pw.c ++++ b/ldap/servers/slapd/pw.c +@@ -1512,6 
+1512,7 @@ check_trivial_words (Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Value **vals, char + ep = sp + strlen(sp); + ep = ldap_utf8prevn(sp, ep, toklen); + if (!ep || (sp >= ep)) { ++ slapi_ch_free_string(&sp); + continue; + } + /* See if the password contains the value */ +diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c +index 529bb83..a9fd9ec 100644 +--- a/ldap/servers/slapd/pw_verify.c ++++ b/ldap/servers/slapd/pw_verify.c +@@ -103,7 +103,6 @@ pw_verify_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + int + pw_validate_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + { +- int rc = 0; + Slapi_Backend *be = NULL; + Slapi_DN *pb_sdn; + struct berval *cred; +diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c +index f35b3f1..050e7b5 100644 +--- a/ldap/servers/slapd/ssl.c ++++ b/ldap/servers/slapd/ssl.c +@@ -1418,12 +1418,10 @@ slapd_ssl_init() + errorCode = PR_GetError(); + slapd_SSL_error("Failed to retrieve SSL " + "configuration information (" +- SLAPI_COMPONENT_NAME_NSPR " error %d - %s): " ++ SLAPI_COMPONENT_NAME_NSPR " error %d - not found): " + "nssslSessionTimeout: %s ", +- errorCode, slapd_pr_strerror(errorCode), +- (val ? "found" : "not found")); +- slapi_ch_free((void **) &val); +- slapi_ch_free((void **) &ciphers); ++ errorCode, slapd_pr_strerror(errorCode)); ++ slapi_ch_free((void **)&ciphers); + freeConfigEntry( &entry ); + return -1; + } +diff --git a/ldap/servers/slapd/str2filter.c b/ldap/servers/slapd/str2filter.c +index ebd5c5d..744c93f 100644 +--- a/ldap/servers/slapd/str2filter.c ++++ b/ldap/servers/slapd/str2filter.c +@@ -344,6 +344,7 @@ str2simple( char *str , int unescape_filter) + *endp = '\0'; + rc = _parse_ext_filter(str, extp, &f->f_mr_type, &f->f_mr_oid, &f->f_mr_dnAttrs); + if (rc) { ++ slapi_filter_free(f, 1); + return NULL; /* error */ + } else { + f->f_choice = LDAP_FILTER_EXTENDED; +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index ad52e9d..eabd517 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -2389,7 +2389,6 @@ task_fixup_tombstones_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, + slapi_task_finish(task, *returncode); + slapi_ch_array_free(base); + slapi_ch_free((void **)&task_data); +- return SLAPI_DSE_CALLBACK_ERROR; + } + + done: +@@ -2507,9 +2506,9 @@ task_des2aes(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, + error: + if (rc == SLAPI_DSE_CALLBACK_ERROR){ + slapi_ch_array_free(bases); +- slapi_ch_array_free(suffix); + slapi_ch_free((void **)&task_data); + } ++ slapi_ch_array_free(suffix); + return rc; + } + +diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c +index 34665de..599b54e 100644 +--- a/ldap/servers/slapd/vattr.c ++++ b/ldap/servers/slapd/vattr.c +@@ -753,10 +753,10 @@ slapi_vattr_values_get_sp(vattr_context *c, + } + if (use_local_ctx) { + /* slapi_pblock_destroy cleans up pb_vattr_context, as well */ +- slapi_pblock_destroy(local_pb); +- } else { +- vattr_context_ungrok(&c); ++ slapi_pblock_destroy(local_pb); ++ ctx->pb = NULL; + } ++ vattr_context_ungrok(&ctx); + return rc; + } + +diff --git a/src/libsds/sds/bpt/map.c b/src/libsds/sds/bpt/map.c +index 4205aa5..2c3468b 100644 +--- a/src/libsds/sds/bpt/map.c ++++ b/src/libsds/sds/bpt/map.c +@@ -18,6 +18,7 @@ sds_bptree_map_nodes(sds_bptree_instance *binst, sds_bptree_node *root, sds_resu + sds_bptree_node_list *tail = cur; + + if (binst == NULL) { ++ sds_free(cur); + return SDS_NULL_POINTER; + } + +-- +2.9.3 + diff --git 
a/SOURCES/0004-Ticket-49171-Nunc-Stans-incorrectly-reports-a-timeou.patch b/SOURCES/0004-Ticket-49171-Nunc-Stans-incorrectly-reports-a-timeou.patch new file mode 100644 index 0000000..4f04068 --- /dev/null +++ b/SOURCES/0004-Ticket-49171-Nunc-Stans-incorrectly-reports-a-timeou.patch @@ -0,0 +1,96 @@ +From 645e628626f4a3d4b662c067584b4efc6b5c70c5 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Wed, 15 Mar 2017 10:46:38 +1000 +Subject: [PATCH 5/5] Ticket 49171 - Nunc Stans incorrectly reports a timeout + +Bug Description: In some cases nunc-stans would incorrectly report +and IO timeout. + +Fix Description: Make the io output type volatile to prevent re-arranging +of the code. We then make timeout exclusive to read, write and signal. +Finally, we add an extra check into ns_handle_pr_read_ready that +asserts we truly have an idle timeout. It issues a warning now +instead if this scenario occurs, rather than closing the +connection. + +https://pagure.io/389-ds-base/issue/49171 + +Author: wibrown + +Review by: mreynolds (thanks!) + +(cherry picked from commit c8ce1b32cc365174c8280111c2d55bba45f7949f) +--- + ldap/servers/slapd/daemon.c | 15 +++++++++++---- + src/nunc-stans/ns/ns_event_fw_event.c | 28 ++++++++++++++++------------ + 2 files changed, 27 insertions(+), 16 deletions(-) + +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index a37c8c6..6b3331d 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -1970,11 +1970,18 @@ ns_handle_pr_read_ready(struct ns_job_t *job) + connection_release_nolock_ext(c, 1); /* release ref acquired when job was added */ + if (CONN_NEEDS_CLOSING(c)) { + ns_handle_closure_nomutex(c); ++ /* We shouldn't need the c_idletimeout check here because of how libevent works. ++ * consider testing this and removing it oneday. ++ */ + } else if (NS_JOB_IS_TIMER(ns_job_get_output_type(job))) { +- /* idle timeout */ +- disconnect_server_nomutex_ext(c, c->c_connid, -1, +- SLAPD_DISCONNECT_IDLE_TIMEOUT, EAGAIN, +- 0 /* do not schedule closure, do it next */); ++ if (c->c_idletimeout > 0) { ++ /* idle timeout */ ++ disconnect_server_nomutex_ext(c, c->c_connid, -1, ++ SLAPD_DISCONNECT_IDLE_TIMEOUT, EAGAIN, ++ 0 /* do not schedule closure, do it next */); ++ } else { ++ slapi_log_err(SLAPI_LOG_WARNING, "ns_handle_pr_read_ready", "Received idletime out with c->c_idletimeout as 0. Ignoring.\n"); ++ } + ns_handle_closure_nomutex(c); + } else if ((connection_activity(c, maxthreads)) == -1) { + /* This might happen as a result of +diff --git a/src/nunc-stans/ns/ns_event_fw_event.c b/src/nunc-stans/ns/ns_event_fw_event.c +index 58dac28..3acbaf7 100644 +--- a/src/nunc-stans/ns/ns_event_fw_event.c ++++ b/src/nunc-stans/ns/ns_event_fw_event.c +@@ -71,18 +71,22 @@ event_logger_cb(int severity, const char *msg) + static ns_job_type_t + event_flags_to_type(short events) + { +- ns_job_type_t job_type = 0; +- if (events & EV_READ) { +- job_type |= NS_JOB_READ; +- } +- if (events & EV_WRITE) { +- job_type |= NS_JOB_WRITE; +- } +- if (events & EV_TIMEOUT) { +- job_type |= NS_JOB_TIMER; +- } +- if (events & EV_SIGNAL) { +- job_type |= NS_JOB_SIGNAL; ++ /* The volatile here prevents gcc rearranging this code within the thread. 
*/ ++ volatile ns_job_type_t job_type = 0; ++ ++ /* Either we timeout *or* we are a real event */ ++ if (!(events & EV_TIMEOUT)) { ++ if (events & EV_READ) { ++ job_type |= NS_JOB_READ; ++ } ++ if (events & EV_WRITE) { ++ job_type |= NS_JOB_WRITE; ++ } ++ if (events & EV_SIGNAL) { ++ job_type |= NS_JOB_SIGNAL; ++ } ++ } else { ++ job_type = NS_JOB_TIMER; + } + return job_type; + } +-- +2.9.3 + diff --git a/SOURCES/0005-Issue-49169-Fix-covscan-errors-regression.patch b/SOURCES/0005-Issue-49169-Fix-covscan-errors-regression.patch new file mode 100644 index 0000000..757a4dc --- /dev/null +++ b/SOURCES/0005-Issue-49169-Fix-covscan-errors-regression.patch @@ -0,0 +1,36 @@ +From 6dde613c1a44731e017d262c2b5868dbe333da74 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 15 Mar 2017 09:00:19 -0400 +Subject: [PATCH] Issue 49169 - Fix covscan errors(regression) + +Description: The change to vattr.c caused problems with the tests. + Removing change. + +https://pagure.io/389-ds-base/issue/49169 + +Reviewed by: one line commit rule + +(cherry picked from commit 314e9ecf310d4ab8e8fc700bd5d3477d52e4fa19) +--- + ldap/servers/slapd/vattr.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c +index 599b54e..ef4d7f2 100644 +--- a/ldap/servers/slapd/vattr.c ++++ b/ldap/servers/slapd/vattr.c +@@ -754,9 +754,9 @@ slapi_vattr_values_get_sp(vattr_context *c, + if (use_local_ctx) { + /* slapi_pblock_destroy cleans up pb_vattr_context, as well */ + slapi_pblock_destroy(local_pb); +- ctx->pb = NULL; ++ } else { ++ vattr_context_ungrok(&c); + } +- vattr_context_ungrok(&ctx); + return rc; + } + +-- +2.9.3 + diff --git a/SOURCES/0006-Issue-49062-Reset-agmt-update-staus-and-total-init b/SOURCES/0006-Issue-49062-Reset-agmt-update-staus-and-total-init new file mode 100644 index 0000000..77c5104 --- /dev/null +++ b/SOURCES/0006-Issue-49062-Reset-agmt-update-staus-and-total-init @@ -0,0 +1,29 @@ +From 310b8f8b3c59423b9dfa3a6ea30f4a719f342fc9 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 1 Mar 2017 10:56:40 -0500 +Subject: [PATCH] Issue 49062 - Reset agmt update staus and total init + +Description: Make sure we always reset the agmt status after doing a reinit + +https://pagure.io/389-ds-base/issue/49062 + +Reviewed by: tbordaz & nhosoi(Thanks!!) +--- + ldap/servers/plugins/replication/repl5_tot_protocol.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c +index 57d9de2..45a084a 100644 +--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c ++++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c +@@ -591,6 +591,7 @@ retry: + "\"%s\". 
Sent %lu entries.\n", + agmt_get_long_name(prp->agmt), cb_data.num_entries); + agmt_set_last_init_status(prp->agmt, 0, 0, 0, "Total update succeeded"); ++ agmt_set_last_update_status(prp->agmt, 0, 0, NULL); + } + + done: +-- +2.9.3 + diff --git a/SOURCES/0007-Issue-49065-dbmon.sh-fails-if-you-have-nsslapd-requi.patch b/SOURCES/0007-Issue-49065-dbmon.sh-fails-if-you-have-nsslapd-requi.patch new file mode 100644 index 0000000..b27dbb5 --- /dev/null +++ b/SOURCES/0007-Issue-49065-dbmon.sh-fails-if-you-have-nsslapd-requi.patch @@ -0,0 +1,174 @@ +From edf3d210e9ba9006f87e0597b052fa925c68ddc2 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 20 Mar 2017 17:35:10 -0400 +Subject: [PATCH] Issue 49065 - dbmon.sh fails if you have + nsslapd-require-secure-binds enabled + +Description: Add the ability to detect if security is enabled, if so connect using + start TLS. Added a new param SERVID for specifying which instance + you want to look at. + +https://pagure.io/389-ds-base/issue/49065 + +Reviewed by: firstyear(Thanks!) +--- + Makefile.am | 2 +- + ldap/admin/src/scripts/{dbmon.sh => dbmon.sh.in} | 62 ++++++++++++++++++++++-- + man/man8/dbmon.sh.8 | 14 +++--- + 3 files changed, 65 insertions(+), 13 deletions(-) + rename ldap/admin/src/scripts/{dbmon.sh => dbmon.sh.in} (81%) + mode change 100755 => 100644 + +diff --git a/Makefile.am b/Makefile.am +index 9aebb6b..4a4b2d3 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -235,7 +235,7 @@ CLEANFILES = dberrstrs.h ns-slapd.properties \ + ldap/admin/src/scripts/usn-tombstone-cleanup.pl ldap/admin/src/scripts/verify-db.pl \ + ldap/admin/src/scripts/ds_selinux_port_query ldap/admin/src/scripts/ds_selinux_enabled \ + ldap/admin/src/scripts/dbverify ldap/admin/src/scripts/readnsstate \ +- doxyfile.stamp \ ++ doxyfile.stamp ldap/admin/src/scripts/dbmon.sh \ + $(NULL) + + clean-local: +diff --git a/ldap/admin/src/scripts/dbmon.sh b/ldap/admin/src/scripts/dbmon.sh.in +old mode 100755 +new mode 100644 +similarity index 81% +rename from ldap/admin/src/scripts/dbmon.sh +rename to ldap/admin/src/scripts/dbmon.sh.in +index 3b8b4d1..4ee6adc +--- a/ldap/admin/src/scripts/dbmon.sh ++++ b/ldap/admin/src/scripts/dbmon.sh.in +@@ -8,10 +8,11 @@ + # END COPYRIGHT BLOCK + # + ++. @datadir@/@package_name@/data/DSSharedLib ++ + DURATION=${DURATION:-0} + INCR=${INCR:-1} +-HOST=${HOST:-localhost} +-PORT=${PORT:-389} ++SERVID=${SERVID} + BINDDN=${BINDDN:-"cn=directory manager"} + BINDPW=${BINDPW:-"secret"} + DBLIST=${DBLIST:-all} +@@ -180,10 +181,63 @@ parseldif() { + } + + dodbmon() { ++ initfile=$(get_init_file "@initconfigdir@" $SERVID) ++ if [ $? -eq 1 ] ++ then ++ echo "You must supply a valid server instance identifier (via SERVID)." ++ echo "Available instances: $initfile" ++ exit 1 ++ fi ++ ++ . $initfile ++ ++ process_dse $CONFIG_DIR $$ ++ file="/tmp/DSSharedLib.$$" ++ port=$(grep -i 'nsslapd-port' $file | awk '{print $2}' ) ++ host=$(grep -i 'nsslapd-localhost' $file | awk '{print $2}' ) ++ security=$(grep -i 'nsslapd-security' $file | awk '{print $2}' ) ++ certdir=$(grep -i 'nsslapd-certdir' $file | awk '{print $2}' ) ++ rm $file ++ ++ if [ -n "$ldapiURL" ] ++ then ++ ldapiURL=`echo "$ldapiURL" | sed -e 's/\//%2f/g'` ++ ldapiURL="ldapi://"$ldapiURL ++ fi ++ ++ client_type=`ldapsearch -V 2>&1`; ++ echo "$client_type" | grep -q "OpenLDAP" ++ if [ $? 
-eq 0 ] ++ then ++ openldap="yes" ++ export LDAPTLS_CACERTDIR=$certdir ++ fi ++ ++ if [ -z $security ]; then ++ security="off" ++ fi ++ + while [ 1 ] ; do + date +- ldapsearch -xLLL -h $HOST -p $PORT -D "$BINDDN" -w "$BINDPW" -b "$ldbmdn" '(|(cn=config)(cn=database)(cn=monitor))' \ +- | parseldif ++ if [ "$security" = "on" ]; then ++ # STARTTLS ++ if [ "$openldap" = "yes" ]; then ++ ldapsearch -x -LLL -ZZ -h $host -p $port -D "$BINDDN" -w "$BINDPW" -b "$ldbmdn" '(|(cn=config)(cn=database)(cn=monitor))' \ ++ | parseldif ++ else ++ ldapsearch -ZZZ -P $certdir -h $host -p $port -D "$BINDDN" -w "$BINDPW" -b "$ldbmdn" '(|(cn=config)(cn=database)(cn=monitor))' \ ++ | parseldif ++ fi ++ else ++ # LDAP ++ if [ "$openldap" = "yes" ]; then ++ ldapsearch -x -LLL -h $host -p $port -D "$BINDDN" -w "$BINDPW" -b "$ldbmdn" '(|(cn=config)(cn=database)(cn=monitor))' \ ++ | parseldif ++ else ++ ldapsearch -h $host -p $port -D "$BINDDN" -w "$BINDPW" -b "$ldbmdn" '(|(cn=config)(cn=database)(cn=monitor))' \ ++ | parseldif ++ fi ++ fi + echo "" + sleep $INCR + done +diff --git a/man/man8/dbmon.sh.8 b/man/man8/dbmon.sh.8 +index 49e61d0..ad318a1 100644 +--- a/man/man8/dbmon.sh.8 ++++ b/man/man8/dbmon.sh.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DBMON.SH 8 "Jul 25, 2014" ++.TH DBMON.SH 8 "Mar 20, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -18,7 +18,7 @@ + .SH NAME + dbmon.sh - Directory Server script for monitoring database and entry cache usage + .SH SYNOPSIS +-[INCR=num] [HOST=hostname] [PORT=num] [BINDDN=binddn] [BINDPW=password] [DBLIST=databases] [INDEXLIST=indexes] [VERBOSE=num] dbmon.sh ++[INCR=num] [SERVID=server_id][BINDDN=binddn] [BINDPW=password] [DBLIST=databases] [INDEXLIST=indexes] [VERBOSE=num] dbmon.sh + .SH DESCRIPTION + dbmon.sh is a tool used to monitor database and entry cache usage. It is especially useful for database cache and entry/dn cache tuning - how much space is left, is the cache full, how much space on average do I need per entry/dn. + .SH OPTIONS +@@ -31,9 +31,7 @@ All arguments are optional, but you will most likely have to provide BINDPW + .TP + .B \fBINCR\fR - show results every INCR seconds - default is 1 second + .TP +-.B \fBHOST\fR - name of host or IP address - default is "localhost" +-.TP +-.B \fBPORT\fR - port number (LDAP not LDAPS) - default is 389 ++.B \fBSERVID\fR - Name of the server instance + .TP + .B \fBBINDDN\fR - DN to use to bind - must have permission to read everything under cn=config - default is cn=Directory Manager + .TP +@@ -46,11 +44,11 @@ All arguments are optional, but you will most likely have to provide BINDPW + .B \fBVERBOSE\fR - output level - 0 == suitable for parsing by a script - 1 == has column headings - 2 == provides detailed descriptions of the data - default is 0 + + .SH EXAMPLE +-INCR=1 HOST=ldap.example.com BINDDN="cn=directory manager" BINDPW="secret" VERBOSE=2 dbmon.sh ++INCR=1 SERVID=slapd-localhost BINDDN="cn=directory manager" BINDPW="secret" VERBOSE=2 dbmon.sh + + .SH AUTHOR + dbmon.sh was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2014 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. 
+-- +2.9.3 + diff --git a/SOURCES/0008-Issue-49095-targetattr-wildcard-evaluation-is-incorr.patch b/SOURCES/0008-Issue-49095-targetattr-wildcard-evaluation-is-incorr.patch new file mode 100644 index 0000000..d3cbdb1 --- /dev/null +++ b/SOURCES/0008-Issue-49095-targetattr-wildcard-evaluation-is-incorr.patch @@ -0,0 +1,157 @@ +From abc9ff876209819c8f0dd7e799f1ab6a1b084fe5 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 20 Mar 2017 15:08:45 -0400 +Subject: [PATCH] Issue 49095 - targetattr wildcard evaluation is incorrectly + case sensitive + +Description: When processing an aci that uses a wildcard targetattr, the + comparision should be done using case insensitive functions. + +https://pagure.io/389-ds-base/issue/49095 + +Reviewed by: firstyear(Thanks!) +--- + dirsrvtests/tests/tickets/ticket49095_test.py | 85 +++++++++++++++++++++++++++ + ldap/servers/plugins/acl/acl.c | 10 ++-- + 2 files changed, 90 insertions(+), 5 deletions(-) + create mode 100644 dirsrvtests/tests/tickets/ticket49095_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket49095_test.py b/dirsrvtests/tests/tickets/ticket49095_test.py +new file mode 100644 +index 0000000..04f92b2 +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket49095_test.py +@@ -0,0 +1,85 @@ ++import time ++import ldap ++import logging ++import pytest ++from lib389 import DirSrv, Entry, tools, tasks ++from lib389.tools import DirSrvTools ++from lib389._constants import * ++from lib389.properties import * ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st as topo ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++USER_DN = 'uid=testuser,dc=example,dc=com' ++acis = ['(targetattr != "tele*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', ++ '(targetattr != "TELE*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', ++ '(targetattr != "telephonenum*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', ++ '(targetattr != "TELEPHONENUM*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)'] ++ ++ ++def test_ticket49095(topo): ++ """Check that target attrbiutes with wildcards are case insensitive ++ """ ++ ++ # Add an entry ++ try: ++ topo.standalone.add_s(Entry((USER_DN, { ++ 'objectclass': 'top extensibleObject'.split(), ++ 'uid': 'testuser', ++ 'telephonenumber': '555-555-5555' ++ }))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add test user: ' + e.message['desc']) ++ assert False ++ ++ for aci in acis: ++ # Add ACI ++ try: ++ topo.standalone.modify_s(DEFAULT_SUFFIX, ++ [(ldap.MOD_REPLACE, 'aci', aci)]) ++ ++ except ldap.LDAPError as e: ++ log.fatal('Failed to set aci: ' + aci + ': ' + e.message['desc']) ++ assert False ++ ++ # Set Anonymous Bind to test aci ++ try: ++ topo.standalone.simple_bind_s("", "") ++ except ldap.LDAPError as e: ++ log.fatal('Failed to bind anonymously: ' + e.message['desc']) ++ assert False ++ ++ # Search for entry - should not get any results ++ try: ++ entry = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, ++ 'telephonenumber=*') ++ if entry: ++ log.fatal('The entry was incorrectly returned') ++ assert False ++ except ldap.LDAPError as e: ++ log.fatal('Failed to search anonymously: ' + e.message['desc']) ++ assert False 
++ ++ # Set root DN Bind so we can update aci's ++ try: ++ topo.standalone.simple_bind_s(DN_DM, PASSWORD) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to bind anonymously: ' + e.message['desc']) ++ assert False ++ ++ log.info("Test Passed") ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c +index 0a93808..48b8efc 100644 +--- a/ldap/servers/plugins/acl/acl.c ++++ b/ldap/servers/plugins/acl/acl.c +@@ -3407,19 +3407,19 @@ acl_match_substring ( Slapi_Filter *f, char *str, int exact_match) + } + + /* this assumes that str and the filter components are already +- * normalized. If not, it shoul be done ++ * normalized. If not, it should be done + */ + if ( initial != NULL) { + len = strlen(initial); + if (exact_match) { +- int rc = strncmp(p, initial, len); ++ int rc = strncasecmp(p, initial, len); + if (rc) { + return ACL_FALSE; + } else { + p += len; + } + } else { +- p = strstr(p, initial); ++ p = strcasestr(p, initial); + if (p) { + p += len; + } else { +@@ -3430,7 +3430,7 @@ acl_match_substring ( Slapi_Filter *f, char *str, int exact_match) + + if ( any != NULL) { + for (i = 0; any && any[i] != NULL; i++) { +- p = strstr(p, any[i]); ++ p = strcasestr(p, any[i]); + if (p) { + p += strlen(any[i]); + } else { +@@ -3444,7 +3444,7 @@ acl_match_substring ( Slapi_Filter *f, char *str, int exact_match) + len = strlen(final); + tlen = strlen(p); + if (len > tlen) return ACL_FALSE; +- if (strcmp(p+tlen-len, final)) return ACL_FALSE; ++ if (strcasecmp(p+tlen-len, final)) return ACL_FALSE; + } + + return ACL_TRUE; +-- +2.9.3 + diff --git a/SOURCES/0009-Issue-49157-ds-logpipe.py-crashes-for-non-existing-u.patch b/SOURCES/0009-Issue-49157-ds-logpipe.py-crashes-for-non-existing-u.patch new file mode 100644 index 0000000..a79e911 --- /dev/null +++ b/SOURCES/0009-Issue-49157-ds-logpipe.py-crashes-for-non-existing-u.patch @@ -0,0 +1,75 @@ +From e33f58d5a9984fd5d5533425fb420d05e6484d7f Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 20 Mar 2017 15:29:48 -0400 +Subject: [PATCH] Issue 49157 - ds-logpipe.py crashes for non-existing users + +Description: Added try/except's for various OS function calls, as the tool + should gracefully exit when there is a problem and not crash + +https://pagure.io/389-ds-base/issue/49157 + +Reviewed by: firstyear(Thanks!) 
+--- + ldap/admin/src/scripts/ds-logpipe.py | 25 ++++++++++++++++++------- + 1 file changed, 18 insertions(+), 7 deletions(-) + +diff --git a/ldap/admin/src/scripts/ds-logpipe.py b/ldap/admin/src/scripts/ds-logpipe.py +index 4ba4d1b..dc1856a 100644 +--- a/ldap/admin/src/scripts/ds-logpipe.py ++++ b/ldap/admin/src/scripts/ds-logpipe.py +@@ -262,7 +262,8 @@ def parse_options(): + + options, logfname = parse_options() + +-if options.debug: debug = True ++if options.debug: ++ debug = True + + if len(plgfuncs) == 0: + plgfuncs.append(defaultplugin) +@@ -270,9 +271,15 @@ if len(plgpostfuncs) == 0: + plgpostfuncs.append(defaultpost) + + if options.user: +- try: userid = int(options.user) +- except ValueError: # not a numeric userid - look it up +- userid = pwd.getpwnam(options.user)[2] ++ try: ++ userid = int(options.user) ++ except ValueError: # not a numeric userid - look it up ++ try: ++ userid = pwd.getpwnam(options.user)[2] ++ except Exception as e: ++ print("Failed to lookup name (%s) error: %s" % ++ (options.user, str(e))) ++ sys.exit(1) + os.seteuid(userid) + + if options.scriptpidfile: +@@ -298,8 +305,12 @@ except OSError as e: + if e.errno == errno.ENOENT: + if debug: + print("Creating log pipe", logfname) +- os.mkfifo(logfname) +- os.chmod(logfname, 0o600) ++ try: ++ os.mkfifo(logfname) ++ os.chmod(logfname, 0o600) ++ except Exception as e: ++ print("Failed to create log pipe: " + str(e)) ++ sys.exit(1) + else: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + +@@ -393,7 +404,7 @@ while not done: + else: # we read something + # pipe closed - usually when server shuts down + done = True +- ++ + if not done and debug: + print("log pipe", logfname, "closed - reopening - read", totallines, "total lines") + +-- +2.9.3 + diff --git a/SOURCES/0010-Fix-double-free-in-_cl5NewDBFile-error-path.patch b/SOURCES/0010-Fix-double-free-in-_cl5NewDBFile-error-path.patch new file mode 100644 index 0000000..44813e8 --- /dev/null +++ b/SOURCES/0010-Fix-double-free-in-_cl5NewDBFile-error-path.patch @@ -0,0 +1,40 @@ +From 8c39c9dbe69949065940019e930c37b8f5450a75 Mon Sep 17 00:00:00 2001 +From: Adam Tkac +Date: Sat, 18 Mar 2017 23:34:54 +0100 +Subject: [PATCH] Fix double-free in _cl5NewDBFile() error path + +Although slapi_ch_free should prevent double-free errors, it doesn't work +in old code because after assignment + +(*dbFile)->name = name; + +two independent pointers points to the same allocated area and both pointers +are free()-ed (one directly in error path in _cl5NewDBFile and the second +in _cl5DBCloseFile, called in error path as well). 
+ +Signed-off-by: Mark Reynolds +--- + ldap/servers/plugins/replication/cl5_api.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c +index fc70ab7..5c2233f 100644 +--- a/ldap/servers/plugins/replication/cl5_api.c ++++ b/ldap/servers/plugins/replication/cl5_api.c +@@ -6269,9 +6269,10 @@ out: + } + + (*dbFile)->db = db; +- (*dbFile)->name = name; +- (*dbFile)->replName = slapi_ch_strdup (replName); +- (*dbFile)->replGen = slapi_ch_strdup (replGen); ++ (*dbFile)->name = name; ++ name = NULL; /* transfer ownership to dbFile struct */ ++ (*dbFile)->replName = slapi_ch_strdup (replName); ++ (*dbFile)->replGen = slapi_ch_strdup (replGen); + + /* + * Considerations for setting up cl semaphore: +-- +2.9.3 + diff --git a/SOURCES/0011-Issue-49188-retrocl-can-crash-server-at-shutdown.patch b/SOURCES/0011-Issue-49188-retrocl-can-crash-server-at-shutdown.patch new file mode 100644 index 0000000..994fa70 --- /dev/null +++ b/SOURCES/0011-Issue-49188-retrocl-can-crash-server-at-shutdown.patch @@ -0,0 +1,34 @@ +From 8f908a1de1906a0b7451505d9640e2fd2f9fa7eb Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 22 Mar 2017 10:18:13 -0400 +Subject: [PATCH] Issue 49188 - retrocl can crash server at shutdown + +Description: We do not calloc enough elements when processing nsslapd-attribute + from the retrocl plugin configuration. This causes invalid memory + to be freed at shutdown(via slapi_ch_array_free). + +https://pagure.io/389-ds-base/issue/49188 + +Reviewed by: mreynolds(one line commit rule) +--- + ldap/servers/plugins/retrocl/retrocl.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/plugins/retrocl/retrocl.c b/ldap/servers/plugins/retrocl/retrocl.c +index 32b30c7..6e68667 100644 +--- a/ldap/servers/plugins/retrocl/retrocl.c ++++ b/ldap/servers/plugins/retrocl/retrocl.c +@@ -470,8 +470,8 @@ static int retrocl_start (Slapi_PBlock *pb) + + retrocl_nattributes = n; + +- retrocl_attributes = (char **)slapi_ch_calloc(n, sizeof(char *)); +- retrocl_aliases = (char **)slapi_ch_calloc(n, sizeof(char *)); ++ retrocl_attributes = (char **)slapi_ch_calloc(n + 1, sizeof(char *)); ++ retrocl_aliases = (char **)slapi_ch_calloc(n + 1, sizeof(char *)); + + slapi_log_err(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, "retrocl_start - Attributes:\n"); + +-- +2.9.3 + diff --git a/SOURCES/0012-Ticket-49177-rpm-would-not-create-valid-pkgconfig-fi.patch b/SOURCES/0012-Ticket-49177-rpm-would-not-create-valid-pkgconfig-fi.patch new file mode 100644 index 0000000..8b25c83 --- /dev/null +++ b/SOURCES/0012-Ticket-49177-rpm-would-not-create-valid-pkgconfig-fi.patch @@ -0,0 +1,131 @@ +From fffbb3d39a2ea12a2b3a72c729e76c1e69a19d8f Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 27 Mar 2017 14:33:17 -0400 +Subject: [PATCH] Ticket 49177 - rpm would not create valid pkgconfig files + + Bug Description: pkgconfig from the rpm was not valid. + + Fix Description: Resolve an issue in the way we handle the file + substiution to resolve this issue. 
+ + https://pagure.io/389-ds-base/issue/49177 +--- + Makefile.am | 10 ++-------- + configure.ac | 3 +++ + m4/mozldap.m4 | 4 ++++ + m4/openldap.m4 | 4 ++++ + src/pkgconfig/dirsrv.pc.in | 4 ++++ + 5 files changed, 17 insertions(+), 8 deletions(-) + +diff --git a/Makefile.am b/Makefile.am +index 4a4b2d3..982dd28 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -110,14 +110,12 @@ if OPENLDAP + # shared lib _fini for one will stomp on the other, and the program will crash + LDAPSDK_LINK_NOTHR = @openldap_lib@ -lldap@ol_libver@ @ldap_lib_ldif@ -llber@ol_libver@ + LDAPSDK_LINK = @openldap_lib@ -lldap_r@ol_libver@ @ldap_lib_ldif@ -llber@ol_libver@ +-ldaplib = openldap +-ldaplib_defs = -DUSE_OPENLDAP + else + LDAPSDK_LINK = @ldapsdk_lib@ -lssldap60 -lprldap60 -lldap60 -lldif60 + LDAPSDK_LINK_NOTHR = $(LDAPSDK_LINK) +-ldaplib = mozldap +-ldaplib_defs = + endif ++ldaplib = @ldaplib@ ++ldaplib_defs = @ldaplib_defs@ + + DB_LINK = @db_lib@ -ldb-@db_libver@ + SASL_LINK = @sasl_lib@ -lsasl2 +@@ -2237,10 +2235,6 @@ else + $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@ + endif + +-%/$(PACKAGE_NAME).pc: %/dirsrv.pc.in +- if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi +- $(fixupcmd) $^ > $@ +- + %/$(PACKAGE_NAME)-snmp: %/ldap-agent-initscript.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ +diff --git a/configure.ac b/configure.ac +index 4e3e9fb..3f2aa75 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -785,6 +785,8 @@ AC_SUBST(openldap_inc) + AC_SUBST(openldap_lib) + AC_SUBST(openldap_libdir) + AC_SUBST(openldap_bindir) ++AC_SUBST(ldaplib) ++AC_SUBST(ldaplib_defs) + AC_SUBST(ldaptool_bindir) + AC_SUBST(ldaptool_opts) + AC_SUBST(plainldif_opts) +@@ -853,6 +855,7 @@ if test "$GCC" != yes ; then + fi + + # Build our pkgconfig files ++# This currently conflicts with %.in: rule in Makefile.am, which should be removed eventually. 
+ AC_CONFIG_FILES([src/pkgconfig/dirsrv.pc src/pkgconfig/nunc-stans.pc src/pkgconfig/libsds.pc]) + + AC_CONFIG_FILES([Makefile rpm/389-ds-base.spec ]) +diff --git a/m4/mozldap.m4 b/m4/mozldap.m4 +index 4352151..8084ed8 100644 +--- a/m4/mozldap.m4 ++++ b/m4/mozldap.m4 +@@ -15,6 +15,8 @@ AC_ARG_WITH(ldapsdk, AS_HELP_STRING([--with-ldapsdk@<:@=PATH@:>@],[Mozilla LDAP + if test "$withval" = yes + then + AC_MSG_RESULT(yes) ++ ldaplib="mozldap" ++ ldaplib_defs="" + elif test "$withval" = no + then + AC_MSG_RESULT(no) +@@ -22,6 +24,8 @@ AC_ARG_WITH(ldapsdk, AS_HELP_STRING([--with-ldapsdk@<:@=PATH@:>@],[Mozilla LDAP + then + AC_MSG_RESULT([using $withval]) + LDAPSDKDIR=$withval ++ ldaplib="mozldap" ++ ldaplib_defs="" + ldapsdk_inc="-I$LDAPSDKDIR/include" + ldapsdk_lib="-L$LDAPSDKDIR/lib" + ldapsdk_libdir="$LDAPSDKDIR/lib" +diff --git a/m4/openldap.m4 b/m4/openldap.m4 +index 417bf43..f45637c 100644 +--- a/m4/openldap.m4 ++++ b/m4/openldap.m4 +@@ -15,6 +15,8 @@ AC_ARG_WITH(openldap, AS_HELP_STRING([--with-openldap@<:@=PATH@:>@],[Use OpenLDA + if test "$withval" = yes + then + AC_MSG_RESULT([using system OpenLDAP]) ++ ldaplib="openldap" ++ ldaplib_defs="-DUSE_OPENLDAP" + elif test "$withval" = no + then + AC_MSG_RESULT(no) +@@ -22,6 +24,8 @@ AC_ARG_WITH(openldap, AS_HELP_STRING([--with-openldap@<:@=PATH@:>@],[Use OpenLDA + then + AC_MSG_RESULT([using $withval]) + OPENLDAPDIR=$withval ++ ldaplib="openldap" ++ ldaplib_defs="-DUSE_OPENLDAP" + openldap_incdir="$OPENLDAPDIR/include" + openldap_inc="-I$openldap_incdir" + openldap_lib="-L$OPENLDAPDIR/lib" +diff --git a/src/pkgconfig/dirsrv.pc.in b/src/pkgconfig/dirsrv.pc.in +index 4140031..df433cf 100644 +--- a/src/pkgconfig/dirsrv.pc.in ++++ b/src/pkgconfig/dirsrv.pc.in +@@ -1,3 +1,7 @@ ++prefix=@prefix@ ++exec_prefix=@exec_prefix@ ++libdir=@libdir@ ++includedir=@includedir@ + ldaplib=@ldaplib@ + + Name: dirsrv +-- +2.9.3 + diff --git a/SOURCES/0013-Ticket-49076-To-debug-DB_DEADLOCK-condition-allow-to.patch b/SOURCES/0013-Ticket-49076-To-debug-DB_DEADLOCK-condition-allow-to.patch new file mode 100644 index 0000000..7d0d1c3 --- /dev/null +++ b/SOURCES/0013-Ticket-49076-To-debug-DB_DEADLOCK-condition-allow-to.patch @@ -0,0 +1,245 @@ +From 1a66f5f232d6c2869ef4e439eafe5a820f61a976 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Wed, 15 Feb 2017 11:31:27 +0100 +Subject: [PATCH] Ticket 49076 - To debug DB_DEADLOCK condition, allow to reset + DB_TXN_NOWAIT flag on txn_begin + +Bug Description: + For debug reason it is interesting to have a new configuration ldbm backend config + option (nsslapd-db-transaction-wait) that allows to hang on deadlock + rather to let the server handling retries. + +Fix Description: + The fix introduce a new attribute nsslapd-db-transaction-wait under + "cn=config,cn=ldbm database,cn=plugins,cn=config". + By default it is "off" (ldbm returns DB_DEADLOCK) and can be changed + online. + It is taken into account when a new transcation begin. 
+ +https://pagure.io/389-ds-base/issue/49076 + +Reviewed by: William Brown, Ludwig Krispenz + +Platforms tested: F23 + +Flag Day: no + +Doc impact: no +--- + dirsrvtests/tests/tickets/ticket49076_test.py | 103 ++++++++++++++++++++++++++ + ldap/servers/slapd/back-ldbm/dblayer.c | 9 ++- + ldap/servers/slapd/back-ldbm/dblayer.h | 3 + + ldap/servers/slapd/back-ldbm/ldbm_config.c | 22 ++++++ + ldap/servers/slapd/back-ldbm/ldbm_config.h | 1 + + 5 files changed, 137 insertions(+), 1 deletion(-) + create mode 100644 dirsrvtests/tests/tickets/ticket49076_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket49076_test.py b/dirsrvtests/tests/tickets/ticket49076_test.py +new file mode 100644 +index 0000000..c4a2c1b +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket49076_test.py +@@ -0,0 +1,103 @@ ++import time ++import ldap ++import logging ++import pytest ++from lib389 import DirSrv, Entry, tools, tasks ++from lib389.tools import DirSrvTools ++from lib389._constants import * ++from lib389.properties import * ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st as topo ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++ldbm_config = "cn=config,%s" % (DN_LDBM) ++txn_begin_flag = "nsslapd-db-transaction-wait" ++TEST_USER_DN = 'cn=test,%s' % SUFFIX ++TEST_USER = "test" ++ ++def _check_configured_value(topology_st, attr=txn_begin_flag, expected_value=None, required=False): ++ entries = topology_st.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config') ++ if required: ++ assert (entries[0].hasValue(attr)) ++ if entries[0].hasValue(attr): ++ topology_st.standalone.log.info('Current value is %s' % entries[0].getValue(attr)) ++ assert (entries[0].getValue(attr) == expected_value) ++ ++def _update_db(topology_st): ++ topology_st.standalone.add_s( ++ Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), ++ 'cn': TEST_USER, ++ 'sn': TEST_USER, ++ 'givenname': TEST_USER}))) ++ topology_st.standalone.delete_s(TEST_USER_DN) ++ ++def test_ticket49076(topo): ++ """Write your testcase here... ++ ++ Also, if you need any testcase initialization, ++ please, write additional fixture for that(include finalizer). 
++ """ ++ ++ # check default value is DB_TXN_NOWAIT ++ _check_configured_value(topo, expected_value="off") ++ ++ # tests we are able to update DB ++ _update_db(topo) ++ ++ # switch to wait mode ++ topo.standalone.modify_s(ldbm_config, ++ [(ldap.MOD_REPLACE, txn_begin_flag, "on")]) ++ # check default value is DB_TXN_NOWAIT ++ _check_configured_value(topo, expected_value="on") ++ _update_db(topo) ++ ++ ++ # switch back to "normal mode" ++ topo.standalone.modify_s(ldbm_config, ++ [(ldap.MOD_REPLACE, txn_begin_flag, "off")]) ++ # check default value is DB_TXN_NOWAIT ++ _check_configured_value(topo, expected_value="off") ++ # tests we are able to update DB ++ _update_db(topo) ++ ++ # check that settings are not reset by restart ++ topo.standalone.modify_s(ldbm_config, ++ [(ldap.MOD_REPLACE, txn_begin_flag, "on")]) ++ # check default value is DB_TXN_NOWAIT ++ _check_configured_value(topo, expected_value="on") ++ _update_db(topo) ++ topo.standalone.restart(timeout=10) ++ _check_configured_value(topo, expected_value="on") ++ _update_db(topo) ++ ++ # switch default value ++ topo.standalone.modify_s(ldbm_config, ++ [(ldap.MOD_DELETE, txn_begin_flag, None)]) ++ # check default value is DB_TXN_NOWAIT ++ _check_configured_value(topo, expected_value="off") ++ # tests we are able to update DB ++ _update_db(topo) ++ topo.standalone.restart(timeout=10) ++ _check_configured_value(topo, expected_value="off") ++ # tests we are able to update DB ++ _update_db(topo) ++ ++ ++ if DEBUGGING: ++ # Add debugging steps(if any)... ++ pass ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c +index 683994f..507a3cc 100644 +--- a/ldap/servers/slapd/back-ldbm/dblayer.c ++++ b/ldap/servers/slapd/back-ldbm/dblayer.c +@@ -3374,6 +3374,8 @@ dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, + + if (priv->dblayer_enable_transactions) + { ++ int txn_begin_flags; ++ + dblayer_private_env *pEnv = priv->dblayer_env; + if(use_lock) slapi_rwlock_rdlock(pEnv->dblayer_env_lock); + if (!parent_txn) +@@ -3383,11 +3385,16 @@ dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, + if (par_txn_txn) { + parent_txn = par_txn_txn->back_txn_txn; + } ++ } ++ if (priv->dblayer_txn_wait) { ++ txn_begin_flags = 0; ++ } else { ++ txn_begin_flags = DB_TXN_NOWAIT; + } + return_value = TXN_BEGIN(pEnv->dblayer_DB_ENV, + (DB_TXN*)parent_txn, + &new_txn.back_txn_txn, +- DB_TXN_NOWAIT); ++ txn_begin_flags); + if (0 != return_value) + { + if(use_lock) slapi_rwlock_unlock(priv->dblayer_env->dblayer_env_lock); +diff --git a/ldap/servers/slapd/back-ldbm/dblayer.h b/ldap/servers/slapd/back-ldbm/dblayer.h +index e02e6e0..e4307fc 100644 +--- a/ldap/servers/slapd/back-ldbm/dblayer.h ++++ b/ldap/servers/slapd/back-ldbm/dblayer.h +@@ -104,6 +104,9 @@ struct dblayer_private + * the mpool */ + int dblayer_recovery_required; + int dblayer_enable_transactions; ++ int dblayer_txn_wait; /* Default is "off" (DB_TXN_NOWAIT) but for ++ * support purpose it could be helpful to set ++ * "on" so that backend hang on deadlock */ + int dblayer_durable_transactions; + int dblayer_checkpoint_interval; + int dblayer_circular_logging; +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index 8541224..dfe7a13 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ 
b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -636,6 +636,27 @@ static int ldbm_config_db_transaction_logging_set(void *arg, void *value, char * + return retval; + } + ++ ++static void *ldbm_config_db_transaction_wait_get(void *arg) ++{ ++ struct ldbminfo *li = (struct ldbminfo *) arg; ++ ++ return (void *) ((uintptr_t)li->li_dblayer_private->dblayer_txn_wait); ++} ++ ++static int ldbm_config_db_transaction_wait_set(void *arg, void *value, char *errorbuf, int phase, int apply) ++{ ++ struct ldbminfo *li = (struct ldbminfo *) arg; ++ int retval = LDAP_SUCCESS; ++ int val = (int) ((uintptr_t)value); ++ ++ if (apply) { ++ li->li_dblayer_private->dblayer_txn_wait = val; ++ } ++ ++ return retval; ++} ++ + static void *ldbm_config_db_logbuf_size_get(void *arg) + { + struct ldbminfo *li = (struct ldbminfo *) arg; +@@ -1517,6 +1538,7 @@ static config_info ldbm_config[] = { + {CONFIG_DB_DURABLE_TRANSACTIONS, CONFIG_TYPE_ONOFF, "on", &ldbm_config_db_durable_transactions_get, &ldbm_config_db_durable_transactions_set, CONFIG_FLAG_ALWAYS_SHOW}, + {CONFIG_DB_CIRCULAR_LOGGING, CONFIG_TYPE_ONOFF, "on", &ldbm_config_db_circular_logging_get, &ldbm_config_db_circular_logging_set, 0}, + {CONFIG_DB_TRANSACTION_LOGGING, CONFIG_TYPE_ONOFF, "on", &ldbm_config_db_transaction_logging_get, &ldbm_config_db_transaction_logging_set, 0}, ++ {CONFIG_DB_TRANSACTION_WAIT, CONFIG_TYPE_ONOFF, "off", &ldbm_config_db_transaction_wait_get, &ldbm_config_db_transaction_wait_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_CHECKPOINT_INTERVAL, CONFIG_TYPE_INT, "60", &ldbm_config_db_checkpoint_interval_get, &ldbm_config_db_checkpoint_interval_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_COMPACTDB_INTERVAL, CONFIG_TYPE_INT, "2592000"/*30days*/, &ldbm_config_db_compactdb_interval_get, &ldbm_config_db_compactdb_interval_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_DB_TRANSACTION_BATCH, CONFIG_TYPE_INT, "0", &dblayer_get_batch_transactions, &dblayer_set_batch_transactions, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h +index f481937..ddec3a8 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h +@@ -80,6 +80,7 @@ struct config_info { + #define CONFIG_DB_DURABLE_TRANSACTIONS "nsslapd-db-durable-transaction" + #define CONFIG_DB_CIRCULAR_LOGGING "nsslapd-db-circular-logging" + #define CONFIG_DB_TRANSACTION_LOGGING "nsslapd-db-transaction-logging" ++#define CONFIG_DB_TRANSACTION_WAIT "nsslapd-db-transaction-wait" + #define CONFIG_DB_CHECKPOINT_INTERVAL "nsslapd-db-checkpoint-interval" + #define CONFIG_DB_COMPACTDB_INTERVAL "nsslapd-db-compactdb-interval" + #define CONFIG_DB_TRANSACTION_BATCH "nsslapd-db-transaction-batch-val" +-- +2.9.3 + diff --git a/SOURCES/0014-Issue-49192-Deleting-suffix-can-hang-server.patch b/SOURCES/0014-Issue-49192-Deleting-suffix-can-hang-server.patch new file mode 100644 index 0000000..60a04d2 --- /dev/null +++ b/SOURCES/0014-Issue-49192-Deleting-suffix-can-hang-server.patch @@ -0,0 +1,244 @@ +From 353955ba9b4c487e30315d39d1880b6b784817d2 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 27 Mar 2017 10:59:40 -0400 +Subject: [PATCH] Issue 49192 - Deleting suffix can hang server + +Description: If you attempt to bind as an inactive user the backend rwlock + is not unlocked. Regression introduced from issue 49051. 
+ +https://pagure.io/389-ds-base/issue/49192 + +Reviewed by: nhosoi(Thanks!) +--- + dirsrvtests/tests/tickets/ticket49192_test.py | 177 ++++++++++++++++++++++++++ + ldap/servers/slapd/bind.c | 3 - + ldap/servers/slapd/pw_verify.c | 8 +- + 3 files changed, 179 insertions(+), 9 deletions(-) + create mode 100644 dirsrvtests/tests/tickets/ticket49192_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket49192_test.py b/dirsrvtests/tests/tickets/ticket49192_test.py +new file mode 100644 +index 0000000..f770ba7 +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket49192_test.py +@@ -0,0 +1,177 @@ ++import time ++import ldap ++import logging ++import pytest ++from lib389 import Entry ++from lib389._constants import * ++from lib389.properties import * ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st as topo ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++INDEX_DN = 'cn=index,cn=Second_Backend,cn=ldbm database,cn=plugins,cn=config' ++SUFFIX_DN = 'cn=Second_Backend,cn=ldbm database,cn=plugins,cn=config' ++MY_SUFFIX = "o=hang.com" ++USER_DN = 'uid=user,' + MY_SUFFIX ++ ++ ++def test_ticket49192(topo): ++ """Trigger deadlock when removing suffix ++ """ ++ ++ # ++ # Create a second suffix/backend ++ # ++ log.info('Creating second backend...') ++ topo.standalone.backends.create(None, properties={ ++ BACKEND_NAME: "Second_Backend", ++ 'suffix': "o=hang.com", ++ }) ++ try: ++ topo.standalone.add_s(Entry(("o=hang.com", { ++ 'objectclass': 'top organization'.split(), ++ 'o': 'hang.com'}))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to create 2nd suffix: error ' + e.message['desc']) ++ assert False ++ ++ # ++ # Add roles ++ # ++ log.info('Adding roles...') ++ try: ++ topo.standalone.add_s(Entry(('cn=nsManagedDisabledRole,' + MY_SUFFIX, { ++ 'objectclass': ['top', 'LdapSubEntry', ++ 'nsRoleDefinition', ++ 'nsSimpleRoleDefinition', ++ 'nsManagedRoleDefinition'], ++ 'cn': 'nsManagedDisabledRole'}))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add managed role: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.add_s(Entry(('cn=nsDisabledRole,' + MY_SUFFIX, { ++ 'objectclass': ['top', 'LdapSubEntry', ++ 'nsRoleDefinition', ++ 'nsComplexRoleDefinition', ++ 'nsNestedRoleDefinition'], ++ 'cn': 'nsDisabledRole', ++ 'nsRoledn': 'cn=nsManagedDisabledRole,' + MY_SUFFIX}))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add nested role: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.add_s(Entry(('cn=nsAccountInactivationTmp,' + MY_SUFFIX, { ++ 'objectclass': ['top', 'nsContainer'], ++ 'cn': 'nsAccountInactivationTmp'}))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add container: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.add_s(Entry(('cn=\"cn=nsDisabledRole,' + MY_SUFFIX + '\",cn=nsAccountInactivationTmp,' + MY_SUFFIX, { ++ 'objectclass': ['top', 'extensibleObject', 'costemplate', ++ 'ldapsubentry'], ++ 'nsAccountLock': 'true'}))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add cos1: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.add_s(Entry(('cn=nsAccountInactivation_cos,' + MY_SUFFIX, { ++ 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', ++ 'cosClassicDefinition'], ++ 'cn': 'nsAccountInactivation_cos', ++ 
'cosTemplateDn': 'cn=nsAccountInactivationTmp,' + MY_SUFFIX, ++ 'cosSpecifier': 'nsRole', ++ 'cosAttribute': 'nsAccountLock operational'}))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add cos2 : error ' + e.message['desc']) ++ assert False ++ ++ # ++ # Add test entry ++ # ++ try: ++ topo.standalone.add_s(Entry((USER_DN, { ++ 'objectclass': 'top extensibleObject'.split(), ++ 'uid': 'user', ++ 'userpassword': 'password', ++ }))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user: error ' + e.message['desc']) ++ assert False ++ ++ # ++ # Inactivate the user account ++ # ++ try: ++ topo.standalone.modify_s(USER_DN, ++ [(ldap.MOD_ADD, ++ 'nsRoleDN', ++ 'cn=nsManagedDisabledRole,' + MY_SUFFIX)]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to disable user: error ' + e.message['desc']) ++ assert False ++ ++ time.sleep(1) ++ ++ # Bind as user (should fail) ++ try: ++ topo.standalone.simple_bind_s(USER_DN, 'password') ++ log.error("Bind incorrectly worked") ++ assert False ++ except ldap.UNWILLING_TO_PERFORM: ++ log.info('Got error 53 as expected') ++ except ldap.LDAPError as e: ++ log.fatal('Bind has unexpected error ' + e.message['desc']) ++ assert False ++ ++ # Bind as root DN ++ try: ++ topo.standalone.simple_bind_s(DN_DM, PASSWORD) ++ except ldap.LDAPError as e: ++ log.fatal('RootDN Bind has unexpected error ' + e.message['desc']) ++ assert False ++ ++ # ++ # Delete suffix ++ # ++ log.info('Delete the suffix and children...') ++ try: ++ index_entries = topo.standalone.search_s( ++ SUFFIX_DN, ldap.SCOPE_SUBTREE, 'objectclass=top') ++ except ldap.LDAPError as e: ++ log.error('Failed to search: %s - error %s' % (SUFFIX_DN, str(e))) ++ ++ for entry in reversed(index_entries): ++ try: ++ log.info("Deleting: " + entry.dn) ++ if entry.dn != SUFFIX_DN and entry.dn != INDEX_DN: ++ topo.standalone.search_s(entry.dn, ++ ldap.SCOPE_ONELEVEL, ++ 'objectclass=top') ++ topo.standalone.delete_s(entry.dn) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to delete entry: %s - error %s' % ++ (entry.dn, str(e))) ++ assert False ++ ++ log.info("Test Passed") ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c +index 5c4fada..f83df7d 100644 +--- a/ldap/servers/slapd/bind.c ++++ b/ldap/servers/slapd/bind.c +@@ -771,9 +771,6 @@ do_bind( Slapi_PBlock *pb ) + /* need_new_pw failed; need_new_pw already send_ldap_result in it. */ + goto free_and_return; + } +- if (be) { +- slapi_be_Unlock(be); +- } + } else { /* anonymous */ + /* set bind creds here so anonymous limits are set */ + bind_credentials_set(pb->pb_conn, authtype, NULL, NULL, NULL, NULL, NULL); +diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c +index a9fd9ec..852b027 100644 +--- a/ldap/servers/slapd/pw_verify.c ++++ b/ldap/servers/slapd/pw_verify.c +@@ -50,8 +50,6 @@ pw_verify_root_dn(const char *dn, const Slapi_Value *cred) + * + * In the future, this will use the credentials and do mfa. + * +- * If you get SLAPI_BIND_SUCCESS or SLAPI_BIND_ANONYMOUS you need to unlock +- * the backend. + * All other results, it's already released. + */ + int +@@ -81,10 +79,8 @@ pw_verify_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + set_db_default_result_handlers(pb); + /* now take the dn, and check it */ + rc = (*be->be_bind)(pb); +- /* now attempt the bind. 
*/ +- if (rc != SLAPI_BIND_SUCCESS && rc != SLAPI_BIND_ANONYMOUS) { +- slapi_be_Unlock(be); +- } ++ slapi_be_Unlock(be); ++ + return rc; + } + +-- +2.9.3 + diff --git a/SOURCES/0015-Ticket-49174-nunc-stans-can-not-use-negative-timeout.patch b/SOURCES/0015-Ticket-49174-nunc-stans-can-not-use-negative-timeout.patch new file mode 100644 index 0000000..f780980 --- /dev/null +++ b/SOURCES/0015-Ticket-49174-nunc-stans-can-not-use-negative-timeout.patch @@ -0,0 +1,200 @@ +From 4f90e73538f1faf101733fcd95392bb77ba9467c Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Wed, 22 Mar 2017 14:10:11 +1000 +Subject: [PATCH] Ticket 49174 - nunc-stans can not use negative timeout + +Bug Description: FreeIPA regularly sets up service accounts with +an nsIdleTimeout of -1. As a result of an issue with NS and libevent +this would cause an instant timeout and disconnect of the service +account. + +Fix Description: Correctly check that jobs are registered to NS. +Add validation to NS for negative timeouts. During the job registration, +we force the timeout to be a valid value. + +https://pagure.io/389-ds-base/issue/49174 + +Author: wibrown + +Review by: mreynolds(Thanks!!!) + +Signed-off-by: Mark Reynolds +--- + ldap/servers/slapd/daemon.c | 39 ++++++++++++++++++++++++++++------- + src/nunc-stans/ns/ns_event_fw_event.c | 8 ------- + src/nunc-stans/ns/ns_thrpool.c | 16 ++++++++++++++ + src/nunc-stans/test/test_nuncstans.c | 20 ++++++++++++++++++ + 4 files changed, 68 insertions(+), 15 deletions(-) + +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index e17a858..a4ea4c0 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -1891,15 +1891,32 @@ ns_connection_post_io_or_closing(Connection *conn) + tv.tv_usec = slapd_wakeup_timer * 1000; + conn->c_ns_close_jobs++; /* now 1 active closure job */ + connection_acquire_nolock_ext(conn, 1 /* allow acquire even when closing */); /* event framework now has a reference */ +- ns_add_timeout_job(conn->c_tp, &tv, NS_JOB_TIMER, ++ PRStatus job_result = ns_add_timeout_job(conn->c_tp, &tv, NS_JOB_TIMER, + ns_handle_closure, conn, NULL); +- slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_post_io_or_closing", "post closure job " +- "for conn %" NSPRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); ++#ifdef DEBUG ++ PR_ASSERT(job_result == PR_SUCCESS); ++#endif ++ if (job_result != PR_SUCCESS) { ++ slapi_log_err(SLAPI_LOG_WARNING, "ns_connection_post_io_or_closing", "post closure job " ++ "for conn %" NSPRIu64 " for fd=%d failed to be added to event queue\n", conn->c_connid, conn->c_sd); ++ } else { ++ slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_post_io_or_closing", "post closure job " ++ "for conn %" NSPRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); ++ } + + } + } else { + /* process event normally - wait for I/O until idletimeout */ +- tv.tv_sec = conn->c_idletimeout; ++ /* With nunc-stans there is a quirk. When we have idleTimeout of -1 ++ * which is set on some IPA bind dns for infinite, this causes libevent ++ * to *instantly* timeout. So if we detect < 0, we set 0 to this timeout, to ++ * catch all possible times that an admin could set. 
++ */ ++ if (conn->c_idletimeout < 0) { ++ tv.tv_sec = 0; ++ } else { ++ tv.tv_sec = conn->c_idletimeout; ++ } + tv.tv_usec = 0; + #ifdef DEBUG + PR_ASSERT(0 == connection_acquire_nolock(conn)); +@@ -1913,11 +1930,19 @@ ns_connection_post_io_or_closing(Connection *conn) + return; + } + #endif +- ns_add_io_timeout_job(conn->c_tp, conn->c_prfd, &tv, ++ PRStatus job_result = ns_add_io_timeout_job(conn->c_tp, conn->c_prfd, &tv, + NS_JOB_READ|NS_JOB_PRESERVE_FD, + ns_handle_pr_read_ready, conn, NULL); +- slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_post_io_or_closing", "post I/O job for " +- "conn %" NSPRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); ++#ifdef DEBUG ++ PR_ASSERT(job_result == PR_SUCCESS); ++#endif ++ if (job_result != PR_SUCCESS) { ++ slapi_log_err(SLAPI_LOG_WARNING, "ns_connection_post_io_or_closing", "post I/O job for " ++ "conn %" NSPRIu64 " for fd=%d failed to be added to event queue\n", conn->c_connid, conn->c_sd); ++ } else { ++ slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_post_io_or_closing", "post I/O job for " ++ "conn %" NSPRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); ++ } + } + #endif + } +diff --git a/src/nunc-stans/ns/ns_event_fw_event.c b/src/nunc-stans/ns/ns_event_fw_event.c +index 3acbaf7..76936de 100644 +--- a/src/nunc-stans/ns/ns_event_fw_event.c ++++ b/src/nunc-stans/ns/ns_event_fw_event.c +@@ -48,7 +48,6 @@ typedef struct event ns_event_fw_sig_t; + #include "ns_event_fw.h" + #include + +- + static void + event_logger_cb(int severity, const char *msg) + { +@@ -248,13 +247,6 @@ ns_event_fw_mod_io( + } + if (events) { + job->ns_event_fw_fd->ev_events = events; +- +-#ifdef DEBUG_FSM +- /* REALLY make sure that we aren't being re-added */ +- if (event_pending(job->ns_event_fw_fd, events, tv)) { +- abort(); +- } +-#endif + event_add(job->ns_event_fw_fd, tv); + } else { + /* setting the job_type to remove IO events will remove it from the event system */ +diff --git a/src/nunc-stans/ns/ns_thrpool.c b/src/nunc-stans/ns/ns_thrpool.c +index a867b39..9d87384 100644 +--- a/src/nunc-stans/ns/ns_thrpool.c ++++ b/src/nunc-stans/ns/ns_thrpool.c +@@ -180,6 +180,14 @@ ns_thrpool_is_event_shutdown(struct ns_thrpool_t *tp) + return result; + } + ++static int32_t ++validate_event_timeout(struct timeval *tv) { ++ if (tv->tv_sec < 0 || tv->tv_usec < 0) { ++ /* If we get here, you have done something WRONG */ ++ return 1; ++ } ++ return 0; ++} + + static void + job_queue_cleanup(void *arg) { +@@ -864,6 +872,10 @@ ns_add_timeout_job(ns_thrpool_t *tp, struct timeval *tv, ns_job_type_t job_type, + return PR_FAILURE; + } + ++ if (validate_event_timeout(tv)) { ++ return PR_FAILURE; ++ } ++ + /* get an event context for a timer job */ + _job = alloc_timeout_context(tp, tv, job_type, func, data); + if (!_job) { +@@ -900,6 +912,10 @@ ns_add_io_timeout_job(ns_thrpool_t *tp, PRFileDesc *fd, struct timeval *tv, + return PR_FAILURE; + } + ++ if (validate_event_timeout(tv)) { ++ return PR_FAILURE; ++ } ++ + /* Don't allow an accept job to be run outside of the event thread. + * We do this so a listener job won't shut down while still processing + * current connections in other threads. +diff --git a/src/nunc-stans/test/test_nuncstans.c b/src/nunc-stans/test/test_nuncstans.c +index 8eef9e6..2795302 100644 +--- a/src/nunc-stans/test/test_nuncstans.c ++++ b/src/nunc-stans/test/test_nuncstans.c +@@ -385,6 +385,23 @@ ns_job_signal_cb_test(void **state) + assert_int_equal(ns_job_done(job), 0); + } + ++/* ++ * Test that given a timeout of -1, we fail to create a job. 
++ */ ++ ++static void ++ns_job_neg_timeout_test(void **state) ++{ ++ struct ns_thrpool_t *tp = *state; ++ ++ struct timeval tv = { -1, 0 }; ++ ++ PR_ASSERT(PR_FAILURE == ns_add_io_timeout_job(tp, 0, &tv, NS_JOB_THREAD, ns_init_do_nothing_cb, NULL, NULL)); ++ ++ PR_ASSERT(PR_FAILURE == ns_add_timeout_job(tp, &tv, NS_JOB_THREAD, ns_init_do_nothing_cb, NULL, NULL)); ++ ++} ++ + int + main(void) + { +@@ -410,6 +427,9 @@ main(void) + cmocka_unit_test_setup_teardown(ns_job_signal_cb_test, + ns_test_setup, + ns_test_teardown), ++ cmocka_unit_test_setup_teardown(ns_job_neg_timeout_test, ++ ns_test_setup, ++ ns_test_teardown), + }; + return cmocka_run_group_tests(tests, NULL, NULL); + } +-- +2.9.3 + diff --git a/SOURCES/0016-Issue-48989-Integer-overflow.patch b/SOURCES/0016-Issue-48989-Integer-overflow.patch new file mode 100644 index 0000000..484ad26 --- /dev/null +++ b/SOURCES/0016-Issue-48989-Integer-overflow.patch @@ -0,0 +1,1366 @@ +From be621fcd9f2215bba4c9190fd63815dc395814d8 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 28 Mar 2017 11:39:16 -0400 +Subject: [PATCH] Issue 48989 - Integer overflow + +Redo slapi_counters and monitors +--- + Makefile.am | 4 +- + configure.ac | 74 +++-- + ldap/servers/plugins/dna/dna.c | 20 +- + ldap/servers/plugins/posix-winsync/posix-winsync.c | 12 +- + ldap/servers/plugins/replication/repl5_init.c | 2 +- + ldap/servers/plugins/replication/repl_extop.c | 2 +- + ldap/servers/plugins/usn/usn.c | 16 +- + ldap/servers/slapd/back-ldbm/monitor.c | 4 +- + ldap/servers/slapd/back-ldbm/perfctrs.c | 12 +- + ldap/servers/slapd/back-ldbm/perfctrs.h | 74 ++--- + ldap/servers/slapd/back-ldbm/vlv_srch.h | 2 +- + ldap/servers/slapd/conntable.c | 14 +- + ldap/servers/slapd/entry.c | 4 +- + ldap/servers/slapd/log.c | 13 +- + ldap/servers/slapd/monitor.c | 14 +- + ldap/servers/slapd/slapi-plugin.h | 14 +- + ldap/servers/slapd/slapi_counter.c | 333 ++++----------------- + ldap/servers/slapd/slapi_counter_sunos_sparcv9.S | 105 ------- + ldap/servers/slapd/snmp_collator.c | 2 +- + test/libslapd/test.c | 2 + + test/test_slapd.h | 4 + + 21 files changed, 205 insertions(+), 522 deletions(-) + delete mode 100644 ldap/servers/slapd/slapi_counter_sunos_sparcv9.S + +diff --git a/Makefile.am b/Makefile.am +index df4a037..982dd28 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -1222,9 +1222,6 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ + $(libavl_a_SOURCES) + + libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_INCLUDES) @db_inc@ $(SVRCORE_INCLUDES) @kerberos_inc@ @pcre_inc@ +-if SPARC +-libslapd_la_SOURCES += ldap/servers/slapd/slapi_counter_sunos_sparcv9.S +-endif + libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LINK) $(THREADLIB) $(SYSTEMD_LINK) + libslapd_la_LDFLAGS = $(AM_LDFLAGS) $(SLAPD_LDFLAGS) + +@@ -2004,6 +2001,7 @@ TESTS = test_slapd \ + + test_slapd_SOURCES = test/main.c \ + test/libslapd/test.c \ ++ test/libslapd/counters/atomic.c \ + test/libslapd/pblock/analytics.c \ + test/libslapd/pblock/v3_compat.c \ + test/libslapd/operation/v3_compat.c +diff --git a/configure.ac b/configure.ac +index 3f2aa75..8172bab 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -555,7 +555,6 @@ case $host in + case $host in + i*86-*-linux*) + AC_DEFINE([CPU_x86], [], [cpu type x86]) +- AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [enabling atomic counter]) + ;; + x86_64-*-linux*) + with_xsixfour="yes" +@@ -565,23 +564,6 @@ case $host in + # wibrown -- 2017-02-21 disabled temporarily + # 
with_atomic_queue="yes" + # AC_DEFINE([ATOMIC_QUEUE_OPERATIONS], [1], [enabling atomic queue operations]) +- AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [enabling atomic counter]) +- +- AC_MSG_CHECKING([for SSE4.2 features ...]) +- save_CFLAGS="$CFLAGS" +- CFLAGS="$CFLAGS -msse4.2" +- AC_TRY_COMPILE( +- [], +- [return 0;], +- [ +- AC_DEFINE([HAVE_SSE4_2], [1], [Have sss4.2 on this platform arch]) +- AC_MSG_RESULT([SSE4.2 avaliable on this platform]) +- ], +- [ +- AC_MSG_RESULT([SSE4.2 not avaliable on this platform]) +- ] +- ) +- CFLAGS="$save_CFLAGS" + ;; + aarch64-*-linux*) + AC_DEFINE([CPU_arm], [], [cpu type arm]) +@@ -600,17 +582,6 @@ case $host in + s390x-*-linux*) + ;; + esac +- AC_MSG_CHECKING([for GCC provided 64-bit atomic bool cas function ...]) +- AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], +- [[long long ptrval = 0, val = 0, newval = 1; (void)__sync_bool_compare_and_swap_8(&ptrval, val, newval);]])], +- [AC_DEFINE([HAVE_64BIT_ATOMIC_CAS_FUNC], [1], [have 64-bit atomic bool compare and swap function provided by gcc])AC_MSG_RESULT([yes])], +- [AC_MSG_RESULT([no])]) +- AC_MSG_CHECKING([for GCC provided 64-bit atomic ops functions ...]) +- AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], +- [[long long ptrval = 0, val = 0; (void)__sync_add_and_fetch_8(&ptrval, val);]])], +- [AC_DEFINE([HAVE_64BIT_ATOMIC_OP_FUNCS], [1], [have 64-bit atomic operation functions provided by gcc])AC_MSG_RESULT([yes])], +- [AC_MSG_RESULT([no])]) +- + # some programs use the native thread library directly + THREADLIB=-lpthread + AC_SUBST([THREADLIB], [$THREADLIB]) +@@ -654,7 +625,6 @@ case $host in + AC_DEFINE([_POSIX_C_SOURCE], [199506L], [POSIX revision]) + AC_DEFINE([_HPUX_SOURCE], [1], [Source namespace]) + AC_DEFINE([_INCLUDE_STDC__SOURCE_199901], [1], [to pick up all of the printf format macros in inttypes.h]) +- AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [enabling atomic counter]) + # assume 64 bit + initconfigdir="/$PACKAGE_NAME/config" + perlexec='/opt/perl_64/bin/perl' +@@ -689,12 +659,11 @@ dnl Cstd and Crun are required to link any C++ related code + initdir='$(sysconfdir)/init.d' + case $host in + i?86-*-solaris2.1[[0-9]]*) +-dnl I dont know why i386 need this explicit ++ dnl I dont know why i386 need this explicit + AC_DEFINE([HAVE_GETPEERUCRED], [1], [have getpeerucred]) + ;; + sparc-*-solaris*) +-dnl includes some assembler stuff in counter.o +- AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [enabling atomic counter]) ++ dnl includes some assembler stuff in counter.o + AC_DEFINE([CPU_sparc], [], [cpu type sparc]) + TARGET='SPARC' + ;; +@@ -705,6 +674,45 @@ dnl includes some assembler stuff in counter.o + ;; + esac + ++AC_MSG_CHECKING([for SSE4.2 features ...]) ++save_CFLAGS="$CFLAGS" ++CFLAGS="$CFLAGS -msse4.2" ++AC_TRY_COMPILE( ++ [], ++ [return 0;], ++ [ ++ AC_DEFINE([HAVE_SSE4_2], [1], [Have sss4.2 on this platform arch]) ++ AC_MSG_RESULT([SSE4.2 avaliable on this platform]) ++ ], ++ [ ++ AC_MSG_RESULT([SSE4.2 not avaliable on this platform]) ++ ] ++) ++CFLAGS="$save_CFLAGS" ++ ++AC_MSG_CHECKING([for GCC provided 64-bit atomic operations]) ++AC_LINK_IFELSE([AC_LANG_PROGRAM([[ ++ #include ++ ]], ++ [[ ++ uint64_t t_counter = 0; ++ uint64_t t_oldval = 0; ++ uint64_t t_newval = 1; ++ ++ __atomic_compare_exchange_8(&t_counter, &t_oldval, t_newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); ++ __atomic_add_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST); ++ __atomic_sub_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST); ++ __atomic_load(&t_counter, &t_oldval, __ATOMIC_SEQ_CST); ++ return 0; ++ ]])], ++ [ ++ 
AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [have 64-bit atomic operation functions provided by gcc]) ++ AC_MSG_RESULT([yes]) ++ ], ++ [ ++ AC_MSG_RESULT([no]) ++ ] ++) + + # cmd line overrides default setting above + if test -n "$with_initddir" ; then +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index 54bbe86..34011b9 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -2497,7 +2497,7 @@ static int dna_get_next_value(struct configEntry *config_entry, + if ((config_entry->maxval == -1) || + (nextval <= (config_entry->maxval + config_entry->interval))) { + /* try to set the new next value in the config entry */ +- PR_snprintf(next_value, sizeof(next_value),"%" NSPRIu64, nextval); ++ snprintf(next_value, sizeof(next_value),"%" NSPRIu64, nextval); + + /* set up our replace modify operation */ + replace_val[0] = next_value; +@@ -2565,13 +2565,13 @@ dna_get_shared_config_attr_val(struct configEntry *config_entry, char *attr, cha + if(slapi_sdn_compare(server->sdn, server_sdn) == 0){ + if(strcmp(attr, DNA_REMOTE_BIND_METHOD) == 0){ + if (server->remote_bind_method) { +- PR_snprintf(value, DNA_REMOTE_BUFSIZ, "%s", server->remote_bind_method); ++ snprintf(value, DNA_REMOTE_BUFSIZ, "%s", server->remote_bind_method); + found = 1; + } + break; + } else if(strcmp(attr, DNA_REMOTE_CONN_PROT) == 0){ + if (server->remote_conn_prot) { +- PR_snprintf(value, DNA_REMOTE_BUFSIZ, "%s", server->remote_conn_prot); ++ snprintf(value, DNA_REMOTE_BUFSIZ, "%s", server->remote_conn_prot); + found = 1; + } + break; +@@ -2609,7 +2609,7 @@ dna_update_shared_config(struct configEntry *config_entry) + + /* We store the number of remaining assigned values + * in the shared config entry. */ +- PR_snprintf(remaining_vals, sizeof(remaining_vals),"%" NSPRIu64, ++ snprintf(remaining_vals, sizeof(remaining_vals),"%" NSPRIu64, + config_entry->remaining); + + /* set up our replace modify operation */ +@@ -2709,7 +2709,7 @@ dna_update_next_range(struct configEntry *config_entry, + int ret = 0; + + /* Try to set the new next range in the config entry. 
*/ +- PR_snprintf(nextrange_value, sizeof(nextrange_value), "%" NSPRIu64 "-%" NSPRIu64, ++ snprintf(nextrange_value, sizeof(nextrange_value), "%" NSPRIu64 "-%" NSPRIu64, + lower, upper); + + /* set up our replace modify operation */ +@@ -2778,8 +2778,8 @@ dna_activate_next_range(struct configEntry *config_entry) + int ret = 0; + + /* Setup the modify operation for the config entry */ +- PR_snprintf(maxval_val, sizeof(maxval_val),"%" NSPRIu64, config_entry->next_range_upper); +- PR_snprintf(nextval_val, sizeof(nextval_val),"%" NSPRIu64, config_entry->next_range_lower); ++ snprintf(maxval_val, sizeof(maxval_val),"%" NSPRIu64, config_entry->next_range_upper); ++ snprintf(nextval_val, sizeof(nextval_val),"%" NSPRIu64, config_entry->next_range_lower); + + maxval_vals[0] = maxval_val; + maxval_vals[1] = 0; +@@ -4411,8 +4411,8 @@ static int dna_extend_exop(Slapi_PBlock *pb) + char highstr[16]; + + /* Create the exop response */ +- PR_snprintf(lowstr, sizeof(lowstr), "%" NSPRIu64, lower); +- PR_snprintf(highstr, sizeof(highstr), "%" NSPRIu64, upper); ++ snprintf(lowstr, sizeof(lowstr), "%" NSPRIu64, lower); ++ snprintf(highstr, sizeof(highstr), "%" NSPRIu64, upper); + range_low.bv_val = lowstr; + range_low.bv_len = strlen(range_low.bv_val); + range_high.bv_val = highstr; +@@ -4588,7 +4588,7 @@ dna_release_range(char *range_dn, PRUint64 *lower, PRUint64 *upper) + *lower = *upper - release + 1; + + /* try to set the new maxval in the config entry */ +- PR_snprintf(max_value, sizeof(max_value),"%" NSPRIu64, (*lower - 1)); ++ snprintf(max_value, sizeof(max_value),"%" NSPRIu64, (*lower - 1)); + + /* set up our replace modify operation */ + replace_val[0] = max_value; +diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c +index a7e024d..63444e5 100644 +--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c ++++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c +@@ -234,7 +234,7 @@ sync_acct_disable(void *cbdata, /* the usual domain config data */ + { + int ds_is_enabled = 1; /* default to true */ + int ad_is_enabled = 1; /* default to true */ +- unsigned long adval = 0; /* raw account val from ad entry */ ++ uint64_t adval = 0; /* raw account val from ad entry */ + int isvirt = 0; + + /* get the account lock state of the ds entry */ +@@ -270,9 +270,8 @@ sync_acct_disable(void *cbdata, /* the usual domain config data */ + if (update_entry) { + slapi_entry_attr_set_ulong(update_entry, "userAccountControl", adval); + slapi_log_err(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name, +- "<-- sync_acct_disable - %s AD account [%s] - " +- "new value is [%ld]\n", (ds_is_enabled) ? "enabled" : "disabled", +- slapi_entry_get_dn_const(update_entry), adval); ++ "<-- sync_acct_disable - %s AD account [%s] - new value is [%" NSPRIu64 "]\n", ++ (ds_is_enabled) ? "enabled" : "disabled", slapi_entry_get_dn_const(update_entry), adval); + } else { + /* iterate through the mods - if there is already a mod + for userAccountControl, change it - otherwise, add it */ +@@ -327,9 +326,8 @@ sync_acct_disable(void *cbdata, /* the usual domain config data */ + mod_bval->bv_len = strlen(acctvalstr); + } + slapi_log_err(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name, +- "<-- sync_acct_disable - %s AD account [%s] - " +- "new value is [%ld]\n", (ds_is_enabled) ? "enabled" : "disabled", +- slapi_entry_get_dn_const(ad_entry), adval); ++ "<-- sync_acct_disable - %s AD account [%s] - new value is [%" NSPRIu64 "]\n", ++ (ds_is_enabled) ? 
"enabled" : "disabled", slapi_entry_get_dn_const(ad_entry), adval); + } + } + +diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c +index 0945f7b..9549dcf 100644 +--- a/ldap/servers/plugins/replication/repl5_init.c ++++ b/ldap/servers/plugins/replication/repl5_init.c +@@ -208,7 +208,7 @@ get_repl_session_id (Slapi_PBlock *pb, char *idstr, CSN **csn) + /* Avoid "Connection is NULL and hence cannot access SLAPI_CONN_ID" */ + if (opid) { + slapi_pblock_get (pb, SLAPI_CONN_ID, &connid); +- PR_snprintf (idstr, REPL_SESSION_ID_SIZE, "conn=%" NSPRIu64 " op=%d", ++ snprintf (idstr, REPL_SESSION_ID_SIZE, "conn=%" NSPRIu64 " op=%d", + connid, opid); + } + +diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c +index 948f38d..80580f9 100644 +--- a/ldap/servers/plugins/replication/repl_extop.c ++++ b/ldap/servers/plugins/replication/repl_extop.c +@@ -865,7 +865,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + * the session's conn id and op id to identify the the supplier. + */ + /* junkrc = ruv_get_first_id_and_purl(supplier_ruv, &junkrid, &locking_purl); */ +- PR_snprintf(locking_session, sizeof(locking_session), "conn=%" NSPRIu64 " id=%d", ++ snprintf(locking_session, sizeof(locking_session), "conn=%" NSPRIu64 " id=%d", + connid, opid); + locking_purl = &locking_session[0]; + if (replica_get_exclusive_access(replica, &isInc, connid, opid, +diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c +index 6fe8d2e..5e67e0a 100644 +--- a/ldap/servers/plugins/usn/usn.c ++++ b/ldap/servers/plugins/usn/usn.c +@@ -360,7 +360,7 @@ _usn_mod_next_usn(LDAPMod ***mods, Slapi_Backend *be) + + /* add next USN to the mods; "be" contains the usn counter */ + usn_berval.bv_val = counter_buf; +- PR_snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, ++ snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, + slapi_counter_get_value(be->be_usn_counter)); + usn_berval.bv_len = strlen(usn_berval.bv_val); + bvals[0] = &usn_berval; +@@ -670,7 +670,7 @@ usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, + /* nsslapd-entryusn-global: on*/ + /* root dse shows ... + * lastusn: */ +- PR_snprintf(attr, USN_LAST_USN_ATTR_CORE_LEN + 1, "%s", USN_LAST_USN); ++ snprintf(attr, USN_LAST_USN_ATTR_CORE_LEN + 1, "%s", USN_LAST_USN); + for (be = slapi_get_first_backend(&cookie); be; + be = slapi_get_next_backend(cookie)) { + if (be->be_usn_counter) { +@@ -681,10 +681,10 @@ usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, + /* get a next USN counter from be_usn_counter; + * then minus 1 from it (except if be_usn_counter has value 0) */ + if (slapi_counter_get_value(be->be_usn_counter)) { +- PR_snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, ++ snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, + slapi_counter_get_value(be->be_usn_counter)-1); + } else { +- PR_snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "-1"); ++ snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "-1"); + } + usn_berval.bv_len = strlen(usn_berval.bv_val); + slapi_entry_attr_replace(e, attr, vals); +@@ -693,7 +693,7 @@ usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, + /* nsslapd-entryusn-global: off (default) */ + /* root dse shows ... 
+ * lastusn;: */ +- PR_snprintf(attr, USN_LAST_USN_ATTR_CORE_LEN + 2, "%s;", USN_LAST_USN); ++ snprintf(attr, USN_LAST_USN_ATTR_CORE_LEN + 2, "%s;", USN_LAST_USN); + attr_subp = attr + USN_LAST_USN_ATTR_CORE_LEN + 1; + for (be = slapi_get_first_backend(&cookie); be; + be = slapi_get_next_backend(cookie)) { +@@ -704,10 +704,10 @@ usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, + /* get a next USN counter from be_usn_counter; + * then minus 1 from it (except if be_usn_counter has value 0) */ + if (slapi_counter_get_value(be->be_usn_counter)) { +- PR_snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, ++ snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, + slapi_counter_get_value(be->be_usn_counter)-1); + } else { +- PR_snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "-1"); ++ snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "-1"); + } + usn_berval.bv_len = strlen(usn_berval.bv_val); + +@@ -716,7 +716,7 @@ usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, + attr = (char *)slapi_ch_realloc(attr, attr_len); + attr_subp = attr + USN_LAST_USN_ATTR_CORE_LEN; + } +- PR_snprintf(attr_subp, attr_len - USN_LAST_USN_ATTR_CORE_LEN, ++ snprintf(attr_subp, attr_len - USN_LAST_USN_ATTR_CORE_LEN, + "%s", be->be_name); + slapi_entry_attr_replace(e, attr, vals); + } +diff --git a/ldap/servers/slapd/back-ldbm/monitor.c b/ldap/servers/slapd/back-ldbm/monitor.c +index dfcc735..757792b 100644 +--- a/ldap/servers/slapd/back-ldbm/monitor.c ++++ b/ldap/servers/slapd/back-ldbm/monitor.c +@@ -26,7 +26,7 @@ + + #define MSETF(_attr, _x) do { \ + char tmp_atype[37]; \ +- PR_snprintf(tmp_atype, sizeof(tmp_atype), _attr, _x); \ ++ snprintf(tmp_atype, sizeof(tmp_atype), _attr, _x); \ + MSET(tmp_atype); \ + } while (0) + +@@ -86,7 +86,7 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, + MSET("entryCacheHits"); + sprintf(buf, "%lu", (long unsigned int)tries); + MSET("entryCacheTries"); +- sprintf(buf, "%lu", (unsigned long)(100.0*(double)hits / (double)(tries > 0 ? tries : 1))); ++ sprintf(buf, "%lu", (long unsigned int)(100.0*(double)hits / (double)(tries > 0 ? tries : 1))); + MSET("entryCacheHitRatio"); + sprintf(buf, "%lu", (long unsigned int)size); + MSET("currentEntryCacheSize"); +diff --git a/ldap/servers/slapd/back-ldbm/perfctrs.c b/ldap/servers/slapd/back-ldbm/perfctrs.c +index 2bd18bd..5929dea 100644 +--- a/ldap/servers/slapd/back-ldbm/perfctrs.c ++++ b/ldap/servers/slapd/back-ldbm/perfctrs.c +@@ -49,7 +49,7 @@ + + static void perfctrs_update(perfctrs_private *priv, DB_ENV *db_env); + static void perfctr_add_to_entry( Slapi_Entry *e, char *type, +- PRUint32 countervalue ); ++ uint64_t countervalue ); + + /* Init perf ctrs */ + void perfctrs_init(struct ldbminfo *li, perfctrs_private **ret_priv) +@@ -304,17 +304,13 @@ perfctrs_as_entry( Slapi_Entry *e, perfctrs_private *priv, DB_ENV *db_env ) + */ + for ( i = 0; i < SLAPI_LDBM_PERFCTR_AT_MAP_COUNT; ++i ) { + perfctr_add_to_entry( e, perfctr_at_map[i].pam_type, +- *((PRUint32 *)((char *)perf + perfctr_at_map[i].pam_offset))); ++ *((uint64_t *)((char *)perf + perfctr_at_map[i].pam_offset))); + } + } + + + static void +-perfctr_add_to_entry( Slapi_Entry *e, char *type, PRUint32 countervalue ) ++perfctr_add_to_entry( Slapi_Entry *e, char *type, uint64_t countervalue ) + { +- /* +- * XXXmcs: the following line assumes that long's are 32 bits or larger, +- * which we assume in other places too I am sure. 
+- */ +- slapi_entry_attr_set_ulong( e, type, (unsigned long)countervalue ); ++ slapi_entry_attr_set_ulong( e, type, countervalue ); + } +diff --git a/ldap/servers/slapd/back-ldbm/perfctrs.h b/ldap/servers/slapd/back-ldbm/perfctrs.h +index 65a7850..57be1d1 100644 +--- a/ldap/servers/slapd/back-ldbm/perfctrs.h ++++ b/ldap/servers/slapd/back-ldbm/perfctrs.h +@@ -11,46 +11,48 @@ + # include + #endif + ++#include ++ + /* Structure definition for performance data */ + /* This stuff goes in shared memory, so make sure the packing is consistent */ + + struct _performance_counters { +- PRUint32 sequence_number; +- PRUint32 lock_region_wait_rate; +- PRUint32 deadlock_rate; +- PRUint32 configured_locks; +- PRUint32 current_locks; +- PRUint32 max_locks; +- PRUint32 lockers; +- PRUint32 current_lock_objects; +- PRUint32 max_lock_objects; +- PRUint32 lock_conflicts; +- PRUint32 lock_request_rate; +- PRUint32 log_region_wait_rate; +- PRUint32 log_write_rate; +- PRUint32 log_bytes_since_checkpoint; +- PRUint32 cache_size_bytes; +- PRUint32 page_access_rate; +- PRUint32 cache_hit; +- PRUint32 cache_try; +- PRUint32 page_create_rate; +- PRUint32 page_read_rate; +- PRUint32 page_write_rate; +- PRUint32 page_ro_evict_rate; +- PRUint32 page_rw_evict_rate; +- PRUint32 hash_buckets; +- PRUint32 hash_search_rate; +- PRUint32 longest_chain_length; +- PRUint32 hash_elements_examine_rate; +- PRUint32 pages_in_use; +- PRUint32 dirty_pages; +- PRUint32 clean_pages; +- PRUint32 page_trickle_rate; +- PRUint32 cache_region_wait_rate; +- PRUint32 active_txns; +- PRUint32 commit_rate; +- PRUint32 abort_rate; +- PRUint32 txn_region_wait_rate; ++ uint64_t sequence_number; ++ uint64_t lock_region_wait_rate; ++ uint64_t deadlock_rate; ++ uint64_t configured_locks; ++ uint64_t current_locks; ++ uint64_t max_locks; ++ uint64_t lockers; ++ uint64_t current_lock_objects; ++ uint64_t max_lock_objects; ++ uint64_t lock_conflicts; ++ uint64_t lock_request_rate; ++ uint64_t log_region_wait_rate; ++ uint64_t log_write_rate; ++ uint64_t log_bytes_since_checkpoint; ++ uint64_t cache_size_bytes; ++ uint64_t page_access_rate; ++ uint64_t cache_hit; ++ uint64_t cache_try; ++ uint64_t page_create_rate; ++ uint64_t page_read_rate; ++ uint64_t page_write_rate; ++ uint64_t page_ro_evict_rate; ++ uint64_t page_rw_evict_rate; ++ uint64_t hash_buckets; ++ uint64_t hash_search_rate; ++ uint64_t longest_chain_length; ++ uint64_t hash_elements_examine_rate; ++ uint64_t pages_in_use; ++ uint64_t dirty_pages; ++ uint64_t clean_pages; ++ uint64_t page_trickle_rate; ++ uint64_t cache_region_wait_rate; ++ uint64_t active_txns; ++ uint64_t commit_rate; ++ uint64_t abort_rate; ++ uint64_t txn_region_wait_rate; + }; + typedef struct _performance_counters performance_counters; + +diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.h b/ldap/servers/slapd/back-ldbm/vlv_srch.h +index d1eba08..6322f80 100644 +--- a/ldap/servers/slapd/back-ldbm/vlv_srch.h ++++ b/ldap/servers/slapd/back-ldbm/vlv_srch.h +@@ -92,7 +92,7 @@ struct vlvIndex + time_t vlv_lastchecked; + + /* The number of uses this search has received since start up */ +- PRUint32 vlv_uses; ++ uint64_t vlv_uses; + + struct backend* vlv_be; /* need backend to remove the index when done */ + +diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c +index 004aeae..bcafa4e 100644 +--- a/ldap/servers/slapd/conntable.c ++++ b/ldap/servers/slapd/conntable.c +@@ -395,7 +395,7 @@ connection_table_as_entry(Connection_Table *ct, Slapi_Entry *e) + * 3 = The number of operations attempted that 
were blocked + * by max threads. + */ +- PR_snprintf(maxthreadbuf, sizeof(maxthreadbuf), "%d:%"NSPRIu64":%"NSPRIu64"", ++ snprintf(maxthreadbuf, sizeof(maxthreadbuf), "%d:%"NSPRIu64":%"NSPRIu64"", + maxthreadstate, ct->c[i].c_maxthreadscount, + ct->c[i].c_maxthreadsblocked); + +@@ -426,32 +426,32 @@ connection_table_as_entry(Connection_Table *ct, Slapi_Entry *e) + PR_ExitMonitor(ct->c[i].c_mutex); + } + +- PR_snprintf( buf, sizeof(buf), "%d", nconns ); ++ snprintf( buf, sizeof(buf), "%d", nconns ); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "currentconnections", vals ); + +- PR_snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(num_conns)); ++ snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(num_conns)); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "totalconnections", vals ); + +- PR_snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(conns_in_maxthreads)); ++ snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(conns_in_maxthreads)); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "currentconnectionsatmaxthreads", vals ); + +- PR_snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(max_threads_count)); ++ snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(max_threads_count)); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "maxthreadsperconnhits", vals ); + +- PR_snprintf( buf, sizeof(buf), "%d", (ct!=NULL?ct->size:0) ); ++ snprintf( buf, sizeof(buf), "%d", (ct!=NULL?ct->size:0) ); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "dtablesize", vals ); + +- PR_snprintf( buf, sizeof(buf), "%d", nreadwaiters ); ++ snprintf( buf, sizeof(buf), "%d", nreadwaiters ); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "readwaiters", vals ); +diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c +index 7bbd2f4..abacc57 100644 +--- a/ldap/servers/slapd/entry.c ++++ b/ldap/servers/slapd/entry.c +@@ -3088,14 +3088,14 @@ slapi_entry_attr_set_longlong( Slapi_Entry* e, const char *type, long long l) + } + + void +-slapi_entry_attr_set_ulong( Slapi_Entry* e, const char *type, unsigned long l) ++slapi_entry_attr_set_ulong( Slapi_Entry* e, const char *type, uint64_t l) + { + char value[16]; + struct berval bv; + struct berval *bvals[2]; + bvals[0] = &bv; + bvals[1] = NULL; +- sprintf(value,"%lu",l); ++ sprintf(value,"%" NSPRIu64, l); + bv.bv_val = value; + bv.bv_len = strlen( value ); + slapi_entry_attr_replace( e, type, bvals ); +diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c +index 2f43a98..afedd5b 100644 +--- a/ldap/servers/slapd/log.c ++++ b/ldap/servers/slapd/log.c +@@ -2327,11 +2327,11 @@ vslapd_log_error( + char buffer[SLAPI_LOG_BUFSIZ]; + char sev_name[10]; + int blen = TBUFSIZE; +- char *vbuf; ++ char *vbuf = NULL; + int header_len = 0; + int err = 0; + +- if ((vbuf = PR_vsmprintf(fmt, ap)) == NULL) { ++ if (vasprintf(&vbuf, fmt, ap) == -1) { + log__error_emergency("CRITICAL: vslapd_log_error, Unable to format message", 1 , locked); + return -1; + } +@@ -2381,10 +2381,10 @@ vslapd_log_error( + /* blen = strlen(buffer); */ + /* This truncates again .... But we have the nice smprintf above! 
*/ + if (subsystem == NULL) { +- PR_snprintf (buffer+blen, sizeof(buffer)-blen, "- %s - %s", ++ snprintf (buffer+blen, sizeof(buffer)-blen, "- %s - %s", + get_log_sev_name(sev_level, sev_name), vbuf); + } else { +- PR_snprintf (buffer+blen, sizeof(buffer)-blen, "- %s - %s - %s", ++ snprintf (buffer+blen, sizeof(buffer)-blen, "- %s - %s - %s", + get_log_sev_name(sev_level, sev_name), subsystem, vbuf); + } + +@@ -2418,7 +2418,7 @@ vslapd_log_error( + g_set_shutdown( SLAPI_SHUTDOWN_EXIT ); + } + +- PR_smprintf_free (vbuf); ++ slapi_ch_free_string(&vbuf); + return( 0 ); + } + +@@ -2520,8 +2520,7 @@ static int vslapd_log_access(char *fmt, va_list ap) + time_t tnl; + + /* We do this sooner, because that we we can use the message in other calls */ +- vlen = PR_vsnprintf(vbuf, SLAPI_LOG_BUFSIZ, fmt, ap); +- if (! vlen) { ++ if ((vlen = vsnprintf(vbuf, SLAPI_LOG_BUFSIZ, fmt, ap)) == -1){ + log__error_emergency("CRITICAL: vslapd_log_access, Unable to format message", 1 ,0); + return -1; + } +diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c +index 0917bc8..f1fb38f 100644 +--- a/ldap/servers/slapd/monitor.c ++++ b/ldap/servers/slapd/monitor.c +@@ -54,25 +54,25 @@ monitor_info(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, int *ret + attrlist_replace( &e->e_attrs, "version", vals ); + slapi_ch_free( (void **) &val.bv_val ); + +- val.bv_len = PR_snprintf( buf, sizeof(buf), "%d", g_get_active_threadcnt() ); ++ val.bv_len = snprintf( buf, sizeof(buf), "%d", g_get_active_threadcnt() ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "threads", vals ); + + connection_table_as_entry(the_connection_table, e); + +- val.bv_len = PR_snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(ops_initiated) ); ++ val.bv_len = snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(ops_initiated) ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "opsinitiated", vals ); + +- val.bv_len = PR_snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(ops_completed) ); ++ val.bv_len = snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(ops_completed) ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "opscompleted", vals ); + +- val.bv_len = PR_snprintf ( buf, sizeof(buf), "%" NSPRIu64, g_get_num_entries_sent() ); ++ val.bv_len = snprintf ( buf, sizeof(buf), "%" NSPRIu64, g_get_num_entries_sent() ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "entriessent", vals ); + +- val.bv_len = PR_snprintf ( buf, sizeof(buf), "%" NSPRIu64, g_get_num_bytes_sent() ); ++ val.bv_len = snprintf ( buf, sizeof(buf), "%" NSPRIu64, g_get_num_bytes_sent() ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "bytessent", vals ); + +@@ -88,12 +88,12 @@ monitor_info(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, int *ret + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "starttime", vals ); + +- val.bv_len = PR_snprintf( buf, sizeof(buf), "%d", be_nbackends_public() ); ++ val.bv_len = snprintf( buf, sizeof(buf), "%d", be_nbackends_public() ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "nbackends", vals ); + + #ifdef THREAD_SUNOS5_LWP +- val.bv_len = PR_snprintf( buf, sizeof(buf), "%d", thr_getconcurrency() ); ++ val.bv_len = snprintf( buf, sizeof(buf), "%d", thr_getconcurrency() ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "concurrency", vals ); + #endif +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index 1bd8fc8..725fa1c 100644 +--- 
a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -1954,7 +1954,7 @@ void slapi_entry_attr_set_longlong( Slapi_Entry* e, const char *type, long long + * \param type Attribute type in which you want to set the value. + * \param l Unsigned long value that you want to assign to the attribute. + */ +-void slapi_entry_attr_set_ulong(Slapi_Entry* e, const char *type, unsigned long l); ++void slapi_entry_attr_set_ulong(Slapi_Entry* e, const char *type, uint64_t l); + + /** + * Check if an attribute is set in the entry +@@ -6746,12 +6746,12 @@ void slapi_destroy_task(void *arg); + Slapi_Counter *slapi_counter_new(void); + void slapi_counter_init(Slapi_Counter *counter); + void slapi_counter_destroy(Slapi_Counter **counter); +-PRUint64 slapi_counter_increment(Slapi_Counter *counter); +-PRUint64 slapi_counter_decrement(Slapi_Counter *counter); +-PRUint64 slapi_counter_add(Slapi_Counter *counter, PRUint64 addvalue); +-PRUint64 slapi_counter_subtract(Slapi_Counter *counter, PRUint64 subvalue); +-PRUint64 slapi_counter_set_value(Slapi_Counter *counter, PRUint64 newvalue); +-PRUint64 slapi_counter_get_value(Slapi_Counter *counter); ++uint64_t slapi_counter_increment(Slapi_Counter *counter); ++uint64_t slapi_counter_decrement(Slapi_Counter *counter); ++uint64_t slapi_counter_add(Slapi_Counter *counter, uint64_t addvalue); ++uint64_t slapi_counter_subtract(Slapi_Counter *counter, uint64_t subvalue); ++uint64_t slapi_counter_set_value(Slapi_Counter *counter, uint64_t newvalue); ++uint64_t slapi_counter_get_value(Slapi_Counter *counter); + + /* Binder-based (connection centric) resource limits */ + /* +diff --git a/ldap/servers/slapd/slapi_counter.c b/ldap/servers/slapd/slapi_counter.c +index c3ac846..9904fe9 100644 +--- a/ldap/servers/slapd/slapi_counter.c ++++ b/ldap/servers/slapd/slapi_counter.c +@@ -12,37 +12,21 @@ + + #include "slap.h" + +-#ifdef SOLARIS +-PRUint64 _sparcv9_AtomicSet(PRUint64 *address, PRUint64 newval); +-PRUint64 _sparcv9_AtomicAdd(PRUint64 *address, PRUint64 val); +-PRUint64 _sparcv9_AtomicSub(PRUint64 *address, PRUint64 val); ++#ifndef ATOMIC_64BIT_OPERATIONS ++#include + #endif + + #ifdef HPUX +-#ifdef ATOMIC_64BIT_OPERATIONS + #include + #endif +-#endif +- +-#ifdef ATOMIC_64BIT_OPERATIONS +-#if defined(LINUX) && !HAVE_64BIT_ATOMIC_OP_FUNCS +-/* On systems that don't have the 64-bit GCC atomic builtins, we need to +- * implement our own atomic functions using inline assembly code. */ +-PRUint64 __sync_add_and_fetch_8(PRUint64 *ptr, PRUint64 addval); +-PRUint64 __sync_sub_and_fetch_8(PRUint64 *ptr, PRUint64 subval); +-#define __sync_add_and_fetch __sync_add_and_fetch_8 +-#define __sync_sub_and_fetch __sync_sub_and_fetch_8 +-#endif +-#endif /* ATOMIC_64BIT_OPERATIONS */ +- + + /* + * Counter Structure + */ + typedef struct slapi_counter { +- PRUint64 value; ++ uint64_t value; + #ifndef ATOMIC_64BIT_OPERATIONS +- Slapi_Mutex *lock; ++ pthread_mutex_t _lock; + #endif + } slapi_counter; + +@@ -72,15 +56,11 @@ Slapi_Counter *slapi_counter_new() + void slapi_counter_init(Slapi_Counter *counter) + { + if (counter != NULL) { +-#ifndef ATOMIC_64BIT_OPERATIONS +- /* Create the lock if necessary. */ +- if (counter->lock == NULL) { +- counter->lock = slapi_new_mutex(); +- } +-#endif +- + /* Set the value to 0. 
*/ + slapi_counter_set_value(counter, 0); ++#ifndef ATOMIC_64BIT_OPERATIONS ++ pthread_mutex_init(&(counter->_lock), NULL); ++#endif + } + } + +@@ -94,7 +74,7 @@ void slapi_counter_destroy(Slapi_Counter **counter) + { + if ((counter != NULL) && (*counter != NULL)) { + #ifndef ATOMIC_64BIT_OPERATIONS +- slapi_destroy_mutex((*counter)->lock); ++ pthread_mutex_destroy(&((*counter)->_lock)); + #endif + slapi_ch_free((void **)counter); + } +@@ -105,7 +85,7 @@ void slapi_counter_destroy(Slapi_Counter **counter) + * + * Atomically increments a Slapi_Counter. + */ +-PRUint64 slapi_counter_increment(Slapi_Counter *counter) ++uint64_t slapi_counter_increment(Slapi_Counter *counter) + { + return slapi_counter_add(counter, 1); + } +@@ -117,7 +97,7 @@ PRUint64 slapi_counter_increment(Slapi_Counter *counter) + * that this will not prevent you from wrapping + * around 0. + */ +-PRUint64 slapi_counter_decrement(Slapi_Counter *counter) ++uint64_t slapi_counter_decrement(Slapi_Counter *counter) + { + return slapi_counter_subtract(counter, 1); + } +@@ -127,28 +107,18 @@ PRUint64 slapi_counter_decrement(Slapi_Counter *counter) + * + * Atomically add a value to a Slapi_Counter. + */ +-PRUint64 slapi_counter_add(Slapi_Counter *counter, PRUint64 addvalue) ++uint64_t slapi_counter_add(Slapi_Counter *counter, uint64_t addvalue) + { +- PRUint64 newvalue = 0; +-#ifdef HPUX +- PRUint64 prev = 0; +-#endif ++ uint64_t newvalue = 0; + + if (counter == NULL) { + return newvalue; + } +- +-#ifndef ATOMIC_64BIT_OPERATIONS +- slapi_lock_mutex(counter->lock); +- counter->value += addvalue; +- newvalue = counter->value; +- slapi_unlock_mutex(counter->lock); ++#ifdef ATOMIC_64BIT_OPERATIONS ++ newvalue = __atomic_add_fetch_8(&(counter->value), addvalue, __ATOMIC_SEQ_CST); + #else +-#ifdef LINUX +- newvalue = __sync_add_and_fetch(&(counter->value), addvalue); +-#elif defined(SOLARIS) +- newvalue = _sparcv9_AtomicAdd(&(counter->value), addvalue); +-#elif defined(HPUX) ++#ifdef HPUX ++ uint64_t prev = 0; + /* fetchadd only works with values of 1, 4, 8, and 16. In addition, it requires + * it's argument to be an integer constant. */ + if (addvalue == 1) { +@@ -172,8 +142,13 @@ PRUint64 slapi_counter_add(Slapi_Counter *counter, PRUint64 addvalue) + _Asm_mov_to_ar(_AREG_CCV, prev); + } while (prev != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE)); + } ++#else ++ pthread_mutex_lock(&(counter->_lock)); ++ counter->value += addvalue; ++ newvalue = counter->value; ++ pthread_mutex_unlock(&(counter->_lock)); ++#endif + #endif +-#endif /* ATOMIC_64BIT_OPERATIONS */ + + return newvalue; + } +@@ -184,28 +159,19 @@ PRUint64 slapi_counter_add(Slapi_Counter *counter, PRUint64 addvalue) + * Atomically subtract a value from a Slapi_Counter. Note + * that this will not prevent you from wrapping around 0. 
+ */ +-PRUint64 slapi_counter_subtract(Slapi_Counter *counter, PRUint64 subvalue) ++uint64_t slapi_counter_subtract(Slapi_Counter *counter, uint64_t subvalue) + { +- PRUint64 newvalue = 0; +-#ifdef HPUX +- PRUint64 prev = 0; +-#endif ++ uint64_t newvalue = 0; + + if (counter == NULL) { + return newvalue; + } + +-#ifndef ATOMIC_64BIT_OPERATIONS +- slapi_lock_mutex(counter->lock); +- counter->value -= subvalue; +- newvalue = counter->value; +- slapi_unlock_mutex(counter->lock); ++#ifdef ATOMIC_64BIT_OPERATIONS ++ newvalue = __atomic_sub_fetch_8(&(counter->value), subvalue, __ATOMIC_SEQ_CST); + #else +-#ifdef LINUX +- newvalue = __sync_sub_and_fetch(&(counter->value), subvalue); +-#elif defined(SOLARIS) +- newvalue = _sparcv9_AtomicSub(&(counter->value), subvalue); +-#elif defined(HPUX) ++#ifdef HPUX ++ uint64_t prev = 0; + /* fetchadd only works with values of -1, -4, -8, and -16. In addition, it requires + * it's argument to be an integer constant. */ + if (subvalue == 1) { +@@ -229,8 +195,13 @@ PRUint64 slapi_counter_subtract(Slapi_Counter *counter, PRUint64 subvalue) + _Asm_mov_to_ar(_AREG_CCV, prev); + } while (prev != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE)); + } ++#else ++ pthread_mutex_lock(&(counter->_lock)); ++ counter->value -= subvalue; ++ newvalue = counter->value; ++ pthread_mutex_unlock(&(counter->_lock)); ++#endif + #endif +-#endif /* ATOMIC_64BIT_OPERATIONS */ + + return newvalue; + } +@@ -240,76 +211,30 @@ PRUint64 slapi_counter_subtract(Slapi_Counter *counter, PRUint64 subvalue) + * + * Atomically sets the value of a Slapi_Counter. + */ +-PRUint64 slapi_counter_set_value(Slapi_Counter *counter, PRUint64 newvalue) ++uint64_t slapi_counter_set_value(Slapi_Counter *counter, uint64_t newvalue) + { +- PRUint64 value = 0; ++ uint64_t value = 0; + + if (counter == NULL) { + return value; + } + +-#ifndef ATOMIC_64BIT_OPERATIONS +- slapi_lock_mutex(counter->lock); +- counter->value = newvalue; +- slapi_unlock_mutex(counter->lock); +- return newvalue; +-#else +-#ifdef LINUX +-/* Use our own inline assembly for an atomic set if +- * the builtins aren't available. 
*/ +-#if !HAVE_64BIT_ATOMIC_CAS_FUNC +- /* +- * %0 = counter->value +- * %1 = newvalue +- */ +- __asm__ __volatile__( +-#ifdef CPU_x86 +- /* Save the PIC register */ +- " pushl %%ebx;" +-#endif /* CPU_x86 */ +- /* Put value of counter->value in EDX:EAX */ +- "retryset: movl %0, %%eax;" +- " movl 4%0, %%edx;" +- /* Put newval in ECX:EBX */ +- " movl %1, %%ebx;" +- " movl 4+%1, %%ecx;" +- /* If EDX:EAX and counter-> are the same, +- * replace *ptr with ECX:EBX */ +- " lock; cmpxchg8b %0;" +- " jnz retryset;" +-#ifdef CPU_x86 +- /* Restore the PIC register */ +- " popl %%ebx" +-#endif /* CPU_x86 */ +- : "+o" (counter->value) +- : "m" (newvalue) +-#ifdef CPU_x86 +- : "memory", "eax", "ecx", "edx", "cc"); +-#else +- : "memory", "eax", "ebx", "ecx", "edx", "cc"); +-#endif +- +- return newvalue; +-#else +- while (1) { +- value = counter->value; +- if (__sync_bool_compare_and_swap(&(counter->value), value, newvalue)) { +- return newvalue; +- } +- } +-#endif /* CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH */ +-#elif defined(SOLARIS) +- _sparcv9_AtomicSet(&(counter->value), newvalue); +- return newvalue; +-#elif defined(HPUX) ++#ifdef ATOMIC_64BIT_OPERATIONS ++ __atomic_store_8(&(counter->value), newvalue, __ATOMIC_SEQ_CST); ++#else /* HPUX */ ++#ifdef HPUX + do { + value = counter->value; + /* Put value in a register for cmpxchg to compare against */ + _Asm_mov_to_ar(_AREG_CCV, value); + } while (value != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE)); +- return newvalue; ++#else ++ pthread_mutex_lock(&(counter->_lock)); ++ counter->value = newvalue; ++ pthread_mutex_unlock(&(counter->_lock)); ++#endif + #endif +-#endif /* ATOMIC_64BIT_OPERATIONS */ ++ return newvalue; + } + + /* +@@ -317,174 +242,30 @@ PRUint64 slapi_counter_set_value(Slapi_Counter *counter, PRUint64 newvalue) + * + * Returns the value of a Slapi_Counter. + */ +-PRUint64 slapi_counter_get_value(Slapi_Counter *counter) ++uint64_t slapi_counter_get_value(Slapi_Counter *counter) + { +- PRUint64 value = 0; ++ uint64_t value = 0; + + if (counter == NULL) { + return value; + } + +-#ifndef ATOMIC_64BIT_OPERATIONS +- slapi_lock_mutex(counter->lock); +- value = counter->value; +- slapi_unlock_mutex(counter->lock); +-#else +-#ifdef LINUX +-/* Use our own inline assembly for an atomic get if +- * the builtins aren't available. 
*/ +-#if !HAVE_64BIT_ATOMIC_CAS_FUNC +- /* +- * %0 = counter->value +- * %1 = value +- */ +- __asm__ __volatile__( +-#ifdef CPU_x86 +- /* Save the PIC register */ +- " pushl %%ebx;" +-#endif /* CPU_x86 */ +- /* Put value of counter->value in EDX:EAX */ +- "retryget: movl %0, %%eax;" +- " movl 4%0, %%edx;" +- /* Copy EDX:EAX to ECX:EBX */ +- " movl %%eax, %%ebx;" +- " movl %%edx, %%ecx;" +- /* If EDX:EAX and counter->value are the same, +- * replace *ptr with ECX:EBX */ +- " lock; cmpxchg8b %0;" +- " jnz retryget;" +- /* Put retrieved value into value */ +- " movl %%ebx, %1;" +- " movl %%ecx, 4%1;" +-#ifdef CPU_x86 +- /* Restore the PIC register */ +- " popl %%ebx" +-#endif /* CPU_x86 */ +- : "+o" (counter->value), "=m" (value) +- : +-#ifdef CPU_x86 +- : "memory", "eax", "ecx", "edx", "cc"); +-#else +- : "memory", "eax", "ebx", "ecx", "edx", "cc"); +-#endif +-#else +- while (1) { +- value = counter->value; +- if (__sync_bool_compare_and_swap(&(counter->value), value, value)) { +- break; +- } +- } +-#endif /* CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH */ +-#elif defined(SOLARIS) +- while (1) { +- value = counter->value; +- if (value == _sparcv9_AtomicSet(&(counter->value), value)) { +- break; +- } +- } +-#elif defined(HPUX) ++#ifdef ATOMIC_64BIT_OPERATIONS ++ value = __atomic_load_8(&(counter->value), __ATOMIC_SEQ_CST); ++#else /* HPUX */ ++#ifdef HPUX + do { + value = counter->value; + /* Put value in a register for cmpxchg to compare against */ + _Asm_mov_to_ar(_AREG_CCV, value); + } while (value != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), value, _LDHINT_NONE)); +-#endif +-#endif /* ATOMIC_64BIT_OPERATIONS */ +- +- return value; +-} +- +-#ifdef ATOMIC_64BIT_OPERATIONS +-#if defined(LINUX) && !HAVE_64BIT_ATOMIC_OP_FUNCS +-/* On systems that don't have the 64-bit GCC atomic builtins, we need to +- * implement our own atomic add and subtract functions using inline +- * assembly code. 
*/ +-PRUint64 __sync_add_and_fetch_8(PRUint64 *ptr, PRUint64 addval) +-{ +- PRUint64 retval = 0; +- +- /* +- * %0 = *ptr +- * %1 = retval +- * %2 = addval +- */ +- __asm__ __volatile__( +-#ifdef CPU_x86 +- /* Save the PIC register */ +- " pushl %%ebx;" +-#endif /* CPU_x86 */ +- /* Put value of *ptr in EDX:EAX */ +- "retryadd: movl %0, %%eax;" +- " movl 4%0, %%edx;" +- /* Put addval in ECX:EBX */ +- " movl %2, %%ebx;" +- " movl 4+%2, %%ecx;" +- /* Add value from EDX:EAX to value in ECX:EBX */ +- " addl %%eax, %%ebx;" +- " adcl %%edx, %%ecx;" +- /* If EDX:EAX and *ptr are the same, replace ptr with ECX:EBX */ +- " lock; cmpxchg8b %0;" +- " jnz retryadd;" +- /* Put new value into retval */ +- " movl %%ebx, %1;" +- " movl %%ecx, 4%1;" +-#ifdef CPU_x86 +- /* Restore the PIC register */ +- " popl %%ebx" +-#endif /* CPU_x86 */ +- : "+o" (*ptr), "=m" (retval) +- : "m" (addval) +-#ifdef CPU_x86 +- : "memory", "eax", "ecx", "edx", "cc"); + #else +- : "memory", "eax", "ebx", "ecx", "edx", "cc"); ++ pthread_mutex_lock(&(counter->_lock)); ++ value = counter->value; ++ pthread_mutex_unlock(&(counter->_lock)); + #endif +- +- return retval; +-} +- +-PRUint64 __sync_sub_and_fetch_8(PRUint64 *ptr, PRUint64 subval) +-{ +- PRUint64 retval = 0; +- +- /* +- * %0 = *ptr +- * %1 = retval +- * %2 = subval +- */ +- __asm__ __volatile__( +-#ifdef CPU_x86 +- /* Save the PIC register */ +- " pushl %%ebx;" +-#endif /* CPU_x86 */ +- /* Put value of *ptr in EDX:EAX */ +- "retrysub: movl %0, %%eax;" +- " movl 4%0, %%edx;" +- /* Copy EDX:EAX to ECX:EBX */ +- " movl %%eax, %%ebx;" +- " movl %%edx, %%ecx;" +- /* Subtract subval from value in ECX:EBX */ +- " subl %2, %%ebx;" +- " sbbl 4+%2, %%ecx;" +- /* If EDX:EAX and ptr are the same, replace *ptr with ECX:EBX */ +- " lock; cmpxchg8b %0;" +- " jnz retrysub;" +- /* Put new value into retval */ +- " movl %%ebx, %1;" +- " movl %%ecx, 4%1;" +-#ifdef CPU_x86 +- /* Restore the PIC register */ +- " popl %%ebx" +-#endif /* CPU_x86 */ +- : "+o" (*ptr), "=m" (retval) +- : "m" (subval) +-#ifdef CPU_x86 +- : "memory", "eax", "ecx", "edx", "cc"); +-#else +- : "memory", "eax", "ebx", "ecx", "edx", "cc"); + #endif + +- return retval; ++ return value; + } +-#endif /* LINUX && !HAVE_64BIT_ATOMIC_OP_FUNCS */ +-#endif /* ATOMIC_64BIT_OPERATIONS */ ++ +diff --git a/ldap/servers/slapd/slapi_counter_sunos_sparcv9.S b/ldap/servers/slapd/slapi_counter_sunos_sparcv9.S +deleted file mode 100644 +index e582c2a..0000000 +--- a/ldap/servers/slapd/slapi_counter_sunos_sparcv9.S ++++ /dev/null +@@ -1,105 +0,0 @@ +-! BEGIN COPYRIGHT BLOCK +-! The Original Code is the Netscape Portable Runtime (NSPR). +-! +-! The Initial Developer of the Original Code is +-! Netscape Communications Corporation. +-! Portions created by the Initial Developer are Copyright (C) 1998-2000 +-! the Initial Developer. All Rights Reserved. +-! +-! The original code has been modified to support 64-bit atomic increments by +-! Red Hat, Inc. These portions are Copyright (C) 2008 Red Hat, Inc. All Rights +-! reserved. +-! +-! License: GPL (version 3 or any later version). +-! See LICENSE for details. +-! END COPYRIGHT BLOCK +-! +- +-#define _ASM 1 /* force to set an assembler code macro _ASM */ +-#include +- +-! ====================================================================== +-! +-! Perform the sequence a = b atomically with respect to other +-! fetch-and-stores to location a in a wait-free fashion. +-! +-! usage : old_val = PR_AtomicSet(address, newval) +-! +-! ----------------------- +-! Note on REGISTER USAGE: +-! 
as this is a LEAF procedure, a new stack frame is not created; +-! we use the caller's stack frame so what would normally be %i (input) +-! registers are actually %o (output registers). Also, we must not +-! overwrite the contents of %l (local) registers as they are not +-! assumed to be volatile during calls. +-! ----------------------- +- +- ENTRY(_sparcv9_AtomicSet) ! standard assembler/ELF prologue +- +-retryAS: +- ldx [%o0], %o2 ! set o2 to the current value +- mov %o1, %o3 ! set up the new value +- casx [%o0], %o2, %o3 ! atomically set if o0 hasn't changed +- cmp %o2, %o3 ! see if we set the value +- bne retryAS ! if not, try again +- nop ! empty out the branch pipeline +- retl ! return back to the caller +- mov %o3, %o0 ! set the return code to the prev value +- +- SET_SIZE(_sparcv9_AtomicSet) ! standard assembler/ELF epilogue +- +-! +-! end +-! +-! ====================================================================== +-! +-! Perform the sequence a = a + b atomically with respect to other +-! fetch-and-adds to location a in a wait-free fashion. +-! +-! usage : newval = PR_AtomicAdd(address, val) +-! return: the value after addition +-! +- ENTRY(_sparcv9_AtomicAdd) ! standard assembler/ELF prologue +- +-retryAA: +- ldx [%o0], %o2 ! set o2 to the current value +- addx %o2, %o1, %o3 ! calc the new value +- mov %o3, %o4 ! save the return value +- casx [%o0], %o2, %o3 ! atomically set if o0 hasn't changed +- cmp %o2, %o3 ! see if we set the value +- bne retryAA ! if not, try again +- nop ! empty out the branch pipeline +- retl ! return back to the caller +- mov %o4, %o0 ! set the return code to the new value +- +- SET_SIZE(_sparcv9_AtomicAdd) ! standard assembler/ELF epilogue +- +-! +-! end +-! +-! ====================================================================== +-! +-! Perform the sequence a = a - b atomically with respect to other +-! fetch-and-subs to location a in a wait-free fashion. +-! +-! usage : newval = PR_AtomicSub(address, val) +-! return: the value after addition +-! +- ENTRY(_sparcv9_AtomicSub) ! standard assembler/ELF prologue +- +-retryAU: +- ldx [%o0], %o2 ! set o2 to the current value +- subx %o2, %o1, %o3 ! calc the new value +- mov %o3, %o4 ! save the return value +- casx [%o0], %o2, %o3 ! atomically set if o0 hasn't changed +- cmp %o2, %o3 ! see if we set the value +- bne retryAU ! if not, try again +- nop ! empty out the branch pipeline +- retl ! return back to the caller +- mov %o4, %o0 ! set the return code to the new value +- +- SET_SIZE(_sparcv9_AtomicSub) ! standard assembler/ELF epilogue +- +-! +-! end +-! 
+diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c +index 841922f..b0c873d 100644 +--- a/ldap/servers/slapd/snmp_collator.c ++++ b/ldap/servers/slapd/snmp_collator.c +@@ -711,7 +711,7 @@ static void + add_counter_to_value(Slapi_Entry *e, const char *type, PRUint64 countervalue) + { + char value[40]; +- PR_snprintf(value,sizeof(value),"%" NSPRIu64, countervalue); ++ snprintf(value,sizeof(value),"%" NSPRIu64, countervalue); + slapi_entry_attr_set_charptr( e, type, value); + } + +diff --git a/test/libslapd/test.c b/test/libslapd/test.c +index 37d5543..6e1171a 100644 +--- a/test/libslapd/test.c ++++ b/test/libslapd/test.c +@@ -24,6 +24,8 @@ run_libslapd_tests (void) { + cmocka_unit_test(test_libslapd_pblock_v3c_original_target_dn), + cmocka_unit_test(test_libslapd_pblock_v3c_target_uniqueid), + cmocka_unit_test(test_libslapd_operation_v3c_target_spec), ++ cmocka_unit_test(test_libslapd_counters_atomic_usage), ++ cmocka_unit_test(test_libslapd_counters_atomic_overflow), + }; + return cmocka_run_group_tests(tests, NULL, NULL); + } +diff --git a/test/test_slapd.h b/test/test_slapd.h +index 02eefdd..b8f1aba 100644 +--- a/test/test_slapd.h ++++ b/test/test_slapd.h +@@ -37,4 +37,8 @@ void test_libslapd_pblock_v3c_target_uniqueid(void **state); + /* libslapd-operation-v3_compat */ + void test_libslapd_operation_v3c_target_spec(void **state); + ++/* libslapd-counters-atomic */ ++ ++void test_libslapd_counters_atomic_usage(void **state); ++void test_libslapd_counters_atomic_overflow(void **state); + +-- +2.9.3 + diff --git a/SOURCES/0017-Issue-49035-dbmon.sh-shows-pages-in-use-that-exceeds.patch b/SOURCES/0017-Issue-49035-dbmon.sh-shows-pages-in-use-that-exceeds.patch new file mode 100644 index 0000000..bb75d3f --- /dev/null +++ b/SOURCES/0017-Issue-49035-dbmon.sh-shows-pages-in-use-that-exceeds.patch @@ -0,0 +1,38 @@ +From c14b2d88497724c4e19e5fae89bb40c95a61e1cb Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 30 Mar 2017 15:26:00 -0400 +Subject: [PATCH] Issue 49035 - dbmon.sh shows pages-in-use that exceeds the + cache size + +Bug Description: dbmon shows negative free cache stats because the pages-in-use exceeds + the expected size of the cache. This is because on caches smaller + than 500mb, libdb automatically increases the size by ~25%. The tool + is only checking the configured db cache size, and in this case its + actaully larger than what was conigured in dse.ldif. + +Fix Description: dbmon.sh should use the libdb's "cache size in bytes", instead of + nsslapd-dbcachesize - as it could be different. + +https://pagure.io/389-ds-base/issue/49035 + +Reviewed by: nhosoi & wibrown (Thanks!!) 
+--- + ldap/admin/src/scripts/dbmon.sh.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/admin/src/scripts/dbmon.sh.in b/ldap/admin/src/scripts/dbmon.sh.in +index 4ee6adc..46796e2 100644 +--- a/ldap/admin/src/scripts/dbmon.sh.in ++++ b/ldap/admin/src/scripts/dbmon.sh.in +@@ -47,7 +47,7 @@ parseldif() { + } + /^[^ ]|^$/ {origline = $0; $0 = unwrapline; unwrapline = origline} + /^ / {sub(/^ /, ""); unwrapline = unwrapline $0; next} +- /^nsslapd-dbcachesize/ { dbcachesize=$2 } ++ /^nsslapd-db-cache-size-bytes/ { dbcachesize=$2 } + /^nsslapd-db-page-size/ { pagesize=$2 } + /^dbcachehitratio/ { dbhitratio=$2 } + /^dbcachepagein/ { dbcachepagein=$2 } +-- +2.9.3 + diff --git a/SOURCES/0018-Issue-49177-Fix-pkg-config-file.patch b/SOURCES/0018-Issue-49177-Fix-pkg-config-file.patch new file mode 100644 index 0000000..143e956 --- /dev/null +++ b/SOURCES/0018-Issue-49177-Fix-pkg-config-file.patch @@ -0,0 +1,56 @@ +From a1c4718d9db069ab088914ec983af8125eba3ac6 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 31 Mar 2017 09:34:27 -0400 +Subject: [PATCH] Issue 49177 - Fix pkg-config file + +Description: Need to remove the slash in front of the package name + +https://pagure.io/389-ds-base/issue/49177 + +Reviewed by: lslebodn & wibrown (Thanks!!) +--- + Makefile.am | 4 ++-- + configure.ac | 4 ++-- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/Makefile.am b/Makefile.am +index 982dd28..485a460 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -261,7 +261,7 @@ sampledatadir = $(datadir)@sampledatadir@ + systemschemadir = $(datadir)@systemschemadir@ + propertydir = $(datadir)@propertydir@ + schemadir = $(sysconfdir)@schemadir@ +-serverdir = $(libdir)@serverdir@ ++serverdir = $(libdir)/@serverdir@ + serverplugindir = $(libdir)@serverplugindir@ + taskdir = $(datadir)@scripttemplatedir@ + systemdsystemunitdir = @with_systemdsystemunitdir@ +@@ -276,7 +276,7 @@ infdir = $(datadir)@infdir@ + mibdir = $(datadir)@mibdir@ + updatedir = $(datadir)@updatedir@ + pkgconfigdir = $(libdir)/pkgconfig +-serverincdir = $(includedir)@serverincdir@ ++serverincdir = $(includedir)/@serverincdir@ + + defaultuser=@defaultuser@ + defaultgroup=@defaultgroup@ +diff --git a/configure.ac b/configure.ac +index 8172bab..51c4414 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -361,9 +361,9 @@ else + # relative to datadir + updatedir=/$PACKAGE_NAME/updates + # relative to libdir +- serverdir=/$PACKAGE_NAME ++ serverdir=$PACKAGE_NAME + # relative to includedir +- serverincdir=/$PACKAGE_NAME ++ serverincdir=$PACKAGE_NAME + # relative to libdir + serverplugindir=/$PACKAGE_NAME/plugins + # relative to datadir +-- +2.9.3 + diff --git a/SOURCES/0019-Issue-49205-Fix-logconv.pl-man-page.patch b/SOURCES/0019-Issue-49205-Fix-logconv.pl-man-page.patch new file mode 100644 index 0000000..3520f71 --- /dev/null +++ b/SOURCES/0019-Issue-49205-Fix-logconv.pl-man-page.patch @@ -0,0 +1,1438 @@ +From 6cad70f25460f3ede0429ce11d5e60946acf1174 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 31 Mar 2017 11:23:50 -0400 +Subject: [PATCH] Issue 49205 - Fix logconv.pl man page + +Description: Fixed mistake in wording in the logconv.pl man page. Also + updated all the man pages with the new pagure ticket url. + +https://pagure.io/389-ds-base/issue/49205 + +Reviewed by: nhosoi(Thanks!) 
+--- + man/man1/cl-dump.1 | 6 +++--- + man/man1/dbgen.pl.1 | 6 +++--- + man/man1/dbscan.1 | 6 +++--- + man/man1/ds-logpipe.py.1 | 6 +++--- + man/man1/dsktune.1 | 6 +++--- + man/man1/infadd.1 | 6 +++--- + man/man1/ldap-agent.1 | 6 +++--- + man/man1/ldclt.1 | 6 +++--- + man/man1/ldif.1 | 6 +++--- + man/man1/logconv.pl.1 | 10 +++++----- + man/man1/migratecred.1 | 6 +++--- + man/man1/mmldif.1 | 6 +++--- + man/man1/pwdhash.1 | 6 +++--- + man/man1/readnsstate.1 | 6 +++--- + man/man1/repl-monitor.1 | 8 ++++---- + man/man1/rsearch.1 | 6 +++--- + man/man8/bak2db.8 | 6 +++--- + man/man8/bak2db.pl.8 | 6 +++--- + man/man8/cleanallruv.pl.8 | 6 +++--- + man/man8/db2bak.8 | 6 +++--- + man/man8/db2bak.pl.8 | 6 +++--- + man/man8/db2index.8 | 6 +++--- + man/man8/db2index.pl.8 | 6 +++--- + man/man8/db2ldif.8 | 6 +++--- + man/man8/db2ldif.pl.8 | 6 +++--- + man/man8/dbverify.8 | 6 +++--- + man/man8/dn2rdn.8 | 6 +++--- + man/man8/fixup-linkedattrs.pl.8 | 6 +++--- + man/man8/fixup-memberof.pl.8 | 6 +++--- + man/man8/ldif2db.8 | 6 +++--- + man/man8/ldif2db.pl.8 | 6 +++--- + man/man8/ldif2ldap.8 | 6 +++--- + man/man8/migrate-ds.pl.8 | 18 +++++++++--------- + man/man8/monitor.8 | 6 +++--- + man/man8/ns-accountstatus.pl.8 | 6 +++--- + man/man8/ns-activate.pl.8 | 6 +++--- + man/man8/ns-inactivate.pl.8 | 6 +++--- + man/man8/ns-newpwpolicy.pl.8 | 6 +++--- + man/man8/ns-slapd.8 | 6 +++--- + man/man8/remove-ds.pl.8 | 6 +++--- + man/man8/restart-dirsrv.8 | 6 +++--- + man/man8/restoreconfig.8 | 6 +++--- + man/man8/saveconfig.8 | 6 +++--- + man/man8/schema-reload.pl.8 | 6 +++--- + man/man8/setup-ds.pl.8 | 6 +++--- + man/man8/start-dirsrv.8 | 6 +++--- + man/man8/status-dirsrv.8 | 6 +++--- + man/man8/stop-dirsrv.8 | 6 +++--- + man/man8/suffix2instance.8 | 6 +++--- + man/man8/syntax-validate.pl.8 | 6 +++--- + man/man8/upgradedb.8 | 6 +++--- + man/man8/upgradednformat.8 | 6 +++--- + man/man8/usn-tombstone-cleanup.pl.8 | 6 +++--- + man/man8/verify-db.pl.8 | 6 +++--- + man/man8/vlvindex.8 | 6 +++--- + 55 files changed, 174 insertions(+), 174 deletions(-) + +diff --git a/man/man1/cl-dump.1 b/man/man1/cl-dump.1 +index f9dedbe..db736ac 100644 +--- a/man/man1/cl-dump.1 ++++ b/man/man1/cl-dump.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH CL-DUMP 1 "May 18, 2008" ++.TH CL-DUMP 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -81,11 +81,11 @@ is running, and from where the server's changelog directory is accessible. + .SH AUTHOR + cl-dump was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). 
+diff --git a/man/man1/dbgen.pl.1 b/man/man1/dbgen.pl.1 +index 6f25080..c238c9c 100644 +--- a/man/man1/dbgen.pl.1 ++++ b/man/man1/dbgen.pl.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DBGEN.PL 1 "May 18, 2008" ++.TH DBGEN.PL 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -78,11 +78,11 @@ Add groups containing uniquemembers; generate a group for every 100 user entries + .SH AUTHOR + dbgen.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/dbscan.1 b/man/man1/dbscan.1 +index e5ff0a7..641a3af 100644 +--- a/man/man1/dbscan.1 ++++ b/man/man1/dbscan.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DBSCAN 1 "May 18, 2008" ++.TH DBSCAN 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -91,11 +91,11 @@ dbscan \fB\-f\fR objectclass.db4 + .SH AUTHOR + dbscan was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/ds-logpipe.py.1 b/man/man1/ds-logpipe.py.1 +index c7ea93d..0a6c15a 100644 +--- a/man/man1/ds-logpipe.py.1 ++++ b/man/man1/ds-logpipe.py.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DS-LOGPIPE.PY 1 "November 24, 2009" ++.TH DS-LOGPIPE.PY 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -90,9 +90,9 @@ The error log and audit log have similarly named configuration attributes e.g. n + .SH AUTHOR + ds-logpipe.py was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2009 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This is free software. 
You may redistribute copies of it under the terms of + the Directory Server license found in the LICENSE file of this +diff --git a/man/man1/dsktune.1 b/man/man1/dsktune.1 +index 19a6229..b3a5f7b 100644 +--- a/man/man1/dsktune.1 ++++ b/man/man1/dsktune.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DSKTUNE 1 "May 18, 2008" ++.TH DSKTUNE 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -49,11 +49,11 @@ specify alternate server installation directory + .SH AUTHOR + dsktune was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/infadd.1 b/man/man1/infadd.1 +index 0969019..75c2f42 100644 +--- a/man/man1/infadd.1 ++++ b/man/man1/infadd.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH INFADD 1 "May 18, 2008" ++.TH INFADD 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -67,11 +67,11 @@ add binary blob of average size of bytes + .SH AUTHOR + infadd was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/ldap-agent.1 b/man/man1/ldap-agent.1 +index 50d80c3..f2d6576 100644 +--- a/man/man1/ldap-agent.1 ++++ b/man/man1/ldap-agent.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH LDAP-AGENT 1 "May 18, 2008" ++.TH LDAP-AGENT 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -44,11 +44,11 @@ Sample usage: + .SH AUTHOR + ldap\-agent was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). 
+diff --git a/man/man1/ldclt.1 b/man/man1/ldclt.1 +index 4105f42..451a88b 100644 +--- a/man/man1/ldclt.1 ++++ b/man/man1/ldclt.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH LDCLT 1 "May 18, 2008" ++.TH LDCLT 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -220,11 +220,11 @@ Execution parameters: + .SH AUTHOR + ldclt was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/ldif.1 b/man/man1/ldif.1 +index f5fa99b..c2398b4 100644 +--- a/man/man1/ldif.1 ++++ b/man/man1/ldif.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH LDIF 1 "May 18, 2008" ++.TH LDIF 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -40,11 +40,11 @@ ldif dn < /tmp/ldif + .SH AUTHOR + ldif was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/logconv.pl.1 b/man/man1/logconv.pl.1 +index e4c1d13..13db49f 100644 +--- a/man/man1/logconv.pl.1 ++++ b/man/man1/logconv.pl.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH LOGCONV.PL 1 "May 18, 2008" ++.TH LOGCONV.PL 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -38,7 +38,7 @@ DEFAULT \-> cn=directory manager + .B \fB\-D, \-\-data\fR + DEFAULT \-> /tmp + .br +-TIP \-> If there is not enough RAM, a RAM disk can be used instead: ++TIP \-> If there is enough RAM, a RAM disk can be used instead: + + mkdir /dev/shm/logconv, and use this directory for the "\-D" value. + .TP +@@ -115,7 +115,7 @@ Examples: + .IP + logconv.pl \fB\-s\fR 10 \fB\-V\fR access + .IP +-logconv.pl \fB\-d\fR "cn=directory manager" /export/server4/slapd\-host/logs/access* ++logconv.pl \fB\-d\fR "cn=directory manager" /var/log/dirsrv/slapd\-host/access* + .IP + logconv.pl \fB\-s\fR 50 \fB\-ibgju\fR access* + .IP +@@ -124,11 +124,11 @@ logconv.pl \fB\-S\fR "[28/Mar/2002:13:14:22 \fB\-0800]\fR" \fB\-E\fR "[28/Mar/20 + .SH AUTHOR + logconv.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. 
++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/migratecred.1 b/man/man1/migratecred.1 +index 50fbe0a..e935b5f 100644 +--- a/man/man1/migratecred.1 ++++ b/man/man1/migratecred.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH MIGRATECRED 1 "May 18, 2008" ++.TH MIGRATECRED 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -50,11 +50,11 @@ New plugin path (of the new instance) + .SH AUTHOR + migratecred was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/mmldif.1 b/man/man1/mmldif.1 +index e3f31ca..bde95d1 100644 +--- a/man/man1/mmldif.1 ++++ b/man/man1/mmldif.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH MMLDIF 1 "May 18, 2008" ++.TH MMLDIF 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -46,11 +46,11 @@ Write authoritative data to this file + .SH AUTHOR + mmldif was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man1/pwdhash.1 b/man/man1/pwdhash.1 +index 3ef5c9d..96194b3 100644 +--- a/man/man1/pwdhash.1 ++++ b/man/man1/pwdhash.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH PWDHASH 1 "May 18, 2008" ++.TH PWDHASH 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -47,11 +47,11 @@ Takes the password schema directly from the ns-slapd configuration + .SH AUTHOR + dbscan was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). 
+diff --git a/man/man1/readnsstate.1 b/man/man1/readnsstate.1 +index 0edf352..b18f49a 100644 +--- a/man/man1/readnsstate.1 ++++ b/man/man1/readnsstate.1 +@@ -1,4 +1,4 @@ +-.TH READNSSTATE 1 "May 13 2016" ++.TH READNSSTATE 1 "March 31, 2017" + .SH NAME + readnsstate \- interpret the contents of cn=replica's nsState value + .B readnsstate +@@ -38,9 +38,9 @@ For replica cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config + .SH AUTHOR + readnsstate was written by the 389 Project by richm. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2016 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by William Brown , + for the 389 Directory Server Project. +diff --git a/man/man1/repl-monitor.1 b/man/man1/repl-monitor.1 +index 17b9c4b..2e4fc77 100644 +--- a/man/man1/repl-monitor.1 ++++ b/man/man1/repl-monitor.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH REPL-MONITOR 1 "Jun 28, 2016" ++.TH REPL-MONITOR 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -121,16 +121,16 @@ MY_SYSTEM2 = localhost2.localdomain:3892 + .SH AUTHOR + repl-monitor was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2016 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). + .br +-Manual page updated by Mark Reynolds 6/28/2016 ++Manual page updated by Mark Reynolds 3/31/2017 + .br + This is free software. You may redistribute copies of it under the terms of + the Directory Server license found in the LICENSE file of this +diff --git a/man/man1/rsearch.1 b/man/man1/rsearch.1 +index 319bfc1..ec269a5 100644 +--- a/man/man1/rsearch.1 ++++ b/man/man1/rsearch.1 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH RSEARCH 1 "May 18, 2008" ++.TH RSEARCH 1 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -123,11 +123,11 @@ Use \fB\-B\fR file for binding; ignored if \fB\-B\fR is not given + .SH AUTHOR + rsearch was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). 
+diff --git a/man/man8/bak2db.8 b/man/man8/bak2db.8 +index 77864728..74e5e58 100644 +--- a/man/man8/bak2db.8 ++++ b/man/man8/bak2db.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH BAK2DB 8 "Mar 5, 2013" ++.TH BAK2DB 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -57,6 +57,6 @@ to standard error. + .SH AUTHOR + bak2db was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/bak2db.pl.8 b/man/man8/bak2db.pl.8 +index 1bb76c5..01a41c1 100644 +--- a/man/man8/bak2db.pl.8 ++++ b/man/man8/bak2db.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH BAK2DB.PL 8 "Mar 5, 2013" ++.TH BAK2DB.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -73,6 +73,6 @@ to standard error. + .SH AUTHOR + bak2db.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/cleanallruv.pl.8 b/man/man8/cleanallruv.pl.8 +index 55678ac..3afc688 100644 +--- a/man/man8/cleanallruv.pl.8 ++++ b/man/man8/cleanallruv.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH CLEANALLRUV.PL 8 " Mar 5, 2013" ++.TH CLEANALLRUV.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -76,6 +76,6 @@ to standard error. + .SH AUTHOR + cleanallruv.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/db2bak.8 b/man/man8/db2bak.8 +index 5de017e..727867a 100644 +--- a/man/man8/db2bak.8 ++++ b/man/man8/db2bak.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DB2BAK 8 "Mar 5, 2013" ++.TH DB2BAK 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -58,6 +58,6 @@ to standard error. + .SH AUTHOR + db2bak was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. 
+diff --git a/man/man8/db2bak.pl.8 b/man/man8/db2bak.pl.8 +index 9a34d51..a752885 100644 +--- a/man/man8/db2bak.pl.8 ++++ b/man/man8/db2bak.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DB2BAK.PL 8 "Mar 5, 2013" ++.TH DB2BAK.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -72,6 +72,6 @@ to standard error. + .SH AUTHOR + db2bak.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/db2index.8 b/man/man8/db2index.8 +index 1e70cc9..f70ba67 100644 +--- a/man/man8/db2index.8 ++++ b/man/man8/db2index.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DB2INDEX 8 "Mar 5, 2013" ++.TH DB2INDEX 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -62,6 +62,6 @@ to standard error. + .SH AUTHOR + db2index was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/db2index.pl.8 b/man/man8/db2index.pl.8 +index 4ff9c7a..7297fae 100644 +--- a/man/man8/db2index.pl.8 ++++ b/man/man8/db2index.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DB2INDEX.PL 8 "Mar 5, 2013" ++.TH DB2INDEX.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -76,6 +76,6 @@ to standard error. + .SH AUTHOR + db2index.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/db2ldif.8 b/man/man8/db2ldif.8 +index 2a787f2..31f686b 100644 +--- a/man/man8/db2ldif.8 ++++ b/man/man8/db2ldif.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DB2LDIF 8 "Mar 5, 2013" ++.TH DB2LDIF 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -101,6 +101,6 @@ to standard error. + .SH AUTHOR + db2ldif was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. 
+diff --git a/man/man8/db2ldif.pl.8 b/man/man8/db2ldif.pl.8 +index f02d3ed..babd225 100644 +--- a/man/man8/db2ldif.pl.8 ++++ b/man/man8/db2ldif.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DB2LDIF.PL 8 "Mar 5, 2013" ++.TH DB2LDIF.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -115,6 +115,6 @@ to standard error. + .SH AUTHOR + db2ldif.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/dbverify.8 b/man/man8/dbverify.8 +index c74747a..256e0aa 100644 +--- a/man/man8/dbverify.8 ++++ b/man/man8/dbverify.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DBVERIFY 8 "Mar 5, 2013" ++.TH DBVERIFY 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -58,6 +58,6 @@ to standard error. + .SH AUTHOR + dbverify was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/dn2rdn.8 b/man/man8/dn2rdn.8 +index d6cd3cc..98d74da 100644 +--- a/man/man8/dn2rdn.8 ++++ b/man/man8/dn2rdn.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH DN2RDN 8 "Mar 5, 2013" ++.TH DN2RDN 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -53,6 +53,6 @@ to standard error. + .SH AUTHOR + dn2rdn was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/fixup-linkedattrs.pl.8 b/man/man8/fixup-linkedattrs.pl.8 +index ee484c8..1189cb8 100644 +--- a/man/man8/fixup-linkedattrs.pl.8 ++++ b/man/man8/fixup-linkedattrs.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH FIXUP-LINKEDATTRS.PL 8 "Mar 5, 2013" ++.TH FIXUP-LINKEDATTRS.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -67,6 +67,6 @@ to standard error. + .SH AUTHOR + fixup-linkedattrs.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. 
+diff --git a/man/man8/fixup-memberof.pl.8 b/man/man8/fixup-memberof.pl.8 +index 55b7503..f80043c 100644 +--- a/man/man8/fixup-memberof.pl.8 ++++ b/man/man8/fixup-memberof.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH FIXUP-MEMBEROF.PL 8 "Mar 5, 2013" ++.TH FIXUP-MEMBEROF.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -71,6 +71,6 @@ to standard error. + .SH AUTHOR + fixup-memberof.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/ldif2db.8 b/man/man8/ldif2db.8 +index a5db3ea..9e3b149 100644 +--- a/man/man8/ldif2db.8 ++++ b/man/man8/ldif2db.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH LDIF2DB 8 "Mar 5, 2013" ++.TH LDIF2DB 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -84,6 +84,6 @@ to standard error. + .SH AUTHOR + ldif2db was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/ldif2db.pl.8 b/man/man8/ldif2db.pl.8 +index cc3e316..3c02d03 100644 +--- a/man/man8/ldif2db.pl.8 ++++ b/man/man8/ldif2db.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH LDIF2DB.PL 8 "Mar 5, 2013" ++.TH LDIF2DB.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -93,6 +93,6 @@ to standard error. + .SH AUTHOR + ldif2db.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/ldif2ldap.8 b/man/man8/ldif2ldap.8 +index 117a1f7..b89f464 100644 +--- a/man/man8/ldif2ldap.8 ++++ b/man/man8/ldif2ldap.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH LDIF2LDAP 8 "Mar 5, 2013" ++.TH LDIF2LDAP 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -57,6 +57,6 @@ to standard error. + .SH AUTHOR + ldif2ldap was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. 
+diff --git a/man/man8/migrate-ds.pl.8 b/man/man8/migrate-ds.pl.8 +index 65a62d0..24eac5a 100644 +--- a/man/man8/migrate-ds.pl.8 ++++ b/man/man8/migrate-ds.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH MIGRATE-DS.PL 8 "May 18, 2008" ++.TH MIGRATE-DS.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -121,15 +121,15 @@ for migration. Changelog information will not be migrated, and replicas + will need to be reinitialized (if migrating masters or hubs). This type + of migration requires that all of your old databases have been dumped + to LDIF format, and the LDIF file must be in the default database directory +-(usually /opt/fedora\-ds/slapd\-instance/db), and the LDIF file must have ++(usually /var/lib/dirsrv/slapd\-instance/db), and the LDIF file must have + the same name as the database instance directory, with a ".ldif". For + example, if you have + .IP + .ad l + .nf +-/opt/fedora\-ds/slapd\-instance/db/userRoot/ ++/var/lib/dirsrv/slapd\-instance/db/userRoot/ + and +-/opt/fedora\-ds/slapd\-instance/db/NetscapeRoot/ ++/var/lib/dirsrv/slapd\-instance/db/NetscapeRoot/ + .na + .fi + .PP +@@ -137,9 +137,9 @@ you must first use db2ldif to export these databases to LDIF e.g. + .IP + .ad l + .nf +-cd /opt/fedora\-ds/slapd\-instance +-\&./db2ldif \fB\-n\fR userRoot \fB\-a\fR /opt/fedora\-ds/slapd\-instance/db/userRoot.ldif and +-\&./db2ldif \fB\-n\fR NetscapeRoot \fB\-a\fR /opt/fedora\-ds/slapd\-instance/db/NetscapeRoot.ldif ++cd /var/lib/dirsrv/slapd\-instance ++\&./db2ldif \fB\-n\fR userRoot \fB\-a\fR /var/lib/dirsrv/slapd\-instance/ldif/userRoot.ldif and ++\&./db2ldif \fB\-n\fR NetscapeRoot \fB\-a\fR /var/lib/dirsrv/slapd\-instance/ldif/NetscapeRoot.ldif + .fi + .na + .PP +@@ -151,9 +151,9 @@ directory on the destination machine. + .SH AUTHOR + migrate-ds.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man8/monitor.8 b/man/man8/monitor.8 +index 1cc2166..1e75fcf 100644 +--- a/man/man8/monitor.8 ++++ b/man/man8/monitor.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH MONITOR 8 "Mar 5, 2013" ++.TH MONITOR 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -62,6 +62,6 @@ to standard error. + .SH AUTHOR + monitor was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. 
+diff --git a/man/man8/ns-accountstatus.pl.8 b/man/man8/ns-accountstatus.pl.8 +index 9ffc4d3..ceba3e6 100644 +--- a/man/man8/ns-accountstatus.pl.8 ++++ b/man/man8/ns-accountstatus.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH NS-ACCOUNTSTATUS.PL 8 "Feb 8, 2016" ++.TH NS-ACCOUNTSTATUS.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -90,6 +90,6 @@ to standard error. + .SH AUTHOR + ns-accountstatus.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2016 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/ns-activate.pl.8 b/man/man8/ns-activate.pl.8 +index 4b6f46d..2d4b82c 100644 +--- a/man/man8/ns-activate.pl.8 ++++ b/man/man8/ns-activate.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH NS-ACTIVATE.PL 8 "Mar 5, 2013" ++.TH NS-ACTIVATE.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -68,6 +68,6 @@ to standard error. + .SH AUTHOR + ns-activate.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/ns-inactivate.pl.8 b/man/man8/ns-inactivate.pl.8 +index 9e8ad77..f71a63f 100644 +--- a/man/man8/ns-inactivate.pl.8 ++++ b/man/man8/ns-inactivate.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH NS-INACTIVATE.PL 8 "Mar 5, 2013" ++.TH NS-INACTIVATE.PL 8 "Mar 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -67,6 +67,6 @@ to standard error. + .SH AUTHOR + ns-inactivate.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/ns-newpwpolicy.pl.8 b/man/man8/ns-newpwpolicy.pl.8 +index 1c38748..795bdc9 100644 +--- a/man/man8/ns-newpwpolicy.pl.8 ++++ b/man/man8/ns-newpwpolicy.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH NS-NEWPWPOLICY.PL 8 "Mar 5, 2013" ++.TH NS-NEWPWPOLICY.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -74,6 +74,6 @@ to standard error. + .SH AUTHOR + ns-newpwpolicy.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. 
+diff --git a/man/man8/ns-slapd.8 b/man/man8/ns-slapd.8 +index 7c61533..96f995e 100644 +--- a/man/man8/ns-slapd.8 ++++ b/man/man8/ns-slapd.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH NS-SLAPD 8 "May 18, 2008" ++.TH NS-SLAPD 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -45,11 +45,11 @@ Specifies file where the pid of the process will be stored + .SH AUTHOR + ns-slapd was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT + Copyright \(co 2001 Sun Microsystems, Inc. Used by permission. + .br +-Copyright \(co 2008 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man8/remove-ds.pl.8 b/man/man8/remove-ds.pl.8 +index 0568ff8..5d60e47 100644 +--- a/man/man8/remove-ds.pl.8 ++++ b/man/man8/remove-ds.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH REMOVE-DS.PL 8 "Feb 13, 2009" ++.TH REMOVE-DS.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -48,9 +48,9 @@ The full name of the instance to remove (e.g. slapd\(hyexample) + .SH AUTHOR + remove-ds.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2009 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This is free software. You may redistribute copies of it under the terms of + the Directory Server license found in the LICENSE file of this +diff --git a/man/man8/restart-dirsrv.8 b/man/man8/restart-dirsrv.8 +index c8af2ce..c82ec2b 100644 +--- a/man/man8/restart-dirsrv.8 ++++ b/man/man8/restart-dirsrv.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH RESTART-DIRSRV 8 "Jun 8, 2010" ++.TH RESTART-DIRSRV 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -40,9 +40,9 @@ Sample usage: + .SH AUTHOR + restart\-dirsrv was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2010 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This is free software. 
You may redistribute copies of it under the terms of + the Directory Server license found in the LICENSE file of this +diff --git a/man/man8/restoreconfig.8 b/man/man8/restoreconfig.8 +index 18b27f5..97642b6 100644 +--- a/man/man8/restoreconfig.8 ++++ b/man/man8/restoreconfig.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH RESTORECONFIG 8 "Mar 5, 2013" ++.TH RESTORECONFIG 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -43,6 +43,6 @@ to standard error. + .SH AUTHOR + restoreconfig was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/saveconfig.8 b/man/man8/saveconfig.8 +index 8ba8a1d..1597043 100644 +--- a/man/man8/saveconfig.8 ++++ b/man/man8/saveconfig.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH SAVECONFIG 8 "Mar 5, 2013" ++.TH SAVECONFIG 8 "March 31, 2013" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -43,6 +43,6 @@ to standard error. + .SH AUTHOR + saveconfig was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/schema-reload.pl.8 b/man/man8/schema-reload.pl.8 +index 17380cf..85797f3 100644 +--- a/man/man8/schema-reload.pl.8 ++++ b/man/man8/schema-reload.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH SCHEMA-RELOAD.PL 8 "Mar 5, 2013" ++.TH SCHEMA-RELOAD.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -65,6 +65,6 @@ to standard error. + .SH AUTHOR + schema-reload.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/setup-ds.pl.8 b/man/man8/setup-ds.pl.8 +index 11e0966..b491e18 100644 +--- a/man/man8/setup-ds.pl.8 ++++ b/man/man8/setup-ds.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH SETUP-DS.PL 8 "May 18, 2008" ++.TH SETUP-DS.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -103,9 +103,9 @@ Offline \- servers \fBmust be shutdown\fR - no username or password required + .SH AUTHOR + setup-ds.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2008 Red Hat, Inc. 
++Copyright \(co 2017 Red Hat, Inc. + .br + This manual page was written by Michele Baldessari , + for the Debian project (but may be used by others). +diff --git a/man/man8/start-dirsrv.8 b/man/man8/start-dirsrv.8 +index f34da66..34f3988 100644 +--- a/man/man8/start-dirsrv.8 ++++ b/man/man8/start-dirsrv.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH START-DIRSRV 8 "Jun 8, 2010" ++.TH START-DIRSRV 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -40,9 +40,9 @@ Sample usage: + .SH AUTHOR + start\-dirsrv was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2010 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This is free software. You may redistribute copies of it under the terms of + the Directory Server license found in the LICENSE file of this +diff --git a/man/man8/status-dirsrv.8 b/man/man8/status-dirsrv.8 +index 83844b3..862c871 100644 +--- a/man/man8/status-dirsrv.8 ++++ b/man/man8/status-dirsrv.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH STATUS-DIRSRV 8 "Jan 20, 2016" ++.TH STATUS-DIRSRV 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -45,9 +45,9 @@ If a single Directory Server instance is specified, 0 is returned if the instanc + .SH AUTHOR + status\-dirsrv was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2016 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This is free software. You may redistribute copies of it under the terms of + the Directory Server license found in the LICENSE file of this +diff --git a/man/man8/stop-dirsrv.8 b/man/man8/stop-dirsrv.8 +index 08bf493..fdca0a9 100644 +--- a/man/man8/stop-dirsrv.8 ++++ b/man/man8/stop-dirsrv.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH STOP-DIRSRV 8 "Jun 8, 2010" ++.TH STOP-DIRSRV 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -40,9 +40,9 @@ Sample usage: + .SH AUTHOR + stop\-dirsrv was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2010 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + .br + This is free software. 
You may redistribute copies of it under the terms of + the Directory Server license found in the LICENSE file of this +diff --git a/man/man8/suffix2instance.8 b/man/man8/suffix2instance.8 +index 8e30f4a..0e4cf69 100644 +--- a/man/man8/suffix2instance.8 ++++ b/man/man8/suffix2instance.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH SUFFIX2INSTANCE 8 "Mar 5, 2013" ++.TH SUFFIX2INSTANCE 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -46,6 +46,6 @@ to standard error. + .SH AUTHOR + suffix2instance was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/syntax-validate.pl.8 b/man/man8/syntax-validate.pl.8 +index ece2d59..d34c49d 100644 +--- a/man/man8/syntax-validate.pl.8 ++++ b/man/man8/syntax-validate.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH SYNTAX-VALIDATE.PL 8 "Mar 5, 2013" ++.TH SYNTAX-VALIDATE.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -68,6 +68,6 @@ to standard error. + .SH AUTHOR + syntax-validate.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/upgradedb.8 b/man/man8/upgradedb.8 +index 495c722..70d1c5f 100644 +--- a/man/man8/upgradedb.8 ++++ b/man/man8/upgradedb.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH UPGRADEDB 8 "Mar 5, 2013" ++.TH UPGRADEDB 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -50,7 +50,7 @@ to standard error. + .SH AUTHOR + upgradedb was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. + +diff --git a/man/man8/upgradednformat.8 b/man/man8/upgradednformat.8 +index 3f3f58f..7513994 100644 +--- a/man/man8/upgradednformat.8 ++++ b/man/man8/upgradednformat.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH UPGRADEDNFORMAT 8 "Mar 5, 2013" ++.TH UPGRADEDNFORMAT 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -50,6 +50,6 @@ to standard error. + .SH AUTHOR + upgradednformat was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. 
++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/usn-tombstone-cleanup.pl.8 b/man/man8/usn-tombstone-cleanup.pl.8 +index f78b230..c5aabc5 100644 +--- a/man/man8/usn-tombstone-cleanup.pl.8 ++++ b/man/man8/usn-tombstone-cleanup.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH USN-TOMBSTONE-CLEANUP.PL 8 "Mar 5, 2013" ++.TH USN-TOMBSTONE-CLEANUP.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -71,6 +71,6 @@ to standard error. + .SH AUTHOR + usn-tombstone-cleanup.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/verify-db.pl.8 b/man/man8/verify-db.pl.8 +index 500b713..0de3af5 100644 +--- a/man/man8/verify-db.pl.8 ++++ b/man/man8/verify-db.pl.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH VERIFY-DB.PL 8 "Mar 5, 2013" ++.TH VERIFY-DB.PL 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -44,6 +44,6 @@ to standard error. + .SH AUTHOR + verify-db.pl was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +diff --git a/man/man8/vlvindex.8 b/man/man8/vlvindex.8 +index 154f1c2..f3e1748 100644 +--- a/man/man8/vlvindex.8 ++++ b/man/man8/vlvindex.8 +@@ -2,7 +2,7 @@ + .\" First parameter, NAME, should be all caps + .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection + .\" other parameters are allowed: see man(7), man(1) +-.TH VLVINDEX 8 "Mar 5, 2013" ++.TH VLVINDEX 8 "March 31, 2017" + .\" Please adjust this date whenever revising the manpage. + .\" + .\" Some roff macros, for reference: +@@ -53,6 +53,6 @@ to standard error. + .SH AUTHOR + vlvindex was written by the 389 Project. + .SH "REPORTING BUGS" +-Report bugs to https://fedorahosted.org/389/newticket. ++Report bugs to https://pagure.io/389-ds-base/new_issue + .SH COPYRIGHT +-Copyright \(co 2013 Red Hat, Inc. ++Copyright \(co 2017 Red Hat, Inc. +-- +2.9.3 + diff --git a/SOURCES/0020-Issue-49039-password-min-age-should-be-ignored-if-pa.patch b/SOURCES/0020-Issue-49039-password-min-age-should-be-ignored-if-pa.patch new file mode 100644 index 0000000..68aedd6 --- /dev/null +++ b/SOURCES/0020-Issue-49039-password-min-age-should-be-ignored-if-pa.patch @@ -0,0 +1,121 @@ +From 578d207cd66e97e9ff8211559c62114a961e35a8 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 28 Mar 2017 14:21:47 -0400 +Subject: [PATCH] Issue 49039 - password min age should be ignored if password + needs to be reset + +Description: Do not check the password minimum age when changing a password + if the password "must" be reset. + +https://pagure.io/389-ds-base/issue/49039 + +Reviewed by: firstyear(Thanks!) 
+--- + dirsrvtests/tests/tickets/ticket49039_test.py | 79 +++++++++++++++++++++++++++ + ldap/servers/slapd/modify.c | 4 +- + 2 files changed, 81 insertions(+), 2 deletions(-) + create mode 100644 dirsrvtests/tests/tickets/ticket49039_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket49039_test.py b/dirsrvtests/tests/tickets/ticket49039_test.py +new file mode 100644 +index 0000000..e6d4c03 +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket49039_test.py +@@ -0,0 +1,79 @@ ++import time ++import ldap ++import logging ++import pytest ++from lib389 import Entry ++from lib389._constants import * ++from lib389.properties import * ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st as topo ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++USER_DN = 'uid=user,dc=example,dc=com' ++ ++ ++def test_ticket49039(topo): ++ """Test "password must change" verses "password min age". Min age should not ++ block password update if the password was reset. ++ """ ++ ++ # Configure password policy ++ try: ++ topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'), ++ (ldap.MOD_REPLACE, 'passwordMustChange', 'on'), ++ (ldap.MOD_REPLACE, 'passwordExp', 'on'), ++ (ldap.MOD_REPLACE, 'passwordMaxAge', '86400000'), ++ (ldap.MOD_REPLACE, 'passwordMinAge', '8640000'), ++ (ldap.MOD_REPLACE, 'passwordChange', 'on')]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to set password policy: ' + str(e)) ++ ++ # Add user, bind, and set password ++ try: ++ topo.standalone.add_s(Entry((USER_DN, { ++ 'objectclass': 'top extensibleObject'.split(), ++ 'uid': 'user1', ++ 'userpassword': PASSWORD ++ }))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user: error ' + e.message['desc']) ++ assert False ++ ++ # Reset password as RootDN ++ try: ++ topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', PASSWORD)]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to bind: error ' + e.message['desc']) ++ assert False ++ ++ time.sleep(1) ++ ++ # Reset password as user ++ try: ++ topo.standalone.simple_bind_s(USER_DN, PASSWORD) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to bind: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', PASSWORD)]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to change password: error ' + e.message['desc']) ++ assert False ++ ++ log.info('Test Passed') ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c +index 4bef90a..32defae 100644 +--- a/ldap/servers/slapd/modify.c ++++ b/ldap/servers/slapd/modify.c +@@ -1326,8 +1326,8 @@ static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old + + /* check if password is within password minimum age; + error result is sent directly from check_pw_minage */ +- if ((internal_op || !pb->pb_conn->c_needpw) && +- check_pw_minage(pb, &sdn, mod->mod_bvalues) == 1) ++ if (!pb->pb_conn->c_needpw && ++ check_pw_minage(pb, &sdn, mod->mod_bvalues) == 1) + { + if (operation_is_flag_set(operation,OP_FLAG_ACTION_LOG_ACCESS)) + { +-- +2.9.3 + diff --git 
a/SOURCES/0021-fix-for-cve-2017-2668-simple-return-text-if-suffix-n.patch b/SOURCES/0021-fix-for-cve-2017-2668-simple-return-text-if-suffix-n.patch new file mode 100644 index 0000000..15f6cd2 --- /dev/null +++ b/SOURCES/0021-fix-for-cve-2017-2668-simple-return-text-if-suffix-n.patch @@ -0,0 +1,110 @@ +From ea60248d99abb8fed9f7a2b1ab7325c5523b8562 Mon Sep 17 00:00:00 2001 +From: Ludwig Krispenz +Date: Mon, 3 Apr 2017 09:32:20 +0200 +Subject: [PATCH] fix for cve 2017-2668 - simple return text if suffix not + found + +Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1436575 + +Signed-off-by: Mark Reynolds +--- + ldap/servers/slapd/defbackend.c | 75 ++--------------------------------------- + 1 file changed, 2 insertions(+), 73 deletions(-) + +diff --git a/ldap/servers/slapd/defbackend.c b/ldap/servers/slapd/defbackend.c +index 6fd74a3..6cd2c04 100644 +--- a/ldap/servers/slapd/defbackend.c ++++ b/ldap/servers/slapd/defbackend.c +@@ -166,50 +166,7 @@ defbackend_abandon( Slapi_PBlock *pb ) + } + + +-#define DEFBE_NO_SUCH_SUFFIX "No such suffix" +-/* +- * Generate a "No such suffix" return text +- * Example: +- * cn=X,dc=bogus,dc=com ==> "No such suffix (dc=bogus,dc=com)" +- * if the last rdn starts with "dc=", print all last dc= rdn's. +- * cn=X,cn=bogus ==> "No such suffix (cn=bogus)" +- * otherwise, print the very last rdn. +- * cn=X,z=bogus ==> "No such suffix (x=bogus)" +- * it is true even if it is an invalid rdn. +- * cn=X,bogus ==> "No such suffix (bogus)" +- * another example of invalid rdn. +- */ +-static void +-_defbackend_gen_returntext(char *buffer, size_t buflen, char **dns) +-{ +- int dnidx; +- int sidx; +- struct suffix_repeat { +- char *suffix; +- int size; +- } candidates[] = { +- {"dc=", 3}, /* dc could be repeated. otherwise the last rdn is used. */ +- {NULL, 0} +- }; +- PR_snprintf(buffer, buflen, "%s (", DEFBE_NO_SUCH_SUFFIX); +- for (dnidx = 0; dns[dnidx]; dnidx++) ; /* finding the last */ +- dnidx--; /* last rdn */ +- for (sidx = 0; candidates[sidx].suffix; sidx++) { +- if (!PL_strncasecmp(dns[dnidx], candidates[sidx].suffix, candidates[sidx].size)) { +- while (!PL_strncasecmp(dns[--dnidx], candidates[sidx].suffix, candidates[sidx].size)) ; +- PL_strcat(buffer, dns[++dnidx]); /* the first "dn=", e.g. 
*/ +- for (++dnidx; dns[dnidx]; dnidx++) { +- PL_strcat(buffer, ","); +- PL_strcat(buffer, dns[dnidx]); +- } +- PL_strcat(buffer, ")"); +- return; /* finished the task */ +- } +- } +- PL_strcat(buffer, dns[dnidx]); +- PL_strcat(buffer, ")"); +- return; +-} ++#define DEFBE_NO_SUCH_SUFFIX "No suffix for bind dn found" + + static int + defbackend_bind( Slapi_PBlock *pb ) +@@ -231,36 +188,8 @@ defbackend_bind( Slapi_PBlock *pb ) + slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsAnonymousBinds); + rc = SLAPI_BIND_ANONYMOUS; + } else { +- Slapi_DN *sdn = NULL; +- char *suffix = NULL; +- char **dns = NULL; +- +- if (pb->pb_op) { +- sdn = operation_get_target_spec(pb->pb_op); +- if (sdn) { +- dns = slapi_ldap_explode_dn(slapi_sdn_get_dn(sdn), 0); +- if (dns) { +- size_t dnlen = slapi_sdn_get_ndn_len(sdn); +- size_t len = dnlen + sizeof(DEFBE_NO_SUCH_SUFFIX) + 4; +- suffix = slapi_ch_malloc(len); +- if (dnlen) { +- _defbackend_gen_returntext(suffix, len, dns); +- } else { +- PR_snprintf(suffix, len, "%s", DEFBE_NO_SUCH_SUFFIX); +- } +- } +- } +- } +- if (suffix) { +- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, suffix); +- } else { +- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, DEFBE_NO_SUCH_SUFFIX); +- } ++ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, DEFBE_NO_SUCH_SUFFIX); + send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, "", 0, NULL); +- if (dns) { +- slapi_ldap_value_free(dns); +- } +- slapi_ch_free_string(&suffix); + rc = SLAPI_BIND_FAIL; + } + +-- +2.9.3 + diff --git a/SOURCES/0022-Issue-47662-CLI-args-get-removed.patch b/SOURCES/0022-Issue-47662-CLI-args-get-removed.patch new file mode 100644 index 0000000..6bd0608 --- /dev/null +++ b/SOURCES/0022-Issue-47662-CLI-args-get-removed.patch @@ -0,0 +1,63 @@ +From 3937047eee31638df068b3294aa90ef603915676 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 10 Apr 2017 14:55:55 -0400 +Subject: [PATCH] Issue 47662 - CLI args get removed + +Bug Description. Regression from previous fix. The process to check unknown + CLI options blows away the built-in arg list "$@" + +Fix Description: Make a copy of $@, and use it as needed. + +https://pagure.io/389-ds-base/issue/47662 + +Reviewed by: nhosoi(Thanks!) +--- + ldap/admin/src/scripts/db2ldif.in | 3 ++- + ldap/admin/src/scripts/ldif2db.in | 3 ++- + 2 files changed, 4 insertions(+), 2 deletions(-) + +diff --git a/ldap/admin/src/scripts/db2ldif.in b/ldap/admin/src/scripts/db2ldif.in +index 85854b3..08f30e4 100755 +--- a/ldap/admin/src/scripts/db2ldif.in ++++ b/ldap/admin/src/scripts/db2ldif.in +@@ -130,6 +130,7 @@ do + esac + done + ++ARGS=$@ + shift $(($OPTIND - 1)) + if [ $1 ] + then +@@ -156,7 +157,7 @@ fi + servid=`normalize_server_id $initfile` + . $initfile + +-ldif_file=`make_ldiffile $@` ++ldif_file=`make_ldiffile $ARGS` + rn=$? + + echo "Exported ldif file: $ldif_file" +diff --git a/ldap/admin/src/scripts/ldif2db.in b/ldap/admin/src/scripts/ldif2db.in +index f968303..20c7d46 100755 +--- a/ldap/admin/src/scripts/ldif2db.in ++++ b/ldap/admin/src/scripts/ldif2db.in +@@ -87,6 +87,7 @@ then + exit 1 + fi + ++ARGS=$@ + shift $(($OPTIND - 1)) + if [ $1 ] + then +@@ -106,7 +107,7 @@ fi + + . $initfile + +-handleopts $@ ++handleopts $ARGS + quiet=$? + if [ $quiet -eq 0 ]; then + echo importing data ... 
+-- +2.9.3 + diff --git a/SOURCES/0023-Issue-49210-Fix-regression-when-checking-is-password.patch b/SOURCES/0023-Issue-49210-Fix-regression-when-checking-is-password.patch new file mode 100644 index 0000000..850d829 --- /dev/null +++ b/SOURCES/0023-Issue-49210-Fix-regression-when-checking-is-password.patch @@ -0,0 +1,141 @@ +From 5854fc41c6620567f0356e382baec4eda1e645b2 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 5 Apr 2017 11:05:28 -0400 +Subject: [PATCH] Issue 49210 - Fix regression when checking is password min + age should be checked + +Bug Description: If a plugin makes an internal modification of userpassword + the connection structure in the pblock is null, and it was + being dereferenced. + +Fix Description: These internal operations do not need to have the password + policy checks done. They are intended to be unrestricted. + So we only need to check password policy on client connections. + The fix frist hecks if the connection structy is present, + only then it continues. + + Revised test script to include the tool: ldappasswd + +https://pagure.io/389-ds-base/issue/49210 + +Reviewed by: firstyear(Thanks!) +--- + dirsrvtests/tests/tickets/ticket49039_test.py | 62 +++++++++++++++++++++++++++ + ldap/servers/slapd/modify.c | 2 +- + 2 files changed, 63 insertions(+), 1 deletion(-) + +diff --git a/dirsrvtests/tests/tickets/ticket49039_test.py b/dirsrvtests/tests/tickets/ticket49039_test.py +index e6d4c03..f0b224c 100644 +--- a/dirsrvtests/tests/tickets/ticket49039_test.py ++++ b/dirsrvtests/tests/tickets/ticket49039_test.py +@@ -2,6 +2,7 @@ import time + import ldap + import logging + import pytest ++import os + from lib389 import Entry + from lib389._constants import * + from lib389.properties import * +@@ -9,6 +10,7 @@ from lib389.tasks import * + from lib389.utils import * + from lib389.topologies import topology_st as topo + ++ + DEBUGGING = os.getenv("DEBUGGING", default=False) + if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +@@ -19,11 +21,39 @@ log = logging.getLogger(__name__) + USER_DN = 'uid=user,dc=example,dc=com' + + ++def ssl_init(topo): ++ """ Setup TLS ++ """ ++ topo.standalone.stop() ++ # Prepare SSL but don't enable it. ++ for f in ('key3.db', 'cert8.db', 'key4.db', 'cert9.db', 'secmod.db', 'pkcs11.txt'): ++ try: ++ os.remove("%s/%s" % (topo.standalone.confdir, f)) ++ except: ++ pass ++ assert(topo.standalone.nss_ssl.reinit() is True) ++ assert(topo.standalone.nss_ssl.create_rsa_ca() is True) ++ assert(topo.standalone.nss_ssl.create_rsa_key_and_cert() is True) ++ # Start again ++ topo.standalone.start() ++ topo.standalone.rsa.create() ++ topo.standalone.config.set('nsslapd-ssl-check-hostname', 'off') ++ topo.standalone.config.set('nsslapd-secureport', '%s' % ++ SECUREPORT_STANDALONE1) ++ topo.standalone.config.set('nsslapd-security', 'on') ++ topo.standalone.restart() ++ ++ log.info("SSL setup complete\n") ++ ++ + def test_ticket49039(topo): + """Test "password must change" verses "password min age". Min age should not + block password update if the password was reset. 
+ """ + ++ # Setup SSL (for ldappasswd test) ++ ssl_init(topo) ++ + # Configure password policy + try: + topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'), +@@ -68,6 +98,38 @@ def test_ticket49039(topo): + log.fatal('Failed to change password: error ' + e.message['desc']) + assert False + ++ ################################### ++ # Make sure ldappasswd also works ++ ################################### ++ ++ # Reset password as RootDN ++ try: ++ topo.standalone.simple_bind_s(DN_DM, PASSWORD) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to bind as rootdn: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', PASSWORD)]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to bind: error ' + e.message['desc']) ++ assert False ++ ++ time.sleep(1) ++ ++ # Run ldappasswd as the User. ++ cmd = ('LDAPTLS_REQCERT=never LDAPTLS_CACERTDIR=' + topo.standalone.get_cert_dir() + ++ ' ldappasswd' + ' -h ' + topo.standalone.host + ' -Z -p 38901 -D ' + USER_DN + ++ ' -w password -a password -s password2 ' + USER_DN) ++ os.system(cmd) ++ time.sleep(1) ++ ++ try: ++ topo.standalone.simple_bind_s(USER_DN, "password2") ++ except ldap.LDAPError as e: ++ log.fatal('Failed to bind: error ' + e.message['desc']) ++ assert False ++ + log.info('Test Passed') + + +diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c +index 32defae..e23fe67 100644 +--- a/ldap/servers/slapd/modify.c ++++ b/ldap/servers/slapd/modify.c +@@ -1326,7 +1326,7 @@ static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old + + /* check if password is within password minimum age; + error result is sent directly from check_pw_minage */ +- if (!pb->pb_conn->c_needpw && ++ if (pb->pb_conn && !pb->pb_conn->c_needpw && + check_pw_minage(pb, &sdn, mod->mod_bvalues) == 1) + { + if (operation_is_flag_set(operation,OP_FLAG_ACTION_LOG_ACCESS)) +-- +2.9.3 + diff --git a/SOURCES/0024-Ticket-49209-Hang-due-to-omitted-replica-lock-releas.patch b/SOURCES/0024-Ticket-49209-Hang-due-to-omitted-replica-lock-releas.patch new file mode 100644 index 0000000..e10387c --- /dev/null +++ b/SOURCES/0024-Ticket-49209-Hang-due-to-omitted-replica-lock-releas.patch @@ -0,0 +1,42 @@ +From 765520fa7bf49f2de542d619b0fce99e13e4d53a Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Tue, 4 Apr 2017 10:44:55 +0200 +Subject: [PATCH] Ticket 49209 - Hang due to omitted replica lock release + +Bug Description: + When an operation is canceled (failure), its csn is aborted + and removed from the pending list. + If at that time the pending list is empty or the csn is not found + in that list, the cancel callback forgots to release the replica lock + +Fix Description: + Release replica lock systematically, whether cnsplRemove fails or not + +https://pagure.io/389-ds-base/issue/49209 + +Reviewed by: Mark Reynolds (thanks Mark !!) 
+ +Platforms tested: F23 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/plugins/replication/repl5_replica.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index 7beef50..5718a98 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -3662,6 +3662,7 @@ abort_csn_callback(const CSN *csn, void *data) + int rc = csnplRemove(r->min_csn_pl, csn); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "abort_csn_callback - csnplRemove failed"); ++ replica_unlock(r->repl_lock); + return; + } + } +-- +2.9.3 + diff --git a/SOURCES/0025-Ticket-49184-Overflow-in-memberof.patch b/SOURCES/0025-Ticket-49184-Overflow-in-memberof.patch new file mode 100644 index 0000000..be478e0 --- /dev/null +++ b/SOURCES/0025-Ticket-49184-Overflow-in-memberof.patch @@ -0,0 +1,299 @@ +From 710b0a6aaf1c648bc8fd33d4ab5bcc859a0ed851 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Thu, 13 Apr 2017 15:21:49 +0200 +Subject: [PATCH] Ticket 49184 - Overflow in memberof + +Bug Description: + The function memberof_call_foreach_dn can be used to retrieve ancestors of a + given entry. (ancestors are groups owning directly or indirectly a given entry). + + With the use of group cache in memberof, at the entrance of memberof_call_foreach_dn + there is an attempt to get the entry ancestors from the cache. + + Before doing so it needs to test if the cache is safe. In fact in case of + circular groups the use of the cache is disabled and lookup in the cache should not + happend. + + To know if the cache is safe it needs to access a flag (use_cache) in callback_data. + The callback_data structure is opaque at this level. So accessing it + while its structure is unknown is dangerous. + + The bug is that we may read an 'int' at an offset that overflow the actual structure. + This is just a test and should not trigger a crash. + +Fix Description: + Add a flag to call memberof_call_foreach_dn so that, that indicates if + it is valid to use the group cache. + +https://pagure.io/389-ds-base/issue/49184 + +Reviewed by: William Brown and Mark Reynolds (thanks to you !!) 
+ +Platforms tested: F23 + +Flag Day: no + +Doc impact: no +--- + dirsrvtests/tests/tickets/ticket49184_test.py | 146 ++++++++++++++++++++++++++ + ldap/servers/plugins/memberof/memberof.c | 38 ++++--- + 2 files changed, 167 insertions(+), 17 deletions(-) + create mode 100644 dirsrvtests/tests/tickets/ticket49184_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket49184_test.py b/dirsrvtests/tests/tickets/ticket49184_test.py +new file mode 100644 +index 0000000..20edfde +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket49184_test.py +@@ -0,0 +1,146 @@ ++import time ++import ldap ++import logging ++import pytest ++from lib389 import DirSrv, Entry, tools, tasks ++from lib389.tools import DirSrvTools ++from lib389._constants import * ++from lib389.properties import * ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st as topo ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++GROUP_DN_1 = ("cn=group1," + DEFAULT_SUFFIX) ++GROUP_DN_2 = ("cn=group2," + DEFAULT_SUFFIX) ++SUPER_GRP1 = ("cn=super_grp1," + DEFAULT_SUFFIX) ++SUPER_GRP2 = ("cn=super_grp2," + DEFAULT_SUFFIX) ++SUPER_GRP3 = ("cn=super_grp3," + DEFAULT_SUFFIX) ++ ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++def _add_group_with_members(topo, group_dn): ++ # Create group ++ try: ++ topo.standalone.add_s(Entry((group_dn, ++ {'objectclass': 'top groupofnames extensibleObject'.split(), ++ 'cn': 'group'}))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add group: error ' + e.message['desc']) ++ assert False ++ ++ # Add members to the group - set timeout ++ log.info('Adding members to the group...') ++ for idx in range(1, 5): ++ try: ++ MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ++ topo.standalone.modify_s(group_dn, ++ [(ldap.MOD_ADD, ++ 'member', ++ MEMBER_VAL)]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to update group: member (%s) - error: %s' % ++ (MEMBER_VAL, e.message['desc'])) ++ assert False ++ ++def _check_memberof(topo, member=None, memberof=True, group_dn=None): ++ # Check that members have memberof attribute on M1 ++ for idx in range(1, 5): ++ try: ++ USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ++ ent = topo.standalone.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)") ++ if presence_flag: ++ assert ent.hasAttr('memberof') and ent.getValue('memberof') == group_dn ++ else: ++ assert not ent.hasAttr('memberof') ++ except ldap.LDAPError as e: ++ log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.message['desc'])) ++ assert False ++ ++def _check_memberof(topo, member=None, memberof=True, group_dn=None): ++ ent = topo.standalone.getEntry(member, ldap.SCOPE_BASE, "(objectclass=*)") ++ if memberof: ++ assert group_dn ++ assert ent.hasAttr('memberof') and group_dn in ent.getValues('memberof') ++ else: ++ if ent.hasAttr('memberof'): ++ assert group_dn not in ent.getValues('memberof') ++ ++ ++def test_ticket49184(topo): ++ """Write your testcase here... ++ ++ Also, if you need any testcase initialization, ++ please, write additional fixture for that(include finalizer). 
++ """ ++ ++ topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) ++ topo.standalone.restart(timeout=10) ++ ++ # ++ # create some users and a group ++ # ++ log.info('create users and group...') ++ for idx in range(1, 5): ++ try: ++ USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ++ topo.standalone.add_s(Entry((USER_DN, ++ {'objectclass': 'top extensibleObject'.split(), ++ 'uid': 'member%d' % (idx)}))) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc'])) ++ assert False ++ ++ # add all users in GROUP_DN_1 and checks each users is memberof GROUP_DN_1 ++ _add_group_with_members(topo, GROUP_DN_1) ++ for idx in range(1, 5): ++ USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ++ _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_1 ) ++ ++ # add all users in GROUP_DN_2 and checks each users is memberof GROUP_DN_2 ++ _add_group_with_members(topo, GROUP_DN_2) ++ for idx in range(1, 5): ++ USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ++ _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_2 ) ++ ++ # add the level 2, 3 and 4 group ++ for super_grp in (SUPER_GRP1, SUPER_GRP2, SUPER_GRP3): ++ topo.standalone.add_s(Entry((super_grp, ++ {'objectclass': 'top groupofnames extensibleObject'.split(), ++ 'cn': 'super_grp'}))) ++ topo.standalone.modify_s(SUPER_GRP1, ++ [(ldap.MOD_ADD, ++ 'member', ++ GROUP_DN_1), ++ (ldap.MOD_ADD, ++ 'member', ++ GROUP_DN_2)]) ++ topo.standalone.modify_s(SUPER_GRP2, ++ [(ldap.MOD_ADD, ++ 'member', ++ GROUP_DN_1), ++ (ldap.MOD_ADD, ++ 'member', ++ GROUP_DN_2)]) ++ return ++ topo.standalone.delete_s(GROUP_DN_2) ++ for idx in range(1, 5): ++ USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ++ _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_1 ) ++ _check_memberof(topo, member=USER_DN, memberof=False, group_dn=GROUP_DN_2 ) ++ ++ if DEBUGGING: ++ # Add debugging steps(if any)... 
++ pass ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c +index 81ef092..5cd2c01 100644 +--- a/ldap/servers/plugins/memberof/memberof.c ++++ b/ldap/servers/plugins/memberof/memberof.c +@@ -159,7 +159,7 @@ static int memberof_qsort_compare(const void *a, const void *b); + static void memberof_load_array(Slapi_Value **array, Slapi_Attr *attr); + static int memberof_del_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN *sdn); + static int memberof_call_foreach_dn(Slapi_PBlock *pb, Slapi_DN *sdn, MemberOfConfig *config, +- char **types, plugin_search_entry_callback callback, void *callback_data, int *cached); ++ char **types, plugin_search_entry_callback callback, void *callback_data, int *cached, PRBool use_grp_cache); + static int memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, + Slapi_Value *memberdn); + static int memberof_is_grouping_attr(char *type, MemberOfConfig *config); +@@ -659,7 +659,7 @@ memberof_del_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN * + + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_del_dn_from_groups: Ancestors of %s\n", slapi_sdn_get_dn(sdn)); + rc = memberof_call_foreach_dn(pb, sdn, config, groupattrs, +- memberof_del_dn_type_callback, &data, &cached); ++ memberof_del_dn_type_callback, &data, &cached, PR_FALSE); + } + + return rc; +@@ -776,8 +776,8 @@ add_ancestors_cbdata(memberof_cached_value *ancestors, void *callback_data) + * could want type to be either "member" or "memberOf" depending on the case. + */ + int +-memberof_call_foreach_dn(Slapi_PBlock *pb, Slapi_DN *sdn, +- MemberOfConfig *config, char **types, plugin_search_entry_callback callback, void *callback_data, int *cached) ++memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn, ++ MemberOfConfig *config, char **types, plugin_search_entry_callback callback, void *callback_data, int *cached, PRBool use_grp_cache) + { + Slapi_PBlock *search_pb = NULL; + Slapi_DN *base_sdn = NULL; +@@ -792,9 +792,6 @@ memberof_call_foreach_dn(Slapi_PBlock *pb, Slapi_DN *sdn, + int free_it = 0; + int rc = 0; + int i = 0; +- memberof_cached_value *ht_grp = NULL; +- memberof_get_groups_data *data = (memberof_get_groups_data*) callback_data; +- const char *ndn = slapi_sdn_get_ndn(sdn); + + *cached = 0; + +@@ -802,17 +799,24 @@ memberof_call_foreach_dn(Slapi_PBlock *pb, Slapi_DN *sdn, + return (rc); + } + +- /* Here we will retrieve the ancestor of sdn. +- * The key access is the normalized sdn +- * This is done through recursive internal searches of parents +- * If the ancestors of sdn are already cached, just use +- * this value ++ /* This flags indicates memberof_call_foreach_dn is called to retrieve ancestors (groups). ++ * To improve performance, it can use a cache. (it will not in case of circular groups) ++ * When this flag is true it means no circular group are detected (so far) so we can use the cache + */ +- if (data && data->use_cache) { ++ if (use_grp_cache) { ++ /* Here we will retrieve the ancestor of sdn. 
++ * The key access is the normalized sdn ++ * This is done through recursive internal searches of parents ++ * If the ancestors of sdn are already cached, just use ++ * this value ++ */ ++ memberof_cached_value *ht_grp = NULL; ++ const char *ndn = slapi_sdn_get_ndn(sdn); ++ + ht_grp = ancestors_cache_lookup((const void *) ndn); + if (ht_grp) { + #if MEMBEROF_CACHE_DEBUG +- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp); ++ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp); + #endif + add_ancestors_cbdata(ht_grp, callback_data); + *cached = 1; +@@ -1106,7 +1110,7 @@ memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_replace_dn_from_groups: Ancestors of %s\n", slapi_sdn_get_dn(post_sdn)); + if((ret = memberof_call_foreach_dn(pb, pre_sdn, config, groupattrs, + memberof_replace_dn_type_callback, +- &data, &cached))) ++ &data, &cached, PR_FALSE))) + { + break; + } +@@ -2383,7 +2387,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_get_groups_r: Ancestors of %s\n", slapi_sdn_get_dn(member_sdn)); + #endif + rc = memberof_call_foreach_dn(NULL, member_sdn, config, config->groupattrs, +- memberof_get_groups_callback, &member_data, &cached); ++ memberof_get_groups_callback, &member_data, &cached, member_data.use_cache); + + merge_ancestors(&member_ndn_val, &member_data, data); + if (!cached && member_data.use_cache) +@@ -2578,7 +2582,7 @@ memberof_test_membership(Slapi_PBlock *pb, MemberOfConfig *config, + int cached = 0; + + return memberof_call_foreach_dn(pb, group_sdn, config, attrs, +- memberof_test_membership_callback, config, &cached); ++ memberof_test_membership_callback, config, &cached, PR_FALSE); + } + + /* +-- +2.9.3 + diff --git a/SOURCES/0026-Ticket-49196-Autotune-generates-crit-messages.patch b/SOURCES/0026-Ticket-49196-Autotune-generates-crit-messages.patch new file mode 100644 index 0000000..a2875dd --- /dev/null +++ b/SOURCES/0026-Ticket-49196-Autotune-generates-crit-messages.patch @@ -0,0 +1,42 @@ +From 8a0b4643e1119e994370089fd52721373e88bb51 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Wed, 29 Mar 2017 10:59:14 +1000 +Subject: [PATCH] Ticket 49196 - Autotune generates crit messages + +Bug Description: The cache sanity check generates critical messages. + +Fix Description: Make the sanity check generate warning messages. + +https://pagure.io/389-ds-base/issue/49196 + +Author: wibrown + +Review by: mreynolds (Thanks!) +--- + ldap/servers/slapd/back-ldbm/start.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c +index 759af8a..1ae9858 100644 +--- a/ldap/servers/slapd/back-ldbm/start.c ++++ b/ldap/servers/slapd/back-ldbm/start.c +@@ -265,12 +265,12 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + issane = util_is_cachesize_sane(&total_cache_size); + if (!issane) { + /* Right, it's time to panic */ +- slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "It is highly likely your memory configuration of all backends will EXCEED your systems memory.\n"); +- slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "In a future release this WILL prevent server start up. 
You MUST alter your configuration.\n"); +- slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Total entry cache size: %lu B; dbcache size: %lu B; available memory size: %lu B; \n", ++ slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "It is highly likely your memory configuration of all backends will EXCEED your systems memory.\n"); ++ slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "In a future release this WILL prevent server start up. You MUST alter your configuration.\n"); ++ slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Total entry cache size: %lu B; dbcache size: %lu B; available memory size: %lu B; \n", + (PRUint64)total_cache_size, (PRUint64)li->li_dbcachesize, availpages * pagesize + ); +- slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "%s\n", msg); ++ slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "%s\n", msg); + /* WB 2016 - This should be UNCOMMENTED in a future release */ + /* return SLAPI_FAIL_GENERAL; */ + } +-- +2.9.3 + diff --git a/SOURCES/0027-Issue-49221-During-an-upgrade-the-provided-localhost.patch b/SOURCES/0027-Issue-49221-During-an-upgrade-the-provided-localhost.patch new file mode 100644 index 0000000..8eb11b7 --- /dev/null +++ b/SOURCES/0027-Issue-49221-During-an-upgrade-the-provided-localhost.patch @@ -0,0 +1,36 @@ +From 4e66114109263fff6b13192e07be9bbd9e493fee Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 17 Apr 2017 17:06:19 -0400 +Subject: [PATCH 1/2] Issue 49221 - During an upgrade the provided localhost + name is ignored + +Description: If the FullMachine name, or localhost, is provided in an INF + it is ignored during the upgrade the value of nsslapd-localhost + from the current server is used instead. We should only override + the localhost value if it is missing. + +https://pagure.io/389-ds-base/issue/49221 + +Reviewed by: nhosoi(Thanks!) +--- + ldap/admin/src/scripts/DSUpdate.pm.in | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/ldap/admin/src/scripts/DSUpdate.pm.in b/ldap/admin/src/scripts/DSUpdate.pm.in +index e84a9a9..8b24b47 100644 +--- a/ldap/admin/src/scripts/DSUpdate.pm.in ++++ b/ldap/admin/src/scripts/DSUpdate.pm.in +@@ -435,7 +435,9 @@ sub initInfFromInst { + my $servid = $inst; + $servid =~ s/slapd-//; + +- $inf->{General}->{FullMachineName} = $entry->getValue("nsslapd-localhost"); ++ if (!$inf->{General}->{FullMachineName}) { ++ $inf->{General}->{FullMachineName} = $entry->getValue("nsslapd-localhost"); ++ } + $inf->{General}->{SuiteSpotUserID} = $entry->getValue("nsslapd-localuser"); + $inf->{slapd}->{ServerPort} = $entry->getValue("nsslapd-port"); + $inf->{slapd}->{ldapifilepath} = $entry->getValue("nsslapd-ldapifilepath"); +-- +2.9.3 + diff --git a/SOURCES/0028-Ticket-48864-Add-cgroup-memory-limit-detection-to-38.patch b/SOURCES/0028-Ticket-48864-Add-cgroup-memory-limit-detection-to-38.patch new file mode 100644 index 0000000..1a6d37b --- /dev/null +++ b/SOURCES/0028-Ticket-48864-Add-cgroup-memory-limit-detection-to-38.patch @@ -0,0 +1,4106 @@ +From 57b6e5afb6265363ede667ad450e267f8a803b9e Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 19 Apr 2017 13:37:10 -0400 +Subject: [PATCH] Ticket 48864 - Add cgroup memory limit detection to 389-ds + +Bug Description: Previously our memory limits only check hardware + limits and shell resource limits. However, we may be in a container + like docker or lxc, and unable to detect these limits. 
This can lead + to crash conditions or worse, especially with autosizing + on import we may have conditions where the server may not + even be able to install. + +Fix Description: Add support for cgroup memory limit detection + so that we can properly determine our resource availability regardless + of lxc, docker, systemd or others. + +https://pagure.io/389-ds-base/issue/48864 + +Author: wibrown + +Review by: mreynolds (Thanks!) +--- + Makefile.am | 7 +- + ldap/servers/plugins/acl/acl.c | 18 +- + ldap/servers/plugins/acl/acl.h | 16 - + ldap/servers/plugins/acl/aclanom.c | 8 +- + ldap/servers/plugins/dna/dna.c | 50 ++- + ldap/servers/plugins/posix-winsync/posix-winsync.c | 4 +- + ldap/servers/plugins/replication/repl.h | 17 +- + .../plugins/replication/repl5_inc_protocol.c | 2 +- + ldap/servers/plugins/replication/repl5_init.c | 2 +- + ldap/servers/plugins/replication/repl5_plugins.c | 2 +- + ldap/servers/plugins/replication/repl5_replica.c | 8 +- + ldap/servers/plugins/replication/repl5_total.c | 4 +- + ldap/servers/plugins/replication/repl_connext.c | 20 +- + ldap/servers/plugins/replication/repl_extop.c | 26 +- + ldap/servers/plugins/sync/sync_persist.c | 6 +- + ldap/servers/plugins/syntaxes/validate_task.c | 6 +- + ldap/servers/plugins/usn/usn.c | 8 +- + ldap/servers/slapd/abandon.c | 8 +- + ldap/servers/slapd/add.c | 4 +- + ldap/servers/slapd/auth.c | 18 +- + ldap/servers/slapd/back-ldbm/back-ldbm.h | 23 +- + ldap/servers/slapd/back-ldbm/cache.c | 34 +- + ldap/servers/slapd/back-ldbm/dblayer.c | 72 ++-- + ldap/servers/slapd/back-ldbm/dblayer.h | 8 - + ldap/servers/slapd/back-ldbm/import-threads.c | 2 +- + ldap/servers/slapd/back-ldbm/import.c | 12 +- + ldap/servers/slapd/back-ldbm/ldbm_config.c | 32 +- + ldap/servers/slapd/back-ldbm/ldbm_delete.c | 4 +- + .../servers/slapd/back-ldbm/ldbm_instance_config.c | 20 +- + ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 4 +- + ldap/servers/slapd/back-ldbm/ldbm_search.c | 4 +- + ldap/servers/slapd/back-ldbm/misc.c | 2 +- + ldap/servers/slapd/back-ldbm/monitor.c | 10 +- + ldap/servers/slapd/back-ldbm/perfctrs.h | 2 +- + ldap/servers/slapd/back-ldbm/start.c | 113 +++---- + ldap/servers/slapd/bind.c | 8 +- + ldap/servers/slapd/compare.c | 2 +- + ldap/servers/slapd/connection.c | 72 ++-- + ldap/servers/slapd/conntable.c | 8 +- + ldap/servers/slapd/control.c | 2 +- + ldap/servers/slapd/daemon.c | 48 +-- + ldap/servers/slapd/delete.c | 2 +- + ldap/servers/slapd/entry.c | 2 +- + ldap/servers/slapd/extendop.c | 4 +- + ldap/servers/slapd/log.c | 10 +- + ldap/servers/slapd/modify.c | 12 +- + ldap/servers/slapd/modrdn.c | 6 +- + ldap/servers/slapd/monitor.c | 8 +- + ldap/servers/slapd/operation.c | 6 +- + ldap/servers/slapd/opshared.c | 4 +- + ldap/servers/slapd/pagedresults.c | 2 +- + ldap/servers/slapd/psearch.c | 10 +- + ldap/servers/slapd/result.c | 12 +- + ldap/servers/slapd/sasl_io.c | 52 +-- + ldap/servers/slapd/saslbind.c | 2 +- + ldap/servers/slapd/search.c | 2 +- + ldap/servers/slapd/slap.h | 9 +- + ldap/servers/slapd/slapi-plugin.h | 14 +- + ldap/servers/slapd/slapi-private.h | 38 ++- + ldap/servers/slapd/slapi_pal.c | 311 +++++++++++++++++ + ldap/servers/slapd/slapi_pal.h | 62 ++++ + ldap/servers/slapd/snmp_collator.c | 2 +- + ldap/servers/slapd/unbind.c | 6 +- + ldap/servers/slapd/util.c | 376 +-------------------- + test/libslapd/spal/meminfo.c | 54 +++ + test/libslapd/test.c | 2 + + test/test_slapd.h | 5 + + 67 files changed, 870 insertions(+), 859 deletions(-) + create mode 100644 ldap/servers/slapd/slapi_pal.c + create mode 100644 
ldap/servers/slapd/slapi_pal.h + create mode 100644 test/libslapd/spal/meminfo.c + +diff --git a/Makefile.am b/Makefile.am +index 485a460..429a345 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -481,6 +481,7 @@ dist_noinst_HEADERS = \ + ldap/servers/slapd/pw_verify.h \ + ldap/servers/slapd/secerrstrs.h \ + ldap/servers/slapd/slap.h \ ++ ldap/servers/slapd/slapi_pal.h \ + ldap/servers/slapd/slapi-plugin-compat4.h \ + ldap/servers/slapd/slapi-plugin.h \ + ldap/servers/slapd/slapi-private.h \ +@@ -850,6 +851,7 @@ pkgconfig_DATA = src/pkgconfig/dirsrv.pc \ + # header files + #------------------------ + serverinc_HEADERS = ldap/servers/plugins/replication/repl-session-plugin.h \ ++ ldap/servers/slapd/slapi_pal.h \ + ldap/servers/slapd/slapi-plugin.h \ + ldap/servers/plugins/replication/winsync-plugin.h \ + src/nunc-stans/include/nunc-stans.h \ +@@ -1219,6 +1221,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ + ldap/servers/slapd/value.c \ + ldap/servers/slapd/valueset.c \ + ldap/servers/slapd/vattr.c \ ++ ldap/servers/slapd/slapi_pal.c \ + $(libavl_a_SOURCES) + + libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_INCLUDES) @db_inc@ $(SVRCORE_INCLUDES) @kerberos_inc@ @pcre_inc@ +@@ -2004,7 +2007,9 @@ test_slapd_SOURCES = test/main.c \ + test/libslapd/counters/atomic.c \ + test/libslapd/pblock/analytics.c \ + test/libslapd/pblock/v3_compat.c \ +- test/libslapd/operation/v3_compat.c ++ test/libslapd/operation/v3_compat.c \ ++ test/libslapd/spal/meminfo.c ++ + test_slapd_LDADD = libslapd.la + test_slapd_LDFLAGS = $(AM_CPPFLAGS) $(CMOCKA_LINKS) + ### WARNING: Slap.h needs cert.h, which requires the -I/lib/ldaputil!!! +diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c +index 48b8efc..561dd91 100644 +--- a/ldap/servers/plugins/acl/acl.c ++++ b/ldap/servers/plugins/acl/acl.c +@@ -276,7 +276,7 @@ acl_access_allowed( + + if ( !privateBackend && (be_readonly || slapi_config_get_readonly () )){ + slapi_log_err(loglevel, plugin_name, +- "acl_access_allowed - conn=%" NSPRIu64 " op=%d (main): Deny %s on entry(%s)" ++ "acl_access_allowed - conn=%" PRIu64 " op=%d (main): Deny %s on entry(%s)" + ": readonly backend\n", + o_connid, o_opid, + acl_access2str(access), +@@ -289,7 +289,7 @@ acl_access_allowed( + TNF_PROBE_0_DEBUG(acl_skipaccess_start,"ACL",""); + if ( acl_skip_access_check ( pb, e, access )) { + slapi_log_err(loglevel, plugin_name, +- "acl_access_allowed - conn=%" NSPRIu64 " op=%d (main): Allow %s on entry(%s)" ++ "acl_access_allowed - conn=%" PRIu64 " op=%d (main): Allow %s on entry(%s)" + ": root user\n", + o_connid, o_opid, + acl_access2str(access), +@@ -448,7 +448,7 @@ acl_access_allowed( + TNF_PROBE_0_DEBUG(acl_entry_first_touch_start,"ACL",""); + + slapi_log_err(loglevel, plugin_name, +- "acl_access_allowed - #### conn=%" NSPRIu64 " op=%d binddn=\"%s\"\n", ++ "acl_access_allowed - #### conn=%" PRIu64 " op=%d binddn=\"%s\"\n", + o_connid, o_opid, clientDn); + aclpb->aclpb_stat_total_entries++; + +@@ -776,7 +776,7 @@ print_access_control_summary( char *source, int ret_val, char *clientDn, + null_user); + if (strcasecmp(right, access_str_moddn) == 0) { + slapi_log_err(loglevel, plugin_name, "print_access_control_summary - " +- "conn=%" NSPRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) [from %s] to proxy (%s)" ++ "conn=%" PRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) [from %s] to proxy (%s)" + ": %s\n", + o_connid, o_opid, + source, +@@ -790,7 +790,7 @@ print_access_control_summary( char *source, int ret_val, char *clientDn, + + } else { + 
slapi_log_err(loglevel, plugin_name, +- "print_access_control_summary - conn=%" NSPRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) to proxy (%s)" ++ "print_access_control_summary - conn=%" PRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) to proxy (%s)" + ": %s\n", + o_connid, o_opid, + source, +@@ -805,7 +805,7 @@ print_access_control_summary( char *source, int ret_val, char *clientDn, + proxy_user = null_user; + if (strcasecmp(right, access_str_moddn) == 0) { + slapi_log_err(loglevel, plugin_name, +- "print_access_control_summary - conn=%" NSPRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) [from %s] to proxy (%s)" ++ "print_access_control_summary - conn=%" PRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) [from %s] to proxy (%s)" + ": %s\n", + o_connid, o_opid, + source, +@@ -819,7 +819,7 @@ print_access_control_summary( char *source, int ret_val, char *clientDn, + + } else { + slapi_log_err(loglevel, plugin_name, +- "print_access_control_summary - conn=%" NSPRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) to proxy (%s)" ++ "print_access_control_summary - conn=%" PRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) to proxy (%s)" + ": %s\n", + o_connid, o_opid, + source, +@@ -834,7 +834,7 @@ print_access_control_summary( char *source, int ret_val, char *clientDn, + } else { + if (strcasecmp(right, access_str_moddn) == 0) { + slapi_log_err(loglevel, plugin_name, +- "print_access_control_summary - conn=%" NSPRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) [from %s] to %s" ++ "print_access_control_summary - conn=%" PRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) [from %s] to %s" + ": %s\n", + o_connid, o_opid, + source, +@@ -848,7 +848,7 @@ print_access_control_summary( char *source, int ret_val, char *clientDn, + + } else { + slapi_log_err(loglevel, plugin_name, +- "print_access_control_summary - conn=%" NSPRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) to %s" ++ "print_access_control_summary - conn=%" PRIu64 " op=%d (%s): %s %s on entry(%s).attr(%s) to %s" + ": %s\n", + o_connid, o_opid, + source, +diff --git a/ldap/servers/plugins/acl/acl.h b/ldap/servers/plugins/acl/acl.h +index 91f5071..8b3486c 100644 +--- a/ldap/servers/plugins/acl/acl.h ++++ b/ldap/servers/plugins/acl/acl.h +@@ -20,22 +20,6 @@ + #ifndef _ACL_H_ + #define _ACL_H_ + +-/* Required to get portable printf/scanf format macros */ +-#ifdef HAVE_INTTYPES_H +-#include +- +-/* NSPR uses the print macros a bit differently than ANSI C. We +- * need to use ll for a 64-bit integer, even when a long is 64-bit. +- */ +-#undef PRIu64 +-#define PRIu64 "llu" +-#undef PRI64 +-#define PRI64 "ll" +- +-#else +-#error Need to define portable format macros such as PRIu64 +-#endif /* HAVE_INTTYPES_H */ +- + #include + #include + #include +diff --git a/ldap/servers/plugins/acl/aclanom.c b/ldap/servers/plugins/acl/aclanom.c +index 5462d87..96d0d9f 100644 +--- a/ldap/servers/plugins/acl/aclanom.c ++++ b/ldap/servers/plugins/acl/aclanom.c +@@ -523,7 +523,7 @@ aclanom_match_profile (Slapi_PBlock *pb, struct acl_pblock *aclpb, Slapi_Entry * + aci_ndn = slapi_sdn_get_ndn (acl_anom_profile->anom_targetinfo[i].anom_target); + if (access & SLAPI_ACL_MODDN) { + slapi_log_err(loglevel, plugin_name, +- "aclanom_match_profile - conn=%" NSPRIu64 " op=%d: Allow access on entry(%s).attr(%s) (from %s) to anonymous: acidn=\"%s\"\n", ++ "aclanom_match_profile - conn=%" PRIu64 " op=%d: Allow access on entry(%s).attr(%s) (from %s) to anonymous: acidn=\"%s\"\n", + o_connid, o_opid, + ndn, + attr ? 
attr:"NULL", +@@ -532,7 +532,7 @@ aclanom_match_profile (Slapi_PBlock *pb, struct acl_pblock *aclpb, Slapi_Entry * + + } else { + slapi_log_err(loglevel, plugin_name, +- "aclanom_match_profile - conn=%" NSPRIu64 " op=%d: Allow access on entry(%s).attr(%s) to anonymous: acidn=\"%s\"\n", ++ "aclanom_match_profile - conn=%" PRIu64 " op=%d: Allow access on entry(%s).attr(%s) to anonymous: acidn=\"%s\"\n", + o_connid, o_opid, + ndn, + attr ? attr:"NULL", +@@ -541,13 +541,13 @@ aclanom_match_profile (Slapi_PBlock *pb, struct acl_pblock *aclpb, Slapi_Entry * + } else { + if (access & SLAPI_ACL_MODDN) { + slapi_log_err(loglevel, plugin_name, +- "aclanom_match_profile - conn=%" NSPRIu64 " op=%d: Deny access on entry(%s).attr(%s) (from %s) to anonymous\n", ++ "aclanom_match_profile - conn=%" PRIu64 " op=%d: Deny access on entry(%s).attr(%s) (from %s) to anonymous\n", + o_connid, o_opid, + ndn, attr ? attr:"NULL" , + aclpb->aclpb_moddn_source_sdn ? slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn) : "NULL"); + } else { + slapi_log_err(loglevel, plugin_name, +- "aclanom_match_profile - conn=%" NSPRIu64 " op=%d: Deny access on entry(%s).attr(%s) to anonymous\n", ++ "aclanom_match_profile - conn=%" PRIu64 " op=%d: Deny access on entry(%s).attr(%s) to anonymous\n", + o_connid, o_opid, + ndn, attr ? attr:"NULL" ); + } +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index 34011b9..a085941 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -23,14 +23,6 @@ + #include "slapi-private.h" + #include "prclist.h" + +-/* Required to get portable printf/scanf format macros */ +-#ifdef HAVE_INTTYPES_H +-#include +- +-#else +-#error Need to define portable format macros such as PRIu64 +-#endif /* HAVE_INTTYPES_H */ +- + #include + + #define DNA_PLUGIN_SUBSYSTEM "dna-plugin" +@@ -997,7 +989,7 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply) + } + + slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, +- "dna_parse_config_entry - %s [%" NSPRIu64 "]\n", DNA_NEXTVAL, ++ "dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_NEXTVAL, + entry->nextval); + + value = slapi_entry_attr_get_charptr(e, DNA_PREFIX); +@@ -1026,7 +1018,7 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply) + } + + slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, +- "dna_parse_config_entry - %s [%" NSPRIu64 "]\n", DNA_INTERVAL, entry->interval); ++ "dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval); + #endif + + value = slapi_entry_attr_get_charptr(e, DNA_GENERATE); +@@ -1126,7 +1118,7 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply) + } + + slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, +- "dna_parse_config_entry - %s [%" NSPRIu64 "]\n", DNA_MAXVAL, ++ "dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_MAXVAL, + entry->maxval); + + /* get the global bind dn and password(if any) */ +@@ -1256,7 +1248,7 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply) + } + + slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, +- "dna_parse_config_entry - %s [%" NSPRIu64 "]\n", DNA_THRESHOLD, ++ "dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_THRESHOLD, + entry->threshold); + + value = slapi_entry_attr_get_charptr(e, DNA_RANGE_REQUEST_TIMEOUT); +@@ -1268,7 +1260,7 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply) + } + + slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, +- "dna_parse_config_entry - %s [%" NSPRIu64 "]\n", DNA_RANGE_REQUEST_TIMEOUT, ++ 
"dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_RANGE_REQUEST_TIMEOUT, + entry->timeout); + + value = slapi_entry_attr_get_charptr(e, DNA_NEXT_RANGE); +@@ -2307,7 +2299,7 @@ dna_first_free_value(struct configEntry *config_entry, + return LDAP_OPERATIONS_ERROR; + } + +- filter = slapi_ch_smprintf("(&%s(&(%s>=%" NSPRIu64 ")(%s<=%" NSPRIu64 ")))", ++ filter = slapi_ch_smprintf("(&%s(&(%s>=%" PRIu64 ")(%s<=%" PRIu64 ")))", + config_entry->filter, + config_entry->types[0], tmpval, + config_entry->types[0], config_entry->maxval); +@@ -2497,7 +2489,7 @@ static int dna_get_next_value(struct configEntry *config_entry, + if ((config_entry->maxval == -1) || + (nextval <= (config_entry->maxval + config_entry->interval))) { + /* try to set the new next value in the config entry */ +- snprintf(next_value, sizeof(next_value),"%" NSPRIu64, nextval); ++ snprintf(next_value, sizeof(next_value),"%" PRIu64, nextval); + + /* set up our replace modify operation */ + replace_val[0] = next_value; +@@ -2524,7 +2516,7 @@ static int dna_get_next_value(struct configEntry *config_entry, + + if (LDAP_SUCCESS == ret) { + slapi_ch_free_string(next_value_ret); +- *next_value_ret = slapi_ch_smprintf("%" NSPRIu64, setval); ++ *next_value_ret = slapi_ch_smprintf("%" PRIu64, setval); + if (NULL == *next_value_ret) { + ret = LDAP_OPERATIONS_ERROR; + goto done; +@@ -2609,7 +2601,7 @@ dna_update_shared_config(struct configEntry *config_entry) + + /* We store the number of remaining assigned values + * in the shared config entry. */ +- snprintf(remaining_vals, sizeof(remaining_vals),"%" NSPRIu64, ++ snprintf(remaining_vals, sizeof(remaining_vals),"%" PRIu64, + config_entry->remaining); + + /* set up our replace modify operation */ +@@ -2709,7 +2701,7 @@ dna_update_next_range(struct configEntry *config_entry, + int ret = 0; + + /* Try to set the new next range in the config entry. */ +- snprintf(nextrange_value, sizeof(nextrange_value), "%" NSPRIu64 "-%" NSPRIu64, ++ snprintf(nextrange_value, sizeof(nextrange_value), "%" PRIu64 "-%" PRIu64, + lower, upper); + + /* set up our replace modify operation */ +@@ -2778,8 +2770,8 @@ dna_activate_next_range(struct configEntry *config_entry) + int ret = 0; + + /* Setup the modify operation for the config entry */ +- snprintf(maxval_val, sizeof(maxval_val),"%" NSPRIu64, config_entry->next_range_upper); +- snprintf(nextval_val, sizeof(nextval_val),"%" NSPRIu64, config_entry->next_range_lower); ++ snprintf(maxval_val, sizeof(maxval_val),"%" PRIu64, config_entry->next_range_upper); ++ snprintf(nextval_val, sizeof(nextval_val),"%" PRIu64, config_entry->next_range_lower); + + maxval_vals[0] = maxval_val; + maxval_vals[1] = 0; +@@ -3319,7 +3311,7 @@ dna_create_valcheck_filter(struct configEntry *config_entry, PRUint64 value, cha + * - the string length of the filter in the config + * - the string length sum of all configured types + * - 23 bytes for each type (20 for the max string +- * representation of a NSPRIu64, 3 for "(=)" ++ * representation of a PRIu64, 3 for "(=)" + * - 3 bytes for the beginning and end of the filter - "(&" and ")" + * - 3 bytes to OR together multiple types (if present) - "(|" and ")" + * - the string length of the prefix (if one is configured) for each type +@@ -3556,8 +3548,8 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr) + (config_entry->remaining <= config_entry->threshold)) { + slapi_log_err(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, + "_dna_pre_op_add - Passed threshold of %" +- NSPRIu64 " remaining values " +- "for range %s. 
(%" NSPRIu64 " values remain)\n", ++ PRIu64 " remaining values " ++ "for range %s. (%" PRIu64 " values remain)\n", + config_entry->threshold, config_entry->dn, + config_entry->remaining); + dna_fix_maxval(config_entry, 0); +@@ -3828,8 +3820,8 @@ _dna_pre_op_modify(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Mods *smods, char **e + (config_entry->remaining <= config_entry->threshold)) { + slapi_log_err(SLAPI_LOG_ERR, DNA_PLUGIN_SUBSYSTEM, + "_dna_pre_op_modify - Passed threshold of %" +- NSPRIu64 " remaining values " +- "for range %s. (%" NSPRIu64 " values remain)\n", ++ PRIu64 " remaining values " ++ "for range %s. (%" PRIu64 " values remain)\n", + config_entry->threshold, config_entry->dn, + config_entry->remaining); + dna_fix_maxval(config_entry, 0); +@@ -4411,8 +4403,8 @@ static int dna_extend_exop(Slapi_PBlock *pb) + char highstr[16]; + + /* Create the exop response */ +- snprintf(lowstr, sizeof(lowstr), "%" NSPRIu64, lower); +- snprintf(highstr, sizeof(highstr), "%" NSPRIu64, upper); ++ snprintf(lowstr, sizeof(lowstr), "%" PRIu64, lower); ++ snprintf(highstr, sizeof(highstr), "%" PRIu64, upper); + range_low.bv_val = lowstr; + range_low.bv_len = strlen(range_low.bv_val); + range_high.bv_val = highstr; +@@ -4445,7 +4437,7 @@ static int dna_extend_exop(Slapi_PBlock *pb) + ber_bvfree(respdata); + + slapi_log_err(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, +- "dna_extend_exop - Released range %" NSPRIu64 "-%" NSPRIu64 ".\n", ++ "dna_extend_exop - Released range %" PRIu64 "-%" PRIu64 ".\n", + lower, upper); + } + +@@ -4588,7 +4580,7 @@ dna_release_range(char *range_dn, PRUint64 *lower, PRUint64 *upper) + *lower = *upper - release + 1; + + /* try to set the new maxval in the config entry */ +- snprintf(max_value, sizeof(max_value),"%" NSPRIu64, (*lower - 1)); ++ snprintf(max_value, sizeof(max_value),"%" PRIu64, (*lower - 1)); + + /* set up our replace modify operation */ + replace_val[0] = max_value; +diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c +index 63444e5..21e4ad0 100644 +--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c ++++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c +@@ -270,7 +270,7 @@ sync_acct_disable(void *cbdata, /* the usual domain config data */ + if (update_entry) { + slapi_entry_attr_set_ulong(update_entry, "userAccountControl", adval); + slapi_log_err(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name, +- "<-- sync_acct_disable - %s AD account [%s] - new value is [%" NSPRIu64 "]\n", ++ "<-- sync_acct_disable - %s AD account [%s] - new value is [%" PRIu64 "]\n", + (ds_is_enabled) ? "enabled" : "disabled", slapi_entry_get_dn_const(update_entry), adval); + } else { + /* iterate through the mods - if there is already a mod +@@ -326,7 +326,7 @@ sync_acct_disable(void *cbdata, /* the usual domain config data */ + mod_bval->bv_len = strlen(acctvalstr); + } + slapi_log_err(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name, +- "<-- sync_acct_disable - %s AD account [%s] - new value is [%" NSPRIu64 "]\n", ++ "<-- sync_acct_disable - %s AD account [%s] - new value is [%" PRIu64 "]\n", + (ds_is_enabled) ? 
"enabled" : "disabled", slapi_entry_get_dn_const(ad_entry), adval); + } + } +diff --git a/ldap/servers/plugins/replication/repl.h b/ldap/servers/plugins/replication/repl.h +index 89ad481..9460ca9 100644 +--- a/ldap/servers/plugins/replication/repl.h ++++ b/ldap/servers/plugins/replication/repl.h +@@ -15,21 +15,8 @@ + #ifndef _REPL_H_ + #define _REPL_H_ + +-/* Required to get portable printf/scanf format macros */ +-#ifdef HAVE_INTTYPES_H +-#include +- +-/* NSPR uses the print macros a bit differently than ANSI C. We +- * need to use ll for a 64-bit integer, even when a long is 64-bit. +- */ +-#undef PRIu64 +-#define PRIu64 "llu" +-#undef PRI64 +-#define PRI64 "ll" +- +-#else +-#error Need to define portable format macros such as PRIu64 +-#endif /* HAVE_INTTYPES_H */ ++/* Provides our int types and platform specific requirements. */ ++#include + + #include + #include +diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c +index a5ae885..36c279e 100644 +--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c ++++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c +@@ -2104,7 +2104,7 @@ repl5_inc_stop(Private_Repl_Protocol *prp) + /* Isn't listening. Do something drastic. */ + return_value = -1; + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, +- "repl5_inc_stop - %s: Protocol does not stop after %" NSPRIu64 " seconds\n", ++ "repl5_inc_stop - %s: Protocol does not stop after %" PRIu64 " seconds\n", + agmt_get_long_name(prp->agmt), timeout); + } + else +diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c +index 9549dcf..edffb84 100644 +--- a/ldap/servers/plugins/replication/repl5_init.c ++++ b/ldap/servers/plugins/replication/repl5_init.c +@@ -208,7 +208,7 @@ get_repl_session_id (Slapi_PBlock *pb, char *idstr, CSN **csn) + /* Avoid "Connection is NULL and hence cannot access SLAPI_CONN_ID" */ + if (opid) { + slapi_pblock_get (pb, SLAPI_CONN_ID, &connid); +- snprintf (idstr, REPL_SESSION_ID_SIZE, "conn=%" NSPRIu64 " op=%d", ++ snprintf (idstr, REPL_SESSION_ID_SIZE, "conn=%" PRIu64 " op=%d", + connid, opid); + } + +diff --git a/ldap/servers/plugins/replication/repl5_plugins.c b/ldap/servers/plugins/replication/repl5_plugins.c +index 357c093..ebcc230 100644 +--- a/ldap/servers/plugins/replication/repl5_plugins.c ++++ b/ldap/servers/plugins/replication/repl5_plugins.c +@@ -1335,7 +1335,7 @@ process_postop (Slapi_PBlock *pb) + { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, + "process_postop - Failed to apply update (%s) error (%d). 
" +- "Aborting replication session(conn=%" NSPRIu64 " op=%d)\n", ++ "Aborting replication session(conn=%" PRIu64 " op=%d)\n", + csn_as_string(opcsn, PR_FALSE, csn_str), retval, + connid, opid); + /* +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index 5718a98..a106f8b 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -596,7 +596,7 @@ replica_get_exclusive_access(Replica *r, PRBool *isInc, PRUint64 connid, int opi + + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "replica_get_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + "Replica in use locking_purl=%s\n", + connid, opid, + slapi_sdn_get_dn(r->repl_root), +@@ -620,7 +620,7 @@ replica_get_exclusive_access(Replica *r, PRBool *isInc, PRUint64 connid, int opi + { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "replica_get_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": Acquired replica\n", ++ "conn=%" PRIu64 " op=%d repl=\"%s\": Acquired replica\n", + connid, opid, + slapi_sdn_get_dn(r->repl_root)); + r->repl_state_flags |= REPLICA_IN_USE; +@@ -664,13 +664,13 @@ replica_relinquish_exclusive_access(Replica *r, PRUint64 connid, int opid) + { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "replica_relinquish_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + "Replica not in use\n", + connid, opid, slapi_sdn_get_dn(r->repl_root)); + } else { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "replica_relinquish_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + "Released replica held by locking_purl=%s\n", + connid, opid, + slapi_sdn_get_dn(r->repl_root), r->locking_purl); +diff --git a/ldap/servers/plugins/replication/repl5_total.c b/ldap/servers/plugins/replication/repl5_total.c +index af570a8..064a099 100644 +--- a/ldap/servers/plugins/replication/repl5_total.c ++++ b/ldap/servers/plugins/replication/repl5_total.c +@@ -853,7 +853,7 @@ multimaster_extop_NSDS50ReplicationEntry(Slapi_PBlock *pb) + const char *dn = slapi_entry_get_dn_const(e); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_NSDS50ReplicationEntry - " +- "Error %d: could not import entry dn %s for total update operation conn=%" NSPRIu64 " op=%d\n", ++ "Error %d: could not import entry dn %s for total update operation conn=%" PRIu64 " op=%d\n", + rc, dn, connid, opid); + rc = -1; + } +@@ -864,7 +864,7 @@ multimaster_extop_NSDS50ReplicationEntry(Slapi_PBlock *pb) + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_NSDS50ReplicationEntry - " + "Error %d: could not decode the total update extop " +- "for total update operation conn=%" NSPRIu64 " op=%d\n", ++ "for total update operation conn=%" PRIu64 " op=%d\n", + rc, connid, opid); + } + +diff --git a/ldap/servers/plugins/replication/repl_connext.c b/ldap/servers/plugins/replication/repl_connext.c +index 29dc2a7..ba0fa15 100644 +--- a/ldap/servers/plugins/replication/repl_connext.c ++++ b/ldap/servers/plugins/replication/repl_connext.c +@@ -84,7 +84,7 @@ void consumer_connection_extension_destructor (void *ext, void *object, void *pa + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_destructor - " + "Aborting total update in progress for replicated " +- "area %s connid=%" NSPRIu64 "\n", slapi_sdn_get_dn(repl_root_sdn), 
connid); ++ "area %s connid=%" PRIu64 "\n", slapi_sdn_get_dn(repl_root_sdn), connid); + slapi_stop_bulk_import(pb); + } + else +@@ -156,7 +156,7 @@ consumer_connection_extension_acquire_exclusive_access(void* conn, PRUint64 conn + ret = connext; + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_acquire_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Acquired consumer connection extension\n", ++ "conn=%" PRIu64 " op=%d Acquired consumer connection extension\n", + connid, opid); + } + else if (opid == connext->in_use_opid) +@@ -164,14 +164,14 @@ consumer_connection_extension_acquire_exclusive_access(void* conn, PRUint64 conn + ret = connext; + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_acquire_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Reacquired consumer connection extension\n", ++ "conn=%" PRIu64 " op=%d Reacquired consumer connection extension\n", + connid, opid); + } + else + { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_acquire_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Could not acquire consumer connection extension; it is in use by op=%d\n", ++ "conn=%" PRIu64 " op=%d Could not acquire consumer connection extension; it is in use by op=%d\n", + connid, opid, connext->in_use_opid); + } + +@@ -182,7 +182,7 @@ consumer_connection_extension_acquire_exclusive_access(void* conn, PRUint64 conn + { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_acquire_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Could not acquire consumer extension, it is NULL!\n", ++ "conn=%" PRIu64 " op=%d Could not acquire consumer extension, it is NULL!\n", + connid, opid); + } + +@@ -221,7 +221,7 @@ consumer_connection_extension_relinquish_exclusive_access(void* conn, PRUint64 c + { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_relinquish_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Consumer connection extension is not in use\n", ++ "conn=%" PRIu64 " op=%d Consumer connection extension is not in use\n", + connid, opid); + ret = 2; + } +@@ -230,7 +230,7 @@ consumer_connection_extension_relinquish_exclusive_access(void* conn, PRUint64 c + /* step 4, relinquish it (normal) */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_relinquish_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Relinquishing consumer connection extension\n", ++ "conn=%" PRIu64 " op=%d Relinquishing consumer connection extension\n", + connid, opid); + connext->in_use_opid = -1; + ret = 1; +@@ -240,7 +240,7 @@ consumer_connection_extension_relinquish_exclusive_access(void* conn, PRUint64 c + /* step 4, relinquish it (forced) */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_relinquish_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Forced to relinquish consumer connection extension held by op=%d\n", ++ "conn=%" PRIu64 " op=%d Forced to relinquish consumer connection extension held by op=%d\n", + connid, opid, connext->in_use_opid); + connext->in_use_opid = -1; + ret = 1; +@@ -249,7 +249,7 @@ consumer_connection_extension_relinquish_exclusive_access(void* conn, PRUint64 c + { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_relinquish_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Not relinquishing consumer connection extension, it is held by op=%d!\n", ++ "conn=%" PRIu64 " op=%d Not relinquishing consumer connection extension, it is held by 
op=%d!\n", + connid, opid, connext->in_use_opid); + } + +@@ -260,7 +260,7 @@ consumer_connection_extension_relinquish_exclusive_access(void* conn, PRUint64 c + { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "consumer_connection_extension_relinquish_exclusive_access - " +- "conn=%" NSPRIu64 " op=%d Could not relinquish consumer extension, it is NULL!\n", ++ "conn=%" PRIu64 " op=%d Could not relinquish consumer extension, it is NULL!\n", + connid, opid); + } + +diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c +index 80580f9..412caec 100644 +--- a/ldap/servers/plugins/replication/repl_extop.c ++++ b/ldap/servers/plugins/replication/repl_extop.c +@@ -668,7 +668,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + connext->repl_protocol_version = REPL_PROTOCOL_50_INCREMENTAL; + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": Begin incremental protocol\n", ++ "conn=%" PRIu64 " op=%d repl=\"%s\": Begin incremental protocol\n", + connid, opid, repl_root); + isInc = PR_TRUE; + } +@@ -695,7 +695,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + } + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": Begin total protocol\n", ++ "conn=%" PRIu64 " op=%d repl=\"%s\": Begin total protocol\n", + connid, opid, repl_root); + isInc = PR_FALSE; + } +@@ -705,7 +705,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + connext->repl_protocol_version = REPL_PROTOCOL_50_INCREMENTAL; + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": Begin 7.1 incremental protocol\n", ++ "conn=%" PRIu64 " op=%d repl=\"%s\": Begin 7.1 incremental protocol\n", + connid, opid, repl_root); + isInc = PR_TRUE; + } +@@ -718,7 +718,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + } + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": Begin 7.1 total protocol\n", ++ "conn=%" PRIu64 " op=%d repl=\"%s\": Begin 7.1 total protocol\n", + connid, opid, repl_root); + isInc = PR_FALSE; + } +@@ -741,7 +741,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d replica=\"%s\": " ++ "conn=%" PRIu64 " op=%d replica=\"%s\": " + "Replica is being configured: try again later\n", + connid, opid, repl_root); + response = NSDS50_REPL_REPLICA_BUSY; +@@ -814,7 +814,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + "Excessive clock skew from supplier RUV\n", + connid, opid, repl_root); + response = NSDS50_REPL_EXCESSIVE_CLOCK_SKEW; +@@ -852,7 +852,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + if (check_replica_id_uniqueness(replica, supplier_ruv) != 0){ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + 
"Replica has same replicaID %d as supplier\n", + connid, opid, repl_root, replica_get_rid(replica)); + response = NSDS50_REPL_REPLICAID_ERROR; +@@ -865,7 +865,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + * the session's conn id and op id to identify the the supplier. + */ + /* junkrc = ruv_get_first_id_and_purl(supplier_ruv, &junkrid, &locking_purl); */ +- snprintf(locking_session, sizeof(locking_session), "conn=%" NSPRIu64 " id=%d", ++ snprintf(locking_session, sizeof(locking_session), "conn=%" PRIu64 " id=%d", + connid, opid); + locking_purl = &locking_session[0]; + if (replica_get_exclusive_access(replica, &isInc, connid, opid, +@@ -892,7 +892,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + int max = 480 * 5; + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + "374 - Starting sleep: connext->repl_protocol_version == %d\n", + connid, opid, repl_root, connext->repl_protocol_version); + +@@ -902,7 +902,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + "374 - Finished sleep: connext->repl_protocol_version == %d\n", + connid, opid, repl_root, connext->repl_protocol_version); + } +@@ -997,7 +997,7 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) + response = NSDS50_REPL_INTERNAL_ERROR; + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + "Unexpected update protocol received: %d. " + "Expected incremental or total.\n", + connid, opid, repl_root, connext->repl_protocol_version); +@@ -1039,7 +1039,7 @@ send_response: + slapi_log_err (resp_log_level, + repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d replica=\"%s\": " ++ "conn=%" PRIu64 " op=%d replica=\"%s\": " + "Unable to acquire replica: error: %s%s\n", + connid, opid, + (replica ? slapi_sdn_get_dn(replica_get_root(replica)) : "unknown"), +@@ -1092,7 +1092,7 @@ send_response: + slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "multimaster_extop_StartNSDS50ReplicationRequest - " +- "conn=%" NSPRIu64 " op=%d repl=\"%s\": " ++ "conn=%" PRIu64 " op=%d repl=\"%s\": " + "%s: response=%d rc=%d\n", + connid, opid, repl_root, + is90 ? 
"StartNSDS90ReplicationRequest" : +diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c +index 667a529..bd856cb 100644 +--- a/ldap/servers/plugins/sync/sync_persist.c ++++ b/ldap/servers/plugins/sync/sync_persist.c +@@ -548,14 +548,14 @@ sync_send_results( void *arg ) + slapi_pblock_get(req->req_pblock, SLAPI_CONNECTION, &conn); + if (NULL == conn) { + slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, +- "sync_send_results - conn=%" NSPRIu64 " op=%d Null connection - aborted\n", ++ "sync_send_results - conn=%" PRIu64 " op=%d Null connection - aborted\n", + connid, opid); + goto done; + } + conn_acq_flag = sync_acquire_connection (conn); + if (conn_acq_flag) { + slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, +- "sync_send_results - conn=%" NSPRIu64 " op=%d Could not acquire the connection - aborted\n", ++ "sync_send_results - conn=%" PRIu64 " op=%d Could not acquire the connection - aborted\n", + connid, opid); + goto done; + } +@@ -566,7 +566,7 @@ sync_send_results( void *arg ) + /* Check for an abandoned operation */ + if ( op == NULL || slapi_is_operation_abandoned( op ) ) { + slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, +- "sync_send_results - conn=%" NSPRIu64 " op=%d Operation no longer active - terminating\n", ++ "sync_send_results - conn=%" PRIu64 " op=%d Operation no longer active - terminating\n", + connid, opid); + break; + } +diff --git a/ldap/servers/plugins/syntaxes/validate_task.c b/ldap/servers/plugins/syntaxes/validate_task.c +index eae2d2a..c051573 100644 +--- a/ldap/servers/plugins/syntaxes/validate_task.c ++++ b/ldap/servers/plugins/syntaxes/validate_task.c +@@ -201,12 +201,12 @@ syntax_validate_task_thread(void *arg) + slapi_pblock_destroy(search_pb); + + /* Log finished message. */ +- slapi_task_log_notice(task, "Syntax validate task complete. Found %" NSPRIu64 ++ slapi_task_log_notice(task, "Syntax validate task complete. Found %" PRIu64 + " invalid entries.\n", slapi_counter_get_value(td->invalid_entries)); +- slapi_task_log_status(task, "Syntax validate task complete. Found %" NSPRIu64 ++ slapi_task_log_status(task, "Syntax validate task complete. Found %" PRIu64 + " invalid entries.\n", slapi_counter_get_value(td->invalid_entries)); + slapi_log_err(SLAPI_LOG_ERR, SYNTAX_PLUGIN_SUBSYSTEM, "syntax_validate_task_thread - Complete." 
+- " Found %" NSPRIu64 " invalid entries.\n", ++ " Found %" PRIu64 " invalid entries.\n", + slapi_counter_get_value(td->invalid_entries)); + slapi_task_inc_progress(task); + +diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c +index 5e67e0a..54ebc31 100644 +--- a/ldap/servers/plugins/usn/usn.c ++++ b/ldap/servers/plugins/usn/usn.c +@@ -320,7 +320,7 @@ _usn_add_next_usn(Slapi_Entry *e, Slapi_Backend *be) + "--> _usn_add_next_usn\n"); + + /* add next USN to the entry; "be" contains the usn counter */ +- usn_berval.bv_val = slapi_ch_smprintf("%" NSPRIu64, ++ usn_berval.bv_val = slapi_ch_smprintf("%" PRIu64, + slapi_counter_get_value(be->be_usn_counter)); + usn_berval.bv_len = strlen(usn_berval.bv_val); + slapi_entry_attr_find(e, SLAPI_ATTR_ENTRYUSN, &attr); +@@ -360,7 +360,7 @@ _usn_mod_next_usn(LDAPMod ***mods, Slapi_Backend *be) + + /* add next USN to the mods; "be" contains the usn counter */ + usn_berval.bv_val = counter_buf; +- snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, ++ snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" PRIu64, + slapi_counter_get_value(be->be_usn_counter)); + usn_berval.bv_len = strlen(usn_berval.bv_val); + bvals[0] = &usn_berval; +@@ -681,7 +681,7 @@ usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, + /* get a next USN counter from be_usn_counter; + * then minus 1 from it (except if be_usn_counter has value 0) */ + if (slapi_counter_get_value(be->be_usn_counter)) { +- snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, ++ snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" PRIu64, + slapi_counter_get_value(be->be_usn_counter)-1); + } else { + snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "-1"); +@@ -704,7 +704,7 @@ usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, + /* get a next USN counter from be_usn_counter; + * then minus 1 from it (except if be_usn_counter has value 0) */ + if (slapi_counter_get_value(be->be_usn_counter)) { +- snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64, ++ snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" PRIu64, + slapi_counter_get_value(be->be_usn_counter)-1); + } else { + snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "-1"); +diff --git a/ldap/servers/slapd/abandon.c b/ldap/servers/slapd/abandon.c +index 18ff8ce..0485006 100644 +--- a/ldap/servers/slapd/abandon.c ++++ b/ldap/servers/slapd/abandon.c +@@ -119,19 +119,19 @@ do_abandon( Slapi_PBlock *pb ) + } + + if ( 0 == pagedresults_free_one_msgid_nolock(pb->pb_conn, id) ) { +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 + " op=%d ABANDON targetop=Simple Paged Results msgid=%d\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, id ); + } else if ( NULL == o ) { +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d ABANDON" ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d ABANDON" + " targetop=NOTFOUND msgid=%d\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, id ); + } else if ( suppressed_by_plugin ) { +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d ABANDON" ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d ABANDON" + " targetop=SUPPRESSED-BY-PLUGIN msgid=%d\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, id ); + } else { +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d ABANDON" ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d ABANDON" + " targetop=%d msgid=%d nentries=%d etime=%ld\n", + 
pb->pb_conn->c_connid, pb->pb_op->o_opid, o->o_opid, id, + o->o_results.r.r_search.nentries, current_time() - o->o_time ); +diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c +index 1b994a0..9c4001e 100644 +--- a/ldap/servers/slapd/add.c ++++ b/ldap/servers/slapd/add.c +@@ -168,7 +168,7 @@ do_add( Slapi_PBlock *pb ) + if (( rc = slapi_entry_add_values( e, normtype, vals )) + != LDAP_SUCCESS ) { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d ADD dn=\"%s\", add values for type %s failed\n", ++ "conn=%" PRIu64 " op=%d ADD dn=\"%s\", add values for type %s failed\n", + pb->pb_conn->c_connid, operation->o_opid, + slapi_entry_get_dn_const(e), normtype ); + send_ldap_result( pb, rc, NULL, NULL, 0, NULL ); +@@ -460,7 +460,7 @@ static void op_shared_add (Slapi_PBlock *pb) + + if ( !internal_op ) + { +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d ADD dn=\"%s\"%s\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d ADD dn=\"%s\"%s\n", + pb->pb_conn->c_connid, + operation->o_opid, + slapi_entry_get_dn_const(e), +diff --git a/ldap/servers/slapd/auth.c b/ldap/servers/slapd/auth.c +index c787dd4..da1b586 100644 +--- a/ldap/servers/slapd/auth.c ++++ b/ldap/servers/slapd/auth.c +@@ -366,7 +366,7 @@ handle_bad_certificate (void* clientData, PRFileDesc *prfd) + char* subject = subject_of (clientCert); + char* issuer = issuer_of (clientCert); + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " " SLAPI_COMPONENT_NAME_NSPR " error %i (%s); unauthenticated client %s; issuer %s\n", ++ "conn=%" PRIu64 " " SLAPI_COMPONENT_NAME_NSPR " error %i (%s); unauthenticated client %s; issuer %s\n", + conn->c_connid, errorCode, slapd_pr_strerror(errorCode), + subject ? escape_string( subject, sbuf ) : "NULL", + issuer ? escape_string( issuer, ibuf ) : "NULL" ); +@@ -402,7 +402,7 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) + if ( (slapd_ssl_getChannelInfo (prfd, &channelInfo, sizeof(channelInfo))) != SECSuccess ) { + PRErrorCode errorCode = PR_GetError(); + slapi_log_access (LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " SSL failed to obtain channel info; " ++ "conn=%" PRIu64 " SSL failed to obtain channel info; " + SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n", + conn->c_connid, errorCode, slapd_pr_strerror(errorCode)); + goto done; +@@ -411,7 +411,7 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) + != SECSuccess) { + PRErrorCode errorCode = PR_GetError(); + slapi_log_access (LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " SSL failed to obtain cipher info; " ++ "conn=%" PRIu64 " SSL failed to obtain cipher info; " + SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n", + conn->c_connid, errorCode, slapd_pr_strerror(errorCode)); + goto done; +@@ -432,14 +432,14 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) + + if (config_get_SSLclientAuth() == SLAPD_SSLCLIENTAUTH_OFF ) { + (void) slapi_getSSLVersion_str(channelInfo.protocolVersion, sslversion, sizeof(sslversion)); +- slapi_log_access (LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " %s %i-bit %s\n", ++ slapi_log_access (LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n", + conn->c_connid, + sslversion, keySize, cipher ? cipher : "NULL" ); + goto done; + } + if (clientCert == NULL) { + (void) slapi_getSSLVersion_str(channelInfo.protocolVersion, sslversion, sizeof(sslversion)); +- slapi_log_access (LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " %s %i-bit %s\n", ++ slapi_log_access (LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n", + conn->c_connid, + sslversion, keySize, cipher ? 
cipher : "NULL" ); + } else { +@@ -448,7 +448,7 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) + (void) slapi_getSSLVersion_str(channelInfo.protocolVersion, + sslversion, sizeof(sslversion)); + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " %s %i-bit %s; missing subject\n", ++ "conn=%" PRIu64 " %s %i-bit %s; missing subject\n", + conn->c_connid, + sslversion, keySize, cipher ? cipher : "NULL"); + goto done; +@@ -459,7 +459,7 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) + (void) slapi_getSSLVersion_str(channelInfo.protocolVersion, + sslversion, sizeof(sslversion)); + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " %s %i-bit %s; client %s; issuer %s\n", ++ "conn=%" PRIu64 " %s %i-bit %s; client %s; issuer %s\n", + conn->c_connid, + sslversion, keySize, cipher ? cipher : "NULL", + subject ? escape_string( subject, sbuf ) : "NULL", +@@ -503,14 +503,14 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) + (void) slapi_getSSLVersion_str(channelInfo.protocolVersion, + sslversion, sizeof(sslversion)); + slapi_log_access (LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " %s client bound as %s\n", ++ "conn=%" PRIu64 " %s client bound as %s\n", + conn->c_connid, + sslversion, clientDN); + } else if (clientCert != NULL) { + (void) slapi_getSSLVersion_str(channelInfo.protocolVersion, + sslversion, sizeof(sslversion)); + slapi_log_access (LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " %s failed to map client " ++ "conn=%" PRIu64 " %s failed to map client " + "certificate to LDAP DN (%s)\n", + conn->c_connid, + sslversion, extraErrorMsg); +diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h +index a5fc540..0bb15e3 100644 +--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h ++++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h +@@ -28,21 +28,8 @@ + #endif + #endif + +-/* Required to get portable printf/scanf format macros */ +-#ifdef HAVE_INTTYPES_H +-#include +- +-/* NSPR uses the print macros a bit differently than ANSI C. We +- * need to use ll for a 64-bit integer, even when a long is 64-bit. +- */ +-#undef PRIu64 +-#define PRIu64 "llu" +-#undef PRI64 +-#define PRI64 "ll" +- +-#else +-#error Need to define portable format macros such as PRIu64 +-#endif /* HAVE_INTTYPES_H */ ++/* Provides our int types and platform specific requirements. 
*/ ++#include + + /* A bunch of random system headers taken from all the source files, no source file should #include + any system headers now */ +@@ -162,11 +149,11 @@ typedef unsigned short u_int16_t; + #define DBVERSION_FILENAME "DBVERSION" + /* 0 here means to let the autotuning reset the value on first run */ + /* cache can't get any smaller than this (in bytes) */ +-#define MINCACHESIZE (size_t)512000 +-#define DEFAULT_CACHE_SIZE (size_t)0 ++#define MINCACHESIZE (uint64_t)512000 ++#define DEFAULT_CACHE_SIZE (uint64_t)0 + #define DEFAULT_CACHE_SIZE_STR "0" + #define DEFAULT_CACHE_ENTRIES -1 /* no limit */ +-#define DEFAULT_DNCACHE_SIZE (size_t)16777216 ++#define DEFAULT_DNCACHE_SIZE (uint64_t)16777216 + #define DEFAULT_DNCACHE_SIZE_STR "16777216" + #define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */ + #define DEFAULT_DBCACHE_SIZE 33554432 +diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c +index ade2240..0f0cf3b 100644 +--- a/ldap/servers/slapd/back-ldbm/cache.c ++++ b/ldap/servers/slapd/back-ldbm/cache.c +@@ -649,7 +649,7 @@ void cache_set_max_size(struct cache *cache, size_t bytes, int type) + } + } + +-static void entrycache_set_max_size(struct cache *cache, size_t bytes) ++static void entrycache_set_max_size(struct cache *cache, uint64_t bytes) + { + struct backentry *eflush = NULL; + struct backentry *eflushtemp = NULL; +@@ -659,16 +659,17 @@ static void entrycache_set_max_size(struct cache *cache, size_t bytes) + * to happen. In that case, suppress this warning. + */ + if (bytes > 0) { +- slapi_log_err(SLAPI_LOG_WARNING, "entrycache_set_max_size", "Minimum cache size is %lu -- rounding up\n", MINCACHESIZE); ++ slapi_log_err(SLAPI_LOG_WARNING, "entrycache_set_max_size", "Minimum cache size is %"PRIu64" -- rounding up\n", MINCACHESIZE); + } + bytes = MINCACHESIZE; + } + cache_lock(cache); + cache->c_maxsize = bytes; +- LOG("entry cache size set to %lu\n", bytes); ++ LOG("entry cache size set to %"PRIu64"\n", bytes); + /* check for full cache, and clear out if necessary */ +- if (CACHE_FULL(cache)) ++ if (CACHE_FULL(cache)) { + eflush = entrycache_flush(cache); ++ } + while (eflush) + { + eflushtemp = BACK_LRU_NEXT(eflush, struct backentry *); +@@ -686,12 +687,11 @@ static void entrycache_set_max_size(struct cache *cache, size_t bytes) + /* This may already have been called by one of the functions in + * ldbm_instance_config + */ +- if (! 
util_is_cachesize_sane(&bytes)) { +- slapi_log_err(SLAPI_LOG_WARNING, +- "entrycache_set_max_size", "Possible CONFIGURATION ERROR -- cachesize " +- "(%lu) may be configured to use more than the available " +- "physical memory.\n", bytes); ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ if (util_is_cachesize_sane(mi, &bytes) != UTIL_CACHESIZE_VALID) { ++ slapi_log_err(SLAPI_LOG_WARNING, "entrycache_set_max_size", "Cachesize (%"PRIu64") may use more than the available physical memory.\n", bytes); + } ++ spal_meminfo_destroy(mi); + } + + void cache_set_max_entries(struct cache *cache, long entries) +@@ -1597,7 +1597,7 @@ dn_same_id(const void *bdn, const void *k) + } + + static void +-dncache_set_max_size(struct cache *cache, size_t bytes) ++dncache_set_max_size(struct cache *cache, uint64_t bytes) + { + struct backdn *dnflush = NULL; + struct backdn *dnflushtemp = NULL; +@@ -1609,12 +1609,12 @@ dncache_set_max_size(struct cache *cache, size_t bytes) + if (bytes < MINCACHESIZE) { + bytes = MINCACHESIZE; + slapi_log_err(SLAPI_LOG_WARNING, +- "dncache_set_max_size", "Minimum cache size is %lu -- rounding up\n", ++ "dncache_set_max_size", "Minimum cache size is %"PRIu64" -- rounding up\n", + MINCACHESIZE); + } + cache_lock(cache); + cache->c_maxsize = bytes; +- LOG("entry cache size set to %lu\n", bytes); ++ LOG("entry cache size set to %"PRIu64"\n", bytes); + /* check for full cache, and clear out if necessary */ + if (CACHE_FULL(cache)) { + dnflush = dncache_flush(cache); +@@ -1636,12 +1636,12 @@ dncache_set_max_size(struct cache *cache, size_t bytes) + /* This may already have been called by one of the functions in + * ldbm_instance_config + */ +- if (! util_is_cachesize_sane(&bytes)) { +- slapi_log_err(SLAPI_LOG_WARNING, +- "dncache_set_max_size", "Possible CONFIGURATION ERROR -- cachesize " +- "(%lu) may be configured to use more than the available " +- "physical memory.\n", bytes); ++ ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ if (util_is_cachesize_sane(mi, &bytes) != UTIL_CACHESIZE_VALID) { ++ slapi_log_err(SLAPI_LOG_WARNING, "dncache_set_max_size", "Cachesize (%"PRIu64") may use more than the available physical memory.\n", bytes); + } ++ spal_meminfo_destroy(mi); + } + + /* remove a dn from the cache */ +diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c +index 507a3cc..3c1fbb0 100644 +--- a/ldap/servers/slapd/back-ldbm/dblayer.c ++++ b/ldap/servers/slapd/back-ldbm/dblayer.c +@@ -1386,14 +1386,16 @@ dblayer_start(struct ldbminfo *li, int dbmode) + + /* Sanity check on cache size on platforms which allow us to figure out + * the available phys mem */ +- if (!util_is_cachesize_sane(&(priv->dblayer_cachesize))) { ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ if (!util_is_cachesize_sane(mi, &(priv->dblayer_cachesize))) { + /* Oops---looks like the admin misconfigured, let's warn them */ + slapi_log_err(SLAPI_LOG_WARNING,"dblayer_start", "Likely CONFIGURATION ERROR -" + "dbcachesize is configured to use more than the available " +- "physical memory, decreased to the largest available size (%lu bytes).\n", ++ "physical memory, decreased to the largest available size (%"PRIu64" bytes).\n", + priv->dblayer_cachesize); + li->li_dbcachesize = priv->dblayer_cachesize; + } ++ spal_meminfo_destroy(mi); + + /* fill in DB_ENV stuff from the common configuration */ + return_value = dblayer_make_env(&pEnv, li); +@@ -1690,9 +1692,6 @@ dblayer_start(struct ldbminfo *li, int dbmode) + * nsslapd-import-cache-autosize: 0 + * get the 
nsslapd-import-cachesize. + * Calculate the memory size left after allocating the import cache size. +- * If the size is less than the hard limit, it issues an error and quit. +- * If the size is greater than the hard limit and less than the soft limit, +- * it issues a warning, but continues the import task. + * + * Note: this function is called only if the import is executed as a stand + * alone command line (ldif2db). +@@ -1700,27 +1699,17 @@ dblayer_start(struct ldbminfo *li, int dbmode) + int + check_and_set_import_cache(struct ldbminfo *li) + { +- size_t import_pages = 0; +- size_t pagesize, pages, procpages, availpages; +- size_t soft_limit = 0; +- size_t hard_limit = 0; +- size_t page_delta = 0; ++ uint64_t import_cache = 0; + char s[64]; /* big enough to hold %ld */ ++ /* Get our platform memory values. */ ++ slapi_pal_meminfo *mi = spal_meminfo_get(); + +- if (util_info_sys_pages(&pagesize, &pages, &procpages, &availpages) != 0 || 0 == pagesize || 0 == pages) { +- slapi_log_err(SLAPI_LOG_ERR, "check_and_set_import_cache", +- "Failed to get pagesize: %ld or pages: %ld\n", +- pagesize, pages); ++ if (mi == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "check_and_set_import_cache", "Failed to get system memory infomation\n"); + return ENOENT; + } +- slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", +- "pagesize: %ld, pages: %ld, procpages: %ld\n", +- pagesize, pages, procpages); ++ slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", "pagesize: %"PRIu64", available bytes %"PRIu64", process usage %"PRIu64" \n", mi->pagesize_bytes, mi->system_available_bytes, mi->process_consumed_bytes); + +- /* Soft limit: pages equivalent to 1GB (defined in dblayer.h) */ +- soft_limit = (DBLAYER_IMPORTCACHESIZE_SL*1024) / (pagesize/1024); +- /* Hard limit: pages equivalent to 100MB (defined in dblayer.h) */ +- hard_limit = (DBLAYER_IMPORTCACHESIZE_HL*1024) / (pagesize/1024); + /* + * default behavior for ldif2db import cache, + * nsslapd-import-cache-autosize==-1, +@@ -1741,48 +1730,29 @@ check_and_set_import_cache(struct ldbminfo *li) + + if (li->li_import_cache_autosize == 0) { + /* user specified importCache */ +- import_pages = li->li_import_cachesize / pagesize; ++ import_cache = li->li_import_cachesize; + + } else { + /* autosizing importCache */ + /* ./125 instead of ./100 is for adjusting the BDB overhead. */ +-#ifdef LINUX +- /* On linux, availpages is correct so we should use it! */ +- import_pages = (li->li_import_cache_autosize * availpages) / 125; +-#else +- import_pages = (li->li_import_cache_autosize * pages) / 125; +-#endif ++ import_cache = (li->li_import_cache_autosize * mi->system_available_bytes) / 125; + } + +- page_delta = pages - import_pages; +- if (page_delta < hard_limit) { +- slapi_log_err(SLAPI_LOG_ERR, +- "check_and_set_import_cache", "After allocating import cache %ldKB, " +- "the available memory is %ldKB, " +- "which is less than the hard limit %ldKB. 
" +- "Please decrease the import cache size and rerun import.\n", +- import_pages*(pagesize/1024), page_delta*(pagesize/1024), +- hard_limit*(pagesize/1024)); ++ if (util_is_cachesize_sane(mi, &import_cache) == UTIL_CACHESIZE_ERROR) { ++ ++ slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", "Import failed to run: unable to validate system memory limits.\n"); ++ spal_meminfo_destroy(mi); + return ENOMEM; + } +- if (page_delta < soft_limit) { +- slapi_log_err(SLAPI_LOG_WARNING, +- "check_and_set_import_cache", "After allocating import cache %ldKB, " +- "the available memory is %ldKB, " +- "which is less than the soft limit %ldKB. " +- "You may want to decrease the import cache size and " +- "rerun import.\n", +- import_pages*(pagesize/1024), page_delta*(pagesize/1024), +- soft_limit*(pagesize/1024)); +- } + +- slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", "Import allocates %ldKB import cache.\n", +- import_pages*(pagesize/1024)); +- if (li->li_import_cache_autosize > 0) { /* import cache autosizing */ ++ slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", "Import allocates %"PRIu64"KB import cache.\n", import_cache / 1024); ++ if (li->li_import_cache_autosize > 0) { ++ /* import cache autosizing */ + /* set the calculated import cache size to the config */ +- sprintf(s, "%lu", (unsigned long)(import_pages * pagesize)); ++ sprintf(s, "%"PRIu64, import_cache); + ldbm_config_internal_set(li, CONFIG_IMPORT_CACHESIZE, s); + } ++ spal_meminfo_destroy(mi); + return 0; + } + +diff --git a/ldap/servers/slapd/back-ldbm/dblayer.h b/ldap/servers/slapd/back-ldbm/dblayer.h +index e4307fc..816c943 100644 +--- a/ldap/servers/slapd/back-ldbm/dblayer.h ++++ b/ldap/servers/slapd/back-ldbm/dblayer.h +@@ -68,14 +68,6 @@ + #define DB_REGION_NAME 25 /* DB: named regions, no backing file. */ + #endif + +-/* Used in check_and_set_import_cache */ +-/* After allocating the import cache, free memory must be left more than +- * the hard limit to run import. */ +-/* If the free memory size left is greater than hard limit and less than +- * soft limit, the import utility issues a warning, but it runs */ +-#define DBLAYER_IMPORTCACHESIZE_HL 100 /* import cache hard limit 100MB */ +-#define DBLAYER_IMPORTCACHESIZE_SL 1024 /* import cache soft limit 1GB */ +- + struct dblayer_private_env { + DB_ENV *dblayer_DB_ENV; + Slapi_RWLock * dblayer_env_lock; +diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c +index 087103b..ab32e0a 100644 +--- a/ldap/servers/slapd/back-ldbm/import-threads.c ++++ b/ldap/servers/slapd/back-ldbm/import-threads.c +@@ -3979,7 +3979,7 @@ _get_import_entryusn(ImportJob *job, Slapi_Value **usn_value) + * Use the counter which stores the old DB's + * next entryusn. */ + PR_snprintf(counter_buf, sizeof(counter_buf), +- "%" NSPRIu64, slapi_counter_get_value(be->be_usn_counter)); ++ "%" PRIu64, slapi_counter_get_value(be->be_usn_counter)); + } else { + /* import_init value is digit. + * Initialize the entryusn values with the digit */ +diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c +index d0cef1a..7161bac 100644 +--- a/ldap/servers/slapd/back-ldbm/import.c ++++ b/ldap/servers/slapd/back-ldbm/import.c +@@ -84,17 +84,19 @@ static int import_fifo_init(ImportJob *job) + int import_fifo_validate_capacity_or_expand(ImportJob *job, size_t entrysize) { + int result = 1; + /* We shoot for four times as much to start with. 
*/ +- size_t request = entrysize * 4; +- int sane = 0; ++ uint64_t request = entrysize * 4; ++ util_cachesize_result sane; + + if (entrysize > job->fifo.bsize) { + /* Check the amount of memory on the system */ +- sane = util_is_cachesize_sane(&request); +- if (!sane && entrysize <= request) { ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ sane = util_is_cachesize_sane(mi, &request); ++ spal_meminfo_destroy(mi); ++ if (sane == UTIL_CACHESIZE_REDUCED && entrysize <= request) { + /* Did the amount cachesize set still exceed entrysize? It'll do ... */ + job->fifo.bsize = request; + result = 0; +- } else if (!sane) { ++ } else if (sane != UTIL_CACHESIZE_VALID) { + /* Can't allocate! No!!! */ + result = 1; + } else { +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index dfe7a13..d5120d3 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -403,8 +403,8 @@ static int ldbm_config_dbcachesize_set(void *arg, void *value, char *errorbuf, i + { + struct ldbminfo *li = (struct ldbminfo *) arg; + int retval = LDAP_SUCCESS; +- size_t val = (size_t)value; +- size_t delta = (size_t)value; ++ uint64_t val = (size_t)value; ++ uint64_t delta = (size_t)value; + + /* There is an error here. We check the new val against our current mem-alloc + * Issue is that we already are using system pages, so while our value *might* +@@ -430,7 +430,13 @@ static int ldbm_config_dbcachesize_set(void *arg, void *value, char *errorbuf, i + val = DBDEFMINSIZ; + } else if (val > li->li_dbcachesize) { + delta = val - li->li_dbcachesize; +- if (!util_is_cachesize_sane(&delta)){ ++ ++ util_cachesize_result sane; ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ sane = util_is_cachesize_sane(mi, &delta); ++ spal_meminfo_destroy(mi); ++ ++ if (sane != UTIL_CACHESIZE_VALID){ + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: nsslapd-dbcachesize value is too large."); + slapi_log_err(SLAPI_LOG_ERR, "ldbm_config_dbcachesize_set", + "nsslapd-dbcachesize value is too large.\n"); +@@ -1086,7 +1092,7 @@ static int ldbm_config_db_cache_set(void *arg, void *value, char *errorbuf, int + struct ldbminfo *li = (struct ldbminfo *) arg; + int retval = LDAP_SUCCESS; + int val = ((uintptr_t)value); +- size_t delta = 0; ++ uint64_t delta = 0; + + /* There is an error here. We check the new val against our current mem-alloc + * Issue is that we already are using system pages, so while our value *might* +@@ -1101,7 +1107,13 @@ static int ldbm_config_db_cache_set(void *arg, void *value, char *errorbuf, int + if (apply) { + if (val > li->li_dblayer_private->dblayer_cache_config) { + delta = val - li->li_dblayer_private->dblayer_cache_config; +- if (!util_is_cachesize_sane(&delta)){ ++ util_cachesize_result sane; ++ ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ sane = util_is_cachesize_sane(mi, &delta); ++ spal_meminfo_destroy(mi); ++ ++ if (sane != UTIL_CACHESIZE_VALID){ + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: db cachesize value is too large"); + slapi_log_err(SLAPI_LOG_ERR,"ldbm_config_db_cache_set", "db cachesize value is too large.\n"); + return LDAP_UNWILLING_TO_PERFORM; +@@ -1219,7 +1231,7 @@ static int ldbm_config_import_cachesize_set(void *arg, void *value, char *errorb + { + struct ldbminfo *li = (struct ldbminfo *)arg; + size_t val = (size_t)value; +- size_t delta = (size_t)value; ++ uint64_t delta = (size_t)value; + /* There is an error here. 
We check the new val against our current mem-alloc + * Issue is that we already are using system pages, so while our value *might* + * be valid, we may reject it here due to the current procs page usage. +@@ -1232,7 +1244,13 @@ static int ldbm_config_import_cachesize_set(void *arg, void *value, char *errorb + if (apply){ + if (val > li->li_import_cachesize) { + delta = val - li->li_import_cachesize; +- if (!util_is_cachesize_sane(&delta)){ ++ ++ util_cachesize_result sane; ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ sane = util_is_cachesize_sane(mi, &delta); ++ spal_meminfo_destroy(mi); ++ ++ if (sane != UTIL_CACHESIZE_VALID){ + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: import cachesize value is too large."); + slapi_log_err(SLAPI_LOG_ERR,"ldbm_config_import_cachesize_set", + "Import cachesize value is too large.\n"); +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c +index 92d982e..0b0b37e 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c +@@ -107,7 +107,7 @@ ldbm_back_delete( Slapi_PBlock *pb ) + + if (pb->pb_conn) + { +- slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_delete", "Enter conn=%" NSPRIu64 " op=%d\n", ++ slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_delete", "Enter conn=%" PRIu64 " op=%d\n", + pb->pb_conn->c_connid, operation->o_opid); + } + +@@ -1493,7 +1493,7 @@ diskfull_return: + slapi_sdn_done(&parentsdn); + if (pb->pb_conn) + { +- slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_delete", "leave conn=%" NSPRIu64 " op=%d\n", ++ slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_delete", "leave conn=%" PRIu64 " op=%d\n", + pb->pb_conn->c_connid, operation->o_opid); + } + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +index e03954d..62cdbc3 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +@@ -92,7 +92,7 @@ ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, in + ldbm_instance *inst = (ldbm_instance *) arg; + int retval = LDAP_SUCCESS; + size_t val = (size_t) value; +- size_t delta = 0; ++ uint64_t delta = 0; + + /* Do whatever we can to make sure the data is ok. */ + /* There is an error here. We check the new val against our current mem-alloc +@@ -108,7 +108,13 @@ ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, in + if (apply) { + if (val > inst->inst_cache.c_maxsize) { + delta = val - inst->inst_cache.c_maxsize; +- if (!util_is_cachesize_sane(&delta)){ ++ ++ util_cachesize_result sane; ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ sane = util_is_cachesize_sane(mi, &delta); ++ spal_meminfo_destroy(mi); ++ ++ if (sane != UTIL_CACHESIZE_VALID){ + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: cachememsize value is too large."); + slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", "cachememsize value is too large.\n"); + return LDAP_UNWILLING_TO_PERFORM; +@@ -134,7 +140,7 @@ ldbm_instance_config_dncachememsize_set(void *arg, void *value, char *errorbuf, + ldbm_instance *inst = (ldbm_instance *) arg; + int retval = LDAP_SUCCESS; + size_t val = (size_t)value; +- size_t delta = 0; ++ uint64_t delta = 0; + + /* Do whatever we can to make sure the data is ok. */ + /* There is an error here. 
We check the new val against our current mem-alloc +@@ -150,7 +156,13 @@ ldbm_instance_config_dncachememsize_set(void *arg, void *value, char *errorbuf, + if (apply) { + if (val > inst->inst_dncache.c_maxsize) { + delta = val - inst->inst_dncache.c_maxsize; +- if (!util_is_cachesize_sane(&delta)){ ++ ++ util_cachesize_result sane; ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ sane = util_is_cachesize_sane(mi, &delta); ++ spal_meminfo_destroy(mi); ++ ++ if (sane != UTIL_CACHESIZE_VALID){ + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: dncachememsize value is too large."); + slapi_log_err(SLAPI_LOG_ERR,"ldbm_instance_config_dncachememsize_set", +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +index a78d850..533273b 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +@@ -142,7 +142,7 @@ ldbm_back_modrdn( Slapi_PBlock *pb ) + + if (pb->pb_conn) + { +- slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_modrdn", "enter conn=%" NSPRIu64 " op=%d\n", ++ slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_modrdn", "enter conn=%" PRIu64 " op=%d\n", + pb->pb_conn->c_connid, operation->o_opid); + } + +@@ -1539,7 +1539,7 @@ common_return: + if (pb->pb_conn) + { + slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_modrdn", +- "leave conn=%" NSPRIu64 " op=%d\n", ++ "leave conn=%" PRIu64 " op=%d\n", + pb->pb_conn->c_connid, operation->o_opid); + } + return retval; +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c +index a6c3b74..cfb0d6b 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c +@@ -407,7 +407,7 @@ ldbm_back_search( Slapi_PBlock *pb ) + slapi_pblock_get(pb, SLAPI_OPERATION_ID, &op_id); + + slapi_log_err(SLAPI_LOG_WARNING, +- "ldbm_back_search", "Sort control ignored for conn=%" NSPRIu64 " op=%d\n", ++ "ldbm_back_search", "Sort control ignored for conn=%" PRIu64 " op=%d\n", + conn_id, op_id); + } + } else { +@@ -442,7 +442,7 @@ ldbm_back_search( Slapi_PBlock *pb ) + slapi_pblock_get(pb, SLAPI_OPERATION_ID, &op_id); + + slapi_log_err(SLAPI_LOG_WARNING, +- "ldbm_back_search", "VLV control ignored for conn=%" NSPRIu64 " op=%d\n", ++ "ldbm_back_search", "VLV control ignored for conn=%" PRIu64 " op=%d\n", + conn_id, op_id); + } + +diff --git a/ldap/servers/slapd/back-ldbm/misc.c b/ldap/servers/slapd/back-ldbm/misc.c +index 5268087..7192b3a 100644 +--- a/ldap/servers/slapd/back-ldbm/misc.c ++++ b/ldap/servers/slapd/back-ldbm/misc.c +@@ -54,7 +54,7 @@ void ldbm_log_access_message(Slapi_PBlock *pblock,char *string) + return; + } + operation_id = operation->o_opid; +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d %s\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d %s\n", + connection_id, operation_id, string); + } + +diff --git a/ldap/servers/slapd/back-ldbm/monitor.c b/ldap/servers/slapd/back-ldbm/monitor.c +index 757792b..c58b069 100644 +--- a/ldap/servers/slapd/back-ldbm/monitor.c ++++ b/ldap/servers/slapd/back-ldbm/monitor.c +@@ -101,9 +101,9 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, + /* fetch cache statistics */ + cache_get_stats(&(inst->inst_dncache), &hits, &tries, + &nentries, &maxentries, &size, &maxsize); +- sprintf(buf, "%" NSPRIu64, hits); ++ sprintf(buf, "%" PRIu64, hits); + MSET("dnCacheHits"); +- sprintf(buf, "%" NSPRIu64, tries); ++ sprintf(buf, "%" PRIu64, tries); + MSET("dnCacheTries"); + sprintf(buf, "%lu", 
(unsigned long)(100.0*(double)hits / (double)(tries > 0 ? tries : 1))); + MSET("dnCacheHitRatio"); +@@ -119,11 +119,11 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, + /* normalized dn cache stats */ + if(ndn_cache_started()){ + ndn_cache_get_stats(&hits, &tries, &size, &maxsize, &count); +- sprintf(buf, "%" NSPRIu64, tries); ++ sprintf(buf, "%" PRIu64, tries); + MSET("normalizedDnCacheTries"); +- sprintf(buf, "%" NSPRIu64, hits); ++ sprintf(buf, "%" PRIu64, hits); + MSET("normalizedDnCacheHits"); +- sprintf(buf, "%" NSPRIu64, (tries - hits)); ++ sprintf(buf, "%" PRIu64, (tries - hits)); + MSET("normalizedDnCacheMisses"); + sprintf(buf, "%lu", (unsigned long)(100.0*(double)hits / (double)(tries > 0 ? tries : 1))); + MSET("normalizedDnCacheHitRatio"); +diff --git a/ldap/servers/slapd/back-ldbm/perfctrs.h b/ldap/servers/slapd/back-ldbm/perfctrs.h +index 57be1d1..64c79e1 100644 +--- a/ldap/servers/slapd/back-ldbm/perfctrs.h ++++ b/ldap/servers/slapd/back-ldbm/perfctrs.h +@@ -11,7 +11,7 @@ + # include + #endif + +-#include ++#include + + /* Structure definition for performance data */ + /* This stuff goes in shared memory, so make sure the packing is consistent */ +diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c +index 1ae9858..a207bd8 100644 +--- a/ldap/servers/slapd/back-ldbm/start.c ++++ b/ldap/servers/slapd/back-ldbm/start.c +@@ -32,34 +32,25 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + Object *inst_obj = NULL; + ldbm_instance *inst = NULL; + /* size_t is a platform unsigned int, IE uint64_t */ +- size_t total_cache_size = 0; +- size_t pagesize = 0; +- size_t pages = 0; +- size_t procpages __attribute__((unused)) = 0; +- size_t availpages = 0; +- size_t cache_size_to_configure = 0; +- size_t zone_pages = 0; +- size_t db_pages = 0; +- size_t entry_pages = 0; +- size_t import_pages = 0; +- size_t zone_size = 0; +- size_t import_size = 0; +- size_t cache_size = 0; +- size_t db_size = 0; ++ uint64_t total_cache_size = 0; ++ uint64_t entry_size = 0; ++ uint64_t zone_size = 0; ++ uint64_t import_size = 0; ++ uint64_t cache_size = 0; ++ uint64_t db_size = 0; + /* For clamping the autotune value to a 64Mb boundary */ +- size_t clamp_pages = 0; +- size_t clamp_div = 0; +- size_t clamp_mod = 0; ++ uint64_t clamp_div = 0; + /* Backend count */ +- size_t backend_count = 0; ++ uint64_t backend_count = 0; + + int_fast32_t autosize_percentage = 0; + int_fast32_t autosize_db_percentage_split = 0; + int_fast32_t import_percentage = 0; +- int32_t issane = 0; ++ util_cachesize_result issane; + char *msg = ""; /* This will be set by one of the two cache sizing paths below. */ + char size_to_str[32]; /* big enough to hold %ld */ + ++ + /* == Begin autotune == */ + + /* +@@ -120,42 +111,34 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + return SLAPI_FAIL_GENERAL; + } + +- if (util_info_sys_pages(&pagesize, &pages, &procpages, &availpages) != 0) { ++ /* Get our platform memory values. */ ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ if (mi == NULL) { + slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Unable to determine system page limits\n"); + return SLAPI_FAIL_GENERAL; + } + +- if (pagesize == 0) { +- /* If this happens, we are in a very bad state indeed... 
*/ +- slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Unable to determine system page size\n"); +- return SLAPI_FAIL_GENERAL; +- } +- + /* calculate the needed values */ +- zone_pages = (autosize_percentage * pages) / 100; +- zone_size = zone_pages * pagesize; ++ zone_size = (autosize_percentage * mi->system_total_bytes) / 100; + /* This is how much we "might" use, lets check it's sane. */ + /* In the case it is not, this will *reduce* the allocation */ +- issane = util_is_cachesize_sane(&zone_size); +- if (!issane) { ++ issane = util_is_cachesize_sane(mi, &zone_size); ++ if (issane == UTIL_CACHESIZE_REDUCED) { + slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Your autosized cache values have been reduced. Likely your nsslapd-cache-autosize percentage is too high.\n"); + slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "%s", msg); + } + /* It's valid, lets divide it up and set according to user prefs */ +- zone_pages = zone_size / pagesize; +- db_pages = (autosize_db_percentage_split * zone_pages) / 100; ++ db_size = (autosize_db_percentage_split * zone_size) / 100; + + /* Cap the DB size at 512MB, as this doesn't help perf much more (lkrispen's advice) */ +- if ((db_pages * pagesize) > (512 * MEGABYTE)) { +- db_pages = (512 * MEGABYTE) / pagesize; ++ if (db_size > (512 * MEGABYTE)) { ++ db_size = (512 * MEGABYTE); + } + + if (backend_count > 0 ) { + /* Number of entry cache pages per backend. */ +- entry_pages = (zone_pages - db_pages) / backend_count; ++ entry_size = (zone_size - db_size) / backend_count; + /* Now, clamp this value to a 64mb boundary. */ +- /* How many pages are in 64mb? */ +- clamp_pages = (64 * MEGABYTE) / pagesize; + /* Now divide the entry pages by this, and also mod. If mod != 0, we need + * to add 1 to the diveded number. This should give us: + * 510 * 1024 * 1024 == 510MB +@@ -166,17 +149,15 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + * 130560 % 16384 = 15872 which is != 0 + * therfore 7 + 1, aka 8 * 16384 = 131072 pages = 536870912 bytes = 512MB. + */ +- clamp_div = entry_pages / clamp_pages; +- clamp_mod = entry_pages % clamp_pages; +- if (clamp_mod != 0) { +- /* If we want to clamp down, remove this line. This would change the above from 510mb -> 448mb. */ +- clamp_div += 1; +- entry_pages = clamp_div * clamp_pages; ++ if (entry_size % (64 * MEGABYTE) != 0) { ++ /* If we want to clamp down, remove the "+1". This would change the above from 510mb -> 448mb. */ ++ clamp_div = (entry_size / (64 * MEGABYTE)) + 1; ++ entry_size = clamp_div * (64 * MEGABYTE); + } + } + +- slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk physical memory\n", pages*(pagesize/1024)); +- slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk avaliable\n", zone_pages*(pagesize/1024)); ++ slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk physical memory\n", mi->system_total_bytes / 1024); ++ slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk avaliable\n", mi->system_available_bytes / 1024); + + /* We've now calculated the autotuning values. Do we need to apply it? + * we use the logic of "if size is 0, or autosize is > 0. 
This way three +@@ -191,13 +172,12 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + + /* First, check the dbcache */ + if (li->li_dbcachesize == 0 || li->li_cache_autosize > 0) { +- slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: db cache: %luk\n", db_pages*(pagesize/1024)); +- cache_size_to_configure = (unsigned long)(db_pages * pagesize); +- if (cache_size_to_configure < (500 * MEGABYTE)) { +- cache_size_to_configure = (unsigned long)((db_pages * pagesize) / 1.25); ++ slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: db cache: %luk\n", db_size / 1024); ++ if (db_size < (500 * MEGABYTE)) { ++ db_size = db_size / 1.25; + } + /* Have to set this value through text. */ +- sprintf(size_to_str, "%lu", cache_size_to_configure); ++ sprintf(size_to_str, "%" PRIu64 , db_size); + ldbm_config_internal_set(li, CONFIG_DBCACHESIZE, size_to_str); + } + total_cache_size += li->li_dbcachesize; +@@ -205,7 +185,7 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + /* For each backend */ + /* apply the appropriate cache size if 0 */ + if (backend_count > 0 ) { +- li->li_cache_autosize_ec = (unsigned long)entry_pages * pagesize; ++ li->li_cache_autosize_ec = entry_size; + } + + for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; +@@ -220,7 +200,7 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + * it's highly unlikely. + */ + if (cache_size == 0 || cache_size == MINCACHESIZE || li->li_cache_autosize > 0) { +- slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: %s entry cache (%lu total): %luk\n", inst->inst_name, backend_count, entry_pages*(pagesize/1024)); ++ slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: %s entry cache (%lu total): %luk\n", inst->inst_name, backend_count, entry_size / 1024); + cache_set_max_entries(&(inst->inst_cache), -1); + cache_set_max_size(&(inst->inst_cache), li->li_cache_autosize_ec, CACHE_TYPE_ENTRY); + } +@@ -229,8 +209,8 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + db_size = dblayer_get_id2entry_size(inst); + if (cache_size < db_size) { + slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", +- "%s: entry cache size %lu B is " +- "less than db size %lu B; " ++ "%s: entry cache size %"PRIu64" B is " ++ "less than db size %"PRIu64" B; " + "We recommend to increase the entry cache size " + "nsslapd-cachememsize.\n", + inst->inst_name, cache_size, db_size); +@@ -244,37 +224,36 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + /* autosizing importCache */ + if (li->li_import_cache_autosize > 0) { + /* Use import percentage here, as it's been corrected for -1 behaviour */ +- import_pages = (import_percentage * pages) / 100; +- import_size = import_pages * pagesize; +- issane = util_is_cachesize_sane(&import_size); +- if (!issane) { ++ import_size = (import_percentage * mi->system_total_bytes) / 100; ++ issane = util_is_cachesize_sane(mi, &import_size); ++ if (issane == UTIL_CACHESIZE_REDUCED) { + slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Your autosized import cache values have been reduced. Likely your nsslapd-import-cache-autosize percentage is too high.\n"); + } + /* We just accept the reduced allocation here. 
*/ +- import_pages = import_size / pagesize; +- slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: import cache: %luk\n", +- import_pages*(pagesize/1024)); ++ slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: import cache: %"PRIu64"k\n", import_size / 1024); + +- sprintf(size_to_str, "%lu", (unsigned long)(import_pages * pagesize)); ++ sprintf(size_to_str, "%"PRIu64, import_size); + ldbm_config_internal_set(li, CONFIG_IMPORT_CACHESIZE, size_to_str); + } + + /* Finally, lets check that the total result is sane. */ +- slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "total cache size: %lu B; \n", total_cache_size); ++ slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "total cache size: %"PRIu64" B; \n", total_cache_size); + +- issane = util_is_cachesize_sane(&total_cache_size); +- if (!issane) { ++ issane = util_is_cachesize_sane(mi, &total_cache_size); ++ if (issane != UTIL_CACHESIZE_VALID) { + /* Right, it's time to panic */ + slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "It is highly likely your memory configuration of all backends will EXCEED your systems memory.\n"); + slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "In a future release this WILL prevent server start up. You MUST alter your configuration.\n"); +- slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Total entry cache size: %lu B; dbcache size: %lu B; available memory size: %lu B; \n", +- (PRUint64)total_cache_size, (PRUint64)li->li_dbcachesize, availpages * pagesize ++ slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Total entry cache size: %"PRIu64" B; dbcache size: %"PRIu64" B; available memory size: %"PRIu64" B; \n", ++ total_cache_size, (uint64_t)li->li_dbcachesize, mi->system_available_bytes + ); + slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "%s\n", msg); + /* WB 2016 - This should be UNCOMMENTED in a future release */ + /* return SLAPI_FAIL_GENERAL; */ + } + ++ spal_meminfo_destroy(mi); ++ + /* == End autotune == */ + return 0; + } +diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c +index f83df7d..648a0f9 100644 +--- a/ldap/servers/slapd/bind.c ++++ b/ldap/servers/slapd/bind.c +@@ -853,25 +853,25 @@ log_bind_access ( + { + if (method == LDAP_AUTH_SASL && saslmech && msg) { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d BIND dn=\"%s\" " ++ "conn=%" PRIu64 " op=%d BIND dn=\"%s\" " + "method=sasl version=%d mech=%s, %s\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, dn, + version, saslmech, msg ); + } else if (method == LDAP_AUTH_SASL && saslmech) { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d BIND dn=\"%s\" " ++ "conn=%" PRIu64 " op=%d BIND dn=\"%s\" " + "method=sasl version=%d mech=%s\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, dn, + version, saslmech ); + } else if (msg) { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d BIND dn=\"%s\" " ++ "conn=%" PRIu64 " op=%d BIND dn=\"%s\" " + "method=%" BERTAG_T " version=%d, %s\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, dn, + method, version, msg ); + } else { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d BIND dn=\"%s\" " ++ "conn=%" PRIu64 " op=%d BIND dn=\"%s\" " + "method=%" BERTAG_T " version=%d\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, dn, + method, version ); +diff --git a/ldap/servers/slapd/compare.c b/ldap/servers/slapd/compare.c +index 07ded98..3c03053 100644 +--- a/ldap/servers/slapd/compare.c ++++ b/ldap/servers/slapd/compare.c +@@ -111,7 +111,7 @@ do_compare( Slapi_PBlock *pb ) + rawdn, 
ava.ava_type, 0 ); + + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d CMP dn=\"%s\" attr=\"%s\"\n", ++ "conn=%" PRIu64 " op=%d CMP dn=\"%s\" attr=\"%s\"\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, dn, ava.ava_type ); + + /* +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index 7c83c66..359b59a 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -411,7 +411,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is + + /* log useful stuff to our access log */ + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " fd=%d slot=%d %sconnection from %s to %s\n", ++ "conn=%" PRIu64 " fd=%d slot=%d %sconnection from %s to %s\n", + conn->c_connid, conn->c_sd, ns, pTmp, str_ip, str_destip ); + + /* initialize the remaining connection fields */ +@@ -511,7 +511,7 @@ connection_need_new_password(const Connection *conn, const Operation *op, Slapi_ + op->o_tag != LDAP_REQ_ABANDON && op->o_tag != LDAP_REQ_EXTENDED) + { + slapi_add_pwd_control ( pb, LDAP_CONTROL_PWEXPIRED, 0); +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d %s\n", ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d %s\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, + "UNPROCESSED OPERATION - need new password" ); + send_ldap_result( pb, LDAP_UNWILLING_TO_PERFORM, +@@ -562,7 +562,7 @@ connection_dispatch_operation(Connection *conn, Operation *op, Slapi_PBlock *pb) + (op->o_tag != LDAP_REQ_EXTENDED) && (op->o_tag != LDAP_REQ_UNBIND) && + (op->o_tag != LDAP_REQ_ABANDON)) { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d UNPROCESSED OPERATION" ++ "conn=%" PRIu64 " op=%d UNPROCESSED OPERATION" + " - Insufficient SSF (local_ssf=%d sasl_ssf=%d ssl_ssf=%d)\n", + conn->c_connid, op->o_opid, conn->c_local_ssf, + conn->c_sasl_ssf, conn->c_ssl_ssf ); +@@ -591,7 +591,7 @@ connection_dispatch_operation(Connection *conn, Operation *op, Slapi_PBlock *pb) + (op->o_tag != LDAP_REQ_ABANDON) && (op->o_tag != LDAP_REQ_SEARCH)))) + { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d UNPROCESSED OPERATION" ++ "conn=%" PRIu64 " op=%d UNPROCESSED OPERATION" + " - Anonymous access not allowed\n", + conn->c_connid, op->o_opid ); + +@@ -650,7 +650,7 @@ connection_dispatch_operation(Connection *conn, Operation *op, Slapi_PBlock *pb) + int ret = setsockopt(conn->c_sd,IPPROTO_TCP,TCP_CORK,&i,sizeof(i)); + if (ret < 0) { + slapi_log_err(SLAPI_LOG_ERR, "connection_dispatch_operation", +- "Failed to set TCP_CORK on connection %" NSPRIu64 "\n",conn->c_connid); ++ "Failed to set TCP_CORK on connection %" PRIu64 "\n",conn->c_connid); + } + pop_cork = 1; + } +@@ -664,7 +664,7 @@ connection_dispatch_operation(Connection *conn, Operation *op, Slapi_PBlock *pb) + int ret = setsockopt(conn->c_sd,IPPROTO_TCP,TCP_CORK,&i,sizeof(i)); + if (ret < 0) { + slapi_log_err(SLAPI_LOG_ERR, "connection_dispatch_operation", +- "Failed to clear TCP_CORK on connection %" NSPRIu64 "\n",conn->c_connid); ++ "Failed to clear TCP_CORK on connection %" PRIu64 "\n",conn->c_connid); + } + } + #endif +@@ -690,7 +690,7 @@ connection_dispatch_operation(Connection *conn, Operation *op, Slapi_PBlock *pb) + + default: + slapi_log_err(SLAPI_LOG_ERR, +- "connection_dispatch_operation", "Ignoring unknown LDAP request (conn=%" NSPRIu64 ", tag=0x%lx)\n", ++ "connection_dispatch_operation", "Ignoring unknown LDAP request (conn=%" PRIu64 ", tag=0x%lx)\n", + conn->c_connid, op->o_tag); + break; + } +@@ -702,7 +702,7 @@ int 
connection_release_nolock_ext (Connection *conn, int release_only) + if (conn->c_refcnt <= 0) + { + slapi_log_err(SLAPI_LOG_ERR, "connection_release_nolock_ext", +- "conn=%" NSPRIu64 " fd=%d Attempt to release connection that is not acquired\n", ++ "conn=%" PRIu64 " fd=%d Attempt to release connection that is not acquired\n", + conn->c_connid, conn->c_sd); + PR_ASSERT (PR_FALSE); + return -1; +@@ -734,7 +734,7 @@ int connection_acquire_nolock_ext (Connection *conn, int allow_when_closing) + { + /* This may happen while other threads are still working on this connection */ + slapi_log_err(SLAPI_LOG_ERR, "connection_acquire_nolock_ext", +- "conn=%" NSPRIu64 " fd=%d Attempt to acquire connection in the closing state\n", ++ "conn=%" PRIu64 " fd=%d Attempt to acquire connection in the closing state\n", + conn->c_connid, conn->c_sd); + return -1; + } +@@ -1070,7 +1070,7 @@ get_next_from_buffer( void *buffer, size_t buffer_size, ber_len_t *lenp, + syserr = errno; + /* Bad stuff happened, like the client sent us some junk */ + slapi_log_err(SLAPI_LOG_CONNS, "get_next_from_buffer", +- "ber_get_next failed for connection %" NSPRIu64 "\n", conn->c_connid); ++ "ber_get_next failed for connection %" PRIu64 "\n", conn->c_connid); + /* reset private buffer */ + conn->c_private->c_buffer_bytes = conn->c_private->c_buffer_offset = 0; + +@@ -1232,7 +1232,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i + /* Otherwise we loop, unless we exceeded the ioblock timeout */ + if (waits_done > ioblocktimeout_waits) { + slapi_log_err(SLAPI_LOG_CONNS,"connection_read_operation", +- "ioblocktimeout expired on connection %" NSPRIu64 "\n", conn->c_connid); ++ "ioblocktimeout expired on connection %" PRIu64 "\n", conn->c_connid); + disconnect_server_nomutex( conn, conn->c_connid, -1, + SLAPD_DISCONNECT_IO_TIMEOUT, 0 ); + ret = CONN_DONE; +@@ -1253,19 +1253,19 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i + err = PR_GetError(); + syserr = PR_GetOSError(); + slapi_log_err(SLAPI_LOG_ERR, "connection_read_operation", +- "PR_Poll for connection %" NSPRIu64 " returns %d (%s)\n", conn->c_connid, err, slapd_pr_strerror( err ) ); ++ "PR_Poll for connection %" PRIu64 " returns %d (%s)\n", conn->c_connid, err, slapd_pr_strerror( err ) ); + /* If this happens we should close the connection */ + disconnect_server_nomutex( conn, conn->c_connid, -1, err, syserr ); + ret = CONN_DONE; + goto done; + } + slapi_log_err(SLAPI_LOG_CONNS, +- "connection_read_operation", "connection %" NSPRIu64 " waited %d times for read to be ready\n", conn->c_connid, waits_done); ++ "connection_read_operation", "connection %" PRIu64 " waited %d times for read to be ready\n", conn->c_connid, waits_done); + } else { + /* Some other error, typically meaning bad stuff */ + syserr = PR_GetOSError(); + slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", +- "PR_Recv for connection %" NSPRIu64 " returns %d (%s)\n", conn->c_connid, err, slapd_pr_strerror( err ) ); ++ "PR_Recv for connection %" PRIu64 " returns %d (%s)\n", conn->c_connid, err, slapd_pr_strerror( err ) ); + /* If this happens we should close the connection */ + disconnect_server_nomutex( conn, conn->c_connid, -1, err, syserr ); + ret = CONN_DONE; +@@ -1286,7 +1286,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i + } + } + slapi_log_err(SLAPI_LOG_CONNS, +- "connection_read_operation", "connection %" NSPRIu64 " read %d bytes\n", conn->c_connid, ret); ++ "connection_read_operation", 
"connection %" PRIu64 " read %d bytes\n", conn->c_connid, ret); + + new_operation = 0; + ret = CONN_FOUND_WORK_TO_DO; +@@ -1307,7 +1307,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i + * We received a non-LDAP message. Log and close connection. + */ + slapi_log_err(SLAPI_LOG_ERR, +- "connection_read_operation", "conn=%" NSPRIu64 " received a non-LDAP message (tag 0x%lx, expected 0x%lx)\n", ++ "connection_read_operation", "conn=%" PRIu64 " received a non-LDAP message (tag 0x%lx, expected 0x%lx)\n", + conn->c_connid, *tag, LDAP_TAG_MESSAGE ); + disconnect_server_nomutex( conn, conn->c_connid, -1, + SLAPD_DISCONNECT_BAD_BER_TAG, EPROTO ); +@@ -1319,7 +1319,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i + != LDAP_TAG_MSGID ) { + /* log, close and send error */ + slapi_log_err(SLAPI_LOG_ERR, +- "connection_read_operation", "conn=%" NSPRIu64 " unable to read tag for incoming request\n", conn->c_connid); ++ "connection_read_operation", "conn=%" PRIu64 " unable to read tag for incoming request\n", conn->c_connid); + disconnect_server_nomutex( conn, conn->c_connid, -1, SLAPD_DISCONNECT_BAD_BER_TAG, EPROTO ); + ret = CONN_DONE; + goto done; +@@ -1337,7 +1337,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i + case LDAP_TAG_LDAPDN: /* optional username, for CLDAP */ + /* log, close and send error */ + slapi_log_err(SLAPI_LOG_ERR, +- "connection_read_operation", "conn=%" NSPRIu64 " ber_peek_tag returns 0x%lx\n", conn->c_connid, *tag); ++ "connection_read_operation", "conn=%" PRIu64 " ber_peek_tag returns 0x%lx\n", conn->c_connid, *tag); + disconnect_server_nomutex( conn, conn->c_connid, -1, SLAPD_DISCONNECT_BER_PEEK, EPROTO ); + ret = CONN_DONE; + goto done; +@@ -1361,7 +1361,7 @@ void connection_make_readable(Connection *conn) + void connection_make_readable_nolock(Connection *conn) + { + conn->c_gettingber = 0; +- slapi_log_err(SLAPI_LOG_CONNS, "connection_make_readable_nolock", "making readable conn %" NSPRIu64 " fd=%d\n", ++ slapi_log_err(SLAPI_LOG_CONNS, "connection_make_readable_nolock", "making readable conn %" PRIu64 " fd=%d\n", + conn->c_connid, conn->c_sd); + if (!(conn->c_flags & CONN_FLAG_CLOSING)) { + /* if the connection is closing, try the close in connection_release_nolock */ +@@ -1388,7 +1388,7 @@ void connection_check_activity_level(Connection *conn) + /* update the last checked time */ + conn->c_private->previous_count_check_time = current_time(); + PR_ExitMonitor(conn->c_mutex); +- slapi_log_err(SLAPI_LOG_CONNS,"connection_check_activity_level", "conn %" NSPRIu64 " activity level = %d\n",conn->c_connid,delta_count); ++ slapi_log_err(SLAPI_LOG_CONNS,"connection_check_activity_level", "conn %" PRIu64 " activity level = %d\n",conn->c_connid,delta_count); + } + + typedef struct table_iterate_info_struct { +@@ -1450,7 +1450,7 @@ void connection_enter_leave_turbo(Connection *conn, int current_turbo_flag, int + double activet = 0.0; + connection_find_our_rank(conn,&connection_count, &our_rank); + slapi_log_err(SLAPI_LOG_CONNS,"connection_enter_leave_turbo", +- "conn %" NSPRIu64 " turbo rank = %d out of %d conns\n",conn->c_connid,our_rank,connection_count); ++ "conn %" PRIu64 " turbo rank = %d out of %d conns\n",conn->c_connid,our_rank,connection_count); + activet = (double)g_get_active_threadcnt(); + threshold_rank = (int)(activet * ((double)CONN_TURBO_PERCENTILE / 100.0)); + +@@ -1491,9 +1491,9 @@ void connection_enter_leave_turbo(Connection *conn, int current_turbo_flag, int 
+ PR_ExitMonitor(conn->c_mutex); + if (current_mode != new_mode) { + if (current_mode) { +- slapi_log_err(SLAPI_LOG_CONNS,"connection_enter_leave_turbo", "conn %" NSPRIu64 " leaving turbo mode\n",conn->c_connid); ++ slapi_log_err(SLAPI_LOG_CONNS,"connection_enter_leave_turbo", "conn %" PRIu64 " leaving turbo mode\n",conn->c_connid); + } else { +- slapi_log_err(SLAPI_LOG_CONNS,"connection_enter_leave_turbo", "conn %" NSPRIu64 " entering turbo mode\n",conn->c_connid); ++ slapi_log_err(SLAPI_LOG_CONNS,"connection_enter_leave_turbo", "conn %" PRIu64 " entering turbo mode\n",conn->c_connid); + } + } + *new_turbo_flag = new_mode; +@@ -1617,12 +1617,12 @@ connection_threadmain() + ret = connection_read_operation(conn, op, &tag, &more_data); + if ((ret == CONN_DONE) || (ret == CONN_TIMEDOUT)) { + slapi_log_err(SLAPI_LOG_CONNS, "connection_threadmain", +- "conn %" NSPRIu64 " read not ready due to %d - thread_turbo_flag %d more_data %d " ++ "conn %" PRIu64 " read not ready due to %d - thread_turbo_flag %d more_data %d " + "ops_initiated %d refcnt %d flags %d\n", conn->c_connid, ret, thread_turbo_flag, more_data, + conn->c_opsinitiated, conn->c_refcnt, conn->c_flags); + } else if (ret == CONN_FOUND_WORK_TO_DO) { + slapi_log_err(SLAPI_LOG_CONNS, "connection_threadmain", +- "conn %" NSPRIu64 " read operation successfully - thread_turbo_flag %d more_data %d " ++ "conn %" PRIu64 " read operation successfully - thread_turbo_flag %d more_data %d " + "ops_initiated %d refcnt %d flags %d\n", conn->c_connid, thread_turbo_flag, more_data, + conn->c_opsinitiated, conn->c_refcnt, conn->c_flags); + } +@@ -1648,7 +1648,7 @@ connection_threadmain() + if (thread_turbo_flag && !WORK_Q_EMPTY) { + thread_turbo_flag = 0; + slapi_log_err(SLAPI_LOG_CONNS,"connection_threadmain", +- "conn %" NSPRIu64 " leaving turbo mode - pb_q is not empty %d\n", ++ "conn %" PRIu64 " leaving turbo mode - pb_q is not empty %d\n", + conn->c_connid,work_q_size); + } + #endif +@@ -1675,7 +1675,7 @@ connection_threadmain() + * connection_make_readable(conn); + */ + slapi_log_err(SLAPI_LOG_CONNS,"connection_threadmain", +- "conn %" NSPRIu64 " leaving turbo mode due to %d\n", ++ "conn %" PRIu64 " leaving turbo mode due to %d\n", + conn->c_connid,ret); + goto done; + case CONN_SHUTDOWN: +@@ -1732,7 +1732,7 @@ connection_threadmain() + */ + conn->c_idlesince = curtime; + connection_activity(conn, maxthreads); +- slapi_log_err(SLAPI_LOG_CONNS,"connection_threadmain", "conn %" NSPRIu64 " queued because more_data\n", ++ slapi_log_err(SLAPI_LOG_CONNS,"connection_threadmain", "conn %" PRIu64 " queued because more_data\n", + conn->c_connid); + } else { + /* keep count of how many times maxthreads has blocked an operation */ +@@ -1814,7 +1814,7 @@ done: + /* If we're in turbo mode, we keep our reference to the connection alive */ + /* can't use the more_data var because connection could have changed in another thread */ + more_data = conn_buffered_data_avail_nolock(conn, &conn_closed) ? 
1 : 0; +- slapi_log_err(SLAPI_LOG_CONNS,"connection_threadmain", "conn %" NSPRIu64 " check more_data %d thread_turbo_flag %d\n", ++ slapi_log_err(SLAPI_LOG_CONNS,"connection_threadmain", "conn %" PRIu64 " check more_data %d thread_turbo_flag %d\n", + conn->c_connid,more_data,thread_turbo_flag); + if (!more_data) { + if (!thread_turbo_flag) { +@@ -1863,7 +1863,7 @@ connection_activity(Connection *conn, int maxthreads) + + if (connection_acquire_nolock (conn) == -1) { + slapi_log_err(SLAPI_LOG_CONNS, +- "connection_activity", "Could not acquire lock in connection_activity as conn %" NSPRIu64 " closing fd=%d\n", ++ "connection_activity", "Could not acquire lock in connection_activity as conn %" PRIu64 " closing fd=%d\n", + conn->c_connid,conn->c_sd); + /* XXX how to handle this error? */ + /* MAB: 25 Jan 01: let's return on error and pray this won't leak */ +@@ -2046,7 +2046,7 @@ connection_remove_operation( Connection *conn, Operation *op ) + + if ( *tmp == NULL ) + { +- slapi_log_err(SLAPI_LOG_ERR, "connection_remove_operation", "Can't find op %d for conn %" NSPRIu64 "\n", ++ slapi_log_err(SLAPI_LOG_ERR, "connection_remove_operation", "Can't find op %d for conn %" PRIu64 "\n", + (int)op->o_msgid, conn->c_connid); + } + else +@@ -2187,13 +2187,13 @@ log_ber_too_big_error(const Connection *conn, ber_len_t ber_len, + } + if (0 == ber_len) { + slapi_log_err(SLAPI_LOG_ERR, "log_ber_too_big_error", +- "conn=%" NSPRIu64 " fd=%d Incoming BER Element was too long, max allowable" ++ "conn=%" PRIu64 " fd=%d Incoming BER Element was too long, max allowable" + " is %" BERLEN_T " bytes. Change the nsslapd-maxbersize attribute in" + " cn=config to increase.\n", + conn->c_connid, conn->c_sd, maxbersize ); + } else { + slapi_log_err(SLAPI_LOG_ERR, "log_ber_too_big_error", +- "conn=%" NSPRIu64 " fd=%d Incoming BER Element was %" BERLEN_T " bytes, max allowable" ++ "conn=%" PRIu64 " fd=%d Incoming BER Element was %" BERLEN_T " bytes, max allowable" + " is %" BERLEN_T " bytes. 
Change the nsslapd-maxbersize attribute in" + " cn=config to increase.\n", + conn->c_connid, conn->c_sd, ber_len, maxbersize ); +@@ -2224,7 +2224,7 @@ disconnect_server_nomutex_ext( Connection *conn, PRUint64 opconnid, int opid, PR + if ( ( conn->c_sd != SLAPD_INVALID_SOCKET && + conn->c_connid == opconnid ) && !(conn->c_flags & CONN_FLAG_CLOSING) ) + { +- slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext", "Setting conn %" NSPRIu64 " fd=%d " ++ slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext", "Setting conn %" PRIu64 " fd=%d " + "to be disconnected: reason %d\n", conn->c_connid, conn->c_sd, reason); + /* + * PR_Close must be called before anything else is done because +@@ -2248,13 +2248,13 @@ disconnect_server_nomutex_ext( Connection *conn, PRUint64 opconnid, int opid, PR + */ + if (error && (EPIPE != error) ) { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d fd=%d closed error %d (%s) - %s\n", ++ "conn=%" PRIu64 " op=%d fd=%d closed error %d (%s) - %s\n", + conn->c_connid, opid, conn->c_sd, error, + slapd_system_strerror(error), + slapd_pr_strerror(reason)); + } else { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d fd=%d closed - %s\n", ++ "conn=%" PRIu64 " op=%d fd=%d closed - %s\n", + conn->c_connid, opid, conn->c_sd, + slapd_pr_strerror(reason)); + } +diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c +index bcafa4e..30da055 100644 +--- a/ldap/servers/slapd/conntable.c ++++ b/ldap/servers/slapd/conntable.c +@@ -395,7 +395,7 @@ connection_table_as_entry(Connection_Table *ct, Slapi_Entry *e) + * 3 = The number of operations attempted that were blocked + * by max threads. + */ +- snprintf(maxthreadbuf, sizeof(maxthreadbuf), "%d:%"NSPRIu64":%"NSPRIu64"", ++ snprintf(maxthreadbuf, sizeof(maxthreadbuf), "%d:%"PRIu64":%"PRIu64"", + maxthreadstate, ct->c[i].c_maxthreadscount, + ct->c[i].c_maxthreadsblocked); + +@@ -431,17 +431,17 @@ connection_table_as_entry(Connection_Table *ct, Slapi_Entry *e) + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "currentconnections", vals ); + +- snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(num_conns)); ++ snprintf( buf, sizeof(buf), "%" PRIu64, slapi_counter_get_value(num_conns)); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "totalconnections", vals ); + +- snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(conns_in_maxthreads)); ++ snprintf( buf, sizeof(buf), "%" PRIu64, slapi_counter_get_value(conns_in_maxthreads)); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "currentconnectionsatmaxthreads", vals ); + +- snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(max_threads_count)); ++ snprintf( buf, sizeof(buf), "%" PRIu64, slapi_counter_get_value(max_threads_count)); + val.bv_val = buf; + val.bv_len = strlen( buf ); + attrlist_replace( &e->e_attrs, "maxthreadsperconnhits", vals ); +diff --git a/ldap/servers/slapd/control.c b/ldap/servers/slapd/control.c +index 8f9e74e..52f2519 100644 +--- a/ldap/servers/slapd/control.c ++++ b/ldap/servers/slapd/control.c +@@ -335,7 +335,7 @@ get_ldapmessage_controls_ext( + slapi_pblock_set(pb, SLAPI_REQCONTROLS, NULL); + slapi_pblock_set(pb, SLAPI_MANAGEDSAIT, &ctrl_not_found); + slapi_pblock_set(pb, SLAPI_PWPOLICY, &ctrl_not_found); +- slapi_log_err(SLAPI_LOG_CONNS, "get_ldapmessage_controls_ext", "Warning: conn=%" NSPRIu64 " op=%d contains an empty list of controls\n", ++ 
slapi_log_err(SLAPI_LOG_CONNS, "get_ldapmessage_controls_ext", "Warning: conn=%" PRIu64 " op=%d contains an empty list of controls\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid); + } else { + /* len, ber_len_t is uint, not int, cannot be != -1, may be better to remove this check. */ +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index a4ea4c0..5f2471e 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -616,7 +616,7 @@ disk_monitoring_thread(void *nothing) + */ + if(disk_space < 4096){ /* 4 k */ + slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space is critically low on disk (%s), " +- "remaining space: %" NSPRIu64 " Kb. Signaling slapd for shutdown...\n", dirstr , (disk_space / 1024)); ++ "remaining space: %" PRIu64 " Kb. Signaling slapd for shutdown...\n", dirstr , (disk_space / 1024)); + g_set_shutdown( SLAPI_SHUTDOWN_EXIT ); + return; + } +@@ -626,7 +626,7 @@ disk_monitoring_thread(void *nothing) + */ + if(verbose_logging != 0 && verbose_logging != LDAP_DEBUG_ANY){ + slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space is low on disk (%s), remaining space: " +- "%" NSPRIu64 " Kb, temporarily setting error loglevel to the default level(%d).\n", dirstr, ++ "%" PRIu64 " Kb, temporarily setting error loglevel to the default level(%d).\n", dirstr, + (disk_space / 1024), SLAPD_DEFAULT_ERRORLOG_LEVEL); + /* Setting the log level back to zero, actually sets the value to LDAP_DEBUG_ANY */ + config_set_errorlog_level(CONFIG_LOGLEVEL_ATTRIBUTE, +@@ -640,7 +640,7 @@ disk_monitoring_thread(void *nothing) + */ + if(!logs_disabled && !logging_critical){ + slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space is too low on disk (%s), remaining " +- "space: %" NSPRIu64 " Kb, disabling access and audit logging.\n", dirstr, (disk_space / 1024)); ++ "space: %" PRIu64 " Kb, disabling access and audit logging.\n", dirstr, (disk_space / 1024)); + config_set_accesslog_enabled(LOGGING_OFF); + config_set_auditlog_enabled(LOGGING_OFF); + config_set_auditfaillog_enabled(LOGGING_OFF); +@@ -653,7 +653,7 @@ disk_monitoring_thread(void *nothing) + */ + if(!deleted_rotated_logs && !logging_critical){ + slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space is too low on disk (%s), remaining " +- "space: %" NSPRIu64 " Kb, deleting rotated logs.\n", dirstr, (disk_space / 1024)); ++ "space: %" PRIu64 " Kb, deleting rotated logs.\n", dirstr, (disk_space / 1024)); + log__delete_rotated_logs(); + deleted_rotated_logs = 1; + continue; +@@ -663,7 +663,7 @@ disk_monitoring_thread(void *nothing) + */ + if(disk_space < previous_mark){ + slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space is too low on disk (%s), remaining " +- "space: %" NSPRIu64 " Kb\n", dirstr, (disk_space / 1024)); ++ "space: %" PRIu64 " Kb\n", dirstr, (disk_space / 1024)); + } + /* + * +@@ -674,7 +674,7 @@ disk_monitoring_thread(void *nothing) + * + */ + if(disk_space < halfway){ +- slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space on (%s) is too far below the threshold(%" NSPRIu64 " bytes). " ++ slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). " + "Waiting %d minutes for disk space to be cleaned up before shutting slapd down...\n", + dirstr, threshold, (grace_period / 60)); + time(&start); +@@ -697,7 +697,7 @@ disk_monitoring_thread(void *nothing) + * Excellent, we are back to acceptable levels, reset everything... 
+ */ + slapi_log_err(SLAPI_LOG_INFO, "disk_monitoring_thread", "Available disk space is now " +- "acceptable (%" NSPRIu64 " bytes). Aborting shutdown, and restoring the log settings.\n", ++ "acceptable (%" PRIu64 " bytes). Aborting shutdown, and restoring the log settings.\n", + disk_space); + if(logs_disabled && using_accesslog){ + config_set_accesslog_enabled(LOGGING_ON); +@@ -721,7 +721,7 @@ disk_monitoring_thread(void *nothing) + * Disk space is critical, log an error, and shut it down now! + */ + slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space is critically low " +- "on disk (%s), remaining space: %" NSPRIu64 " Kb. Signaling slapd for shutdown...\n", ++ "on disk (%s), remaining space: %" PRIu64 " Kb. Signaling slapd for shutdown...\n", + dirstr, (disk_space / 1024)); + g_set_shutdown( SLAPI_SHUTDOWN_DISKFULL ); + return; +@@ -739,7 +739,7 @@ disk_monitoring_thread(void *nothing) + * If disk space was freed up we would of detected in the above while loop. So shut it down. + */ + slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Disk space is still too low " +- "(%" NSPRIu64 " Kb). Signaling slapd for shutdown...\n", (disk_space / 1024)); ++ "(%" PRIu64 " Kb). Signaling slapd for shutdown...\n", (disk_space / 1024)); + g_set_shutdown( SLAPI_SHUTDOWN_DISKFULL ); + + return; +@@ -1785,7 +1785,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll) + * trying to acquire a closing connection + */ + slapi_log_err(SLAPI_LOG_ERR, +- "handle_pr_read_ready", "connection_activity: abandoning conn %" NSPRIu64 " as " ++ "handle_pr_read_ready", "connection_activity: abandoning conn %" PRIu64 " as " + "fd=%d is already closing\n", c->c_connid,c->c_sd); + /* The call disconnect_server should do nothing, + * as the connection c should be already set to CLOSING */ +@@ -1843,7 +1843,7 @@ ns_handle_closure(struct ns_job_t *job) + #else + /* This doesn't actually confirm it's in the event loop thread, but it's a start */ + if (NS_JOB_IS_THREAD(ns_job_get_type(job)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "ns_handle_closure", "Attempt to close outside of event loop thread %" NSPRIu64 " for fd=%d\n", ++ slapi_log_err(SLAPI_LOG_ERR, "ns_handle_closure", "Attempt to close outside of event loop thread %" PRIu64 " for fd=%d\n", + c->c_connid, c->c_sd); + return; + } +@@ -1883,7 +1883,7 @@ ns_connection_post_io_or_closing(Connection *conn) + PR_ASSERT((conn->c_ns_close_jobs == 0) || (conn->c_ns_close_jobs == 1)); + if (conn->c_ns_close_jobs) { + slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_post_io_or_closing", "Already a close " +- "job in progress on conn %" NSPRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); ++ "job in progress on conn %" PRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); + return; + } else { + /* just make sure we schedule the event to be closed in a timely manner */ +@@ -1898,10 +1898,10 @@ ns_connection_post_io_or_closing(Connection *conn) + #endif + if (job_result != PR_SUCCESS) { + slapi_log_err(SLAPI_LOG_WARNING, "ns_connection_post_io_or_closing", "post closure job " +- "for conn %" NSPRIu64 " for fd=%d failed to be added to event queue\n", conn->c_connid, conn->c_sd); ++ "for conn %" PRIu64 " for fd=%d failed to be added to event queue\n", conn->c_connid, conn->c_sd); + } else { + slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_post_io_or_closing", "post closure job " +- "for conn %" NSPRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); ++ "for conn %" PRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); + } + + } +@@ -1938,10 +1938,10 @@ 
ns_connection_post_io_or_closing(Connection *conn) + #endif + if (job_result != PR_SUCCESS) { + slapi_log_err(SLAPI_LOG_WARNING, "ns_connection_post_io_or_closing", "post I/O job for " +- "conn %" NSPRIu64 " for fd=%d failed to be added to event queue\n", conn->c_connid, conn->c_sd); ++ "conn %" PRIu64 " for fd=%d failed to be added to event queue\n", conn->c_connid, conn->c_sd); + } else { + slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_post_io_or_closing", "post I/O job for " +- "conn %" NSPRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); ++ "conn %" PRIu64 " for fd=%d\n", conn->c_connid, conn->c_sd); + } + } + #endif +@@ -1964,14 +1964,14 @@ ns_handle_pr_read_ready(struct ns_job_t *job) + #else + /* This doesn't actually confirm it's in the event loop thread, but it's a start */ + if (NS_JOB_IS_THREAD(ns_job_get_type(job)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "ns_handle_pr_read_ready", "Attempt to handle read ready outside of event loop thread %" NSPRIu64 " for fd=%d\n", ++ slapi_log_err(SLAPI_LOG_ERR, "ns_handle_pr_read_ready", "Attempt to handle read ready outside of event loop thread %" PRIu64 " for fd=%d\n", + c->c_connid, c->c_sd); + return; + } + #endif + + PR_EnterMonitor(c->c_mutex); +- slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "activity on conn %" NSPRIu64 " for fd=%d\n", ++ slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "activity on conn %" PRIu64 " for fd=%d\n", + c->c_connid, c->c_sd); + /* if we were called due to some i/o event, see what the state of the socket is */ + if (slapi_is_loglevel_set(SLAPI_LOG_CONNS) && !NS_JOB_IS_TIMER(ns_job_get_output_type(job)) && c && c->c_sd) { +@@ -1980,16 +1980,16 @@ ns_handle_pr_read_ready(struct ns_job_t *job) + ssize_t rc = recv(c->c_sd, buf, sizeof(buf), MSG_PEEK); + if (!rc) { + slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "socket is closed conn" +- " %" NSPRIu64 " for fd=%d\n", c->c_connid, c->c_sd); ++ " %" PRIu64 " for fd=%d\n", c->c_connid, c->c_sd); + } else if (rc > 0) { + slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "socket read data available" +- " for conn %" NSPRIu64 " for fd=%d\n", c->c_connid, c->c_sd); ++ " for conn %" PRIu64 " for fd=%d\n", c->c_connid, c->c_sd); + } else if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) { + slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "socket has no data available" +- " conn %" NSPRIu64 " for fd=%d\n", c->c_connid, c->c_sd); ++ " conn %" PRIu64 " for fd=%d\n", c->c_connid, c->c_sd); + } else { + slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "socket has error [%d] " +- "conn %" NSPRIu64 " for fd=%d\n", errno, c->c_connid, c->c_sd); ++ "conn %" PRIu64 " for fd=%d\n", errno, c->c_connid, c->c_sd); + } + } + connection_release_nolock_ext(c, 1); /* release ref acquired when job was added */ +@@ -2013,7 +2013,7 @@ ns_handle_pr_read_ready(struct ns_job_t *job) + * trying to acquire a closing connection + */ + slapi_log_err(SLAPI_LOG_ERR, "ns_handle_pr_read_ready", "connection_activity: abandoning" +- " conn %" NSPRIu64 " as fd=%d is already closing\n", c->c_connid, c->c_sd); ++ " conn %" PRIu64 " as fd=%d is already closing\n", c->c_connid, c->c_sd); + /* The call disconnect_server should do nothing, + * as the connection c should be already set to CLOSING */ + disconnect_server_nomutex_ext(c, c->c_connid, -1, +@@ -2021,7 +2021,7 @@ ns_handle_pr_read_ready(struct ns_job_t *job) + 0 /* do not schedule closure, do it next */); + ns_handle_closure_nomutex(c); + } else { +- slapi_log_err(SLAPI_LOG_CONNS, 
"ns_handle_pr_read_ready", "queued conn %" NSPRIu64 " for fd=%d\n", ++ slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "queued conn %" PRIu64 " for fd=%d\n", + c->c_connid, c->c_sd); + } + PR_ExitMonitor(c->c_mutex); +diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c +index a16718a..e4e82a3 100644 +--- a/ldap/servers/slapd/delete.c ++++ b/ldap/servers/slapd/delete.c +@@ -260,7 +260,7 @@ static void op_shared_delete (Slapi_PBlock *pb) + + if (!internal_op ) + { +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d DEL dn=\"%s\"%s\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d DEL dn=\"%s\"%s\n", + pb->pb_conn->c_connid, + pb->pb_op->o_opid, + slapi_sdn_get_dn(sdn), +diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c +index abacc57..b56e3da 100644 +--- a/ldap/servers/slapd/entry.c ++++ b/ldap/servers/slapd/entry.c +@@ -3095,7 +3095,7 @@ slapi_entry_attr_set_ulong( Slapi_Entry* e, const char *type, uint64_t l) + struct berval *bvals[2]; + bvals[0] = &bv; + bvals[1] = NULL; +- sprintf(value,"%" NSPRIu64, l); ++ sprintf(value,"%" PRIu64, l); + bv.bv_val = value; + bv.bv_len = strlen( value ); + slapi_entry_attr_replace( e, type, bvals ); +diff --git a/ldap/servers/slapd/extendop.c b/ldap/servers/slapd/extendop.c +index 7e41b8c..6a5d2e3 100644 +--- a/ldap/servers/slapd/extendop.c ++++ b/ldap/servers/slapd/extendop.c +@@ -247,14 +247,14 @@ do_extended( Slapi_PBlock *pb ) + if ( NULL == ( name = extended_op_oid2string( extoid ))) { + slapi_log_err(SLAPI_LOG_ARGS, "do_extended", "oid (%s)\n", extoid); + +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d EXT oid=\"%s\"\n", ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d EXT oid=\"%s\"\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, extoid ); + } else { + slapi_log_err(SLAPI_LOG_ARGS, "do_extended", "oid (%s-%s)\n", + extoid, name); + + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d EXT oid=\"%s\" name=\"%s\"\n", ++ "conn=%" PRIu64 " op=%d EXT oid=\"%s\" name=\"%s\"\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, extoid, name ); + } + +diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c +index afedd5b..a63c6a6 100644 +--- a/ldap/servers/slapd/log.c ++++ b/ldap/servers/slapd/log.c +@@ -2769,7 +2769,7 @@ log__open_accesslogfile(int logfile_state, int locked) + while ( logp) { + log_convert_time (logp->l_ctime, tbuf, 1 /*short*/); + PR_snprintf(buffer, sizeof(buffer), "LOGINFO:%s%s.%s (%lu) (%" +- NSPRI64 "d)\n", PREVLOGFILE, loginfo.log_access_file, tbuf, ++ PRId64 "d)\n", PREVLOGFILE, loginfo.log_access_file, tbuf, + logp->l_ctime, logp->l_size); + LOG_WRITE(fpinfo, buffer, strlen(buffer), 0); + logp = logp->l_next; +@@ -2907,7 +2907,7 @@ log_rotate: + if (type == LOG_SIZE_EXCEEDED) { + slapi_log_err(SLAPI_LOG_TRACE, "log__needrotation", + "LOGINFO:End of Log because size exceeded(Max:%" +- NSPRI64 "d bytes) (Is:%" NSPRI64 "d bytes)\n", ++ PRId64 "d bytes) (Is:%" PRId64 "d bytes)\n", + maxlogsize, f_size); + } else if ( type == LOG_EXPIRED) { + slapi_log_err(SLAPI_LOG_TRACE, "log__needrotation", +@@ -4636,7 +4636,7 @@ log__open_errorlogfile(int logfile_state, int locked) + while (logp) { + log_convert_time (logp->l_ctime, tbuf, 1 /*short */); + PR_snprintf(buffer, sizeof(buffer), "LOGINFO:%s%s.%s (%lu) (%" +- NSPRI64 "d)\n", PREVLOGFILE, loginfo.log_error_file, tbuf, ++ PRId64 "d)\n", PREVLOGFILE, loginfo.log_error_file, tbuf, + logp->l_ctime, logp->l_size); + LOG_WRITE(fpinfo, buffer, strlen(buffer), 0); + logp = 
logp->l_next; +@@ -4763,7 +4763,7 @@ log__open_auditlogfile(int logfile_state, int locked) + while ( logp) { + log_convert_time (logp->l_ctime, tbuf, 1 /*short */); + PR_snprintf(buffer, sizeof(buffer), "LOGINFO:%s%s.%s (%lu) (%" +- NSPRI64 "d)\n", PREVLOGFILE, loginfo.log_audit_file, tbuf, ++ PRId64 "d)\n", PREVLOGFILE, loginfo.log_audit_file, tbuf, + logp->l_ctime, logp->l_size); + LOG_WRITE(fpinfo, buffer, strlen(buffer), 0); + logp = logp->l_next; +@@ -4889,7 +4889,7 @@ log__open_auditfaillogfile(int logfile_state, int locked) + while ( logp) { + log_convert_time (logp->l_ctime, tbuf, 1 /*short */); + PR_snprintf(buffer, sizeof(buffer), "LOGINFO:%s%s.%s (%lu) (%" +- NSPRI64 "d)\n", PREVLOGFILE, loginfo.log_auditfail_file, tbuf, ++ PRId64 "d)\n", PREVLOGFILE, loginfo.log_auditfail_file, tbuf, + logp->l_ctime, logp->l_size); + LOG_WRITE(fpinfo, buffer, strlen(buffer), 0); + logp = logp->l_next; +diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c +index e23fe67..da66b44 100644 +--- a/ldap/servers/slapd/modify.c ++++ b/ldap/servers/slapd/modify.c +@@ -680,7 +680,7 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) + + if ( !internal_op ) + { +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d MOD dn=\"%s\"%s\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d MOD dn=\"%s\"%s\n", + pb->pb_conn->c_connid, + pb->pb_op->o_opid, + slapi_sdn_get_dn(sdn), +@@ -1227,7 +1227,7 @@ static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old + { + if (operation_is_flag_set(operation,OP_FLAG_ACTION_LOG_ACCESS)) + { +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d MOD dn=\"%s\"\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d MOD dn=\"%s\"\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, + slapi_sdn_get_dn(&sdn)); + } +@@ -1267,7 +1267,7 @@ static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old + if (proxydn){ + proxystr = slapi_ch_smprintf(" authzid=\"%s\"", proxydn); + } +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d MOD dn=\"%s\"%s\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d MOD dn=\"%s\"%s\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, + slapi_sdn_get_dn(&sdn), proxystr ? proxystr : ""); + } +@@ -1312,7 +1312,7 @@ static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old + proxystr = slapi_ch_smprintf(" authzid=\"%s\"", proxydn); + } + +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d MOD dn=\"%s\"%s, %s\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d MOD dn=\"%s\"%s, %s\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, + slapi_sdn_get_dn(&sdn), + proxystr ? 
proxystr : "", +@@ -1338,7 +1338,7 @@ static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old + + if ( !internal_op ) + { +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d MOD dn=\"%s\"%s, %s\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d MOD dn=\"%s\"%s, %s\n", + pb->pb_conn->c_connid, + pb->pb_op->o_opid, + slapi_sdn_get_dn(&sdn), +@@ -1381,7 +1381,7 @@ static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old + + if ( !internal_op ) + { +- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d MOD dn=\"%s\"%s, %s\n", ++ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d MOD dn=\"%s\"%s, %s\n", + pb->pb_conn->c_connid, + pb->pb_op->o_opid, + slapi_sdn_get_dn(&sdn), +diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c +index 15f5210..69cca40 100644 +--- a/ldap/servers/slapd/modrdn.c ++++ b/ldap/servers/slapd/modrdn.c +@@ -463,7 +463,7 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args) + if ( !internal_op ) + { + slapi_log_access(LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d MODRDN dn=\"%s\" newrdn=\"%s\" newsuperior=\"%s\"%s\n", ++ "conn=%" PRIu64 " op=%d MODRDN dn=\"%s\" newrdn=\"%s\" newsuperior=\"%s\"%s\n", + pb->pb_conn->c_connid, + pb->pb_op->o_opid, + dn, +@@ -497,7 +497,7 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args) + { + if ( !internal_op ) { + slapi_log_err(SLAPI_LOG_ARGS, "op_shared_rename", +- "conn=%" NSPRIu64 " op=%d MODRDN invalid new RDN (\"%s\")\n", ++ "conn=%" PRIu64 " op=%d MODRDN invalid new RDN (\"%s\")\n", + pb->pb_conn->c_connid, + pb->pb_op->o_opid, + (NULL == newrdn) ? "(null)" : newrdn); +@@ -531,7 +531,7 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args) + "Syntax check of newSuperior failed\n"); + if (!internal_op) { + slapi_log_err(SLAPI_LOG_ARGS, "op_shared_rename", +- "conn=%" NSPRIu64 " op=%d MODRDN invalid new superior (\"%s\")", ++ "conn=%" PRIu64 " op=%d MODRDN invalid new superior (\"%s\")", + pb->pb_conn->c_connid, + pb->pb_op->o_opid, + newsuperior ? 
newsuperior : "(null)"); +diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c +index f1fb38f..8d1788f 100644 +--- a/ldap/servers/slapd/monitor.c ++++ b/ldap/servers/slapd/monitor.c +@@ -60,19 +60,19 @@ monitor_info(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter, int *ret + + connection_table_as_entry(the_connection_table, e); + +- val.bv_len = snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(ops_initiated) ); ++ val.bv_len = snprintf( buf, sizeof(buf), "%" PRIu64, slapi_counter_get_value(ops_initiated) ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "opsinitiated", vals ); + +- val.bv_len = snprintf( buf, sizeof(buf), "%" NSPRIu64, slapi_counter_get_value(ops_completed) ); ++ val.bv_len = snprintf( buf, sizeof(buf), "%" PRIu64, slapi_counter_get_value(ops_completed) ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "opscompleted", vals ); + +- val.bv_len = snprintf ( buf, sizeof(buf), "%" NSPRIu64, g_get_num_entries_sent() ); ++ val.bv_len = snprintf ( buf, sizeof(buf), "%" PRIu64, g_get_num_entries_sent() ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "entriessent", vals ); + +- val.bv_len = snprintf ( buf, sizeof(buf), "%" NSPRIu64, g_get_num_bytes_sent() ); ++ val.bv_len = snprintf ( buf, sizeof(buf), "%" PRIu64, g_get_num_bytes_sent() ); + val.bv_val = buf; + attrlist_replace( &e->e_attrs, "bytessent", vals ); + +diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c +index ccbc549..00fb9b8 100644 +--- a/ldap/servers/slapd/operation.c ++++ b/ldap/servers/slapd/operation.c +@@ -580,7 +580,7 @@ int slapi_connection_acquire(Slapi_Connection *conn) + { + /* This may happen while other threads are still working on this connection */ + slapi_log_err(SLAPI_LOG_ERR, "slapi_connection_acquire", +- "conn=%" NSPRIu64 " fd=%d Attempt to acquire connection in the closing state\n", ++ "conn=%" PRIu64 " fd=%d Attempt to acquire connection in the closing state\n", + conn->c_connid, conn->c_sd); + rc = -1; + } +@@ -606,7 +606,7 @@ slapi_connection_remove_operation( Slapi_PBlock *pb, Slapi_Connection *conn, Sla + if ( *tmp == NULL ) { + if (op) { + slapi_log_err(SLAPI_LOG_ERR, "slapi_connection_remove_operation", +- "Can't find op %d for conn %" NSPRIu64 "\n", ++ "Can't find op %d for conn %" PRIu64 "\n", + (int)op->o_msgid, conn->c_connid); + } else { + slapi_log_err(SLAPI_LOG_ERR, "slapi_connection_remove_operation", +@@ -620,7 +620,7 @@ slapi_connection_remove_operation( Slapi_PBlock *pb, Slapi_Connection *conn, Sla + /* connection_release_nolock(conn); */ + if (conn->c_refcnt <= 0) { + slapi_log_err(SLAPI_LOG_ERR, "slapi_connection_remove_operation", +- "conn=%" NSPRIu64 " fd=%d Attempt to release connection that is not acquired\n", ++ "conn=%" PRIu64 " fd=%d Attempt to release connection that is not acquired\n", + conn->c_connid, conn->c_sd); + rc = -1; + } else { +diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c +index 3ce7970..4682a73 100644 +--- a/ldap/servers/slapd/opshared.c ++++ b/ldap/servers/slapd/opshared.c +@@ -289,7 +289,7 @@ op_shared_search (Slapi_PBlock *pb, int send_result) + { + char *fmtstr; + +-#define SLAPD_SEARCH_FMTSTR_BASE "conn=%" NSPRIu64 " op=%d SRCH base=\"%s\" scope=%d " ++#define SLAPD_SEARCH_FMTSTR_BASE "conn=%" PRIu64 " op=%d SRCH base=\"%s\" scope=%d " + #define SLAPD_SEARCH_FMTSTR_BASE_INT "conn=%s op=%d SRCH base=\"%s\" scope=%d " + #define SLAPD_SEARCH_FMTSTR_REMAINDER " attrs=%s%s%s\n" + +@@ -1744,7 +1744,7 @@ void op_shared_log_error_access 
(Slapi_PBlock *pb, const char *type, const char + proxystr = slapi_ch_smprintf(" authzid=\"%s\"", proxydn); + } + +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d %s dn=\"%s\"%s, %s\n", ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d %s dn=\"%s\"%s, %s\n", + ( pb->pb_conn ? pb->pb_conn->c_connid : 0), + ( pb->pb_op ? pb->pb_op->o_opid : 0), + type, +diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c +index a30e2fa..e8c80e7 100644 +--- a/ldap/servers/slapd/pagedresults.c ++++ b/ldap/servers/slapd/pagedresults.c +@@ -310,7 +310,7 @@ pagedresults_free_one( Connection *conn, Operation *op, int index ) + PR_EnterMonitor(conn->c_mutex); + if (conn->c_pagedresults.prl_count <= 0) { + slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one", +- "conn=%" NSPRIu64 " paged requests list count is %d\n", ++ "conn=%" PRIu64 " paged requests list count is %d\n", + conn->c_connid, conn->c_pagedresults.prl_count); + } else if (index < conn->c_pagedresults.prl_maxlen) { + PagedResults *prp = conn->c_pagedresults.prl_list + index; +diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c +index 793bea3..2d0badc 100644 +--- a/ldap/servers/slapd/psearch.c ++++ b/ldap/servers/slapd/psearch.c +@@ -283,7 +283,7 @@ ps_send_results( void *arg ) + + if (conn_acq_flag) { + slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results", +- "conn=%" NSPRIu64 " op=%d Could not acquire the connection - psearch aborted\n", ++ "conn=%" PRIu64 " op=%d Could not acquire the connection - psearch aborted\n", + ps->ps_pblock->pb_conn->c_connid, ps->ps_pblock->pb_op->o_opid); + } + +@@ -293,7 +293,7 @@ ps_send_results( void *arg ) + /* Check for an abandoned operation */ + if ( ps->ps_pblock->pb_op == NULL || slapi_op_abandoned( ps->ps_pblock ) ) { + slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results", +- "conn=%" NSPRIu64 " op=%d The operation has been abandoned\n", ++ "conn=%" PRIu64 " op=%d The operation has been abandoned\n", + ps->ps_pblock->pb_conn->c_connid, ps->ps_pblock->pb_op->o_opid); + break; + } +@@ -351,7 +351,7 @@ ps_send_results( void *arg ) + ectrls, attrs, attrsonly ); + if (rc) { + slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results", +- "conn=%" NSPRIu64 " op=%d Error %d sending entry %s with op status %d\n", ++ "conn=%" PRIu64 " op=%d Error %d sending entry %s with op status %d\n", + ps->ps_pblock->pb_conn->c_connid, ps->ps_pblock->pb_op->o_opid, + rc, slapi_entry_get_dn_const(ec), ps->ps_pblock->pb_op->o_status); + } +@@ -400,7 +400,7 @@ ps_send_results( void *arg ) + PR_EnterMonitor(conn->c_mutex); + + slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results", +- "conn=%" NSPRIu64 " op=%d Releasing the connection and operation\n", ++ "conn=%" PRIu64 " op=%d Releasing the connection and operation\n", + conn->c_connid, ps->ps_pblock->pb_op->o_opid); + /* Delete this op from the connection's list */ + connection_remove_operation_ext( ps->ps_pblock, conn, ps->ps_pblock->pb_op ); +@@ -535,7 +535,7 @@ ps_service_persistent_searches( Slapi_Entry *e, Slapi_Entry *eprev, ber_int_t ch + } + + slapi_log_err(SLAPI_LOG_CONNS, "ps_service_persistent_searches", +- "conn=%" NSPRIu64 " op=%d entry %s with chgtype %d " ++ "conn=%" PRIu64 " op=%d entry %s with chgtype %d " + "matches the ps changetype %d\n", + ps->ps_pblock->pb_conn->c_connid, + ps->ps_pblock->pb_op->o_opid, +diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c +index 5c38bae..56257c3 100644 +--- a/ldap/servers/slapd/result.c ++++ b/ldap/servers/slapd/result.c +@@ -1980,7 +1980,7 @@ log_result( 
Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentrie + if ( !internal_op ) + { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d RESULT err=%d" ++ "conn=%" PRIu64 " op=%d RESULT err=%d" + " tag=%" BERTAG_T " nentries=%d etime=%s%s%s" + ", SASL bind in progress\n", + op->o_connid, +@@ -2012,7 +2012,7 @@ log_result( Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentrie + if ( !internal_op ) + { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d RESULT err=%d" ++ "conn=%" PRIu64 " op=%d RESULT err=%d" + " tag=%" BERTAG_T " nentries=%d etime=%s%s%s" + " dn=\"%s\"\n", + op->o_connid, +@@ -2040,7 +2040,7 @@ log_result( Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentrie + if ( !internal_op ) + { + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d RESULT err=%d" ++ "conn=%" PRIu64 " op=%d RESULT err=%d" + " tag=%" BERTAG_T " nentries=%d etime=%s%s%s" + " pr_idx=%d pr_cookie=%d\n", + op->o_connid, +@@ -2073,7 +2073,7 @@ log_result( Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentrie + ext_str = ""; + } + slapi_log_access( LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d RESULT err=%d" ++ "conn=%" PRIu64 " op=%d RESULT err=%d" + " tag=%" BERTAG_T " nentries=%d etime=%s%s%s%s\n", + op->o_connid, + op->o_opid, +@@ -2142,7 +2142,7 @@ log_entry( Operation *op, Slapi_Entry *e ) + + if ( !internal_op ) + { +- slapi_log_access( LDAP_DEBUG_STATS2, "conn=%" NSPRIu64 " op=%d ENTRY dn=\"%s\"\n", ++ slapi_log_access( LDAP_DEBUG_STATS2, "conn=%" PRIu64 " op=%d ENTRY dn=\"%s\"\n", + op->o_connid, op->o_opid, + slapi_entry_get_dn_const(e)); + } +@@ -2167,7 +2167,7 @@ log_referral( Operation *op ) + + if ( !internal_op ) + { +- slapi_log_access( LDAP_DEBUG_STATS2, "conn=%" NSPRIu64 " op=%d REFERRAL\n", ++ slapi_log_access( LDAP_DEBUG_STATS2, "conn=%" PRIu64 " op=%d REFERRAL\n", + op->o_connid, op->o_opid ); + } + else +diff --git a/ldap/servers/slapd/sasl_io.c b/ldap/servers/slapd/sasl_io.c +index 1337e1c..9458083 100644 +--- a/ldap/servers/slapd/sasl_io.c ++++ b/ldap/servers/slapd/sasl_io.c +@@ -198,17 +198,17 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + /* first we need the length bytes */ + ret = PR_Recv(fd->lower, buffer, amount, flags, timeout); + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", +- "Read sasl packet length returned %d on connection %" NSPRIu64 "\n", ++ "Read sasl packet length returned %d on connection %" PRIu64 "\n", + ret, c->c_connid); + if (ret <= 0) { + *err = PR_GetError(); + if (ret == 0) { + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", +- "Connection closed while reading sasl packet length on connection %" NSPRIu64 "\n", ++ "Connection closed while reading sasl packet length on connection %" PRIu64 "\n", + c->c_connid ); + } else { + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", +- "Error reading sasl packet length on connection %" NSPRIu64 " %d:%s\n", ++ "Error reading sasl packet length on connection %" PRIu64 " %d:%s\n", + c->c_connid, *err, slapd_pr_strerror(*err) ); + } + return ret; +@@ -226,7 +226,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + if (sp->encrypted_buffer_offset < sizeof(buffer)) { + slapi_log_err(SLAPI_LOG_CONNS, + "sasl_io_start_packet", "Read only %d bytes of sasl packet " +- "length on connection %" NSPRIu64 "\n", ret, c->c_connid ); ++ "length on connection %" PRIu64 "\n", ret, c->c_connid ); + #if defined(EWOULDBLOCK) + errno = EWOULDBLOCK; + 
#elif defined(EAGAIN) +@@ -251,7 +251,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + ber_len_t ber_len = 0; + ber_tag_t tag = 0; + +- slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", "conn=%" NSPRIu64 " fd=%d " ++ slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", "conn=%" PRIu64 " fd=%d " + "Sent an LDAP message that was not encrypted.\n", c->c_connid, + c->c_sd); + +@@ -265,7 +265,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + /* Is the ldap operation too large? */ + if(ber_len > maxbersize){ + slapi_log_err(SLAPI_LOG_ERR, "sasl_io_start_packet", +- "conn=%" NSPRIu64 " fd=%d Incoming BER Element was too long, max allowable " ++ "conn=%" PRIu64 " fd=%d Incoming BER Element was too long, max allowable " + "is %" BERLEN_T " bytes. Change the nsslapd-maxbersize attribute in " + "cn=config to increase.\n", + c->c_connid, c->c_sd, maxbersize ); +@@ -305,7 +305,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + } else if (ret > 0) { + slapi_log_err(SLAPI_LOG_CONNS, + "sasl_io_start_packet", +- "Continued: read sasl packet length returned %d on connection %" NSPRIu64 "\n", ++ "Continued: read sasl packet length returned %d on connection %" PRIu64 "\n", + ret, c->c_connid); + if((ret + sp->encrypted_buffer_offset) > sp->encrypted_buffer_size){ + sasl_io_resize_encrypted_buffer(sp, ret + sp->encrypted_buffer_offset); +@@ -316,7 +316,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + *err = PR_GetError(); + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", + "Error reading sasl packet length on connection " +- "%" NSPRIu64 " %d:%s\n", c->c_connid, *err, slapd_pr_strerror(*err) ); ++ "%" PRIu64 " %d:%s\n", c->c_connid, *err, slapd_pr_strerror(*err) ); + return ret; + } + } +@@ -360,7 +360,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + tag = *ber->ber_ptr++; + if (*ber->ber_ptr == LDAP_REQ_UNBIND){ + #endif +- slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", "conn=%" NSPRIu64 " fd=%d " ++ slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", "conn=%" PRIu64 " fd=%d " + "Received unencrypted UNBIND operation.\n", c->c_connid, + c->c_sd); + sp->encrypted_buffer_count = sp->encrypted_buffer_offset; +@@ -368,7 +368,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + ber_free(ber, 1); + return SASL_IO_BUFFER_NOT_ENCRYPTED; + } +- slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", "conn=%" NSPRIu64 " fd=%d " ++ slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", "conn=%" PRIu64 " fd=%d " + "Error: received an LDAP message (tag 0x%lx) that was not encrypted.\n", + #ifdef USE_OPENLDAP + c->c_connid, c->c_sd, (long unsigned int)tag); +@@ -380,7 +380,7 @@ sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt + + done: + /* If we got here we have garbage, or a denied LDAP operation */ +- slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", "conn=%" NSPRIu64 " fd=%d " ++ slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", "conn=%" PRIu64 " fd=%d " + "Error: received an invalid message that was not encrypted.\n", + c->c_connid, c->c_sd); + +@@ -399,7 +399,7 @@ done: + packet_length += sizeof(uint32_t); + + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_start_packet", +- "read sasl packet length %ld on connection %" NSPRIu64 "\n", ++ "read sasl packet length %ld on connection %" PRIu64 "\n", + packet_length, c->c_connid ); + + /* 
Check if the packet length is larger than our max allowed. A +@@ -432,17 +432,17 @@ sasl_io_read_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt3 + size_t bytes_remaining_to_read = sp->encrypted_buffer_count - sp->encrypted_buffer_offset; + + slapi_log_err(SLAPI_LOG_CONNS, +- "sasl_io_read_packet", "Reading %lu bytes for connection %" NSPRIu64 "\n", ++ "sasl_io_read_packet", "Reading %lu bytes for connection %" PRIu64 "\n", + bytes_remaining_to_read, c->c_connid ); + ret = PR_Recv(fd->lower, sp->encrypted_buffer + sp->encrypted_buffer_offset, bytes_remaining_to_read, flags, timeout); + if (ret <= 0) { + *err = PR_GetError(); + if (ret == 0) { + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_read_packet", +- "Connection closed while reading sasl packet on connection %" NSPRIu64 "\n", c->c_connid ); ++ "Connection closed while reading sasl packet on connection %" PRIu64 "\n", c->c_connid ); + } else { + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_read_packet", +- "Error reading sasl packet on connection %" NSPRIu64 " %d:%s\n", ++ "Error reading sasl packet on connection %" PRIu64 " %d:%s\n", + c->c_connid, *err, slapd_pr_strerror(*err) ); + } + return ret; +@@ -464,10 +464,10 @@ sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags, + /* Do we have decrypted data buffered from 'before' ? */ + bytes_in_buffer = sp->decrypted_buffer_count - sp->decrypted_buffer_offset; + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_recv", +- "Connection %" NSPRIu64 " len %d bytes_in_buffer %lu\n", ++ "Connection %" PRIu64 " len %d bytes_in_buffer %lu\n", + c->c_connid, len, bytes_in_buffer ); + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_recv", +- "Connection %" NSPRIu64 " len %d encrypted buffer count %lu\n", ++ "Connection %" PRIu64 " len %d encrypted buffer count %lu\n", + c->c_connid, len, sp->encrypted_buffer_count ); + if (0 == bytes_in_buffer) { + /* If there wasn't buffered decrypted data, we need to get some... 
*/ +@@ -502,7 +502,7 @@ sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags, + */ + if (!sasl_io_finished_packet(sp)) { + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_recv", +- "Connection %" NSPRIu64 " - not finished reading packet yet\n", c->c_connid); ++ "Connection %" PRIu64 " - not finished reading packet yet\n", c->c_connid); + #if defined(EWOULDBLOCK) + errno = EWOULDBLOCK; + #elif defined(EAGAIN) +@@ -516,7 +516,7 @@ sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags, + const char *output_buffer = NULL; + unsigned int output_length = 0; + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_recv", +- "Finished reading packet for connection %" NSPRIu64 "\n", c->c_connid ); ++ "Finished reading packet for connection %" PRIu64 "\n", c->c_connid ); + /* Now decode it */ + ret = sasl_decode(c->c_sasl_conn,sp->encrypted_buffer,sp->encrypted_buffer_count,&output_buffer,&output_length); + /* even if decode fails, need re-initialize the encrypted_buffer */ +@@ -524,7 +524,7 @@ sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags, + sp->encrypted_buffer_count = 0; + if (SASL_OK == ret) { + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_recv", +- "Decoded packet length %u for connection %" NSPRIu64 "\n", output_length, c->c_connid ); ++ "Decoded packet length %u for connection %" PRIu64 "\n", output_length, c->c_connid ); + if (output_length) { + sasl_io_resize_decrypted_buffer(sp,output_length); + memcpy(sp->decrypted_buffer,output_buffer,output_length); +@@ -534,7 +534,7 @@ sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags, + } + } else { + slapi_log_err(SLAPI_LOG_ERR, "sasl_io_recv", +- "Failed to decode packet for connection %" NSPRIu64 "\n", c->c_connid ); ++ "Failed to decode packet for connection %" PRIu64 "\n", c->c_connid ); + PR_SetError(PR_IO_ERROR, 0); + return PR_FAILURE; + } +@@ -552,11 +552,11 @@ sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags, + sp->decrypted_buffer_offset = 0; + sp->decrypted_buffer_count = 0; + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_recv", +- "All decrypted data returned for connection %" NSPRIu64 "\n", c->c_connid ); ++ "All decrypted data returned for connection %" PRIu64 "\n", c->c_connid ); + } else { + sp->decrypted_buffer_offset += bytes_to_return; + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_recv", +- "Returning %lu bytes to caller %lu bytes left to return for connection %" NSPRIu64 "\n", ++ "Returning %lu bytes to caller %lu bytes left to return for connection %" PRIu64 "\n", + bytes_to_return, sp->decrypted_buffer_count - sp->decrypted_buffer_offset, c->c_connid ); + } + ret = bytes_to_return; +@@ -772,11 +772,11 @@ sasl_io_enable(Connection *c, void *data /* UNUSED */) + rv = PR_PushIOLayer(c->c_prfd, PR_TOP_IO_LAYER, layer); + if (rv) { + slapi_log_err(SLAPI_LOG_ERR, "sasl_io_enable", +- "Error enabling sasl io on connection %" NSPRIu64 " %d:%s\n", ++ "Error enabling sasl io on connection %" PRIu64 " %d:%s\n", + c->c_connid, rv, slapd_pr_strerror(rv) ); + } else { + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_enable", +- "Enabled sasl io on connection %" NSPRIu64 " \n", c->c_connid); ++ "Enabled sasl io on connection %" PRIu64 " \n", c->c_connid); + debug_print_layers(c->c_prfd); + } + } +@@ -794,7 +794,7 @@ sasl_io_cleanup(Connection *c, void *data /* UNUSED */) + int ret = 0; + + slapi_log_err(SLAPI_LOG_CONNS, "sasl_io_cleanup", +- "Connection %" NSPRIu64 "\n", c->c_connid); ++ "Connection %" PRIu64 "\n", c->c_connid); + + ret = sasl_pop_IO_layer(c->c_prfd, 0 /* do not close */); + +diff --git 
a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c +index 9e5d1f0..2d6fb64 100644 +--- a/ldap/servers/slapd/saslbind.c ++++ b/ldap/servers/slapd/saslbind.c +@@ -929,7 +929,7 @@ void ids_sasl_check_bind(Slapi_PBlock *pb) + Slapi_Operation *operation; + slapi_pblock_get( pb, SLAPI_OPERATION, &operation); + slapi_log_err(SLAPI_LOG_CONNS, "ids_sasl_check_bind", +- "cleaning up sasl IO conn=%" NSPRIu64 " op=%d complete=%d continuing=%d\n", ++ "cleaning up sasl IO conn=%" PRIu64 " op=%d complete=%d continuing=%d\n", + pb->pb_conn->c_connid, operation->o_opid, + (pb->pb_conn->c_flags & CONN_FLAG_SASL_COMPLETE), continuing); + /* reset flag */ +diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c +index 670347e..ea8b479 100644 +--- a/ldap/servers/slapd/search.c ++++ b/ldap/servers/slapd/search.c +@@ -380,7 +380,7 @@ free_and_return:; + static void log_search_access (Slapi_PBlock *pb, const char *base, int scope, const char *fstr, const char *msg) + { + slapi_log_access(LDAP_DEBUG_STATS, +- "conn=%" NSPRIu64 " op=%d SRCH base=\"%s\" scope=%d filter=\"%s\", %s\n", ++ "conn=%" PRIu64 " op=%d SRCH base=\"%s\" scope=%d filter=\"%s\", %s\n", + pb->pb_conn->c_connid, pb->pb_op->o_opid, + base, scope, fstr, msg ? msg : ""); + +diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h +index 5871bf0..abfad20 100644 +--- a/ldap/servers/slapd/slap.h ++++ b/ldap/servers/slapd/slap.h +@@ -72,13 +72,8 @@ static char ptokPBE[34] = "Internal (Software) Token "; + #include + #include + +-/* Required to get portable printf/scanf format macros */ +-#ifdef HAVE_INTTYPES_H +-#include +- +-#else +-#error Need to define portable format macros such as PRIu64 +-#endif /* HAVE_INTTYPES_H */ ++/* Provides our int types and platform specific requirements. */ ++#include + + #define LOG_INTERNAL_OP_CON_ID "Internal" + #define LOG_INTERNAL_OP_OP_ID -1 +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index 725fa1c..ec8917d 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -27,6 +27,9 @@ + extern "C" { + #endif + ++/* Provides our int types and platform specific requirements. */ ++#include ++ + #include "prtypes.h" + #include "ldap.h" + #include "prprf.h" +@@ -57,17 +60,6 @@ NSPR_API(PRUint32) PR_fprintf(struct PRFileDesc* fd, const char *fmt, ...) + ; + #endif + +-/* NSPR uses the print macros a bit differently than ANSI C. We +- * need to use ll for a 64-bit integer, even when a long is 64-bit. +- */ +-#if defined(HAVE_LONG_LONG) && PR_BYTES_PER_LONG == 8 && !defined(PR_ALTERNATE_INT64_TYPEDEF) +-#define NSPRIu64 "lu" +-#define NSPRI64 "l" +-#else /* just assume long long is used */ +-#define NSPRIu64 "llu" +-#define NSPRI64 "ll" +-#endif +- + /* OpenLDAP uses unsigned long for ber_tag_t and ber_len_t but mozldap uses unsigned int */ + /* use this macro for printf statements for ber_tag_t and ber_len_t */ + #if defined(USE_OPENLDAP) +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index dd180a7..0c76580 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -30,6 +30,9 @@ extern "C" { + */ + #include "slapi-plugin-compat4.h" + ++/* slapi platform abstraction functions. */ ++#include ++ + /* Define our internal logging macro */ + #define slapi_log_err(level, subsystem, fmt, ...) 
+ #ifdef LDAP_ERROR_LOGGING +@@ -1363,26 +1366,33 @@ long long slapi_parse_duration_longlong(const char *value); + int slapi_is_duration_valid(const char *value); + + /** +- * Populate the pointers with the system memory information. +- * At this time, Linux is the only "reliable" system for returning these values +- * +- * \param pagesize Will return the system page size in bytes. +- * \param pages The total number of memory pages on the system. May include swap pages depending on OS. +- * \param procpages Number of memory pages our current process is consuming. May not be accurate on all platforms as this could be the VMSize rather than the actual number of consumed pages. +- * \param availpages Number of available pages of memory on the system. Not all operating systems set this correctly. +- * +- * \return 0 on success, non-zero on failure to determine memory sizings. ++ * Possible results of a cachesize check + */ +-int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size_t *availpages); +- ++typedef enum _util_cachesize_result { ++ /** ++ * The requested cachesize was valid and can be used. ++ */ ++ UTIL_CACHESIZE_VALID = 0, ++ /** ++ * The requested cachesize may cause OOM and was reduced. ++ */ ++ UTIL_CACHESIZE_REDUCED = 1, ++ /** ++ * An error occured resolving the cache size. You must stop processing. ++ */ ++ UTIL_CACHESIZE_ERROR = 2, ++} util_cachesize_result; + /** +- * Determine if the requested cachesize will exceed the system memory limits causing an out of memory condition ++ * Determine if the requested cachesize will exceed the system memory limits causing an out of memory condition. You must ++ * check the result before proceeding to correctly use the cache. + * ++ * \param mi. The system memory infomation. You should retrieve this with spal_meminfo_get(), and destroy it after use. + * \param cachesize. The requested allocation. If this value is greater than the memory available, this value will be REDUCED to be valid. + * +- * \return 0 if the size is "sane". 1 if the value will cause OOM and has been REDUCED ++ * \return util_cachesize_result. ++ * \sa util_cachesize_result, spal_meminfo_get + */ +-int util_is_cachesize_sane(size_t *cachesize); ++util_cachesize_result util_is_cachesize_sane(slapi_pal_meminfo *mi, size_t *cachesize); + + /** + * Retrieve the number of threads the server should run with based on this hardware. +diff --git a/ldap/servers/slapd/slapi_pal.c b/ldap/servers/slapd/slapi_pal.c +new file mode 100644 +index 0000000..91576ca +--- /dev/null ++++ b/ldap/servers/slapd/slapi_pal.c +@@ -0,0 +1,311 @@ ++/** BEGIN COPYRIGHT BLOCK ++ * Copyright (C) 2017 Red Hat, Inc. ++ * All rights reserved. ++ * ++ * License: GPL (version 3 or any later version). ++ * See LICENSE for details. ++ * END COPYRIGHT BLOCK **/ ++ ++/* ++ * Implementation of functions to abstract from platform ++ * specific issues. ++ */ ++ ++/* Provide ch_malloc etc. 
*/ ++#include ++/* Provide slapi_log_err macro wrapper */ ++#include ++#include ++ ++/* Assert macros */ ++#include ++/* Access errno */ ++#include ++ ++/* For getpagesize */ ++#include ++ ++/* For rlimit */ ++#include ++#include ++ ++#ifdef OS_solaris ++#include ++#endif ++ ++#if defined ( hpux ) ++#include ++#endif ++ ++static int_fast32_t ++_spal_rlimit_get(int resource, uint64_t *soft_limit, uint64_t *hard_limit) { ++ struct rlimit rl = {0}; ++ ++ if (getrlimit(resource, &rl) != 0) { ++ int errsrv = errno; ++ slapi_log_err(SLAPI_LOG_ERR, "_spal_rlimit_mem_get", "Failed to access system resource limits %d\n", errsrv); ++ return 1; ++ } ++ ++ if (rl.rlim_cur != RLIM_INFINITY) { ++ *soft_limit = (uint64_t)rl.rlim_cur; ++ } ++ if (rl.rlim_max != RLIM_INFINITY) { ++ *hard_limit = (uint64_t)rl.rlim_max; ++ } ++ ++ return 0; ++} ++ ++ ++#ifdef LINUX ++static int_fast32_t ++_spal_uint64_t_file_get(char *name, char *prefix, uint64_t *dest) { ++ FILE *f; ++ char s[40] = {0}; ++ size_t prefix_len = 0; ++ ++ if (prefix != NULL) { ++ prefix_len = strlen(prefix); ++ } ++ ++ /* Make sure we can fit into our buffer */ ++ assert((prefix_len + 20) < 39); ++ ++ f = fopen(name, "r"); ++ if (!f) { /* fopen failed */ ++ int errsrv = errno; ++ slapi_log_err(SLAPI_LOG_ERR,"_spal_get_uint64_t_file", "Unable to open file \"%s\". errno=%d\n", name, errsrv); ++ return 1; ++ } ++ ++ int_fast32_t retval = 0; ++ while (! feof(f)) { ++ if (!fgets(s, 39, f)) { ++ retval = 1; ++ break; /* error or eof */ ++ } ++ if (feof(f)) { ++ retval = 1; ++ break; ++ } ++ if (prefix_len > 0 && strncmp(s, prefix, prefix_len) == 0) { ++ sscanf(s + prefix_len, "%"SCNu64, dest); ++ break; ++ } else if (prefix_len == 0) { ++ sscanf(s, "%"SCNu64, dest); ++ break; ++ } ++ } ++ fclose(f); ++ return retval; ++} ++ ++ ++ ++slapi_pal_meminfo * ++spal_meminfo_get() { ++ slapi_pal_meminfo *mi = (slapi_pal_meminfo *)slapi_ch_calloc(1, sizeof(slapi_pal_meminfo)); ++ ++ mi->pagesize_bytes = getpagesize(); ++ ++ /* ++ * We have to compare values from a number of sources to ensure we have ++ * the correct result. 
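++ * (Sources compared below: VmRSS from /proc/[pid]/status, getrlimit(RLIMIT_AS), MemTotal and
++ * MemAvailable from /proc/meminfo, and the cgroup v1 limits under /sys/fs/cgroup/memory.)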
++ */ ++ ++ char f_proc_status[30] = {0}; ++ sprintf(f_proc_status, "/proc/%d/status", getpid()); ++ char *p_vmrss = "VmRSS:"; ++ uint64_t vmrss = 0; ++ ++ if (_spal_uint64_t_file_get(f_proc_status, p_vmrss, &vmrss)) { ++ slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve vmrss\n"); ++ } ++ ++ /* vmrss is in kb, so convert to bytes */ ++ vmrss = vmrss * 1024; ++ ++ uint64_t rl_mem_soft = 0; ++ uint64_t rl_mem_hard = 0; ++ uint64_t rl_mem_soft_avail = 0; ++ ++ if (_spal_rlimit_get(RLIMIT_AS, &rl_mem_soft, &rl_mem_hard)) { ++ slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve memory rlimit\n"); ++ } ++ ++ if (rl_mem_soft != 0 && vmrss != 0 && rl_mem_soft > vmrss) { ++ rl_mem_soft_avail = rl_mem_soft - vmrss; ++ } ++ ++ char *f_meminfo = "/proc/meminfo"; ++ char *p_memtotal = "MemTotal:"; ++ char *p_memavail = "MemAvailable:"; ++ ++ uint64_t memtotal = 0; ++ uint64_t memavail = 0; ++ ++ if (_spal_uint64_t_file_get(f_meminfo, p_memtotal, &memtotal)) { ++ slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve %s : %s\n", f_meminfo, p_memtotal); ++ } ++ ++ if (_spal_uint64_t_file_get(f_meminfo, p_memavail, &memavail)) { ++ slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve %s : %s\n", f_meminfo, p_memavail); ++ } ++ ++ /* Both memtotal and memavail are in kb */ ++ memtotal = memtotal * 1024; ++ memavail = memavail * 1024; ++ ++ /* If it's possible, get our cgroup info */ ++ uint64_t cg_mem_soft = 0; ++ uint64_t cg_mem_hard = 0; ++ uint64_t cg_mem_usage = 0; ++ uint64_t cg_mem_soft_avail = 0; ++ ++ char *f_cg_soft = "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes"; ++ char *f_cg_hard = "/sys/fs/cgroup/memory/memory.limit_in_bytes"; ++ char *f_cg_usage = "/sys/fs/cgroup/memory/memory.usage_in_bytes"; ++ ++ if (_spal_uint64_t_file_get(f_cg_soft, NULL, &cg_mem_soft)) { ++ slapi_log_err(SLAPI_LOG_WARNING, "spal_meminfo_get", "Unable to retrieve %s. There may be no cgroup support on this platform\n", f_cg_soft); ++ } ++ ++ if (_spal_uint64_t_file_get(f_cg_hard, NULL, &cg_mem_hard)) { ++ slapi_log_err(SLAPI_LOG_WARNING, "spal_meminfo_get", "Unable to retrieve %s. There may be no cgroup support on this platform\n", f_cg_hard); ++ } ++ ++ if (_spal_uint64_t_file_get(f_cg_usage, NULL, &cg_mem_usage)) { ++ slapi_log_err(SLAPI_LOG_WARNING, "spal_meminfo_get", "Unable to retrieve %s. There may be no cgroup support on this platform\n", f_cg_hard); ++ } ++ ++ /* ++ * In some conditions, like docker, we only have a *hard* limit set. ++ * This obviously breaks our logic, so we need to make sure we correct this ++ */ ++ ++ if (cg_mem_hard != 0 && cg_mem_soft != 0 && cg_mem_hard < cg_mem_soft) { ++ /* Right, we only have a hard limit. Impose a 10% watermark. 
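++ * Illustrative numbers only: a 2GiB hard limit with no usable soft limit would yield a
++ * derived soft limit of roughly 1.8GiB.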
*/ ++ cg_mem_soft = cg_mem_hard * 0.9; ++ } ++ ++ if (cg_mem_soft != 0 && cg_mem_usage != 0 && cg_mem_soft > cg_mem_usage) { ++ cg_mem_soft_avail = cg_mem_soft - cg_mem_usage; ++ } ++ ++ ++ /* Now, compare the values and make a choice to which is provided */ ++ ++ /* Process consumed memory */ ++ mi->process_consumed_bytes = vmrss; ++ mi->process_consumed_pages = vmrss / mi->pagesize_bytes; ++ ++ /* System Total memory */ ++ /* If we have a memtotal, OR if no memtotal but rlimit */ ++ if (rl_mem_hard != 0 && ++ ((memtotal != 0 && rl_mem_hard < memtotal) || memtotal == 0) && ++ ((cg_mem_hard != 0 && rl_mem_hard < cg_mem_hard) || cg_mem_hard == 0) ++ ) ++ { ++ slapi_log_err(SLAPI_LOG_TRACE, "spal_meminfo_get", "system_total_bytes - using rlimit\n"); ++ mi->system_total_bytes = rl_mem_hard; ++ mi->system_total_pages = rl_mem_hard / mi->pagesize_bytes; ++ } else if (cg_mem_hard != 0 && ((memtotal != 0 && cg_mem_hard < memtotal) || memtotal == 0)) { ++ slapi_log_err(SLAPI_LOG_TRACE, "spal_meminfo_get", "system_total_bytes - using cgroup\n"); ++ mi->system_total_bytes = cg_mem_hard; ++ mi->system_total_pages = cg_mem_hard / mi->pagesize_bytes; ++ } else if (memtotal != 0) { ++ slapi_log_err(SLAPI_LOG_TRACE, "spal_meminfo_get", "system_total_bytes - using memtotal\n"); ++ mi->system_total_bytes = memtotal; ++ mi->system_total_pages = memtotal / mi->pagesize_bytes; ++ } else { ++ slapi_log_err(SLAPI_LOG_CRIT, "spal_meminfo_get", "Unable to determine system total memory!\n"); ++ spal_meminfo_destroy(mi); ++ return NULL; ++ } ++ ++ /* System Available memory */ ++ ++ if (rl_mem_soft_avail != 0 && ++ ((memavail != 0 && (rl_mem_soft_avail) < memavail) || memavail == 0) && ++ ((cg_mem_soft_avail != 0 && rl_mem_soft_avail < cg_mem_soft_avail) || cg_mem_soft_avail == 0) ++ ) ++ { ++ slapi_log_err(SLAPI_LOG_TRACE, "spal_meminfo_get", "system_available_bytes - using rlimit\n"); ++ mi->system_available_bytes = rl_mem_soft_avail; ++ mi->system_available_pages = rl_mem_soft_avail / mi->pagesize_bytes; ++ } else if (cg_mem_soft_avail != 0 && ((memavail != 0 && (cg_mem_soft_avail) < memavail) || memavail == 0)) { ++ slapi_log_err(SLAPI_LOG_TRACE, "spal_meminfo_get", "system_available_bytes - using cgroup\n"); ++ mi->system_available_bytes = cg_mem_soft_avail; ++ mi->system_available_pages = cg_mem_soft_avail / mi->pagesize_bytes; ++ } else if (memavail != 0) { ++ slapi_log_err(SLAPI_LOG_TRACE, "spal_meminfo_get", "system_available_bytes - using memavail\n"); ++ mi->system_available_bytes = memavail; ++ mi->system_available_pages = memavail / mi->pagesize_bytes; ++ } else { ++ slapi_log_err(SLAPI_LOG_CRIT, "spal_meminfo_get", "Unable to determine system available memory!\n"); ++ spal_meminfo_destroy(mi); ++ return NULL; ++ } ++ ++ slapi_log_err(SLAPI_LOG_TRACE, "spal_meminfo_get", "{pagesize_bytes = %"PRIu64", system_total_pages = %"PRIu64", system_total_bytes = %"PRIu64", process_consumed_pages = %"PRIu64", process_consumed_bytes = %"PRIu64", system_available_pages = %"PRIu64", system_available_bytes = %"PRIu64"},\n", ++ mi->pagesize_bytes, mi->system_total_pages, mi->system_total_bytes, mi->process_consumed_pages, mi->process_consumed_bytes, mi->system_available_pages, mi->system_available_bytes); ++ ++ return mi; ++} ++ ++ ++#endif ++ ++#ifdef OS_solaris ++uint64_t ++_spal_solaris_resident_pages_get() { ++ uint64_t procpages = 0; ++ struct prpsinfo psi = {0}; ++ char fn[40]; ++ int fd; ++ ++ sprintf(fn, "/proc/%d", getpid()); ++ fd = open(fn, O_RDONLY); ++ if (fd >= 0) { ++ if (ioctl(fd, PIOCPSINFO, (void 
*)&psi) == 0) { ++ procpages = (uint64_t)psi.pr_size; ++ } ++ close(fd); ++ } ++ return procpages; ++} ++ ++slapi_pal_meminfo * ++spal_meminfo_get() { ++ slapi_pal_meminfo *mi = (slapi_pal_meminfo *)slapi_ch_calloc(1, sizeof(slapi_pal_meminfo)); ++ ++ uint64_t rl_mem_soft = 0; ++ uint64_t rl_mem_hard = 0; ++ ++ if (_spal_rlimit_get(RLIMIT_AS, &rl_mem_soft, &rl_mem_hard)) { ++ slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve memory rlimit\n"); ++ } ++ ++ mi->pagesize_bytes = sysconf(_SC_PAGESIZE); ++ mi->system_total_pages = sysconf(_SC_PHYS_PAGES); ++ mi->system_total_bytes = mi->system_total_pages * mi->pagesize_bytes; ++ mi->system_available_bytes = rl_mem_soft; ++ if (rl_mem_soft != 0) { ++ mi->system_available_pages = rl_mem_soft / mi->pagesize_bytes; ++ } ++ mi->process_consumed_pages = _spal_solaris_resident_pages_get(); ++ mi->process_consumed_bytes = mi->process_consumed_pages * mi->pagesize_bytes; ++ ++ return mi; ++ ++} ++#endif ++ ++#ifdef HPUX ++#endif ++ ++void ++spal_meminfo_destroy(slapi_pal_meminfo *mi) { ++ slapi_ch_free((void **)&mi); ++} +diff --git a/ldap/servers/slapd/slapi_pal.h b/ldap/servers/slapd/slapi_pal.h +new file mode 100644 +index 0000000..cb61d84 +--- /dev/null ++++ b/ldap/servers/slapd/slapi_pal.h +@@ -0,0 +1,62 @@ ++/** BEGIN COPYRIGHT BLOCK ++ * Copyright (C) 2017 Red Hat, Inc. ++ * All rights reserved. ++ * ++ * License: GPL (version 3 or any later version). ++ * See LICENSE for details. ++ * END COPYRIGHT BLOCK **/ ++ ++/* ++ * Header for the slapi platform abstraction layer. ++ * ++ * This implements a number of functions that help to provide vendor ++ * neutral requests. Candidates for this are memory, thread, disk size ++ * and other operations. ++ * ++ * Basically anywhere you see a "ifdef PLATFORM" is a candidate ++ * for this. ++ */ ++ ++#pragma once ++ ++#include ++ ++#ifdef HAVE_INTTYPES_H ++#include ++#else ++#error Need to define portable format macros such as PRIu64 ++#endif /* HAVE_INTTYPES_H */ ++ ++/** ++ * Structure that contains our system memory information in bytes and pages. ++ * ++ */ ++typedef struct _slapi_pal_meminfo { ++ uint64_t pagesize_bytes; ++ uint64_t system_total_pages; ++ uint64_t system_total_bytes; ++ uint64_t process_consumed_pages; ++ uint64_t process_consumed_bytes; ++ /* This value may be limited by cgroup or others. */ ++ uint64_t system_available_pages; ++ uint64_t system_available_bytes; ++} slapi_pal_meminfo; ++ ++/** ++ * Allocate and returne a populated memory info structure. This will be NULL ++ * on error, or contain a structure populated with platform information on ++ * success. You should free this with spal_meminfo_destroy. ++ * ++ * \return slapi_pal_meminfo * pointer to structure containing data, or NULL. ++ */ ++slapi_pal_meminfo * spal_meminfo_get(); ++ ++/** ++ * Destroy an allocated memory info structure. The caller is responsible for ++ * ensuring this is called. 
++ * ++ * \param mi the allocated slapi_pal_meminfo structure from spal_meminfo_get(); ++ */ ++void spal_meminfo_destroy(slapi_pal_meminfo *mi); ++ ++ +diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c +index b0c873d..21043d9 100644 +--- a/ldap/servers/slapd/snmp_collator.c ++++ b/ldap/servers/slapd/snmp_collator.c +@@ -711,7 +711,7 @@ static void + add_counter_to_value(Slapi_Entry *e, const char *type, PRUint64 countervalue) + { + char value[40]; +- snprintf(value,sizeof(value),"%" NSPRIu64, countervalue); ++ snprintf(value,sizeof(value),"%" PRIu64, countervalue); + slapi_entry_attr_set_charptr( e, type, value); + } + +diff --git a/ldap/servers/slapd/unbind.c b/ldap/servers/slapd/unbind.c +index fd44249..92dd905 100644 +--- a/ldap/servers/slapd/unbind.c ++++ b/ldap/servers/slapd/unbind.c +@@ -49,7 +49,7 @@ do_unbind( Slapi_PBlock *pb ) + * UnBindRequest ::= NULL + */ + if ( ber_get_null( ber ) == LBER_ERROR ) { +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d UNBIND," ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d UNBIND," + " decoding error: UnBindRequest not null\n", + pb->pb_conn->c_connid, operation->o_opid ); + /* LDAPv3 does not allow a response to an unbind... so just return. */ +@@ -64,7 +64,7 @@ do_unbind( Slapi_PBlock *pb ) + * MUST ignore the criticality field of controls + */ + if ( (err = get_ldapmessage_controls_ext( pb, ber, NULL, ignore_criticality )) != 0 ) { +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d UNBIND," ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d UNBIND," + " error processing controls - error %d (%s)\n", + pb->pb_conn->c_connid, operation->o_opid, + err, ldap_err2string( err )); +@@ -79,7 +79,7 @@ do_unbind( Slapi_PBlock *pb ) + + /* ONREPL - plugins should be called and passed bind dn and, possibly, other data */ + +- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d UNBIND\n", ++ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" PRIu64 " op=%d UNBIND\n", + pb->pb_conn->c_connid, operation->o_opid ); + + /* pass the unbind to all backends */ +diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c +index 48fa3c4..012e83d 100644 +--- a/ldap/servers/slapd/util.c ++++ b/ldap/servers/slapd/util.c +@@ -40,20 +40,8 @@ + #define FILTER_BUF 128 /* initial buffer size for attr value */ + #define BUF_INCR 16 /* the amount to increase the FILTER_BUF once it fills up */ + +-/* Used by our util_info_sys_pages function +- * +- * platforms supported so far: +- * Solaris, Linux, Windows +- */ +-#ifdef OS_solaris +-#include +-#endif +-#ifdef LINUX +-#include +-#endif +-#if defined ( hpux ) +-#include +-#endif ++/* slapi-private contains the pal. */ ++#include + + static int special_filename(unsigned char c) + { +@@ -1471,361 +1459,25 @@ slapi_uniqueIDRdnSize(void) + return util_uniqueidlen; + } + +- +-/** +- * Get the virtual memory size as defined by system rlimits. +- * +- * \return size_t bytes available +- */ +-static size_t util_getvirtualmemsize(void) +-{ +- struct rlimit rl; +- /* the maximum size of a process's total available memory, in bytes */ +- if (getrlimit(RLIMIT_AS, &rl) != 0) { +- /* We received an error condition. There are a number of possible +- * reasons we have have gotten here, but most likely is EINVAL, where +- * rlim->rlim_cur was greater than rlim->rlim_max. +- * As a result, we should return a 0, to tell the system we can't alloc +- * memory. 
+- */ +- int errsrv = errno; +- slapi_log_err(SLAPI_LOG_ERR,"util_getvirtualmemsize", +- "getrlimit returned non-zero. errno=%u\n", errsrv); +- return 0; +- } +- return rl.rlim_cur; +-} +- +-/* pages = number of pages of physical ram on the machine (corrected for 32-bit build on 64-bit machine). +- * procpages = pages currently used by this process (or working set size, sometimes) +- * availpages = some notion of the number of pages 'free'. Typically this number is not useful. +- */ +-int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size_t *availpages) +-{ +- if ((NULL == pagesize) || (NULL == pages) || (NULL == procpages) || (NULL == availpages)) { +- slapi_log_err(SLAPI_LOG_ERR, "util_info_sys_pages", +- "Null return variables are passed. Skip getting the system info.\n"); +- return 1; +- } +- *pagesize = 0; +- *pages = 0; +- *availpages = 0; +- *procpages = 0; +- +-#ifdef OS_solaris +- *pagesize = (int)sysconf(_SC_PAGESIZE); +- *pages = (int)sysconf(_SC_PHYS_PAGES); +- *availpages = util_getvirtualmemsize() / *pagesize; +- /* solaris has THE most annoying way to get this info */ +- { +- struct prpsinfo psi = {0}; +- char fn[40]; +- int fd; +- +- sprintf(fn, "/proc/%d", getpid()); +- fd = open(fn, O_RDONLY); +- if (fd >= 0) { +- if (ioctl(fd, PIOCPSINFO, (void *)&psi) == 0) { +- *procpages = psi.pr_size; +- } +- close(fd); +- } +- } +-#endif +- +-#ifdef LINUX +- { +- /* +- * On linux because of the way that the virtual memory system works, we +- * don't really need to think about other processes, or fighting them. +- * But that's not without quirks. +- * +- * We are given a virtual memory space, represented by vsize (man 5 proc) +- * This space is a "funny number". It's a best effort based system +- * where linux instead of telling us how much memory *actually* exists +- * for us to use, gives us a virtual memory allocation which is the +- * value of ram + swap.... sometimes. Depends on platform. +- * +- * But none of these pages even exist or belong to us on the real system +- * until will malloc them AND write a non-zero to them. +- * +- * The biggest issue with this is that vsize does NOT consider the +- * effect other processes have on the system. So a process can malloc +- * 2 Gig from the host, and our vsize doesn't reflect that until we +- * suddenly can't malloc anything. +- * +- * We can see exactly what we are using inside of the vmm by +- * looking at rss (man 5 proc). This shows us the current actual +- * allocation of memory we are using. This is a good thing. +- * +- * We obviously don't want to have any pages in swap, but sometimes we +- * can't help that: And there is also no guarantee that while we have +- * X bytes in vsize, that we can even allocate any of them. Plus, we +- * don't know if we are about to allocate to swap or not .... or get us +- * killed in a blaze of oom glory. +- * +- * So there are now two strategies avaliable in this function. +- * The first is to blindly accept what the VMM tells us about vsize +- * while we hope and pray that we don't get nailed because we used +- * too much. +- * +- * The other is a more conservative approach: We check vsize from +- * proc/pid/status, and we check /proc/meminfo for freemem +- * Which ever value is "lower" is the upper bound on pages we could +- * potentially allocate: generally, this will be MemAvailable. 
+- */ +- +- size_t freesize = 0; +- size_t rlimsize = 0; +- +- *pagesize = getpagesize(); +- +- /* Get the amount of freeram, rss */ +- +- FILE *f; +- char fn[40], s[80]; +- +- sprintf(fn, "/proc/%d/status", getpid()); +- f = fopen(fn, "r"); +- if (!f) { /* fopen failed */ +- /* We should probably make noise here! */ +- int errsrv = errno; +- slapi_log_err(SLAPI_LOG_ERR,"util_info_sys_pages", "Unable to open file /proc/%d/status. errno=%u\n", getpid(), errsrv); +- return 1; +- } +- while (! feof(f)) { +- if (!fgets(s, 79, f)) { +- break; /* error or eof */ +- } +- if (feof(f)) { +- break; +- } +- /* VmRSS shows us what we are ACTUALLY using for proc pages +- * Rather than "funny" pages. +- */ +- if (strncmp(s, "VmRSS:", 6) == 0) { +- sscanf(s+6, "%lu", (long unsigned int *)procpages); +- } +- } +- fclose(f); +- +- FILE *fm; +- char *fmn = "/proc/meminfo"; +- fm = fopen(fmn, "r"); +- if (!fm) { +- int errsrv = errno; +- slapi_log_err(SLAPI_LOG_ERR,"util_info_sys_pages", "Unable to open file /proc/meminfo. errno=%u\n", errsrv); +- return 1; +- } +- while (! feof(fm)) { +- if (!fgets(s, 79, fm)) { +- break; /* error or eof */ +- } +- if (feof(fm)) { +- break; +- } +- if (strncmp(s, "MemTotal:", 9) == 0) { +- sscanf(s+9, "%lu", (long unsigned int *)pages); +- } +- if (strncmp(s, "MemAvailable:", 13) == 0) { +- sscanf(s+13, "%lu", (long unsigned int *)&freesize); +- } +- } +- fclose(fm); +- +- +- *pages /= (*pagesize / 1024); +- freesize /= (*pagesize / 1024); +- /* procpages is now in kb not pages... */ +- *procpages /= (*pagesize / 1024); +- +- rlimsize = util_getvirtualmemsize(); +- /* On a 64 bit system, this is uint64 max, but on 32 it's -1 */ +- /* Either way, we should be ignoring it at this point if it's infinite */ +- if (rlimsize != RLIM_INFINITY) { +- /* This is in bytes, make it pages */ +- rlimsize = rlimsize / *pagesize; +- } +- +- /* Pages is the total ram on the system. */ +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "pages=%lu, \n", +- (unsigned long) *pages); +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "using pages for pages \n"); +- +- /* Availpages is how much we *could* alloc. 
We should take the smallest: +- * - pages +- * - getrlimit (availpages) +- * - freesize +- */ +- if (rlimsize == RLIM_INFINITY) { +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "pages=%lu, getrlim=RLIM_INFINITY, freesize=%lu\n", +- (unsigned long)*pages, (unsigned long)freesize); +- } else { +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "pages=%lu, getrlim=%lu, freesize=%lu\n", +- (unsigned long)*pages, (unsigned long)*availpages, (unsigned long)freesize); +- } +- +- if (rlimsize != RLIM_INFINITY && rlimsize < freesize && rlimsize < *pages && rlimsize > 0) { +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "using getrlim for availpages \n"); +- *availpages = rlimsize; +- } else if (freesize < *pages && freesize > 0) { +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "using freesize for availpages \n"); +- *availpages = freesize; +- } else { +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "using pages for availpages \n"); +- *availpages = *pages; +- } +- +- } +-#endif /* linux */ +- +- +- +-#if defined ( hpux ) +- { +- struct pst_static pst; +- int rval = pstat_getstatic(&pst, sizeof(pst), (size_t)1, 0); +- if (rval < 0) { /* pstat_getstatic failed */ +- return 1; +- } +- *pagesize = pst.page_size; +- *pages = pst.physical_memory; +- *availpages = util_getvirtualmemsize() / *pagesize; +- if (procpages) +- { +-#define BURST (size_t)32 /* get BURST proc info at one time... */ +- struct pst_status psts[BURST]; +- int i, count; +- int idx = 0; /* index within the context */ +- int mypid = getpid(); +- +- *procpages = 0; +- /* loop until count == 0, will occur all have been returned */ +- while ((count = pstat_getproc(psts, sizeof(psts[0]), BURST, idx)) > 0) { +- /* got count (max of BURST) this time. process them */ +- for (i = 0; i < count; i++) { +- if (psts[i].pst_pid == mypid) +- { +- *procpages = (size_t)(psts[i].pst_dsize + psts[i].pst_tsize + psts[i].pst_ssize); +- break; +- } +- } +- if (i < count) +- break; +- +- /* +- * now go back and do it again, using the next index after +- * the current 'burst' +- */ +- idx = psts[count-1].pst_idx + 1; +- } +- } +- } +-#endif +- /* If this is a 32-bit build, it might be running on a 64-bit machine, +- * in which case, if the box has tons of ram, we can end up telling +- * the auto cache code to use more memory than the process can address. +- * so we cap the number returned here. +- */ +-#if defined(__LP64__) || defined (_LP64) +-#else +- { +-#define GIGABYTE (1024*1024*1024) +- size_t one_gig_pages = GIGABYTE / *pagesize; +- if (*pages > (2 * one_gig_pages) ) { +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", +- "More than 2Gbytes physical memory detected. Since this is a 32-bit process, truncating memory size used for auto cache calculations to 2Gbytes\n"); +- *pages = (2 * one_gig_pages); +- } +- } +-#endif +- +- /* This is stupid. If you set %u to %zu to print a size_t, you get literal %zu in your logs +- * So do the filthy cast instead. 
+- */ +- slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "USING pages=%lu, procpages=%lu, availpages=%lu \n", +- (unsigned long)*pages, (unsigned long)*procpages, (unsigned long)*availpages); +- return 0; +- +-} +- +-int util_is_cachesize_sane(size_t *cachesize) ++util_cachesize_result ++util_is_cachesize_sane(slapi_pal_meminfo *mi, uint64_t *cachesize) + { +- size_t pages = 0; +- size_t pagesize = 0; +- size_t procpages = 0; +- size_t availpages = 0; +- +- size_t cachepages = 0; +- +- int issane = 1; +- +- if (util_info_sys_pages(&pagesize, &pages, &procpages, &availpages) != 0) { +- goto out; +- } +-#ifdef LINUX +- /* Linux we calculate availpages correctly, so USE IT */ +- if (!pagesize || !availpages) { +- goto out; +- } +-#else +- if (!pagesize || !pages) { +- goto out; ++ /* Check we have a valid meminfo struct */ ++ if (mi->system_available_bytes == 0) { ++ slapi_log_err(SLAPI_LOG_CRIT, "util_is_cachesize_sane", "Invalid system memory info, can not proceed."); ++ return UTIL_CACHESIZE_ERROR; + } +-#endif +- /* do nothing when we can't get the avail mem */ +- +- +- /* If the requested cache size is larger than the remaining physical memory +- * after the current working set size for this process has been subtracted, +- * then we say that's insane and try to correct. +- */ +- +- cachepages = *cachesize / pagesize; +- slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "cachesize=%lu / pagesize=%lu \n", +- (unsigned long)*cachesize,(unsigned long)pagesize); +- +-#ifdef LINUX +- /* Linux we calculate availpages correctly, so USE IT */ +- issane = (int)(cachepages <= availpages); +- slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "cachepages=%lu <= availpages=%lu\n", +- (unsigned long)cachepages,(unsigned long)availpages); + +- if (!issane) { ++ slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "Available bytes %"PRIu64", requested bytes %"PRIu64"\n", mi->system_available_bytes, *cachesize); ++ if (*cachesize > mi->system_available_bytes) { + /* Since we are ask for more than what's available, we give 3/4 of the remaining. + * the remaining system mem to the cachesize instead, and log a warning + */ +- *cachesize = (size_t)((availpages * 0.75 ) * pagesize); +- /* These are now trace warnings, because it was to confusing to log this *then* kill the request anyway. +- * Instead, we will let the caller worry about the notification, and we'll just use this in debugging and tracing. 
+- */ +- slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", +- "Available pages %lu, requested pages %lu, pagesize %lu\n", (unsigned long)availpages, (unsigned long)cachepages, (unsigned long)pagesize); +- slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", +- "WARNING adjusted cachesize to %lu\n", (unsigned long)*cachesize); +- } +-#else +- size_t freepages = 0; +- freepages = pages - procpages; +- slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "pages=%lu - procpages=%lu\n", +- (unsigned long)pages,(unsigned long)procpages); +- +- issane = (int)(cachepages <= freepages); +- slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "cachepages=%lu <= freepages=%lu\n", +- (unsigned long)cachepages,(unsigned long)freepages); +- +- if (!issane) { +- *cachesize = (size_t)((pages - procpages) * pagesize); +- slapi_log_err(SLAPI_LOG_WARNING, "util_is_cachesize_sane", "WARNING adjusted cachesize to %lu\n", +- (unsigned long )*cachesize); ++ *cachesize = (mi->system_available_bytes * 0.75); ++ slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "Adjusted cachesize to %"PRIu64"\n", *cachesize); ++ return UTIL_CACHESIZE_REDUCED; + } +-#endif +-out: +- if (!issane) { +- slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "WARNING: Cachesize not sane \n"); +- } +- +- return issane; ++ return UTIL_CACHESIZE_VALID; + } + + long +diff --git a/test/libslapd/spal/meminfo.c b/test/libslapd/spal/meminfo.c +new file mode 100644 +index 0000000..776141a +--- /dev/null ++++ b/test/libslapd/spal/meminfo.c +@@ -0,0 +1,54 @@ ++/** BEGIN COPYRIGHT BLOCK ++ * Copyright (C) 2017 Red Hat, Inc. ++ * All rights reserved. ++ * ++ * License: GPL (version 3 or any later version). ++ * See LICENSE for details. ++ * END COPYRIGHT BLOCK **/ ++ ++#include "../../test_slapd.h" ++ ++#include ++#include ++ ++/* ++ * Assert that our meminfo interface in slapi_pal works. 
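++ * The assertions below only check that each field is non-zero; exact values vary by host.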
++ */ ++ ++void ++test_libslapd_pal_meminfo(void **state __attribute__((unused))) { ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ assert_true(mi->pagesize_bytes > 0); ++ assert_true(mi->system_total_pages > 0); ++ assert_true(mi->system_total_bytes > 0); ++ assert_true(mi->process_consumed_pages > 0); ++ assert_true(mi->process_consumed_bytes > 0); ++ assert_true(mi->system_available_pages > 0); ++ assert_true(mi->system_available_bytes > 0); ++ spal_meminfo_destroy(mi); ++} ++ ++void ++test_libslapd_util_cachesane(void **state __attribute__((unused))) { ++ slapi_pal_meminfo *mi = spal_meminfo_get(); ++ uint64_t request = 0; ++ mi->system_available_bytes = 0; ++ assert_true(util_is_cachesize_sane(mi, &request) == UTIL_CACHESIZE_ERROR); ++ ++ // Set the values to known quantities ++ request = 50000; ++ mi->system_available_bytes = 99999; ++ assert_true(util_is_cachesize_sane(mi, &request) == UTIL_CACHESIZE_VALID); ++ ++ request = 99999; ++ assert_true(util_is_cachesize_sane(mi, &request) == UTIL_CACHESIZE_VALID); ++ ++ request = 100000; ++ assert_true(util_is_cachesize_sane(mi, &request) == UTIL_CACHESIZE_REDUCED); ++ assert_true(request <= 75000); ++ ++ spal_meminfo_destroy(mi); ++} ++ ++ ++ +diff --git a/test/libslapd/test.c b/test/libslapd/test.c +index 6e1171a..6fa7996 100644 +--- a/test/libslapd/test.c ++++ b/test/libslapd/test.c +@@ -26,6 +26,8 @@ run_libslapd_tests (void) { + cmocka_unit_test(test_libslapd_operation_v3c_target_spec), + cmocka_unit_test(test_libslapd_counters_atomic_usage), + cmocka_unit_test(test_libslapd_counters_atomic_overflow), ++ cmocka_unit_test(test_libslapd_pal_meminfo), ++ cmocka_unit_test(test_libslapd_util_cachesane), + }; + return cmocka_run_group_tests(tests, NULL, NULL); + } +diff --git a/test/test_slapd.h b/test/test_slapd.h +index b8f1aba..50de11b 100644 +--- a/test/test_slapd.h ++++ b/test/test_slapd.h +@@ -42,3 +42,8 @@ void test_libslapd_operation_v3c_target_spec(void **state); + void test_libslapd_counters_atomic_usage(void **state); + void test_libslapd_counters_atomic_overflow(void **state); + ++/* libslapd-pal-meminfo */ ++ ++void test_libslapd_pal_meminfo(void **state); ++void test_libslapd_util_cachesane(void **state); ++ +-- +2.9.3 + diff --git a/SOURCES/0029-Ticket-49204-Fix-lower-bounds-on-import-autosize-On-.patch b/SOURCES/0029-Ticket-49204-Fix-lower-bounds-on-import-autosize-On-.patch new file mode 100644 index 0000000..1e28e73 --- /dev/null +++ b/SOURCES/0029-Ticket-49204-Fix-lower-bounds-on-import-autosize-On-.patch @@ -0,0 +1,296 @@ +From 9be74e83539e204e9a56721da5c22bd9abf38195 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 19 Apr 2017 13:41:22 -0400 +Subject: [PATCH] Ticket 49204 - Fix lower bounds on import autosize + On small + VM, autotune breaks the access of the suffixes + + Bug Description: + ldif2db in some cases may set a cache of 0, which may y break imports. + + Under memory pressure, the amount of available memory at startup + can be so low that the configured cachememsize will be rejected + (unwilling to perform). + This should leave the cachememsize being "0" (default) + This conduct to be unable to access the suffix pages. + + Fix Description: + + * autosize set an incorrect percentage which was too high. + * we did not check the lower bound of the allocation + so we now set that we must have a minimum allocation. + * Set entrycache to a minimal value, even if it looks insane + * add a cap on reduction of caches, so we always allocate a few pages + at least, and prevent returning 0 to the caller. 
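+ (with a typical 4KiB page size the new floor in util_is_cachesize_sane works out to 16 pages,
+ i.e. 64KiB, rather than an adjusted value of 0)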
+ + https://pagure.io/389-ds-base/issue/49204 + + Author: wibrown, tbordaz + + Review by: tbordaz (Thanks mate, great work with this :) ) +--- + ldap/servers/slapd/back-ldbm/cache.c | 4 +-- + ldap/servers/slapd/back-ldbm/dblayer.c | 33 +++++++++++++--------- + ldap/servers/slapd/back-ldbm/dblayer.h | 12 ++++---- + ldap/servers/slapd/back-ldbm/ldbm_config.c | 4 +-- + .../servers/slapd/back-ldbm/ldbm_instance_config.c | 23 +++++++++++++-- + ldap/servers/slapd/slapi-private.h | 2 +- + ldap/servers/slapd/util.c | 20 +++++++++---- + 7 files changed, 65 insertions(+), 33 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c +index 0f0cf3b..c6638a2 100644 +--- a/ldap/servers/slapd/back-ldbm/cache.c ++++ b/ldap/servers/slapd/back-ldbm/cache.c +@@ -65,7 +65,7 @@ + + /* static functions */ + static void entrycache_clear_int(struct cache *cache); +-static void entrycache_set_max_size(struct cache *cache, size_t bytes); ++static void entrycache_set_max_size(struct cache *cache, uint64_t bytes); + static int entrycache_remove_int(struct cache *cache, struct backentry *e); + static void entrycache_return(struct cache *cache, struct backentry **bep); + static int entrycache_replace(struct cache *cache, struct backentry *olde, struct backentry *newe); +@@ -77,7 +77,7 @@ static void entry_lru_verify(struct cache *cache, struct backentry *e, int in); + + static int dn_same_id(const void *bdn, const void *k); + static void dncache_clear_int(struct cache *cache); +-static void dncache_set_max_size(struct cache *cache, size_t bytes); ++static void dncache_set_max_size(struct cache *cache, uint64_t bytes); + static int dncache_remove_int(struct cache *cache, struct backdn *dn); + static void dncache_return(struct cache *cache, struct backdn **bdn); + static int dncache_replace(struct cache *cache, struct backdn *olddn, struct backdn *newdn); +diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c +index 3c1fbb0..f834322 100644 +--- a/ldap/servers/slapd/back-ldbm/dblayer.c ++++ b/ldap/servers/slapd/back-ldbm/dblayer.c +@@ -1237,8 +1237,8 @@ no_diskspace(struct ldbminfo *li, int dbenv_flags) + struct statvfs db_buf; + int using_region_files = !(dbenv_flags & ( DB_PRIVATE | DB_SYSTEM_MEM)); + /* value of 10 == 10% == little more than the average overhead calculated for very large files on 64-bit system for bdb 4.7 */ +- PRUint64 expected_siz = li->li_dbcachesize + li->li_dbcachesize/10; /* dbcache + region files */ +- PRUint64 fsiz; ++ uint64_t expected_siz = li->li_dbcachesize + li->li_dbcachesize/10; /* dbcache + region files */ ++ uint64_t fsiz; + char *region_dir; + + if (statvfs(li->li_directory, &db_buf) < 0){ +@@ -1263,7 +1263,7 @@ no_diskspace(struct ldbminfo *li, int dbenv_flags) + li->li_dblayer_private->dblayer_dbhome_directory); + return 1; + } +- fsiz = ((PRUint64)dbhome_buf.f_bavail) * ((PRUint64)dbhome_buf.f_bsize); ++ fsiz = ((uint64_t)dbhome_buf.f_bavail) * ((uint64_t)dbhome_buf.f_bsize); + region_dir = li->li_dblayer_private->dblayer_dbhome_directory; + } else { + /* Shared/private memory. 
No need to check disk space, return success */ +@@ -1387,12 +1387,17 @@ dblayer_start(struct ldbminfo *li, int dbmode) + /* Sanity check on cache size on platforms which allow us to figure out + * the available phys mem */ + slapi_pal_meminfo *mi = spal_meminfo_get(); +- if (!util_is_cachesize_sane(mi, &(priv->dblayer_cachesize))) { ++ util_cachesize_result result = util_is_cachesize_sane(mi, &(priv->dblayer_cachesize)); ++ if (result == UTIL_CACHESIZE_ERROR) { ++ slapi_log_err(SLAPI_LOG_CRIT, "dblayer_start", "Unable to determine if cachesize was valid!!!"); ++ } else if (result == UTIL_CACHESIZE_REDUCED) { ++ /* In some cases we saw this go to 0, prevent this. */ ++ if (priv->dblayer_cachesize < MINCACHESIZE) { ++ priv->dblayer_cachesize = MINCACHESIZE; ++ } + /* Oops---looks like the admin misconfigured, let's warn them */ +- slapi_log_err(SLAPI_LOG_WARNING,"dblayer_start", "Likely CONFIGURATION ERROR -" +- "dbcachesize is configured to use more than the available " +- "physical memory, decreased to the largest available size (%"PRIu64" bytes).\n", +- priv->dblayer_cachesize); ++ slapi_log_err(SLAPI_LOG_WARNING, "dblayer_start", "Likely CONFIGURATION ERROR - dbcachesize is configured to use more than the available " ++ "memory, decreased to (%"PRIu64" bytes).\n", priv->dblayer_cachesize); + li->li_dbcachesize = priv->dblayer_cachesize; + } + spal_meminfo_destroy(mi); +@@ -3816,7 +3821,7 @@ static const u_int32_t default_flags = DB_NEXT; + typedef struct txn_test_iter { + DB *db; + DBC *cur; +- size_t cnt; ++ uint64_t cnt; + const char *attr; + u_int32_t flags; + backend *be; +@@ -3938,10 +3943,10 @@ static int txn_test_threadmain(void *param) + Object *inst_obj; + int rc = 0; + txn_test_iter **ttilist = NULL; +- size_t tticnt = 0; ++ uint64_t tticnt = 0; + DB_TXN *txn = NULL; + txn_test_cfg cfg = {0}; +- size_t counter = 0; ++ uint64_t counter = 0; + char keybuf[8192]; + char databuf[8192]; + int dbattempts = 0; +@@ -4062,9 +4067,9 @@ retry_txn: + if (!rc) { + DBT key; + DBT data; +- size_t ii; +- size_t donecnt = 0; +- size_t cnt = 0; ++ uint64_t ii; ++ uint64_t donecnt = 0; ++ uint64_t cnt = 0; + + /* phase 1 - open a cursor to each db */ + if (cfg.verbose) { +diff --git a/ldap/servers/slapd/back-ldbm/dblayer.h b/ldap/servers/slapd/back-ldbm/dblayer.h +index 816c943..77b04fa 100644 +--- a/ldap/servers/slapd/back-ldbm/dblayer.h ++++ b/ldap/servers/slapd/back-ldbm/dblayer.h +@@ -90,8 +90,8 @@ struct dblayer_private + int dblayer_ncache; + int dblayer_previous_ncache; + int dblayer_tx_max; +- size_t dblayer_cachesize; +- size_t dblayer_previous_cachesize; /* Cache size when we last shut down-- ++ uint64_t dblayer_cachesize; ++ uint64_t dblayer_previous_cachesize; /* Cache size when we last shut down-- + * used to determine if we delete + * the mpool */ + int dblayer_recovery_required; +@@ -102,15 +102,15 @@ struct dblayer_private + int dblayer_durable_transactions; + int dblayer_checkpoint_interval; + int dblayer_circular_logging; +- size_t dblayer_page_size; /* db page size if configured, ++ uint64_t dblayer_page_size; /* db page size if configured, + * otherwise default to DBLAYER_PAGESIZE */ +- size_t dblayer_index_page_size; /* db index page size if configured, ++ uint64_t dblayer_index_page_size; /* db index page size if configured, + * otherwise default to + * DBLAYER_INDEX_PAGESIZE */ + int dblayer_idl_divisor; /* divide page size by this to get IDL + * size */ +- size_t dblayer_logfile_size; /* How large can one logfile be ? 
*/ +- size_t dblayer_logbuf_size; /* how large log buffer can be */ ++ uint64_t dblayer_logfile_size; /* How large can one logfile be ? */ ++ uint64_t dblayer_logbuf_size; /* how large log buffer can be */ + int dblayer_file_mode; /* pmode for files we create */ + int dblayer_verbose; /* Get libdb to exhale debugging info */ + int dblayer_debug; /* Will libdb emit debugging info into +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index d5120d3..401cd60 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -1582,9 +1582,9 @@ static config_info ldbm_config[] = { + {CONFIG_DB_DEBUG_CHECKPOINTING, CONFIG_TYPE_ONOFF, "off", &ldbm_config_db_debug_checkpointing_get, &ldbm_config_db_debug_checkpointing_set, 0}, + {CONFIG_DB_HOME_DIRECTORY, CONFIG_TYPE_STRING, "", &ldbm_config_db_home_directory_get, &ldbm_config_db_home_directory_set, 0}, + {CONFIG_IMPORT_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "-1", &ldbm_config_import_cache_autosize_get, &ldbm_config_import_cache_autosize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, +- {CONFIG_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "0", &ldbm_config_cache_autosize_get, &ldbm_config_cache_autosize_set, 0}, ++ {CONFIG_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "10", &ldbm_config_cache_autosize_get, &ldbm_config_cache_autosize_set, 0}, + {CONFIG_CACHE_AUTOSIZE_SPLIT, CONFIG_TYPE_INT, "40", &ldbm_config_cache_autosize_split_get, &ldbm_config_cache_autosize_split_set, 0}, +- {CONFIG_IMPORT_CACHESIZE, CONFIG_TYPE_SIZE_T, "20000000", &ldbm_config_import_cachesize_get, &ldbm_config_import_cachesize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_IMPORT_CACHESIZE, CONFIG_TYPE_SIZE_T, "16777216", &ldbm_config_import_cachesize_get, &ldbm_config_import_cachesize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_IDL_SWITCH, CONFIG_TYPE_STRING, "new", &ldbm_config_idl_get_idl_new, &ldbm_config_idl_set_tune, CONFIG_FLAG_ALWAYS_SHOW}, + {CONFIG_IDL_UPDATE, CONFIG_TYPE_ONOFF, "on", &ldbm_config_idl_get_update, &ldbm_config_idl_set_update, 0}, + {CONFIG_BYPASS_FILTER_TEST, CONFIG_TYPE_STRING, "on", &ldbm_config_get_bypass_filter_test, &ldbm_config_set_bypass_filter_test, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +index 62cdbc3..36d830d 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +@@ -93,6 +93,7 @@ ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, in + int retval = LDAP_SUCCESS; + size_t val = (size_t) value; + uint64_t delta = 0; ++ uint64_t delta_original = 0; + + /* Do whatever we can to make sure the data is ok. */ + /* There is an error here. 
We check the new val against our current mem-alloc +@@ -108,18 +109,34 @@ ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, in + if (apply) { + if (val > inst->inst_cache.c_maxsize) { + delta = val - inst->inst_cache.c_maxsize; ++ delta_original = delta; + + util_cachesize_result sane; + slapi_pal_meminfo *mi = spal_meminfo_get(); + sane = util_is_cachesize_sane(mi, &delta); + spal_meminfo_destroy(mi); + +- if (sane != UTIL_CACHESIZE_VALID){ +- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: cachememsize value is too large."); +- slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", "cachememsize value is too large.\n"); ++ if (sane == UTIL_CACHESIZE_ERROR){ ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: unable to determine system memory limits."); ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", "Enable to determine system memory limits.\n"); + return LDAP_UNWILLING_TO_PERFORM; ++ } else if (sane == UTIL_CACHESIZE_REDUCED) { ++ slapi_log_err(SLAPI_LOG_WARNING, "ldbm_instance_config_cachememsize_set", "delta +%"PRIu64" of request %"PRIu64" reduced to %"PRIu64"\n", delta_original, val, delta); ++ /* ++ * This works as: value = 100 ++ * delta_original to inst, 20; ++ * delta reduced to 5: ++ * 100 - (20 - 5) == 85; ++ * so if you recalculated delta now (val - inst), it would be 5. ++ */ ++ val = val - (delta_original - delta); + } + } ++ if (inst->inst_cache.c_maxsize < MINCACHESIZE || val < MINCACHESIZE) { ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", "force a minimal value %"PRIu64"\n", MINCACHESIZE); ++ /* This value will trigger an autotune next start up, but it should increase only */ ++ val = MINCACHESIZE; ++ } + cache_set_max_size(&(inst->inst_cache), val, CACHE_TYPE_ENTRY); + } + +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index 0c76580..d9547d8 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -1392,7 +1392,7 @@ typedef enum _util_cachesize_result { + * \return util_cachesize_result. + * \sa util_cachesize_result, spal_meminfo_get + */ +-util_cachesize_result util_is_cachesize_sane(slapi_pal_meminfo *mi, size_t *cachesize); ++util_cachesize_result util_is_cachesize_sane(slapi_pal_meminfo *mi, uint64_t *cachesize); + + /** + * Retrieve the number of threads the server should run with based on this hardware. +diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c +index 012e83d..4ff6d41 100644 +--- a/ldap/servers/slapd/util.c ++++ b/ldap/servers/slapd/util.c +@@ -1468,16 +1468,26 @@ util_is_cachesize_sane(slapi_pal_meminfo *mi, uint64_t *cachesize) + return UTIL_CACHESIZE_ERROR; + } + ++ util_cachesize_result result = UTIL_CACHESIZE_VALID; + slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "Available bytes %"PRIu64", requested bytes %"PRIu64"\n", mi->system_available_bytes, *cachesize); + if (*cachesize > mi->system_available_bytes) { +- /* Since we are ask for more than what's available, we give 3/4 of the remaining. ++ /* Since we are ask for more than what's available, we give 1/2 of the remaining. 
+ * the remaining system mem to the cachesize instead, and log a warning + */ +- *cachesize = (mi->system_available_bytes * 0.75); +- slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "Adjusted cachesize to %"PRIu64"\n", *cachesize); +- return UTIL_CACHESIZE_REDUCED; ++ uint64_t adjust_cachesize = (mi->system_available_bytes * 0.5); ++ if (adjust_cachesize > *cachesize) { ++ slapi_log_err(SLAPI_LOG_CRIT, "util_is_cachesize_sane", "Invalid adjusted cachesize is greater than request %"PRIu64, adjust_cachesize); ++ return UTIL_CACHESIZE_ERROR; ++ } ++ if (adjust_cachesize < (16 * mi->pagesize_bytes)) { ++ /* At minimum respond with 16 pages - that's 64k on x86_64 */ ++ adjust_cachesize = 16 * mi->pagesize_bytes; ++ } ++ *cachesize = adjust_cachesize; ++ slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "Adjusted cachesize down to %"PRIu64"\n", *cachesize); ++ result = UTIL_CACHESIZE_REDUCED; + } +- return UTIL_CACHESIZE_VALID; ++ return result; + } + + long +-- +2.9.3 + diff --git a/SOURCES/0030-Ticket-49231-fix-sasl-mech-handling.patch b/SOURCES/0030-Ticket-49231-fix-sasl-mech-handling.patch new file mode 100644 index 0000000..8d5d46f --- /dev/null +++ b/SOURCES/0030-Ticket-49231-fix-sasl-mech-handling.patch @@ -0,0 +1,328 @@ +From 88a0ce3c3f89244a77dfa618c8a5064bda30f376 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Wed, 26 Apr 2017 15:48:30 +1000 +Subject: [PATCH] Ticket 49231 - fix sasl mech handling + +Bug Description: In our sasl code we had two issues. One was that +we did not correctly apply the list of sasl allowed mechs to our +rootdse list in ids_sasl_listmech. The second was that on config +reset, we did not correctly set null to the value. + +Fix Description: Fix the handling of the mech lists to allow +reset, and allow the mech list to be updated properly. + +https://pagure.io/389-ds-base/issue/49231 + +Author: wibrown + +Review by: mreynolds (Thanks!) +--- + dirsrvtests/tests/suites/sasl/allowed_mechs.py | 43 ++++++++++++++++++ + ldap/servers/slapd/charray.c | 48 +++++++++++++++++--- + ldap/servers/slapd/libglobs.c | 62 ++++++++++++++++++++------ + ldap/servers/slapd/proto-slap.h | 1 + + ldap/servers/slapd/saslbind.c | 21 ++++++++- + ldap/servers/slapd/slap.h | 1 + + ldap/servers/slapd/slapi-private.h | 1 + + 7 files changed, 156 insertions(+), 21 deletions(-) + create mode 100644 dirsrvtests/tests/suites/sasl/allowed_mechs.py + +diff --git a/dirsrvtests/tests/suites/sasl/allowed_mechs.py b/dirsrvtests/tests/suites/sasl/allowed_mechs.py +new file mode 100644 +index 0000000..a3e385e +--- /dev/null ++++ b/dirsrvtests/tests/suites/sasl/allowed_mechs.py +@@ -0,0 +1,43 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2017 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++ ++import pytest ++import ldap ++ ++import time ++ ++from lib389.topologies import topology_st ++ ++def test_sasl_allowed_mechs(topology_st): ++ standalone = topology_st.standalone ++ ++ # Get the supported mechs. This should contain PLAIN, GSSAPI, EXTERNAL at least ++ orig_mechs = standalone.rootdse.supported_sasl() ++ print(orig_mechs) ++ assert('GSSAPI' in orig_mechs) ++ assert('PLAIN' in orig_mechs) ++ assert('EXTERNAL' in orig_mechs) ++ ++ # Now edit the supported mechs. CHeck them again. 
++ standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'EXTERNAL, PLAIN') ++ ++ limit_mechs = standalone.rootdse.supported_sasl() ++ print(limit_mechs) ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) ++ assert('GSSAPI' not in limit_mechs) ++ ++ # Do a config reset ++ standalone.config.reset('nsslapd-allowed-sasl-mechanisms') ++ ++ # check the supported list is the same as our first check. ++ final_mechs = standalone.rootdse.supported_sasl() ++ print(final_mechs) ++ assert(set(final_mechs) == set(orig_mechs)) ++ +diff --git a/ldap/servers/slapd/charray.c b/ldap/servers/slapd/charray.c +index 5551dcc..6b89714 100644 +--- a/ldap/servers/slapd/charray.c ++++ b/ldap/servers/slapd/charray.c +@@ -348,8 +348,9 @@ slapi_str2charray_ext( char *str, char *brkstr, int allow_dups ) + } + } + +- if ( !dup_found ) ++ if ( !dup_found ) { + res[i++] = slapi_ch_strdup( s ); ++ } + } + res[i] = NULL; + +@@ -413,10 +414,11 @@ charray_subtract(char **a, char **b, char ***c) + char **bp, **cp, **tmp; + char **p; + +- if (c) ++ if (c) { + tmp = *c = cool_charray_dup(a); +- else ++ } else { + tmp = a; ++ } + + for (cp = tmp; cp && *cp; cp++) { + for (bp = b; bp && *bp; bp++) { +@@ -433,12 +435,48 @@ charray_subtract(char **a, char **b, char ***c) + for (p = cp+1; *p && *p == (char *)SUBTRACT_DEL; p++) + ; + *cp = *p; +- if (*p == NULL) ++ if (*p == NULL) { + break; +- else ++ } else { + *p = SUBTRACT_DEL; ++ } ++ } ++ } ++} ++ ++/* ++ * Provides the intersection of two arrays. ++ * IE if you have: ++ * (A, B, C) ++ * (B, D, E) ++ * result is (B,) ++ * a and b are NOT consumed in the process. ++ */ ++char ** ++charray_intersection(char **a, char **b) { ++ char **result; ++ size_t rp = 0; ++ ++ if (a == NULL || b == NULL) { ++ return NULL; ++ } ++ ++ size_t a_len = 0; ++ /* Find how long A is. */ ++ for (; a[a_len] != NULL; a_len++); ++ ++ /* Allocate our result, it can't be bigger than A */ ++ result = (char **)slapi_ch_calloc(1, sizeof(char *) * (a_len + 1)); ++ ++ /* For each in A, see if it's in b */ ++ for (size_t i = 0; a[i] != NULL; i++) { ++ if (charray_get_index(b, a[i]) != -1) { ++ result[rp] = slapi_ch_strdup(a[i]); ++ rp++; + } + } ++ ++ return result; + } + + int +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index 0e818a9..2fc9fbf 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -7090,9 +7090,30 @@ config_set_entryusn_import_init( const char *attrname, char *value, + return retVal; + } + ++char ** ++config_get_allowed_sasl_mechs_array(void) ++{ ++ /* ++ * array of mechs. If is null, returns NULL thanks to ch_array_dup. ++ * Caller must free! ++ */ ++ char **retVal; ++ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); ++ ++ CFG_LOCK_READ(slapdFrontendConfig); ++ retVal = slapi_ch_array_dup(slapdFrontendConfig->allowed_sasl_mechs_array); ++ CFG_UNLOCK_READ(slapdFrontendConfig); ++ ++ return retVal; ++} ++ + char * +-config_get_allowed_sasl_mechs() ++config_get_allowed_sasl_mechs(void) + { ++ /* ++ * Space seperated list of allowed mechs ++ * if this is NULL, means *all* mechs are allowed! ++ */ + char *retVal; + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + +@@ -7113,22 +7134,35 @@ config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf, + return LDAP_SUCCESS; + } + +- /* cyrus sasl doesn't like comma separated lists */ +- remove_commas(value); ++ /* During a reset, the value is "", so we have to handle this case. 
*/ ++ if (strcmp(value, "") != 0) { ++ /* cyrus sasl doesn't like comma separated lists */ ++ remove_commas(value); ++ ++ if(invalid_sasl_mech(value)){ ++ slapi_log_err(SLAPI_LOG_ERR,"config_set_allowed_sasl_mechs", ++ "Invalid value/character for sasl mechanism (%s). Use ASCII " ++ "characters, upto 20 characters, that are upper-case letters, " ++ "digits, hyphens, or underscores\n", value); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + +- if(invalid_sasl_mech(value)){ +- slapi_log_err(SLAPI_LOG_ERR,"config_set_allowed_sasl_mechs", +- "Invalid value/character for sasl mechanism (%s). Use ASCII " +- "characters, upto 20 characters, that are upper-case letters, " +- "digits, hyphens, or underscores\n", value); +- return LDAP_UNWILLING_TO_PERFORM; ++ CFG_LOCK_WRITE(slapdFrontendConfig); ++ slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs); ++ slapi_ch_array_free(slapdFrontendConfig->allowed_sasl_mechs_array); ++ slapdFrontendConfig->allowed_sasl_mechs = slapi_ch_strdup(value); ++ slapdFrontendConfig->allowed_sasl_mechs_array = slapi_str2charray_ext(value, " ", 0); ++ CFG_UNLOCK_WRITE(slapdFrontendConfig); ++ } else { ++ /* If this value is "", we need to set the list to *all* possible mechs */ ++ CFG_LOCK_WRITE(slapdFrontendConfig); ++ slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs); ++ slapi_ch_array_free(slapdFrontendConfig->allowed_sasl_mechs_array); ++ slapdFrontendConfig->allowed_sasl_mechs = NULL; ++ slapdFrontendConfig->allowed_sasl_mechs_array = NULL; ++ CFG_UNLOCK_WRITE(slapdFrontendConfig); + } + +- CFG_LOCK_WRITE(slapdFrontendConfig); +- slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs); +- slapdFrontendConfig->allowed_sasl_mechs = slapi_ch_strdup(value); +- CFG_UNLOCK_WRITE(slapdFrontendConfig); +- + return LDAP_SUCCESS; + } + +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index fdb4bf0..9696ead 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -553,6 +553,7 @@ size_t config_get_ndn_cache_size(void); + int config_get_ndn_cache_enabled(void); + int config_get_return_orig_type_switch(void); + char *config_get_allowed_sasl_mechs(void); ++char **config_get_allowed_sasl_mechs_array(void); + int config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf, int apply); + int config_get_schemamod(void); + int config_set_ignore_vattrs(const char *attrname, char *value, char *errorbuf, int apply); +diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c +index 2d6fb64..6e544e6 100644 +--- a/ldap/servers/slapd/saslbind.c ++++ b/ldap/servers/slapd/saslbind.c +@@ -744,7 +744,10 @@ void ids_sasl_server_new(Connection *conn) + */ + char **ids_sasl_listmech(Slapi_PBlock *pb) + { +- char **ret, **others; ++ char **ret; ++ char **config_ret; ++ char **sup_ret; ++ char **others; + const char *str; + char *dupstr; + sasl_conn_t *sasl_conn; +@@ -754,7 +757,7 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + PR_ASSERT(pb); + + /* hard-wired mechanisms and slapi plugin registered mechanisms */ +- ret = slapi_get_supported_saslmechanisms_copy(); ++ sup_ret = slapi_get_supported_saslmechanisms_copy(); + + if (pb->pb_conn == NULL) return ret; + +@@ -777,6 +780,20 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + } + PR_ExitMonitor(pb->pb_conn->c_mutex); + ++ /* Get the servers "allowed" list */ ++ config_ret = config_get_allowed_sasl_mechs_array(); ++ ++ /* Remove any content that isn't in the allowed list */ ++ if (config_ret != NULL) { ++ /* Get the set of 
supported mechs in the insection of the two */ ++ ret = charray_intersection(sup_ret, config_ret); ++ charray_free(sup_ret); ++ charray_free(config_ret); ++ } else { ++ /* The allowed list was empty, just take our supported list. */ ++ ret = sup_ret; ++ } ++ + slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "<=\n"); + + return ret; +diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h +index abfad20..5e44cc8 100644 +--- a/ldap/servers/slapd/slap.h ++++ b/ldap/servers/slapd/slap.h +@@ -2577,6 +2577,7 @@ typedef struct _slapdFrontendConfig { + int pagedsizelimit; + char *default_naming_context; /* Default naming context (normalized) */ + char *allowed_sasl_mechs; /* comma/space separated list of allowed sasl mechs */ ++ char **allowed_sasl_mechs_array; /* Array of allow sasl mechs */ + int sasl_max_bufsize; /* The max receive buffer size for SASL */ + + /* disk monitoring */ +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index d9547d8..3f732e8 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -831,6 +831,7 @@ int charray_remove(char **a, const char *s, int freeit); + char ** cool_charray_dup( char **a ); + void cool_charray_free( char **array ); + void charray_subtract( char **a, char **b, char ***c ); ++char **charray_intersection(char **a, char **b); + int charray_get_index(char **array, char *s); + int charray_normdn_add(char ***chararray, char *dn, char *errstr); + +-- +2.9.3 + diff --git a/SOURCES/0031-Ticket-49230-slapi_register_plugin-creates-config-en.patch b/SOURCES/0031-Ticket-49230-slapi_register_plugin-creates-config-en.patch new file mode 100644 index 0000000..f8e5141 --- /dev/null +++ b/SOURCES/0031-Ticket-49230-slapi_register_plugin-creates-config-en.patch @@ -0,0 +1,50 @@ +From 91a162d66c2fe239c009f1ee16974d310b333e7e Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Fri, 21 Apr 2017 17:16:55 +0200 +Subject: [PATCH] Ticket 49230 - slapi_register_plugin creates config entry + where it should not + +Bug Description: + slapi-register-plugin systematically create an entry under + 'cn=plugins,cn=config' because it is not taking into account + the flag 'add_entry in 'plugin_setup'. 
+ + This is potentially a regression introduced by + https://pagure.io/389-ds-base/issue/49066 (TBC) + +Fix Description: + Test 'add_entry' before adding the entry + + https://pagure.io/389-ds-base/issue/49230 + +Review by: Mark Reynolds, William Brown +--- + ldap/servers/slapd/plugin.c | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c +index ac8306f..a5e0724 100644 +--- a/ldap/servers/slapd/plugin.c ++++ b/ldap/servers/slapd/plugin.c +@@ -3132,11 +3132,13 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group, + add_plugin_entry_dn(dn_copy); + } + +- /* make a copy of the plugin entry for our own use because it will +- be freed later by the caller */ +- Slapi_Entry *e_copy = slapi_entry_dup(plugin_entry); +- /* new_plugin_entry(&plugin_entries, plugin_entry, plugin); */ +- new_plugin_entry(&dep_plugin_entries, e_copy, plugin); ++ if (add_entry) { ++ /* make a copy of the plugin entry for our own use because it will ++ be freed later by the caller */ ++ Slapi_Entry *e_copy = slapi_entry_dup(plugin_entry); ++ /* new_plugin_entry(&plugin_entries, plugin_entry, plugin); */ ++ new_plugin_entry(&dep_plugin_entries, e_copy, plugin); ++ } + + PLUGIN_CLEANUP: + if (status) { +-- +2.9.3 + diff --git a/SOURCES/0032-49227-ldapsearch-for-nsslapd-errorlog-level-re.patch b/SOURCES/0032-49227-ldapsearch-for-nsslapd-errorlog-level-re.patch new file mode 100644 index 0000000..d2fd071 --- /dev/null +++ b/SOURCES/0032-49227-ldapsearch-for-nsslapd-errorlog-level-re.patch @@ -0,0 +1,221 @@ +From e5f78f9f6a8cab7bfbd33e14912508183f9da283 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 20 Apr 2017 15:01:33 -0400 +Subject: [PATCH] Issue 49227 - ldapsearch for nsslapd-errorlog-level returns + incorrect values + +Bug Description: ldapsearch for the error log level returns the internal + bitmask value and not the value set in cn=config. + +Fix Description: When setting the error log level store the initial/untouched + value in the config entry first, then set the bitmasked + global log level variable. + +https://pagure.io/389-ds-base/issue/49227 + +Reviewed by: nhosoi(Thanks!) 
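As a rough standalone sketch of the store-raw/apply-defaults split described above (all names below are invented for illustration and are not the server's actual symbols):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_DEFAULT_BITS 16384u /* stand-in for the always-applied default level bits */

struct example_cfg {
    uint32_t configured_level; /* what the admin set and what a search should return */
    uint32_t effective_level;  /* what the logging code actually consults */
};

static void example_set_errorlog_level(struct example_cfg *cfg, uint32_t level)
{
    cfg->configured_level = level;                        /* store the value untouched */
    cfg->effective_level  = level | EXAMPLE_DEFAULT_BITS; /* apply the defaults internally */
}

int main(void)
{
    struct example_cfg cfg;
    example_set_errorlog_level(&cfg, 8); /* e.g. connection logging */
    /* Reading the configuration back reports 8, not the bitmasked internal value. */
    printf("configured=%" PRIu32 " effective=%" PRIu32 "\n",
           cfg.configured_level, cfg.effective_level);
    return 0;
}

The point is only that the value handed back to readers of the config entry is the one the administrator set, while the value the logging code tests always carries the default bits.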
+--- + dirsrvtests/tests/tickets/ticket49227_test.py | 111 ++++++++++++++++++++++++++ + ldap/servers/slapd/configdse.c | 4 +- + ldap/servers/slapd/libglobs.c | 11 +-- + ldap/servers/slapd/slap.h | 3 +- + 4 files changed, 121 insertions(+), 8 deletions(-) + create mode 100644 dirsrvtests/tests/tickets/ticket49227_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket49227_test.py b/dirsrvtests/tests/tickets/ticket49227_test.py +new file mode 100644 +index 0000000..86e0b9a +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket49227_test.py +@@ -0,0 +1,111 @@ ++import os ++import time ++import ldap ++import logging ++import pytest ++from lib389._constants import * ++from lib389.properties import * ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st as topo ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++DEFAULT_LEVEL = "16384" ++ ++ ++def set_level(topo, level): ++ ''' Set the error log level ++ ''' ++ try: ++ topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', level)]) ++ time.sleep(1) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to set loglevel to %s - error: %s' % (level, str(e))) ++ assert False ++ ++ ++def get_level(topo): ++ ''' Set the error log level ++ ''' ++ try: ++ config = topo.standalone.search_s("cn=config", ldap.SCOPE_BASE, "objectclass=top") ++ time.sleep(1) ++ return config[0].getValue('nsslapd-errorlog-level') ++ except ldap.LDAPError as e: ++ log.fatal('Failed to get loglevel - error: %s' % (str(e))) ++ assert False ++ ++ ++def get_log_size(topo): ++ ''' Get the errors log size ++ ''' ++ statinfo = os.stat(topo.standalone.errlog) ++ return statinfo.st_size ++ ++ ++def test_ticket49227(topo): ++ """Set the error log to varying levels, and make sure a search for that value ++ reflects the expected value (not the bitmasked value. 
++ """ ++ log_size = get_log_size(topo) ++ ++ # Check the default level ++ level = get_level(topo) ++ if level != DEFAULT_LEVEL: ++ log.fatal('Incorrect default logging level: %s' % (level)) ++ assert False ++ ++ # Set connection logging ++ set_level(topo, '8') ++ level = get_level(topo) ++ if level != '8': ++ log.fatal('Incorrect connection logging level: %s' % (level)) ++ assert False ++ ++ # Check the actual log ++ new_size = get_log_size(topo) ++ if new_size == log_size: ++ # Size should be different ++ log.fatal('Connection logging is not working') ++ assert False ++ ++ # Set default logging using zero ++ set_level(topo, '0') ++ log_size = get_log_size(topo) ++ level = get_level(topo) ++ if level != DEFAULT_LEVEL: ++ log.fatal('Incorrect default logging level: %s' % (level)) ++ assert False ++ ++ # Check the actual log ++ new_size = get_log_size(topo) ++ if new_size != log_size: ++ # Size should be the size ++ log.fatal('Connection logging is still on') ++ assert False ++ ++ # Set default logging using the default value ++ set_level(topo, DEFAULT_LEVEL) ++ level = get_level(topo) ++ if level != DEFAULT_LEVEL: ++ log.fatal('Incorrect default logging level: %s' % (level)) ++ assert False ++ ++ # Check the actual log ++ new_size = get_log_size(topo) ++ if new_size != log_size: ++ # Size should be the size ++ log.fatal('Connection logging is still on') ++ assert False ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/ldap/servers/slapd/configdse.c b/ldap/servers/slapd/configdse.c +index 78162c9..08d1ace 100644 +--- a/ldap/servers/slapd/configdse.c ++++ b/ldap/servers/slapd/configdse.c +@@ -404,12 +404,12 @@ modify_config_dse(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* e, in + config_attr); + rc = LDAP_UNWILLING_TO_PERFORM; + } else if (ignore_attr_type(config_attr)) { +- slapi_log_err(SLAPI_LOG_WARNING, "modify_config_dse", ++ slapi_log_err(SLAPI_LOG_CONFIG, "modify_config_dse", + "Modification of attribute \"%s\" is not allowed, ignoring!\n", + config_attr); + } else if (SLAPI_IS_MOD_ADD(mods[i]->mod_op)) { + if (apply_mods) { /* log warning once */ +- slapi_log_err(SLAPI_LOG_WARNING, "modify_config_dse", ++ slapi_log_err(SLAPI_LOG_CONFIG, "modify_config_dse", + "Adding configuration attribute \"%s\"\n", + config_attr); + } +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index 2fc9fbf..bb51827 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -308,7 +308,7 @@ static struct config_get_and_set { + {CONFIG_LOGLEVEL_ATTRIBUTE, config_set_errorlog_level, + NULL, 0, + (void**)&global_slapdFrontendConfig.errorloglevel, +- CONFIG_SPECIAL_ERRORLOGLEVEL, NULL, SLAPD_DEFAULT_ERRORLOG_LEVEL_STR}, ++ CONFIG_SPECIAL_ERRORLOGLEVEL, NULL, SLAPD_DEFAULT_FE_ERRORLOG_LEVEL_STR}, + {CONFIG_ERRORLOG_LOGGING_ENABLED_ATTRIBUTE, NULL, + log_set_logging, SLAPD_ERROR_LOG, + (void**)&global_slapdFrontendConfig.errorlog_logging_enabled, +@@ -1597,7 +1597,7 @@ FrontendConfig_init(void) { + cfg->errorlog_minfreespace = SLAPD_DEFAULT_LOG_MINFREESPACE; + cfg->errorlog_exptime = SLAPD_DEFAULT_LOG_EXPTIME; + cfg->errorlog_exptimeunit = slapi_ch_strdup(SLAPD_INIT_LOG_EXPTIMEUNIT); +- cfg->errorloglevel = SLAPD_DEFAULT_ERRORLOG_LEVEL; ++ cfg->errorloglevel = SLAPD_DEFAULT_FE_ERRORLOG_LEVEL; + + init_auditlog_logging_enabled = cfg->auditlog_logging_enabled = LDAP_OFF; + cfg->auditlog_mode = 
slapi_ch_strdup(SLAPD_INIT_LOG_MODE); +@@ -4474,9 +4474,10 @@ config_set_errorlog_level( const char *attrname, char *value, char *errorbuf, in + + if ( apply ) { + CFG_LOCK_WRITE(slapdFrontendConfig); +- level |= SLAPD_DEFAULT_ERRORLOG_LEVEL; /* Always apply the new default error levels for now */ +- slapd_ldap_debug = level; + slapdFrontendConfig->errorloglevel = level; ++ /* Set the internal value - apply the default error level */ ++ level |= SLAPD_DEFAULT_ERRORLOG_LEVEL; ++ slapd_ldap_debug = level; + CFG_UNLOCK_WRITE(slapdFrontendConfig); + } + return retVal; +@@ -5771,7 +5772,7 @@ config_get_errorlog_level(){ + retVal = slapdFrontendConfig->errorloglevel; + CFG_UNLOCK_READ(slapdFrontendConfig); + +- return retVal; ++ return retVal |= SLAPD_DEFAULT_ERRORLOG_LEVEL; + } + + /* return integer -- don't worry about locking similar to config_check_referral_mode +diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h +index 5e44cc8..04c9b79 100644 +--- a/ldap/servers/slapd/slap.h ++++ b/ldap/servers/slapd/slap.h +@@ -343,7 +343,8 @@ typedef void (*VFPV)(); /* takes undefined arguments */ + * LDAP_DEBUG_WARNING | LDAP_DEBUG_NOTICE | LDAP_DEBUG_INFO) + */ + #define SLAPD_DEFAULT_ERRORLOG_LEVEL 266354688 +-#define SLAPD_DEFAULT_ERRORLOG_LEVEL_STR "266354688" ++#define SLAPD_DEFAULT_FE_ERRORLOG_LEVEL 16384 /* frontend log level */ ++#define SLAPD_DEFAULT_FE_ERRORLOG_LEVEL_STR "16384" + #define SLAPD_DEFAULT_ACCESSLOG_LEVEL 256 + #define SLAPD_DEFAULT_ACCESSLOG_LEVEL_STR "256" + +-- +2.9.3 + diff --git a/SOURCES/0033-Ticket-48989-fix-perf-counters.patch b/SOURCES/0033-Ticket-48989-fix-perf-counters.patch new file mode 100644 index 0000000..fd75c88 --- /dev/null +++ b/SOURCES/0033-Ticket-48989-fix-perf-counters.patch @@ -0,0 +1,57 @@ +From a7b9a9ddbff47c2226e60e403374d5e451fac344 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 2 May 2017 13:48:33 -0400 +Subject: [PATCH] Ticket 48989 - fix perf counters + +Description: There was a copy & paste error where page_access_rate + was added, but it listed the wrong attribute name. However, + the page_access_rate formula doesn't make sense, nor are + there more page stats to use from Berklely DB. Because + of this I just removed page_access_rate. + +https://pagure.io/389-ds-base/issue/48989 + +Reviewed by: firstyear(Thanks!) 
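As a loose illustration of the attribute-to-counter mapping this patch trims (names are invented; this is not code from perfctrs.c), a table of attribute names and struct offsets might look like:

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct example_counters {
    uint64_t cache_hit;
    uint64_t page_create_rate;
};

struct example_at_map {
    const char *attr_name;
    size_t offset;
};

static const struct example_at_map example_map[] = {
    { "example-cache-hit",        offsetof(struct example_counters, cache_hit) },
    { "example-page-create-rate", offsetof(struct example_counters, page_create_rate) },
};

int main(void)
{
    struct example_counters c = { 42, 7 };
    for (size_t i = 0; i < sizeof(example_map) / sizeof(example_map[0]); i++) {
        /* Look up the counter value through the recorded offset. */
        const uint64_t *value =
            (const uint64_t *)((const char *)&c + example_map[i].offset);
        printf("%s: %" PRIu64 "\n", example_map[i].attr_name, *value);
    }
    return 0;
}

A copy-and-paste slip in such a table shows up exactly as the patch describes: a row whose attribute name does not match the field its offset points at.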
+ +(cherry picked from commit 18a77e957119bd9994833b7290747f99d73b3745) +--- + ldap/servers/slapd/back-ldbm/perfctrs.c | 3 --- + ldap/servers/slapd/back-ldbm/perfctrs.h | 1 - + 2 files changed, 4 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/perfctrs.c b/ldap/servers/slapd/back-ldbm/perfctrs.c +index 5929dea..9132097 100644 +--- a/ldap/servers/slapd/back-ldbm/perfctrs.c ++++ b/ldap/servers/slapd/back-ldbm/perfctrs.c +@@ -165,7 +165,6 @@ void perfctrs_update(perfctrs_private *priv, DB_ENV *db_env) + if (0 == ret) { + #define ONEG 1073741824 + perf->cache_size_bytes = mpstat->st_gbytes * ONEG + mpstat->st_bytes; +- perf->page_access_rate = mpstat->st_cache_hit + mpstat->st_cache_miss; + perf->cache_hit = mpstat->st_cache_hit; + perf->cache_try = mpstat->st_cache_hit + mpstat->st_cache_miss; + perf->page_create_rate = mpstat->st_page_create; +@@ -257,8 +256,6 @@ static SlapiLDBMPerfctrATMap perfctr_at_map[] = { + offsetof( performance_counters, log_write_rate ) }, + { SLAPI_LDBM_PERFCTR_AT_PREFIX "longest-chain-length", + offsetof( performance_counters, longest_chain_length ) }, +- { SLAPI_LDBM_PERFCTR_AT_PREFIX "objects-locked", +- offsetof( performance_counters, page_access_rate ) }, + { SLAPI_LDBM_PERFCTR_AT_PREFIX "page-create-rate", + offsetof( performance_counters, page_create_rate ) }, + { SLAPI_LDBM_PERFCTR_AT_PREFIX "page-read-rate", +diff --git a/ldap/servers/slapd/back-ldbm/perfctrs.h b/ldap/servers/slapd/back-ldbm/perfctrs.h +index 64c79e1..a6213ec 100644 +--- a/ldap/servers/slapd/back-ldbm/perfctrs.h ++++ b/ldap/servers/slapd/back-ldbm/perfctrs.h +@@ -32,7 +32,6 @@ struct _performance_counters { + uint64_t log_write_rate; + uint64_t log_bytes_since_checkpoint; + uint64_t cache_size_bytes; +- uint64_t page_access_rate; + uint64_t cache_hit; + uint64_t cache_try; + uint64_t page_create_rate; +-- +2.9.3 + diff --git a/SOURCES/0034-Ticket-48681-logconv.pl-fix-sasl-bind-stats.patch b/SOURCES/0034-Ticket-48681-logconv.pl-fix-sasl-bind-stats.patch new file mode 100644 index 0000000..9d63e57 --- /dev/null +++ b/SOURCES/0034-Ticket-48681-logconv.pl-fix-sasl-bind-stats.patch @@ -0,0 +1,41 @@ +From e9514af2fed9f882a8d11d509ffb99e49a304438 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 2 May 2017 16:49:59 -0400 +Subject: [PATCH] Ticket 48681 - logconv.pl - fix sasl/bind stats + +Description: Fixed the bind and sasl bind total counts, also adjusted the + v3 bind count to match the sasl bind. + +https://pagure.io/389-ds-base/issue/48681 + +Reviewed by: firstyear(Thanks!) 
+ +(cherry picked from commit 770bf3a2341f1ea2e0778a6443b0f89ed77e73af) +--- + ldap/admin/src/logconv.pl | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl +index 96639f2..c30e175 100755 +--- a/ldap/admin/src/logconv.pl ++++ b/ldap/admin/src/logconv.pl +@@ -2533,6 +2533,7 @@ sub parseLineNormal + } + if (/ BIND / && /method=sasl/i){ + $saslBindCount++; ++ $bindCount++; + if ($_ =~ /mech=(.*)/i ){ + my $mech = $1; + $hashes->{saslmech}->{$mech}++; +@@ -2550,6 +2551,8 @@ sub parseLineNormal + if (/ RESULT err=14 tag=97 / && / SASL bind in progress/){ + # Drop the sasl bind count since this is step in the bind process + $saslBindCount--; ++ $bindCount--; ++ $v3BindCount--; + my ($conn, $op); + if ($_ =~ /conn= *([0-9A-Z]+) +op= *([0-9\-]+)/i){ + $conn = $1; +-- +2.9.3 + diff --git a/SOURCES/0035-Ticket-49241-Update-man-page-and-usage-for-db2bak.pl.patch b/SOURCES/0035-Ticket-49241-Update-man-page-and-usage-for-db2bak.pl.patch new file mode 100644 index 0000000..6cd8932 --- /dev/null +++ b/SOURCES/0035-Ticket-49241-Update-man-page-and-usage-for-db2bak.pl.patch @@ -0,0 +1,51 @@ +From cdcc387f6f1287da1edda418d746e6c2c772e5bd Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 4 May 2017 15:44:51 -0400 +Subject: [PATCH] Ticket 49241 - Update man page and usage for db2bak.pl + +Description: The usage and man page should state thtthe backup directory + is actually a symlink to the the server's backup directory. + Otherwise it is misleading, and could eventaully lead to + diskspace issues. + +https://pagure.io/389-ds-base/issue/49241 + +Reviewed by: firstyear(Thanks!) + +(cherry picked from commit 0804c43991fa29ef7bd946b3e5a37844e2b87da4) +--- + ldap/admin/src/scripts/db2bak.pl.in | 4 ++-- + man/man8/db2bak.pl.8 | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/ldap/admin/src/scripts/db2bak.pl.in b/ldap/admin/src/scripts/db2bak.pl.in +index c73caa1..73d4187 100644 +--- a/ldap/admin/src/scripts/db2bak.pl.in ++++ b/ldap/admin/src/scripts/db2bak.pl.in +@@ -33,8 +33,8 @@ sub usage { + print(STDERR " -w - - Prompt for Directory Manager's password\n"); + print(STDERR " -Z serverID - Server instance identifier\n"); + print(STDERR " -j filename - Read Directory Manager's password from file\n"); +- print(STDERR " -A backupdir - Backup directory (backupdir/ID-)\n"); +- print(STDERR " -a backupdir - Backup directory\n"); ++ print(STDERR " -A backupdir - Backup directory symlink(backupdir/ID-)\n"); ++ print(STDERR " -a backupdir - Backup directory symlink\n"); + print(STDERR " -t dbtype - Database type (default: ldbm database)\n"); + print(STDERR " -P protocol - STARTTLS, LDAPS, LDAPI, LDAP (default: uses most secure protocol available)\n"); + print(STDERR " -h - Display usage\n"); +diff --git a/man/man8/db2bak.pl.8 b/man/man8/db2bak.pl.8 +index a752885..c51ccae 100644 +--- a/man/man8/db2bak.pl.8 ++++ b/man/man8/db2bak.pl.8 +@@ -47,7 +47,7 @@ The name of the file that contains the root DN password. + The backend database type (default: ldbm database). + .TP + .B \fB\-a\fR \fIbackupdir\fR +-The directory where the backup should be stored. ++The directory where the backup should be stored. This directory is a symbolic link to the actual backup files located under "nsslapd-bakdir" directory that is set in the "cn=config" entry. 
+ .TP + .B \fB\-A\fR \fIbackupdir\fR + This is similar to \fB-a\fR, except that a sub-directory of \fIbackupdir\fR will be created for the backup, and the name of the sub-directory will be a timestamp of the form \fIserver-instance-date_time\fR. +-- +2.9.3 + diff --git a/SOURCES/0036-Ticket-7662-db2index-not-properly-evalauating-argume.patch b/SOURCES/0036-Ticket-7662-db2index-not-properly-evalauating-argume.patch new file mode 100644 index 0000000..a088384 --- /dev/null +++ b/SOURCES/0036-Ticket-7662-db2index-not-properly-evalauating-argume.patch @@ -0,0 +1,72 @@ +From 0ac013079796cafb119379e40f24559187935851 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 3 May 2017 14:50:15 -0400 +Subject: [PATCH] Ticket 7662 - db2index not properly evalauating arguments + +Description: Fix a regression where the argument count gets adjusted + before it is checked for errors. The fix is to copy the + number before we shift the arguments, and use that copy + for the usage check. + +https://pagure.io/389-ds-base/issue/47662 + +Reviewed by: firstyear(Thanks!) + +(cherry picked from commit 46011e24580fcee2f438506f91b9fc119306defc) +--- + ldap/admin/src/scripts/db2index.in | 11 ++++++----- + ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 2 +- + 2 files changed, 7 insertions(+), 6 deletions(-) + +diff --git a/ldap/admin/src/scripts/db2index.in b/ldap/admin/src/scripts/db2index.in +index fec082e..04183d3 100755 +--- a/ldap/admin/src/scripts/db2index.in ++++ b/ldap/admin/src/scripts/db2index.in +@@ -52,6 +52,7 @@ do + esac + done + ++argnum=$# + shift $(($OPTIND - 1)) + if [ $1 ] + then +@@ -71,18 +72,18 @@ fi + + idxall=0 + print_usage=0 +-if [ -z $servid ] && [ $# -eq 0 ]; then ++if [ -z $servid ] && [ $argnum -eq 0 ]; then + idxall=1 +-elif [ "$servid" ] && [ $# -eq 2 ]; then ++elif [ "$servid" ] && [ $argnum -eq 2 ]; then + idxall=1 + elif [ -z $benameopt ] && [ -z $includeSuffix ]; then + print_usage=1 + fi +-if [ -z $servid ] && [ $# -lt 2 ]; then ++if [ -z $servid ] && [ $argnum -lt 2 ]; then + print_usage=1 +-elif [ -n "$servid" ] && [ $# -lt 4 ]; then ++elif [ -n "$servid" ] && [ $argnum -lt 4 ]; then + print_usage=1 +-elif [ -n "$servid" ] && [ $# -eq 4 ]; then ++elif [ -n "$servid" ] && [ $argnum -eq 4 ]; then + idxall=1 + fi + +diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +index f8fed7c..a0710f7 100644 +--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c ++++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +@@ -3225,7 +3225,7 @@ upgradedb_core(Slapi_PBlock *pb, ldbm_instance *inst) + run_from_cmdline = (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE); + + be = inst->inst_be; +- slapi_log_err(SLAPI_LOG_ERR, "upgradedb_core", ++ slapi_log_err(SLAPI_LOG_INFO, "upgradedb_core", + "%s: Start upgradedb.\n", inst->inst_name); + + if (!run_from_cmdline) +-- +2.9.3 + diff --git a/SOURCES/0037-Ticket-49075-Adjust-logging-severity-levels.patch b/SOURCES/0037-Ticket-49075-Adjust-logging-severity-levels.patch new file mode 100644 index 0000000..8f0890d --- /dev/null +++ b/SOURCES/0037-Ticket-49075-Adjust-logging-severity-levels.patch @@ -0,0 +1,71 @@ +From d3771cf05358c0230c8c77d7f7dabe9219ea7c8c Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 3 May 2017 14:37:11 -0400 +Subject: [PATCH] Ticket 49075 - Adjust logging severity levels + +Description: There are places wherre we log a severity "ERR", + when in fact it is a benign message. + +https://pagure.io/389-ds-base/issue/49075 + +Reviewed by: firstyear(Thanks!) 
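The shell fix above amounts to "save the argument count before anything shifts it". Purely to illustrate that ordering, here is a parallel sketch in C with invented names and a crude stand-in for the shell shift built-in:

#include <stdio.h>

/* Crude stand-in for the shell "shift n" built-in: drop the first n arguments. */
static void example_shift(int *argc, char ***argv, int n)
{
    *argc -= n;
    *argv += n;
}

int main(int argc, char **argv)
{
    int argnum = argc - 1;          /* save the original count first ...      */
    example_shift(&argc, &argv, 1); /* ... because this call changes argc      */

    /* The usage check must use the saved value, not the shifted one. */
    if (argnum < 2) {
        fprintf(stderr, "usage: example -Z serverID attr ...\n");
        return 1;
    }
    printf("got %d argument(s)\n", argnum);
    return 0;
}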
+ +(cherry picked from commit 0762e393850f54ce8462c45321b3db084bd8a0e1) +--- + ldap/servers/slapd/back-ldbm/ldbm_instance_config.c | 17 ++++++++++------- + 1 file changed, 10 insertions(+), 7 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +index 36d830d..55f1887 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +@@ -118,10 +118,12 @@ ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, in + + if (sane == UTIL_CACHESIZE_ERROR){ + slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: unable to determine system memory limits."); +- slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", "Enable to determine system memory limits.\n"); ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", ++ "Enable to determine system memory limits.\n"); + return LDAP_UNWILLING_TO_PERFORM; + } else if (sane == UTIL_CACHESIZE_REDUCED) { +- slapi_log_err(SLAPI_LOG_WARNING, "ldbm_instance_config_cachememsize_set", "delta +%"PRIu64" of request %"PRIu64" reduced to %"PRIu64"\n", delta_original, val, delta); ++ slapi_log_err(SLAPI_LOG_WARNING, "ldbm_instance_config_cachememsize_set", ++ "delta +%"PRIu64" of request %"PRIu64" reduced to %"PRIu64"\n", delta_original, val, delta); + /* + * This works as: value = 100 + * delta_original to inst, 20; +@@ -133,7 +135,8 @@ ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, in + } + } + if (inst->inst_cache.c_maxsize < MINCACHESIZE || val < MINCACHESIZE) { +- slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", "force a minimal value %"PRIu64"\n", MINCACHESIZE); ++ slapi_log_err(SLAPI_LOG_INFO, "ldbm_instance_config_cachememsize_set", ++ "force a minimal value %"PRIu64"\n", MINCACHESIZE); + /* This value will trigger an autotune next start up, but it should increase only */ + val = MINCACHESIZE; + } +@@ -1134,7 +1137,7 @@ ldbm_instance_post_delete_instance_entry_callback(Slapi_PBlock *pb, Slapi_Entry* + return SLAPI_DSE_CALLBACK_ERROR; + } + +- slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_post_delete_instance_entry_callback", ++ slapi_log_err(SLAPI_LOG_INFO, "ldbm_instance_post_delete_instance_entry_callback", + "Removing '%s'.\n", instance_name); + + cache_destroy_please(&inst->inst_cache, CACHE_TYPE_ENTRY); +@@ -1171,9 +1174,9 @@ ldbm_instance_post_delete_instance_entry_callback(Slapi_PBlock *pb, Slapi_Entry* + dbp = PR_smprintf("%s/%s", inst_dirp, direntry->name); + if (NULL == dbp) { + slapi_log_err(SLAPI_LOG_ERR, +- "ldbm_instance_post_delete_instance_entry_callback", +- "Failed to generate db path: %s/%s\n", +- inst_dirp, direntry->name); ++ "ldbm_instance_post_delete_instance_entry_callback", ++ "Failed to generate db path: %s/%s\n", ++ inst_dirp, direntry->name); + break; + } + +-- +2.9.3 + diff --git a/SOURCES/0038-Ticket-49231-Fix-backport-issue.patch b/SOURCES/0038-Ticket-49231-Fix-backport-issue.patch new file mode 100644 index 0000000..ba41c98 --- /dev/null +++ b/SOURCES/0038-Ticket-49231-Fix-backport-issue.patch @@ -0,0 +1,62 @@ +From e0d5f86c9410bd29c0e4636d3072b24228e60128 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 5 May 2017 14:58:13 -0400 +Subject: [PATCH] Ticket 49231 - Fix backport issue + +Description: The cherry-pick was incorrect, and caused a crash +--- + ldap/servers/slapd/saslbind.c | 36 ++++++++++++++++++------------------ + 1 file changed, 18 insertions(+), 18 
deletions(-) + +diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c +index 6e544e6..8d23c52 100644 +--- a/ldap/servers/slapd/saslbind.c ++++ b/ldap/servers/slapd/saslbind.c +@@ -759,26 +759,26 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + /* hard-wired mechanisms and slapi plugin registered mechanisms */ + sup_ret = slapi_get_supported_saslmechanisms_copy(); + +- if (pb->pb_conn == NULL) return ret; ++ /* If we have a connection, get the provided list from SASL */ ++ if (pb->pb_conn != NULL) { ++ sasl_conn = (sasl_conn_t*)pb->pb_conn->c_sasl_conn; + +- sasl_conn = (sasl_conn_t*)pb->pb_conn->c_sasl_conn; +- if (sasl_conn == NULL) return ret; +- +- /* sasl library mechanisms are connection dependent */ +- PR_EnterMonitor(pb->pb_conn->c_mutex); +- if (sasl_listmech(sasl_conn, +- NULL, /* username */ +- "", ",", "", +- &str, NULL, NULL) == SASL_OK) { +- slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "sasl library mechs: %s\n", str); +- /* merge into result set */ +- dupstr = slapi_ch_strdup(str); +- others = slapi_str2charray_ext(dupstr, ",", 0 /* don't list duplicate mechanisms */); +- charray_merge(&ret, others, 1); +- charray_free(others); +- slapi_ch_free((void**)&dupstr); ++ /* sasl library mechanisms are connection dependent */ ++ PR_EnterMonitor(pb->pb_conn->c_mutex); ++ if (sasl_listmech(sasl_conn, ++ NULL, /* username */ ++ "", ",", "", ++ &str, NULL, NULL) == SASL_OK) { ++ slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "sasl library mechs: %s\n", str); ++ /* merge into result set */ ++ dupstr = slapi_ch_strdup(str); ++ others = slapi_str2charray_ext(dupstr, ",", 0 /* don't list duplicate mechanisms */); ++ charray_merge(&ret, others, 1); ++ charray_free(others); ++ slapi_ch_free((void**)&dupstr); ++ } ++ PR_ExitMonitor(pb->pb_conn->c_mutex); + } +- PR_ExitMonitor(pb->pb_conn->c_mutex); + + /* Get the servers "allowed" list */ + config_ret = config_get_allowed_sasl_mechs_array(); +-- +2.9.3 + diff --git a/SOURCES/0039-Ticket-49231-Fix-backport-issue-part2.patch b/SOURCES/0039-Ticket-49231-Fix-backport-issue-part2.patch new file mode 100644 index 0000000..e9411e1 --- /dev/null +++ b/SOURCES/0039-Ticket-49231-Fix-backport-issue-part2.patch @@ -0,0 +1,25 @@ +From fe06dee8f346a8d8ded338bb5080c4cd3b230eef Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 5 May 2017 18:33:36 -0400 +Subject: [PATCH] Ticket 49231 - Fix backport issue (part2) + +--- + ldap/servers/slapd/saslbind.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c +index 8d23c52..75b83fe 100644 +--- a/ldap/servers/slapd/saslbind.c ++++ b/ldap/servers/slapd/saslbind.c +@@ -773,7 +773,7 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + /* merge into result set */ + dupstr = slapi_ch_strdup(str); + others = slapi_str2charray_ext(dupstr, ",", 0 /* don't list duplicate mechanisms */); +- charray_merge(&ret, others, 1); ++ charray_merge(&sup_ret, others, 1); + charray_free(others); + slapi_ch_free((void**)&dupstr); + } +-- +2.9.3 + diff --git a/SOURCES/0040-Ticket-48681-logconv.pl-Fix-SASL-Bind-stats-and-rewo.patch b/SOURCES/0040-Ticket-48681-logconv.pl-Fix-SASL-Bind-stats-and-rewo.patch new file mode 100644 index 0000000..7fae21f --- /dev/null +++ b/SOURCES/0040-Ticket-48681-logconv.pl-Fix-SASL-Bind-stats-and-rewo.patch @@ -0,0 +1,130 @@ +From e78c098543bbf64b03d1f3df98aa26184c435737 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 19 May 2017 11:18:20 -0400 +Subject: [PATCH] Ticket 48681 - logconv.pl 
- Fix SASL Bind stats and rework + report format + +Description: We were previously counting ANONYMOUS sasl bind mechanisms + as anonymous binds. The report was also changed to make the + binds stats clearer. + +https://pagure.io/389-ds-base/issue/48681 + +Reviewed by: tbordaz(Thanks!) + +(cherry picked from commit f913252541c90ab7f3d62d74818c43ad01ff5c4e) +--- + ldap/admin/src/logconv.pl | 52 ++++++++++++++++++++++++++++++++++++----------- + 1 file changed, 40 insertions(+), 12 deletions(-) + +diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl +index c30e175..4932db4 100755 +--- a/ldap/admin/src/logconv.pl ++++ b/ldap/admin/src/logconv.pl +@@ -1099,23 +1099,23 @@ print "Max BER Size Exceeded: $maxBerSizeCount\n"; + print "\n"; + print "Binds: $bindCount\n"; + print "Unbinds: $unbindCount\n"; ++print "------------------------------"; ++print "-" x length $bindCount; ++print "\n"; + print " - LDAP v2 Binds: $v2BindCount\n"; + print " - LDAP v3 Binds: $v3BindCount\n"; +-print " - AUTOBINDs: $autobindCount\n"; ++print " - AUTOBINDs(LDAPI): $autobindCount\n"; + print " - SSL Client Binds: $sslClientBindCount\n"; + print " - Failed SSL Client Binds: $sslClientFailedCount\n"; + print " - SASL Binds: $saslBindCount\n"; + if ($saslBindCount > 0){ + my $saslmech = $hashes->{saslmech}; + foreach my $saslb ( sort {$saslmech->{$b} <=> $saslmech->{$a} } (keys %{$saslmech}) ){ +- printf " %-4s - %s\n",$saslb, $saslmech->{$saslb}; ++ printf " - %-4s: %s\n",$saslb, $saslmech->{$saslb}; + } + } +- + print " - Directory Manager Binds: $rootDNBindCount\n"; + print " - Anonymous Binds: $anonymousBindCount\n"; +-my $otherBindCount = $bindCount -($rootDNBindCount + $anonymousBindCount); +-print " - Other Binds: $otherBindCount\n\n"; + + ########################################################################## + # Verbose Logging Section # +@@ -1195,9 +1195,9 @@ if ($usage =~ /e/i || $verb eq "yes"){ + } + + #################################### +-# # ++# # + # Print Failed Logins # +-# # ++# # + #################################### + + if ($verb eq "yes" || $usage =~ /f/ ){ +@@ -2117,7 +2117,7 @@ sub parseLineNormal + ($connID) = $_ =~ /conn=(\d*)\s/; + handleConnClose($connID); + } +- if (m/ BIND/ && $_ =~ /dn=\"(.*)\" method=128/i ){ ++ if (m/ BIND / && $_ =~ /dn=\"(.*)\" method=128/i ){ + my $binddn = $1; + if($reportStats){ inc_stats('bind',$s_stats,$m_stats); } + $bindCount++; +@@ -2531,21 +2531,49 @@ sub parseLineNormal + } + } + } +- if (/ BIND / && /method=sasl/i){ ++ if (/ BIND / && $_ =~ /dn=\"(.*)\" method=sasl/i){ ++ my $binddn = $1; ++ my ($conn, $op); + $saslBindCount++; + $bindCount++; + if ($_ =~ /mech=(.*)/i ){ + my $mech = $1; + $hashes->{saslmech}->{$mech}++; +- my ($conn, $op); + if ($_ =~ /conn= *([0-9A-Z]+) +op= *([0-9\-]+)/i){ + $conn = $1; + $op = $2; + $hashes->{saslconnop}->{$conn-$op} = $mech; + } + } +- if (/ mech=ANONYMOUS/){ +- $anonymousBindCount++; ++ if ($binddn ne ""){ ++ if($binddn eq $rootDN){ $rootDNBindCount++; } ++ if($usage =~ /f/ || $usage =~ /u/ || $usage =~ /U/ || $usage =~ /b/ || $verb eq "yes"){ ++ $tmpp = $binddn; ++ $tmpp =~ tr/A-Z/a-z/; ++ $hashes->{bindlist}->{$tmpp}++; ++ $hashes->{bind_conn_op}->{"$serverRestartCount,$conn,$op"} = $tmpp; ++ } ++ } ++ } ++ if (/ RESULT err=/ && / tag=97 nentries=0 etime=/ && $_ =~ /dn=\"(.*)\"/i){ ++ # Check if this is a sasl bind, if see we need to add the RESULT's dn as a bind dn ++ my $binddn = $1; ++ my ($conn, $op); ++ if ($_ =~ /conn= *([0-9A-Z]+) +op= *([0-9\-]+)/i){ ++ $conn = $1; ++ $op = $2; ++ if 
($hashes->{saslconnop}->{$conn-$op} ne ""){ ++ # This was a SASL BIND - record the dn ++ if ($binddn ne ""){ ++ if($binddn eq $rootDN){ $rootDNBindCount++; } ++ if($usage =~ /f/ || $usage =~ /u/ || $usage =~ /U/ || $usage =~ /b/ || $verb eq "yes"){ ++ $tmpp = $binddn; ++ $tmpp =~ tr/A-Z/a-z/; ++ $hashes->{bindlist}->{$tmpp}++; ++ $hashes->{bind_conn_op}->{"$serverRestartCount,$conn,$op"} = $tmpp; ++ } ++ } ++ } + } + } + if (/ RESULT err=14 tag=97 / && / SASL bind in progress/){ +-- +2.9.4 + diff --git a/SOURCES/0041-Ticket-49157-ds-logpipe.py-crashes-for-non-existing-.patch b/SOURCES/0041-Ticket-49157-ds-logpipe.py-crashes-for-non-existing-.patch new file mode 100644 index 0000000..b8004e0 --- /dev/null +++ b/SOURCES/0041-Ticket-49157-ds-logpipe.py-crashes-for-non-existing-.patch @@ -0,0 +1,83 @@ +From a842e43becb9312574071b1460bfa835bfecc47b Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 8 May 2017 14:12:53 -0400 +Subject: [PATCH] Ticket 49157 - ds-logpipe.py crashes for non-existing users + +Description: Remove all "raises", and gracefully exit with a message + +https://pagure.io/389-ds-base/issue/49157 + +Reviewed by: firstyear(Thanks!) + +(cherry picked from commit 94ebab36770465a50e3f61590f0f1adec2cc9224) +--- + ldap/admin/src/scripts/ds-logpipe.py | 18 ++++++++++++------ + 1 file changed, 12 insertions(+), 6 deletions(-) + +diff --git a/ldap/admin/src/scripts/ds-logpipe.py b/ldap/admin/src/scripts/ds-logpipe.py +index dc1856a..13712ea 100644 +--- a/ldap/admin/src/scripts/ds-logpipe.py ++++ b/ldap/admin/src/scripts/ds-logpipe.py +@@ -146,7 +146,8 @@ def open_pipe(logfname): + if e.errno == errno.EINTR: + continue # open was interrupted, try again + else: # hard error +- raise Exception("%s [%d]" % (e.strerror, e.errno)) ++ print("%s [%d]" % (e.strerror, e.errno)) ++ sys.exit(1) + return logf + + def is_proc_alive(procpid): +@@ -156,7 +157,8 @@ def is_proc_alive(procpid): + except IOError as e: + if e.errno != errno.ENOENT: # may not exist yet - that's ok + # otherwise, probably permissions or other badness +- raise Exception("could not open file %s - %s [%d]" % (procfile, e.strerror, e.errno)) ++ print("could not open file %s - %s [%d]" % (procfile, e.strerror, e.errno)) ++ sys.exit(1) + # using /proc/pid failed, try kill + if not retval: + try: +@@ -177,7 +179,8 @@ def get_pid_from_file(pidfile): + except IOError as e: + if e.errno != errno.ENOENT: # may not exist yet - that's ok + # otherwise, probably permissions or other badness +- raise Exception("Could not read pid from file %s - %s [%d]" % (pidfile, e.strerror, e.errno)) ++ print("Could not read pid from file %s - %s [%d]" % (pidfile, e.strerror, e.errno)) ++ sys.exit(1) + if line: + procpid = int(line) + return procpid +@@ -188,7 +191,8 @@ def write_pid_file(pidfile): + pfd.write("%d\n" % os.getpid()) + pfd.close() + except IOError as e: +- raise Exception("Could not write pid to file %s - %s [%d]" % (pidfile, e.strerror, e.errno)) ++ print("Could not write pid to file %s - %s [%d]" % (pidfile, e.strerror, e.errno)) ++ sys.exit(1) + + def handle_script_pidfile(scriptpidfile): + scriptpid = get_pid_from_file(scriptpidfile) +@@ -216,7 +220,8 @@ def read_and_process_line(logf, plgfuncs): + if e.errno == errno.EINTR: + continue # read was interrupted, try again + else: # hard error +- raise Exception("%s [%d]" % (e.strerror, e.errno)) ++ print("%s [%d]" % (e.strerror, e.errno)) ++ sys.exit(1) + if line: # read something + for plgfunc in plgfuncs: + if not plgfunc(line): +@@ -312,7 +317,8 @@ except OSError as e: + 
print("Failed to create log pipe: " + str(e)) + sys.exit(1) + else: +- raise Exception("%s [%d]" % (e.strerror, e.errno)) ++ print("Failed to create log pipe - %s [error %d]" % (e.strerror, e.errno)) ++ sys.ext(1) + + if debug: + print("Listening to log pipe", logfname, "number of lines", maxlines) +-- +2.9.4 + diff --git a/SOURCES/0042-Ticket-49249-cos_cache-is-erroneously-logging-schema.patch b/SOURCES/0042-Ticket-49249-cos_cache-is-erroneously-logging-schema.patch new file mode 100644 index 0000000..66faff3 --- /dev/null +++ b/SOURCES/0042-Ticket-49249-cos_cache-is-erroneously-logging-schema.patch @@ -0,0 +1,202 @@ +From 834b5f7355d4233c4b9d6931ba6ec8482413bca8 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Thu, 11 May 2017 09:21:38 +0200 +Subject: [PATCH] Ticket 49249 - cos_cache is erroneously logging schema + checking failure + +Bug Description: + cos is generating virtual attributes in several steps. + One of the first step is to check that the generated attribute will + conform the schema. + Then additional checks (override/merge and cos scope) are performed. + If the entry does not conform the schema, it skips the additional checks. + In such case it logs a message stating that the virtual attribute does not + apply. + During slapi-log-err refactoring (https://pagure.io/389-ds-base/issue/48978) + the logging level, in case of schema violation, was move from SLAPI_LOG_PLUGIN + to SLAPI_LOG_ERR. + + This change is incorrect because the potential failure to schema check is + normal and does not imply the cos would apply to the entry (for example if + the entry was not in the scope, the cos would also be skipped). + +Fix Description: + Move back the logging level from SLAPI_LOG_ERR to SLAPI_LOG_PLUGIN + +https://pagure.io/389-ds-base/issue/49249 + +Reviewed by: Mark Reynolds + +Platforms tested: F23 + +Flag Day: no + +Doc impact: no +--- + dirsrvtests/tests/tickets/ticket49249_test.py | 140 ++++++++++++++++++++++++++ + ldap/servers/plugins/cos/cos_cache.c | 2 +- + 2 files changed, 141 insertions(+), 1 deletion(-) + create mode 100644 dirsrvtests/tests/tickets/ticket49249_test.py + +diff --git a/dirsrvtests/tests/tickets/ticket49249_test.py b/dirsrvtests/tests/tickets/ticket49249_test.py +new file mode 100644 +index 0000000..1dfd07e +--- /dev/null ++++ b/dirsrvtests/tests/tickets/ticket49249_test.py +@@ -0,0 +1,140 @@ ++import time ++import ldap ++import logging ++import pytest ++from lib389 import DirSrv, Entry, tools, tasks ++from lib389.tools import DirSrvTools ++from lib389._constants import * ++from lib389.properties import * ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st as topo ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++COS_BRANCH = 'ou=cos_scope,' + DEFAULT_SUFFIX ++COS_DEF = 'cn=cos_definition,' + COS_BRANCH ++COS_TEMPLATE = 'cn=cos_template,' + COS_BRANCH ++INVALID_USER_WITH_COS = 'cn=cos_user_no_mail,' + COS_BRANCH ++VALID_USER_WITH_COS = 'cn=cos_user_with_mail,' + COS_BRANCH ++ ++NO_COS_BRANCH = 'ou=no_cos_scope,' + DEFAULT_SUFFIX ++INVALID_USER_WITHOUT_COS = 'cn=no_cos_user_no_mail,' + NO_COS_BRANCH ++VALID_USER_WITHOUT_COS = 'cn=no_cos_user_with_mail,' + NO_COS_BRANCH ++ ++def test_ticket49249(topo): ++ """Write your testcase here... 
++ ++ Also, if you need any testcase initialization, ++ please, write additional fixture for that(include finalizer). ++ """ ++ # Add the branches ++ try: ++ topo.standalone.add_s(Entry((COS_BRANCH, { ++ 'objectclass': 'top extensibleObject'.split(), ++ 'ou': 'cos_scope' ++ }))) ++ except ldap.LDAPError as e: ++ log.error('Failed to add cos_scope: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.add_s(Entry((NO_COS_BRANCH, { ++ 'objectclass': 'top extensibleObject'.split(), ++ 'ou': 'no_cos_scope' ++ }))) ++ except ldap.LDAPError as e: ++ log.error('Failed to add no_cos_scope: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.add_s(Entry((COS_TEMPLATE, { ++ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), ++ 'cn': 'cos_template', ++ 'cosPriority': '1', ++ 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', ++ 'mailAlternateAddress': 'hello@world' ++ }))) ++ except ldap.LDAPError as e: ++ log.error('Failed to add cos_template: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ topo.standalone.add_s(Entry((COS_DEF, { ++ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), ++ 'cn': 'cos_definition', ++ 'costemplatedn': COS_TEMPLATE, ++ 'cosAttribute': 'mailAlternateAddress default' ++ }))) ++ except ldap.LDAPError as e: ++ log.error('Failed to add cos_definition: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ # This entry is not allowed to have mailAlternateAddress ++ topo.standalone.add_s(Entry((INVALID_USER_WITH_COS, { ++ 'objectclass': 'top person'.split(), ++ 'cn': 'cos_user_no_mail', ++ 'sn': 'cos_user_no_mail' ++ }))) ++ except ldap.LDAPError as e: ++ log.error('Failed to add cos_user_no_mail: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ # This entry is allowed to have mailAlternateAddress ++ topo.standalone.add_s(Entry((VALID_USER_WITH_COS, { ++ 'objectclass': 'top mailGroup'.split(), ++ 'cn': 'cos_user_with_mail' ++ }))) ++ except ldap.LDAPError as e: ++ log.error('Failed to add cos_user_no_mail: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ # This entry is not allowed to have mailAlternateAddress ++ topo.standalone.add_s(Entry((INVALID_USER_WITHOUT_COS, { ++ 'objectclass': 'top person'.split(), ++ 'cn': 'no_cos_user_no_mail', ++ 'sn': 'no_cos_user_no_mail' ++ }))) ++ except ldap.LDAPError as e: ++ log.error('Failed to add no_cos_user_no_mail: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ # This entry is allowed to have mailAlternateAddress ++ topo.standalone.add_s(Entry((VALID_USER_WITHOUT_COS, { ++ 'objectclass': 'top mailGroup'.split(), ++ 'cn': 'no_cos_user_with_mail' ++ }))) ++ except ldap.LDAPError as e: ++ log.error('Failed to add no_cos_user_with_mail: error ' + e.message['desc']) ++ assert False ++ ++ try: ++ entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(mailAlternateAddress=*)') ++ assert len(entries) == 1 ++ assert entries[0].hasValue('mailAlternateAddress', 'hello@world') ++ except ldap.LDAPError as e: ++ log.fatal('Unable to retrieve cos_user_with_mail (only entry with mailAlternateAddress) : error %s' % (USER1_DN, e.message['desc'])) ++ assert False ++ ++ assert not topo.standalone.ds_error_log.match(".*cos attribute mailAlternateAddress failed schema.*") ++ ++ if DEBUGGING: ++ # Add debugging steps(if any)... 
++ pass ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c +index 8942254..66c6c7f 100644 +--- a/ldap/servers/plugins/cos/cos_cache.c ++++ b/ldap/servers/plugins/cos/cos_cache.c +@@ -2362,7 +2362,7 @@ static int cos_cache_query_attr(cos_cache *ptheCache, vattr_context *context, + + if(!cos_cache_schema_check(pCache, attr_index, pObjclasses)) + { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_query_attr - cos attribute %s failed schema check on dn: %s\n",type,pDn); ++ slapi_log_err(SLAPI_LOG_PLUGIN, COS_PLUGIN_SUBSYSTEM, "cos_cache_query_attr - cos attribute %s failed schema check on dn: %s\n",type,pDn); + goto bail; + } + } +-- +2.9.4 + diff --git a/SOURCES/0043-Ticket-49238-AddressSanitizer-heap-use-after-free-in.patch b/SOURCES/0043-Ticket-49238-AddressSanitizer-heap-use-after-free-in.patch new file mode 100644 index 0000000..5e1b21b --- /dev/null +++ b/SOURCES/0043-Ticket-49238-AddressSanitizer-heap-use-after-free-in.patch @@ -0,0 +1,243 @@ +From 4182dd8bbff22f9e0e45b763a4619c0bc8dcb153 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 9 May 2017 12:31:58 -0400 +Subject: [PATCH] Ticket 49238 - AddressSanitizer: heap-use-after-free in + libreplication + +Bug Description: + The bug is detected in csn pending list component, when + accessing a csn that has already been freed. + + The bug is mostly detectable under ASAN because under normal run + the read access to the csn would only crash if the csn was in + an unmapped page (that is quite difficult to acheive). + + The bug was observed under the following conditions: + - very slow machine + - all instances running on the same machine + + The patch address 2 issues + + Issue - 1 + Under specfic circumstance (failure, like "db_deadlock" during changelog update), + the csn was freed but still present in the pending list (fix-1). + + Issue - 2 + Further investigations, showed an other corner case where a + replica could be updated by several suppliers in parallel. + In such scenario, an update (on one thread-2) with a higher csn (let csn-2) + may be applied before an update (on another thread-1) with a smaller + csn (let csn-1). + csn-2 is freed when thread-2 complete but the csn-2 will remain + in the pending list until csn-1 is commited. + so followup of pending list may access a csn that was freed + +Fix Description: + Issue - 1 + The fix in repl5_plugins.c, frees the csn (thread private area) + at the condition pending list was roll up for that csn (ruv update). + + Issue - 2 + The fix is in two parts: + If a supplier tries to acquire a replica while it is + already owner of it, the replica is granted. + + If a supplier owns a replica and is asking again for it, + but this time the replica is not granted, the replica is release and + the supplier disconnected. + +https://pagure.io/389-ds-base/issue/49238 + +Reviewed by: Mark Reynolds, Ludwig Krispenz, William Brown (thanks to you all !!) 
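A minimal sketch of the "same supplier connection may re-acquire" rule described in the fix, using invented names rather than the plugin's real structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_NO_CONN UINT64_MAX   /* no supplier currently holds the replica */

struct example_replica {
    bool     held;
    uint64_t locking_conn;
};

static bool example_acquire(struct example_replica *r, uint64_t connid)
{
    if (!r->held) {                  /* free: grant it and record the owner */
        r->held = true;
        r->locking_conn = connid;
        return true;
    }
    if (r->locking_conn == connid) { /* same supplier connection: grant again */
        return true;
    }
    return false;                    /* held by someone else: busy */
}

int main(void)
{
    struct example_replica r = { false, EXAMPLE_NO_CONN };
    printf("conn 7 first acquire:  %d\n", example_acquire(&r, 7)); /* 1 */
    printf("conn 7 second acquire: %d\n", example_acquire(&r, 7)); /* 1 */
    printf("conn 9 acquire:        %d\n", example_acquire(&r, 9)); /* 0 */
    return 0;
}

In the patch itself the same comparison is backed by resetting the stored supplier purl and by extra cleanup on the extended-operation path; the sketch only shows the granting decision.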
+ +Platforms tested: 7.4 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/plugins/replication/repl5.h | 1 + + ldap/servers/plugins/replication/repl5_plugins.c | 7 +++- + ldap/servers/plugins/replication/repl5_replica.c | 49 +++++++++++++++++++----- + ldap/servers/plugins/replication/repl_extop.c | 42 ++++++++++++++++++-- + 4 files changed, 86 insertions(+), 13 deletions(-) + +diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h +index c3bd10c..1d8989c 100644 +--- a/ldap/servers/plugins/replication/repl5.h ++++ b/ldap/servers/plugins/replication/repl5.h +@@ -549,6 +549,7 @@ void replica_relinquish_exclusive_access(Replica *r, PRUint64 connid, int opid); + PRBool replica_get_tombstone_reap_active(const Replica *r); + const Slapi_DN *replica_get_root(const Replica *r); + const char *replica_get_name(const Replica *r); ++uint64_t replica_get_locking_conn(const Replica *r); + ReplicaId replica_get_rid (const Replica *r); + void replica_set_rid (Replica *r, ReplicaId rid); + PRBool replica_is_initialized (const Replica *r); +diff --git a/ldap/servers/plugins/replication/repl5_plugins.c b/ldap/servers/plugins/replication/repl5_plugins.c +index ebcc230..9ef06af 100644 +--- a/ldap/servers/plugins/replication/repl5_plugins.c ++++ b/ldap/servers/plugins/replication/repl5_plugins.c +@@ -1224,7 +1224,12 @@ common_return: + opcsn = operation_get_csn(op); + prim_csn = get_thread_primary_csn(); + if (csn_is_equal(opcsn, prim_csn)) { +- set_thread_primary_csn(NULL); ++ if (return_value == 0) { ++ /* the primary csn was succesfully committed ++ * unset it in the thread local data ++ */ ++ set_thread_primary_csn(NULL); ++ } + } + if (repl_obj) { + object_release (repl_obj); +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index a106f8b..1bdc138 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -64,6 +64,7 @@ struct replica { + PRBool state_update_inprogress; /* replica state is being updated */ + PRLock *agmt_lock; /* protects agreement creation, start and stop */ + char *locking_purl; /* supplier who has exclusive access */ ++ uint64_t locking_conn; /* The supplier's connection id */ + Slapi_Counter *protocol_timeout;/* protocol shutdown timeout */ + Slapi_Counter *backoff_min; /* backoff retry minimum */ + Slapi_Counter *backoff_max; /* backoff retry maximum */ +@@ -602,19 +603,32 @@ replica_get_exclusive_access(Replica *r, PRBool *isInc, PRUint64 connid, int opi + slapi_sdn_get_dn(r->repl_root), + r->locking_purl ? r->locking_purl : "unknown"); + rval = PR_FALSE; ++ if (!(r->repl_state_flags & REPLICA_TOTAL_IN_PROGRESS)) { ++ /* inc update */ ++ if (r->locking_purl && r->locking_conn == connid) { ++ /* This is the same supplier connection, reset the replica ++ * purl, and return success */ ++ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, ++ "replica_get_exclusive_access - " ++ "This is a second acquire attempt from the same replica connection " ++ " - return success instead of busy\n"); ++ slapi_ch_free_string(&r->locking_purl); ++ r->locking_purl = slapi_ch_strdup(locking_purl); ++ rval = PR_TRUE; ++ goto done; ++ } ++ if (replica_get_release_timeout(r)) { ++ /* ++ * Abort the current session so other replicas can acquire ++ * this server. 
++ */ ++ r->abort_session = ABORT_SESSION; ++ } ++ } + if (current_purl) + { + *current_purl = slapi_ch_strdup(r->locking_purl); + } +- if (!(r->repl_state_flags & REPLICA_TOTAL_IN_PROGRESS) && +- replica_get_release_timeout(r)) +- { +- /* +- * We are not doing a total update, so abort the current session +- * so other replicas can acquire this server. +- */ +- r->abort_session = ABORT_SESSION; +- } + } + else + { +@@ -642,7 +656,9 @@ replica_get_exclusive_access(Replica *r, PRBool *isInc, PRUint64 connid, int opi + } + slapi_ch_free_string(&r->locking_purl); + r->locking_purl = slapi_ch_strdup(locking_purl); ++ r->locking_conn = connid; + } ++done: + replica_unlock(r->repl_lock); + return rval; + } +@@ -720,6 +736,18 @@ replica_get_name(const Replica *r) /* ONREPL - should we return copy instead? */ + return(r->repl_name); + } + ++/* ++ * Returns locking_conn of this replica ++ */ ++uint64_t ++replica_get_locking_conn(const Replica *r) ++{ ++ uint64_t connid; ++ replica_lock(r->repl_lock); ++ connid = r->locking_conn; ++ replica_unlock(r->repl_lock); ++ return connid; ++} + /* + * Returns replicaid of this replica + */ +@@ -2251,6 +2279,9 @@ _replica_init_from_config (Replica *r, Slapi_Entry *e, char *errortext) + } + + r->tombstone_reap_stop = r->tombstone_reap_active = PR_FALSE; ++ ++ /* No supplier holding the replica */ ++ r->locking_conn = ULONG_MAX; + + return (_replica_check_validity (r)); + } +diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c +index 412caec..a39d918 100644 +--- a/ldap/servers/plugins/replication/repl_extop.c ++++ b/ldap/servers/plugins/replication/repl_extop.c +@@ -1138,9 +1138,45 @@ send_response: + */ + if (NULL != connext && NULL != connext->replica_acquired) + { +- Object *r_obj = (Object*)connext->replica_acquired; +- replica_relinquish_exclusive_access((Replica*)object_get_data (r_obj), +- connid, opid); ++ Replica *r = (Replica*)object_get_data ((Object*)connext->replica_acquired); ++ uint64_t r_locking_conn; ++ ++ /* At this point the supplier runs a Replica Agreement for ++ * the specific replica connext->replica_acquired. ++ * The RA does not know it holds the replica (because it is ++ * sending this request). ++ * The situation is confused ++ */ ++ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - " ++ "already acquired replica: replica not ready (%d) (replica=%s)\n", response, replica_get_name(r) ? 
replica_get_name(r) : "no name"); ++ ++ /* ++ * On consumer side, we release the exclusive access at the ++ * condition this is this RA that holds the replica ++ */ ++ if (r) { ++ ++ r_locking_conn = replica_get_locking_conn(r); ++ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - " ++ "already acquired replica: locking_conn=%d, current connid=%d\n", (int) r_locking_conn, (int) connid); ++ ++ if ((r_locking_conn != ULONG_MAX) && (r_locking_conn == connid)) { ++ replica_relinquish_exclusive_access(r, connid, opid); ++ object_release((Object*) connext->replica_acquired); ++ connext->replica_acquired = NULL; ++ } ++ } ++ /* ++ * On consumer side we should not keep a incoming connection ++ * with replica_acquired set although the supplier is not aware of ++ * ++ * On the supplier, we need to close the connection so ++ * that the RA will restart a new session in a clear state ++ */ ++ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - " ++ "already acquired replica: disconnect conn=%d\n", connid); ++ slapi_disconnect_server(conn); ++ + } + /* Remove any flags that would indicate repl session in progress */ + if (NULL != connext) +-- +2.9.4 + diff --git a/SOURCES/0044-Ticket-49246-ns-slapd-crashes-in-role-cache-creation.patch b/SOURCES/0044-Ticket-49246-ns-slapd-crashes-in-role-cache-creation.patch new file mode 100644 index 0000000..5aafc3d --- /dev/null +++ b/SOURCES/0044-Ticket-49246-ns-slapd-crashes-in-role-cache-creation.patch @@ -0,0 +1,171 @@ +From 18491418e661b5dc1b9ca4c6bb4adb85bfb0bf0d Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 9 May 2017 16:31:52 -0400 +Subject: [PATCH] Ticket 49246 - ns-slapd crashes in role cache creation + +Bug Description: Using a nested filter for a filtered role can + cause a crash. This was due to the way the filter + was being checked by the roles plugin. + +Fix Description: Properly resurse over a filter. + +https://pagure.io/389-ds-base/issue/49246 + +Reviewed by: firstyear & tbordaz(Thanks!!) 
+ +(cherry picked from commit 54e4fca35899550e0c25b25e7f7c756302d258ce) +--- + dirsrvtests/tests/tickets/ticket49122_test.py | 61 ++++++++++++++++++--------- + ldap/servers/plugins/roles/roles_cache.c | 34 +++++++++++---- + 2 files changed, 66 insertions(+), 29 deletions(-) + +diff --git a/dirsrvtests/tests/tickets/ticket49122_test.py b/dirsrvtests/tests/tickets/ticket49122_test.py +index ff1e8d1..0945122 100644 +--- a/dirsrvtests/tests/tickets/ticket49122_test.py ++++ b/dirsrvtests/tests/tickets/ticket49122_test.py +@@ -2,8 +2,7 @@ import time + import ldap + import logging + import pytest +-from lib389 import DirSrv, Entry, tools, tasks +-from lib389.tools import DirSrvTools ++from lib389 import Entry + from lib389._constants import * + from lib389.properties import * + from lib389.tasks import * +@@ -19,6 +18,15 @@ log = logging.getLogger(__name__) + + USER_DN = 'uid=user,' + DEFAULT_SUFFIX + ROLE_DN = 'cn=Filtered_Role_That_Includes_Empty_Role,' + DEFAULT_SUFFIX ++filters = ['nsrole=cn=empty,dc=example,dc=com', ++ '(nsrole=cn=empty,dc=example,dc=com)', ++ '(&(nsrole=cn=empty,dc=example,dc=com))', ++ '(!(nsrole=cn=empty,dc=example,dc=com))', ++ '(&(|(objectclass=person)(sn=app*))(userpassword=*))', ++ '(&(|(objectclass=person)(nsrole=cn=empty,dc=example,dc=com))(userpassword=*))', ++ '(&(|(nsrole=cn=empty,dc=example,dc=com)(sn=app*))(userpassword=*))', ++ '(&(|(objectclass=person)(sn=app*))(nsrole=cn=empty,dc=example,dc=com))', ++ '(&(|(&(cn=*)(objectclass=person)(nsrole=cn=empty,dc=example,dc=com)))(uid=*))'] + + + def test_ticket49122(topo): +@@ -29,18 +37,6 @@ def test_ticket49122(topo): + topo.standalone.plugins.enable(name=PLUGIN_ROLES) + topo.standalone.restart() + +- # Add invalid role +- try: +- topo.standalone.add_s(Entry(( +- ROLE_DN, {'objectclass': ['top', 'ldapsubentry', 'nsroledefinition', +- 'nscomplexroledefinition', 'nsfilteredroledefinition'], +- 'cn': 'Filtered_Role_That_Includes_Empty_Role', +- 'nsRoleFilter': '(!(nsrole=cn=This_Is_An_Empty_Managed_NsRoleDefinition,dc=example,dc=com))', +- 'description': 'A filtered role with filter that will crash the server'}))) +- except ldap.LDAPError as e: +- topo.standalone.log.fatal('Failed to add filtered role: error ' + e.message['desc']) +- assert False +- + # Add test user + try: + topo.standalone.add_s(Entry(( +@@ -51,16 +47,39 @@ def test_ticket49122(topo): + assert False + + if DEBUGGING: +- # Add debugging steps(if any)... 
+ print("Attach gdb") + time.sleep(20) + +- # Search for the role +- try: +- topo.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nsrole']) +- except ldap.LDAPError as e: +- topo.standalone.log.fatal('Search failed: error ' + str(e)) +- assert False ++ # Loop over filters ++ for role_filter in filters: ++ log.info('Testing filter: ' + role_filter) ++ ++ # Add invalid role ++ try: ++ topo.standalone.add_s(Entry(( ++ ROLE_DN, {'objectclass': ['top', 'ldapsubentry', 'nsroledefinition', ++ 'nscomplexroledefinition', 'nsfilteredroledefinition'], ++ 'cn': 'Filtered_Role_That_Includes_Empty_Role', ++ 'nsRoleFilter': role_filter, ++ 'description': 'A filtered role with filter that will crash the server'}))) ++ except ldap.LDAPError as e: ++ topo.standalone.log.fatal('Failed to add filtered role: error ' + e.message['desc']) ++ assert False ++ ++ # Search for the role ++ try: ++ topo.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nsrole']) ++ except ldap.LDAPError as e: ++ topo.standalone.log.fatal('Search failed: error ' + str(e)) ++ assert False ++ ++ # Cleanup ++ try: ++ topo.standalone.delete_s(ROLE_DN) ++ except ldap.LDAPError as e: ++ topo.standalone.log.fatal('delete failed: error ' + str(e)) ++ assert False ++ time.sleep(1) + + topo.standalone.log.info('Test Passed') + +diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c +index 4f27c4c..3697eaa 100644 +--- a/ldap/servers/plugins/roles/roles_cache.c ++++ b/ldap/servers/plugins/roles/roles_cache.c +@@ -1073,20 +1073,38 @@ static int roles_cache_create_role_under(roles_cache_def** roles_cache_suffix, S + } + + /* +- * Check that we are not using nsrole in the filter ++ * Check that we are not using nsrole in the filter, recurse over all the ++ * nested filters. + */ + static int roles_check_filter(Slapi_Filter *filter_list) + { + Slapi_Filter *f; + char *type = NULL; + +- for ( f = slapi_filter_list_first( filter_list ); +- f != NULL; +- f = slapi_filter_list_next( filter_list, f ) ) +- { +- slapi_filter_get_attribute_type(f, &type); +- if (strcasecmp(type, NSROLEATTR) == 0){ +- return -1; ++ f = slapi_filter_list_first( filter_list ); ++ if (f == NULL){ ++ /* Single filter */ ++ if (slapi_filter_get_attribute_type(filter_list, &type) == 0){ ++ if (strcasecmp(type, NSROLEATTR) == 0){ ++ return -1; ++ } ++ } ++ } ++ for ( ; f != NULL; f = slapi_filter_list_next(filter_list, f) ){ ++ /* Complex filter */ ++ if (slapi_filter_list_first(f)) { ++ /* Another filter list - recurse */ ++ if (roles_check_filter(f) == -1){ ++ /* Done, break out */ ++ return -1; ++ } ++ } else { ++ /* Not a filter list, so check the type */ ++ if (slapi_filter_get_attribute_type(f, &type) == 0){ ++ if (strcasecmp(type, NSROLEATTR) == 0){ ++ return -1; ++ } ++ } + } + } + +-- +2.9.4 + diff --git a/SOURCES/0045-Ticket-49258-Allow-nsslapd-cache-autosize-to-be-modi.patch b/SOURCES/0045-Ticket-49258-Allow-nsslapd-cache-autosize-to-be-modi.patch new file mode 100644 index 0000000..07892e9 --- /dev/null +++ b/SOURCES/0045-Ticket-49258-Allow-nsslapd-cache-autosize-to-be-modi.patch @@ -0,0 +1,84 @@ +From e0cb3e9ff5337cfc4ecaa6fa5efa189b7bc16246 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 15 May 2017 11:14:43 -0400 +Subject: [PATCH 05/10] Ticket 49258 - Allow nsslapd-cache-autosize to be + modified while the server is running + +Bug Description: Previously you're not allowed to set nsslapd-cache-autosize, and + nsslapd-cache-autosize-set while the server was running. 
The only + way to set it was to edit the dse.ldif + +Fix Description: Allow it to be set while the server is running. Also added value + validation for these settigs + +https://pagure.io/389-ds-base/issue/49258 + +Reviewed by: tbordaz(Thanks!) + +(cherry picked from commit 2d07ca48f9c1232fc544361b5103d353e4791a72) +--- + ldap/servers/slapd/back-ldbm/ldbm_config.c | 34 ++++++++++++++++++++++++------ + 1 file changed, 28 insertions(+), 6 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index 401cd60..f7edd9e 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -1197,8 +1197,19 @@ static int ldbm_config_cache_autosize_set(void *arg, void *value, char *errorbuf + { + struct ldbminfo *li = (struct ldbminfo *)arg; + +- if (apply) +- li->li_cache_autosize = (int)((uintptr_t)value); ++ if (apply) { ++ int val = (int)((uintptr_t)value); ++ if (val < 0 || val > 100) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Error: Invalid value for %s (%d). The value must be between \"0\" and \"100\"\n", ++ CONFIG_CACHE_AUTOSIZE, val); ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_config_cache_autosize_set", ++ "Invalid value for %s (%d). The value must be between \"0\" and \"100\"\n", ++ CONFIG_CACHE_AUTOSIZE, val); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } ++ li->li_cache_autosize = val; ++ } + return LDAP_SUCCESS; + } + +@@ -1214,8 +1225,19 @@ static int ldbm_config_cache_autosize_split_set(void *arg, void *value, char *er + { + struct ldbminfo *li = (struct ldbminfo *)arg; + +- if (apply) +- li->li_cache_autosize_split = (int)((uintptr_t)value); ++ if (apply) { ++ int val = (int)((uintptr_t)value); ++ if (val < 0 || val > 100) { ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Error: Invalid value for %s (%d). The value must be between \"0\" and \"100\"\n", ++ CONFIG_CACHE_AUTOSIZE_SPLIT, val); ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_config_cache_autosize_split_set", ++ "Invalid value for %s (%d). 
The value must be between \"0\" and \"100\"\n", ++ CONFIG_CACHE_AUTOSIZE_SPLIT, val); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } ++ li->li_cache_autosize_split = val; ++ } + return LDAP_SUCCESS; + } + +@@ -1582,8 +1604,8 @@ static config_info ldbm_config[] = { + {CONFIG_DB_DEBUG_CHECKPOINTING, CONFIG_TYPE_ONOFF, "off", &ldbm_config_db_debug_checkpointing_get, &ldbm_config_db_debug_checkpointing_set, 0}, + {CONFIG_DB_HOME_DIRECTORY, CONFIG_TYPE_STRING, "", &ldbm_config_db_home_directory_get, &ldbm_config_db_home_directory_set, 0}, + {CONFIG_IMPORT_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "-1", &ldbm_config_import_cache_autosize_get, &ldbm_config_import_cache_autosize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, +- {CONFIG_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "10", &ldbm_config_cache_autosize_get, &ldbm_config_cache_autosize_set, 0}, +- {CONFIG_CACHE_AUTOSIZE_SPLIT, CONFIG_TYPE_INT, "40", &ldbm_config_cache_autosize_split_get, &ldbm_config_cache_autosize_split_set, 0}, ++ {CONFIG_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "10", &ldbm_config_cache_autosize_get, &ldbm_config_cache_autosize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, ++ {CONFIG_CACHE_AUTOSIZE_SPLIT, CONFIG_TYPE_INT, "40", &ldbm_config_cache_autosize_split_get, &ldbm_config_cache_autosize_split_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_IMPORT_CACHESIZE, CONFIG_TYPE_SIZE_T, "16777216", &ldbm_config_import_cachesize_get, &ldbm_config_import_cachesize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE}, + {CONFIG_IDL_SWITCH, CONFIG_TYPE_STRING, "new", &ldbm_config_idl_get_idl_new, &ldbm_config_idl_set_tune, CONFIG_FLAG_ALWAYS_SHOW}, + {CONFIG_IDL_UPDATE, CONFIG_TYPE_ONOFF, "on", &ldbm_config_idl_get_update, &ldbm_config_idl_set_update, 0}, +-- +2.9.4 + diff --git a/SOURCES/0046-Ticket-49261-Fix-script-usage-and-man-pages.patch b/SOURCES/0046-Ticket-49261-Fix-script-usage-and-man-pages.patch new file mode 100644 index 0000000..71857d0 --- /dev/null +++ b/SOURCES/0046-Ticket-49261-Fix-script-usage-and-man-pages.patch @@ -0,0 +1,156 @@ +From c0a50f26aa52bda451c5b5bce7fa2c7c2eb90fe6 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 17 May 2017 16:24:50 -0400 +Subject: [PATCH] Ticket 49261 - Fix script usage and man pages + +Description: We incorrectly said db2bak.pl and db2ldif.pl took a "-v" option, + but they did not. Plus the usage for some of the shell scripts + did not display "-v" option in the usage + +https://pagure.io/389-ds-base/issue/49261 + +Reviewed by: tbordaz(Thanks!) 
+--- + ldap/admin/src/scripts/db2bak.in | 3 ++- + ldap/admin/src/scripts/db2bak.pl.in | 2 +- + ldap/admin/src/scripts/db2index.in | 3 ++- + ldap/admin/src/scripts/db2ldif.in | 3 ++- + ldap/admin/src/scripts/db2ldif.pl.in | 2 +- + ldap/admin/src/scripts/vlvindex.in | 3 ++- + man/man8/vlvindex.8 | 6 +++++- + 7 files changed, 15 insertions(+), 7 deletions(-) + +diff --git a/ldap/admin/src/scripts/db2bak.in b/ldap/admin/src/scripts/db2bak.in +index e773b28..a13d2e2 100755 +--- a/ldap/admin/src/scripts/db2bak.in ++++ b/ldap/admin/src/scripts/db2bak.in +@@ -13,11 +13,12 @@ export SHLIB_PATH + + usage() + { +- echo "Usage: db2bak [archivedir] [-Z serverID] [-q] [-h]" ++ echo "Usage: db2bak [archivedir] [-Z serverID] [-q] [-v] [-h]" + echo "Options:" + echo " archivedir - Directory where the backup should be stored" + echo " -Z serverID - Server instance identifier" + echo " -q - Quiet mode - suppresses output" ++ echo " -v - Display version" + echo " -h - Display usage" + } + +diff --git a/ldap/admin/src/scripts/db2bak.pl.in b/ldap/admin/src/scripts/db2bak.pl.in +index 73d4187..335285e 100644 +--- a/ldap/admin/src/scripts/db2bak.pl.in ++++ b/ldap/admin/src/scripts/db2bak.pl.in +@@ -25,7 +25,7 @@ $dbtype = "ldbm database"; + $i = 0; + + sub usage { +- print(STDERR "Usage: db2bak.pl [-v] [-Z serverID] [-D rootdn] { -w password | -w - | -j filename } [-a backupdir]\n"); ++ print(STDERR "Usage: db2bak.pl [-Z serverID] [-D rootdn] { -w password | -w - | -j filename } [-a backupdir]\n"); + print(STDERR " [-t dbtype] [-P protocol] [-h]\n"); + print(STDERR "Options:\n"); + print(STDERR " -D rootdn - Directory Manager\n"); +diff --git a/ldap/admin/src/scripts/db2index.in b/ldap/admin/src/scripts/db2index.in +index 04183d3..3fc4c2c 100755 +--- a/ldap/admin/src/scripts/db2index.in ++++ b/ldap/admin/src/scripts/db2index.in +@@ -14,7 +14,7 @@ export SHLIB_PATH + usage () + { + echo "Usage: db2index [-Z serverID] [-n backend | {-s includesuffix}* -t attribute[:indextypes[:matchingrules]]" +- echo " -T vlvTag] [-h]" ++ echo " -T vlvTag] [-v] [-h]" + echo "Options:" + echo " -Z serverID - Server instance identifier" + echo " -n backend - Backend database name. Example: userRoot" +@@ -26,6 +26,7 @@ usage () + echo " - matchingrules: comma separated matrules" + echo " Example: -t foo:eq,pres" + echo " -T vlvTag - VLV index name" ++ echo " -v - Display version" + echo " -h - Display usage" + } + +diff --git a/ldap/admin/src/scripts/db2ldif.in b/ldap/admin/src/scripts/db2ldif.in +index 08f30e4..95d2754 100755 +--- a/ldap/admin/src/scripts/db2ldif.in ++++ b/ldap/admin/src/scripts/db2ldif.in +@@ -16,7 +16,7 @@ cwd=`pwd` + usage() + { + echo "Usage: db2ldif [-Z serverID] {-n backend_instance}* | {-s includesuffix}* [{-x excludesuffix}*] [-a outputfile]" +- echo " [-E] [-r] [-u] [-U] [-m] [-1] [-q] [-h]" ++ echo " [-E] [-r] [-u] [-U] [-m] [-1] [-q] [-v] [-h]" + echo "Note: either \"-n backend\" or \"-s includesuffix\" is required." 
+ echo "Options:" + echo " -Z serverID - Server instance identifier" +@@ -31,6 +31,7 @@ usage() + echo " -m - Do not base64 encode values" + echo " -1 - Do not include version text" + echo " -q - Quiet mode - suppresses output" ++ echo " -v - Display version" + echo " -h - Display usage" + } + +diff --git a/ldap/admin/src/scripts/db2ldif.pl.in b/ldap/admin/src/scripts/db2ldif.pl.in +index 179d236..0d220f0 100644 +--- a/ldap/admin/src/scripts/db2ldif.pl.in ++++ b/ldap/admin/src/scripts/db2ldif.pl.in +@@ -38,7 +38,7 @@ $decrypt_on_export = 0; + $cwd = cwd(); + + sub usage { +- print(STDERR "Usage: db2ldif.pl [-v] [-Z serverID] [-D rootdn] { -w password | -w - | -j pwfilename }\n"); ++ print(STDERR "Usage: db2ldif.pl [-Z serverID] [-D rootdn] { -w password | -w - | -j pwfilename }\n"); + print(STDERR " [-P protocol] {-n backendname}* | {-s include}* [{-x exclude}*] [-h]\n"); + print(STDERR " [-a filename] [-m] [-M] [-r] [-u] [-C] [-N] [-U] [-E] [-1] [-a filename]\n"); + print(STDERR "Options:\n"); +diff --git a/ldap/admin/src/scripts/vlvindex.in b/ldap/admin/src/scripts/vlvindex.in +index ba2a2b3..6820de4 100755 +--- a/ldap/admin/src/scripts/vlvindex.in ++++ b/ldap/admin/src/scripts/vlvindex.in +@@ -13,7 +13,7 @@ export SHLIB_PATH + + usage () + { +- echo "Usage: vlvindex [-Z serverID] -n backendname | {-s includesuffix}* -T vlvTag [-d debuglevel] [-h]" ++ echo "Usage: vlvindex [-Z serverID] -n backendname | {-s includesuffix}* -T vlvTag [-d debuglevel] [-v] [-h]" + echo "Note: either \"-n backend\" or \"-s includesuffix\" are required." + echo "Options:" + echo " -Z serverID - Server instance identifier" +@@ -21,6 +21,7 @@ usage () + echo " -s includessuffix - Suffix to index" + echo " -T vlvTag - VLV index name" + echo " -d debuglevel - Debugging level" ++ echo " -v - Display version" + echo " -h - Display usage" + } + +diff --git a/man/man8/vlvindex.8 b/man/man8/vlvindex.8 +index f3e1748..4d9497a 100644 +--- a/man/man8/vlvindex.8 ++++ b/man/man8/vlvindex.8 +@@ -18,7 +18,7 @@ + .SH NAME + vlvindex - Directory Server script for VLV indexing + .SH SYNOPSIS +-vlvindex [\-Z serverID] \-n backendname | {\-s includesuffix}* \-T vlvTag [\-d debuglevel] [\-h] ++vlvindex [\-Z serverID] \-n backendname | {\-s includesuffix}* \-T vlvTag [\-d debuglevel] [\-v] [\-h] + .SH DESCRIPTION + Creates virtual list view (VLV) indexes, known in the Directory Server Console as browsing indexes. VLV indexes introduce flexibility in the way search results are viewed. VLV index configuration must already exist prior to running this script. The Directory Server must be stopped before running this script. + .SH OPTIONS +@@ -40,6 +40,10 @@ This is the name of the vlv index entry under cn=config. + .B \fB\-d\fR \fIDebug Level\fR + Settings the debugging level. + .TP ++.B \fB\-v\fR ++.br ++Display the version. ++.TP + .B \fB\-h\fR + .br + Display the usage. +-- +2.9.4 + diff --git a/SOURCES/0047-Ticket-48864-Fix-FreeIPA-build.patch b/SOURCES/0047-Ticket-48864-Fix-FreeIPA-build.patch new file mode 100644 index 0000000..011e441 --- /dev/null +++ b/SOURCES/0047-Ticket-48864-Fix-FreeIPA-build.patch @@ -0,0 +1,46 @@ +From f007ba9e5ac0bbeee1c1d6b4e292b293629a838c Mon Sep 17 00:00:00 2001 +From: Viktor Ashirov +Date: Wed, 17 May 2017 22:03:54 +0200 +Subject: [PATCH] Issue 48864 - Fix FreeIPA build + +Bug Description: +FreeIPA build fails because of incorrect include files + +https://pagure.io/389-ds-base/issue/48864 + +Reviewed by: mreynolds (Thanks!) 
+--- + ldap/servers/slapd/slapi-plugin.h | 2 +- + ldap/servers/slapd/slapi_pal.h | 2 ++ + 2 files changed, 3 insertions(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index ec8917d..4084945 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -28,7 +28,7 @@ extern "C" { + #endif + + /* Provides our int types and platform specific requirements. */ +-#include ++#include "slapi_pal.h" + + #include "prtypes.h" + #include "ldap.h" +diff --git a/ldap/servers/slapd/slapi_pal.h b/ldap/servers/slapd/slapi_pal.h +index cb61d84..307679d 100644 +--- a/ldap/servers/slapd/slapi_pal.h ++++ b/ldap/servers/slapd/slapi_pal.h +@@ -19,7 +19,9 @@ + + #pragma once + ++#ifdef HAVE_CONFIG_H + #include ++#endif + + #ifdef HAVE_INTTYPES_H + #include +-- +2.9.4 + diff --git a/SOURCES/0048-Ticket-49157-fix-error-in-ds-logpipe.py.patch b/SOURCES/0048-Ticket-49157-fix-error-in-ds-logpipe.py.patch new file mode 100644 index 0000000..c1017a7 --- /dev/null +++ b/SOURCES/0048-Ticket-49157-fix-error-in-ds-logpipe.py.patch @@ -0,0 +1,32 @@ +From 33dc0b3fc6de5d7a400d24a69098ec1b23917e44 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 22 May 2017 12:25:42 -0400 +Subject: [PATCH] Ticket 49157 - fix error in ds-logpipe.py + +Description: Fix typo in ds-logpipe.py + +https://pagure.io/389-ds-base/issue/49157 + +Reviewed by: mreynolds(one line commit rule) + +(cherry picked from commit 15f5f6ac42768ae0cd2040cc4169abde8187bcdf) +--- + ldap/admin/src/scripts/ds-logpipe.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/admin/src/scripts/ds-logpipe.py b/ldap/admin/src/scripts/ds-logpipe.py +index 13712ea..f29a9ff 100644 +--- a/ldap/admin/src/scripts/ds-logpipe.py ++++ b/ldap/admin/src/scripts/ds-logpipe.py +@@ -318,7 +318,7 @@ except OSError as e: + sys.exit(1) + else: + print("Failed to create log pipe - %s [error %d]" % (e.strerror, e.errno)) +- sys.ext(1) ++ sys.exit(1) + + if debug: + print("Listening to log pipe", logfname, "number of lines", maxlines) +-- +2.9.4 + diff --git a/SOURCES/0049-Ticket-49267-autosize-split-of-0-results-in-dbcache-.patch b/SOURCES/0049-Ticket-49267-autosize-split-of-0-results-in-dbcache-.patch new file mode 100644 index 0000000..7c58465 --- /dev/null +++ b/SOURCES/0049-Ticket-49267-autosize-split-of-0-results-in-dbcache-.patch @@ -0,0 +1,62 @@ +From e52c519a8553dd8abee5740714054ebbdd59e51a Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Tue, 23 May 2017 11:03:24 +1000 +Subject: [PATCH] Ticket 49267 - autosize split of 0 results in dbcache of 0 + +Bug Description: autosize split of 0 results in a dbcache of 0. This was +due to a missing bounds check on the value for 0. In theory this could +still be problematic if the value was say 1% ... But hopefully we don't +see that :) + +Fix Description: Add the bounds check. + +https://pagure.io/389-ds-base/issue/49267 + +Author: wibrown + +Review by: mreynolds (Thanks!) 
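+
+A quick worked example of the sizing maths, assuming the shipped defaults
+(nsslapd-cache-autosize 10%, split falling back to 40%) and 8GiB of
+available memory; this is a simplified sketch that ignores the per-backend
+division of the entry cache:
+
+    MEGABYTE = 1024 * 1024
+    available = 8 * 1024 * MEGABYTE          # assumed available memory
+    zone_size = (10 * available) // 100      # nsslapd-cache-autosize = 10
+    db_size = (40 * zone_size) // 100        # split of 0 now falls back to 40
+    db_size = min(db_size, 512 * MEGABYTE)   # dbcache capped at 512MB
+    entry_cache = zone_size - db_size        # remainder goes to entry caches
+    # -> roughly 819MB zone, 327MB dbcache, 491MB entry cache; before this
+    #    fix a split of 0 silently produced a dbcache of 0.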
+ +(cherry picked from commit 22d4865ea20acb6e6c11aed10d09241b09bb711c) +--- + ldap/servers/slapd/back-ldbm/start.c | 14 +++++++++++++- + 1 file changed, 13 insertions(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c +index a207bd8..1834a19 100644 +--- a/ldap/servers/slapd/back-ldbm/start.c ++++ b/ldap/servers/slapd/back-ldbm/start.c +@@ -101,7 +101,11 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + /* This doesn't control the availability of the feature, so we can take the + * default from ldbm_config.c + */ +- autosize_db_percentage_split = li->li_cache_autosize_split; ++ if (li->li_cache_autosize_split == 0) { ++ autosize_db_percentage_split = 40; ++ } else { ++ autosize_db_percentage_split = li->li_cache_autosize_split; ++ } + + + /* Check the values are sane. */ +@@ -131,10 +135,18 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + db_size = (autosize_db_percentage_split * zone_size) / 100; + + /* Cap the DB size at 512MB, as this doesn't help perf much more (lkrispen's advice) */ ++ /* NOTE: Do we need a minimum DB size? */ + if (db_size > (512 * MEGABYTE)) { + db_size = (512 * MEGABYTE); + } + ++ /* NOTE: Because of how we workout entry_size, even if ++ * have autosize split to say ... 90% for dbcache, because ++ * we cap db_size, we use zone_size - db_size, meaning that entry ++ * cache still gets the remaining memory *even* though we didn't use it all. ++ * If we didn't do this, entry_cache would only get 10% of of the avail, even ++ * if db_size was caped at say 5% down from 90. ++ */ + if (backend_count > 0 ) { + /* Number of entry cache pages per backend. */ + entry_size = (zone_size - db_size) / backend_count; +-- +2.9.4 + diff --git a/SOURCES/0050-Ticket-49231-force-EXTERNAL-always.patch b/SOURCES/0050-Ticket-49231-force-EXTERNAL-always.patch new file mode 100644 index 0000000..685f9ba --- /dev/null +++ b/SOURCES/0050-Ticket-49231-force-EXTERNAL-always.patch @@ -0,0 +1,114 @@ +From d2648bbddbf087c4e3803a89cb67541a50682eae Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Mon, 15 May 2017 09:04:45 +1000 +Subject: [PATCH] Ticket 49231 - force EXTERNAL always + +Bug Description: Because of how our sasl code works, EXTERNAL bypasses +a number of checks so is always available. + +Fix Description: Force EXTERNAL to the present mech list, regardless +of the whitelist. + +https://pagure.io/389-ds-base/issue/49231 + +Author: wibrown + +Review by: mreynosd (Thanks!) + +(cherry picked from commit e6e0db35842fc6612134cff5a08c4968230d1b2f) +--- + dirsrvtests/tests/suites/sasl/allowed_mechs.py | 13 +++++++++++-- + ldap/servers/slapd/charray.c | 14 ++++++++++++++ + ldap/servers/slapd/saslbind.c | 9 +++++++++ + ldap/servers/slapd/slapi-private.h | 2 ++ + 4 files changed, 36 insertions(+), 2 deletions(-) + +diff --git a/dirsrvtests/tests/suites/sasl/allowed_mechs.py b/dirsrvtests/tests/suites/sasl/allowed_mechs.py +index a3e385e..7958db4 100644 +--- a/dirsrvtests/tests/suites/sasl/allowed_mechs.py ++++ b/dirsrvtests/tests/suites/sasl/allowed_mechs.py +@@ -25,12 +25,21 @@ def test_sasl_allowed_mechs(topology_st): + assert('EXTERNAL' in orig_mechs) + + # Now edit the supported mechs. CHeck them again. +- standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'EXTERNAL, PLAIN') ++ standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN') + + limit_mechs = standalone.rootdse.supported_sasl() +- print(limit_mechs) + assert('PLAIN' in limit_mechs) ++ # Should always be in the allowed list, even if not set. 
+ assert('EXTERNAL' in limit_mechs) ++ # Should not be there! ++ assert('GSSAPI' not in limit_mechs) ++ ++ standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, EXTERNAL') ++ ++ limit_mechs = standalone.rootdse.supported_sasl() ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) ++ # Should not be there! + assert('GSSAPI' not in limit_mechs) + + # Do a config reset +diff --git a/ldap/servers/slapd/charray.c b/ldap/servers/slapd/charray.c +index 6b89714..9056f16 100644 +--- a/ldap/servers/slapd/charray.c ++++ b/ldap/servers/slapd/charray.c +@@ -272,6 +272,20 @@ charray_utf8_inlist( + return( 0 ); + } + ++/* ++ * Assert that some str s is in the charray, or add it. ++ */ ++void ++charray_assert_present(char ***a, char *s) ++{ ++ int result = charray_utf8_inlist(*a, s); ++ /* Not in the list */ ++ if (result == 0) { ++ char *sdup = slapi_ch_strdup(s); ++ slapi_ch_array_add_ext(a, sdup); ++ } ++} ++ + int slapi_ch_array_utf8_inlist(char **a, char *s) + { + return charray_utf8_inlist(a,s); +diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c +index 75b83fe..dd0c4fb 100644 +--- a/ldap/servers/slapd/saslbind.c ++++ b/ldap/servers/slapd/saslbind.c +@@ -794,6 +794,15 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + ret = sup_ret; + } + ++ /* ++ * https://pagure.io/389-ds-base/issue/49231 ++ * Because of the way that SASL mechs are managed in bind.c and saslbind.c ++ * even if EXTERNAL was *not* in the list of allowed mechs, it was allowed ++ * in the bind process because it bypasses lots of our checking. As a result ++ * we have to always present it. ++ */ ++ charray_assert_present(&ret, "EXTERNAL"); ++ + slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "<=\n"); + + return ret; +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index 3f732e8..0836d66 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -834,6 +834,8 @@ void charray_subtract( char **a, char **b, char ***c ); + char **charray_intersection(char **a, char **b); + int charray_get_index(char **array, char *s); + int charray_normdn_add(char ***chararray, char *dn, char *errstr); ++void charray_assert_present(char ***a, char *s); ++ + + /****************************************************************************** + * value array routines. +-- +2.9.4 + diff --git a/SOURCES/0051-Ticket-48538-Failed-to-delete-old-semaphore.patch b/SOURCES/0051-Ticket-48538-Failed-to-delete-old-semaphore.patch new file mode 100644 index 0000000..4d2d345 --- /dev/null +++ b/SOURCES/0051-Ticket-48538-Failed-to-delete-old-semaphore.patch @@ -0,0 +1,58 @@ +From bbc63ef4dab6c275b1d8b8fe6439483309781401 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Fri, 12 May 2017 10:09:32 +1000 +Subject: [PATCH] Ticket 48538 - Failed to delete old semaphore + +Bug Description: I misunderstood the sem_unlink call, and logged +the wrong filepath. + +Fix Description: Fix the file path of the semaphore. + +https://pagure.io/389-ds-base/issue/48538 + +Author: wibrown + +Review by: mreynolds (Thanks!) 
+ +(cherry picked from commit b81c8ba38c29e15e13b0dd0bf6f5d3c773d31b20) +--- + ldap/servers/slapd/snmp_collator.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c +index 21043d9..2deab91 100644 +--- a/ldap/servers/slapd/snmp_collator.c ++++ b/ldap/servers/slapd/snmp_collator.c +@@ -458,23 +458,23 @@ snmp_collator_create_semaphore(void) + * around. Recreate it since we don't know what state it is in. */ + if (sem_unlink(stats_sem_name) != 0) { + slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore", +- "Failed to delete old semaphore for stats file (%s). " +- "Error %d (%s).\n", stats_sem_name, errno, slapd_system_strerror(errno) ); ++ "Failed to delete old semaphore for stats file (/dev/shm/sem.%s). " ++ "Error %d (%s).\n", stats_sem_name + 1, errno, slapd_system_strerror(errno) ); + exit(1); + } + + if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) { + /* No dice */ + slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore", +- "Failed to create semaphore for stats file (%s). Error %d (%s).\n", +- stats_sem_name, errno, slapd_system_strerror(errno) ); ++ "Failed to create semaphore for stats file (/dev/shm/sem.%s). Error %d (%s).\n", ++ stats_sem_name + 1, errno, slapd_system_strerror(errno) ); + exit(1); + } + } else { + /* Some other problem occurred creating the semaphore. */ + slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore", +- "Failed to create semaphore for stats file (%s). Error %d.(%s)\n", +- stats_sem_name, errno, slapd_system_strerror(errno) ); ++ "Failed to create semaphore for stats file (/dev/shm/sem.%s). Error %d.(%s)\n", ++ stats_sem_name + 1, errno, slapd_system_strerror(errno) ); + exit(1); + } + } +-- +2.9.4 + diff --git a/SOURCES/0052-Ticket-49257-Reject-nsslapd-cachememsize-nsslapd-cac.patch b/SOURCES/0052-Ticket-49257-Reject-nsslapd-cachememsize-nsslapd-cac.patch new file mode 100644 index 0000000..b75d574 --- /dev/null +++ b/SOURCES/0052-Ticket-49257-Reject-nsslapd-cachememsize-nsslapd-cac.patch @@ -0,0 +1,131 @@ +From 0f04c8e7c1219940baf0ae9c1bcb2464ddf079df Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 16 May 2017 13:19:43 -0400 +Subject: [PATCH] Ticket 49257 - Reject nsslapd-cachememsize & + nsslapd-cachesize when nsslapd-cache-autosize is set + +Description: We need to also reject entry cache changes when cache autosizing is being used. + + I also found out that we were not registering the ldbm instance callbacks at startup. + So all those functions were only used when creating an instance, and not after it was + started. + +https://pagure.io/389-ds-base/issue/49257 + +Reviewed by: tbordaz(Thanks!) 
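+
+Example of the resulting behaviour, using python-ldap against a running
+instance (URL, credentials and backend name below are placeholders):
+
+    import ldap
+
+    conn = ldap.initialize("ldap://localhost:389")
+    conn.simple_bind_s("cn=Directory Manager", "password")
+    dn = "cn=userRoot,cn=ldbm database,cn=plugins,cn=config"
+    try:
+        conn.modify_s(dn, [(ldap.MOD_REPLACE, "nsslapd-cachememsize",
+                            [b"209715200"])])
+    except ldap.UNWILLING_TO_PERFORM as e:
+        # Expected while nsslapd-cache-autosize is non-zero: a manual value
+        # would just be overwritten by auto-tuning at the next startup.
+        print("rejected:", e)
+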
+--- + ldap/servers/slapd/back-ldbm/instance.c | 19 +++++++++---- + .../servers/slapd/back-ldbm/ldbm_instance_config.c | 32 ++++++++++++++++++++-- + ldap/servers/slapd/back-ldbm/start.c | 2 +- + 3 files changed, 44 insertions(+), 9 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c +index f79d048..8b38644 100644 +--- a/ldap/servers/slapd/back-ldbm/instance.c ++++ b/ldap/servers/slapd/back-ldbm/instance.c +@@ -302,12 +302,19 @@ ldbm_instance_startall(struct ldbminfo *li) + inst = (ldbm_instance *) object_get_data(inst_obj); + ldbm_instance_set_flags(inst); + rc1 = ldbm_instance_start(inst->inst_be); +- if (rc1 != 0) { +- rc = rc1; +- } else { +- vlv_init(inst); +- slapi_mtn_be_started(inst->inst_be); +- } ++ if (rc1 != 0) { ++ rc = rc1; ++ } else { ++ if(ldbm_instance_config_load_dse_info(inst) != 0){ ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_startall", ++ "Loading database instance configuration failed for (%s)\n", ++ inst->inst_name); ++ rc = -1; ++ } else { ++ vlv_init(inst); ++ slapi_mtn_be_started(inst->inst_be); ++ } ++ } + inst_obj = objset_next_obj(li->li_instance_set, inst_obj); + } + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +index 55f1887..49a6cac 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +@@ -72,6 +72,18 @@ ldbm_instance_config_cachesize_set(void *arg, void *value, char *errorbuf, int p + /* Do whatever we can to make sure the data is ok. */ + + if (apply) { ++ if (CONFIG_PHASE_RUNNING == phase) { ++ if (val > 0 && inst->inst_li->li_cache_autosize) { ++ /* We are auto-tuning the cache, so this change would be overwritten - return an error */ ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Error: \"nsslapd-cachesize\" can not be updated while \"nsslapd-cache-autosize\" is set " ++ "in \"cn=config,cn=ldbm database,cn=plugins,cn=config\"."); ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachesize_set", ++ "\"nsslapd-cachesize\" can not be set while \"nsslapd-cache-autosize\" is set " ++ "in \"cn=config,cn=ldbm database,cn=plugins,cn=config\".\n"); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } ++ } + cache_set_max_entries(&(inst->inst_cache), val); + } + +@@ -87,7 +99,11 @@ ldbm_instance_config_cachememsize_get(void *arg) + } + + static int +-ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, int phase, int apply) ++ldbm_instance_config_cachememsize_set(void *arg, ++ void *value, ++ char *errorbuf, ++ int phase, ++ int apply) + { + ldbm_instance *inst = (ldbm_instance *) arg; + int retval = LDAP_SUCCESS; +@@ -107,6 +123,18 @@ ldbm_instance_config_cachememsize_set(void *arg, void *value, char *errorbuf, in + */ + + if (apply) { ++ if (CONFIG_PHASE_RUNNING == phase) { ++ if (val > 0 && inst->inst_li->li_cache_autosize) { ++ /* We are auto-tuning the cache, so this change would be overwritten - return an error */ ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Error: \"nsslapd-cachememsize\" can not be updated while \"nsslapd-cache-autosize\" is set " ++ "in \"cn=config,cn=ldbm database,cn=plugins,cn=config\"."); ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", ++ "\"nsslapd-cachememsize\" can not be set while \"nsslapd-cache-autosize\" is set " ++ "in \"cn=config,cn=ldbm database,cn=plugins,cn=config\".\n"); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } ++ } + if (val > 
inst->inst_cache.c_maxsize) { + delta = val - inst->inst_cache.c_maxsize; + delta_original = delta; +@@ -825,7 +853,7 @@ ldbm_instance_modify_config_entry_callback(Slapi_PBlock *pb, Slapi_Entry* entryB + continue; + } + +- /* This assumes there is only one bval for this mod. */ ++ /* This assumes there is only one bval for this mod. */ + if (mods[i]->mod_bvalues == NULL) { + /* This avoids the null pointer deref. + * In ldbm_config.c ldbm_config_set, it checks for the NULL. +diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c +index 1834a19..d4e8bb8 100644 +--- a/ldap/servers/slapd/back-ldbm/start.c ++++ b/ldap/servers/slapd/back-ldbm/start.c +@@ -169,7 +169,7 @@ ldbm_back_start_autotune(struct ldbminfo *li) { + } + + slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk physical memory\n", mi->system_total_bytes / 1024); +- slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk avaliable\n", mi->system_available_bytes / 1024); ++ slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %luk available\n", mi->system_available_bytes / 1024); + + /* We've now calculated the autotuning values. Do we need to apply it? + * we use the logic of "if size is 0, or autosize is > 0. This way three +-- +2.9.4 + diff --git a/SOURCES/0053-Ticket-49257-Reject-dbcachesize-updates-while-auto-c.patch b/SOURCES/0053-Ticket-49257-Reject-dbcachesize-updates-while-auto-c.patch new file mode 100644 index 0000000..de2e652 --- /dev/null +++ b/SOURCES/0053-Ticket-49257-Reject-dbcachesize-updates-while-auto-c.patch @@ -0,0 +1,53 @@ +From 550d30d3aa27cd69057604e1ee7d5ca43711d718 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 15 May 2017 13:30:22 -0400 +Subject: [PATCH] Ticket 49257 - Reject dbcachesize updates while auto cache + sizing is enabled + +Description: We should reject updates to nsslapd-dbcachesize while auto cache sizing + is in effect. This is because at startup we would overwrite the + manually set dbcache size anyway. It would never take effect, so it + should be rejected. + +https://pagure.io/389-ds-base/issue/49257 + +Reviewed by: tbordaz & firstyear(Thanks!!) +--- + ldap/servers/slapd/back-ldbm/ldbm_config.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c +index f7edd9e..6c1dda0 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c +@@ -420,7 +420,7 @@ static int ldbm_config_dbcachesize_set(void *arg, void *value, char *errorbuf, i + /* Stop the user configuring a stupidly small cache */ + /* min: 8KB (page size) * def thrd cnts (threadnumber==20). */ + #define DBDEFMINSIZ 500000 +- /* We allow a value of 0, because the autotuting in start.c will ++ /* We allow a value of 0, because the autotuning in start.c will + * register that, and trigger the recalculation of the dbcachesize as + * needed on the next start up. 
+ */ +@@ -443,7 +443,18 @@ static int ldbm_config_dbcachesize_set(void *arg, void *value, char *errorbuf, i + return LDAP_UNWILLING_TO_PERFORM; + } + } ++ + if (CONFIG_PHASE_RUNNING == phase) { ++ if (val > 0 && li->li_cache_autosize) { ++ /* We are auto-tuning the cache, so this change would be overwritten - return an error */ ++ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, ++ "Error: \"nsslapd-dbcachesize\" can not be updated while \"nsslapd-cache-autosize\" is set " ++ "in \"cn=config,cn=ldbm database,cn=plugins,cn=config\"."); ++ slapi_log_err(SLAPI_LOG_ERR, "ldbm_config_dbcachesize_set", ++ "\"nsslapd-dbcachesize\" can not be set while \"nsslapd-cache-autosize\" is set " ++ "in \"cn=config,cn=ldbm database,cn=plugins,cn=config\".\n"); ++ return LDAP_UNWILLING_TO_PERFORM; ++ } + li->li_new_dbcachesize = val; + if (val == 0) { + slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_config_dbcachesize_set", "cache size reset to 0, will be autosized on next startup.\n"); +-- +2.9.4 + diff --git a/SOURCES/0054-Ticket-49184-adjust-logging-level-in-MO-plugin.patch b/SOURCES/0054-Ticket-49184-adjust-logging-level-in-MO-plugin.patch new file mode 100644 index 0000000..7e34e4b --- /dev/null +++ b/SOURCES/0054-Ticket-49184-adjust-logging-level-in-MO-plugin.patch @@ -0,0 +1,30 @@ +From db98cb29158741cc960f1e1a2df3d4214f5bd36e Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 6 Jun 2017 10:50:19 -0400 +Subject: [PATCH] Ticket 49184 - adjust logging level in MO plugin + +Description: Change logging level for benign message + +https://pagure.io/389-ds-base/issue/49184 + +Reviewed by: mreynolds(one line commit ruile) +--- + ldap/servers/plugins/memberof/memberof.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c +index 5cd2c01..9bbe13c 100644 +--- a/ldap/servers/plugins/memberof/memberof.c ++++ b/ldap/servers/plugins/memberof/memberof.c +@@ -3396,7 +3396,7 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) + /* This is quite unexpected, after a call to memberof_get_groups + * ndn ancestors should be in the cache + */ +- slapi_log_err(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_fix_memberof_callback: Weird, %s is not in the cache\n", ndn); ++ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_fix_memberof_callback: Weird, %s is not in the cache\n", ndn); + } + } + } +-- +2.9.4 + diff --git a/SOURCES/0055-Ticket-49241-add-symblic-link-location-to-db2bak.pl-.patch b/SOURCES/0055-Ticket-49241-add-symblic-link-location-to-db2bak.pl-.patch new file mode 100644 index 0000000..f5db74e --- /dev/null +++ b/SOURCES/0055-Ticket-49241-add-symblic-link-location-to-db2bak.pl-.patch @@ -0,0 +1,38 @@ +From 6935bd0821395051c0483b0ee393d2d4567f6f0c Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 24 May 2017 12:15:20 -0400 +Subject: [PATCH] Ticket 49241 - add symblic link location to db2bak.pl output + +Description: If a symbolic link is used for the script's backup + location then add info to the output. + +https://pagure.io/389-ds-base/issue/49241 + +Reviewed by: firstyear(Thanks!) 
+ +(cherry picked from commit 95a7f23262076d90fdc8a9ec76e131e9e4c09bcc) +--- + ldap/admin/src/scripts/db2bak.pl.in | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/ldap/admin/src/scripts/db2bak.pl.in b/ldap/admin/src/scripts/db2bak.pl.in +index 335285e..352a01e 100644 +--- a/ldap/admin/src/scripts/db2bak.pl.in ++++ b/ldap/admin/src/scripts/db2bak.pl.in +@@ -105,7 +105,12 @@ if ($archivedir eq "") { + } else { + $symname = $archivedir; + } +- print("Back up directory: $archivedir\n"); ++ if ($symname eq "") { ++ print("Back up directory: $archivedir\n"); ++ } else { ++ print("Back up directory: $archivedir -> $mybakdir/$archivebase\n"); ++ } ++ + # If an archive dir is specified, create it as a symlink pointing + # to the default backup dir not to violate the selinux policy. + $archivedir = "${mybakdir}/${archivebase}"; +-- +2.9.4 + diff --git a/SOURCES/0056-Ticket-49313-Change-the-retrochangelog-default-cache.patch b/SOURCES/0056-Ticket-49313-Change-the-retrochangelog-default-cache.patch new file mode 100644 index 0000000..c1089ec --- /dev/null +++ b/SOURCES/0056-Ticket-49313-Change-the-retrochangelog-default-cache.patch @@ -0,0 +1,47 @@ +From 0fc3c803c34311eb05c5c7a7e710c8591b592649 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Thu, 27 Jul 2017 18:10:05 +0200 +Subject: [PATCH] Ticket 49313 - Change the retrochangelog default cache size + +Bug Description: + Default retroCL backend entry cache size is 2Mb. + It has been reported in many deployments that DB corruption could + be prevented by increasing entry cache to 200Mb. + There is no identified reproducible steps to debug this DB corruption. + So to prevent this problem we are increasing the entry cache + +Fix Description: + Set default cn=changelog cache to 200Mb (based on production cases) + An other option would be to set a maximum number of entries but + as we do not know if it works to prevent DB corruption, let's prefere + entry cache size + +https://pagure.io/389-ds-base/issue/49313 + +Reviewed by: William Brown + +Platforms tested: F23 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/plugins/retrocl/retrocl.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/retrocl/retrocl.h b/ldap/servers/plugins/retrocl/retrocl.h +index 6963d4b..eef1a17 100644 +--- a/ldap/servers/plugins/retrocl/retrocl.h ++++ b/ldap/servers/plugins/retrocl/retrocl.h +@@ -58,7 +58,7 @@ typedef struct _cnumRet { + #else + #define RETROCL_DLL_DEFAULT_THREAD_STACKSIZE 131072L + #endif +-#define RETROCL_BE_CACHEMEMSIZE "2097152" ++#define RETROCL_BE_CACHEMEMSIZE "209715200" + #define RETROCL_BE_CACHESIZE "-1" + #define RETROCL_PLUGIN_NAME "DSRetroclPlugin" + +-- +2.9.4 + diff --git a/SOURCES/0057-Ticket-49287-v3-extend-csnpl-handling-to-multiple-ba.patch b/SOURCES/0057-Ticket-49287-v3-extend-csnpl-handling-to-multiple-ba.patch new file mode 100644 index 0000000..8a71d55 --- /dev/null +++ b/SOURCES/0057-Ticket-49287-v3-extend-csnpl-handling-to-multiple-ba.patch @@ -0,0 +1,795 @@ +From 6b5aa0e288f1ea5553d4dd5d220d4e5daf50a247 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 31 Jul 2017 14:45:50 -0400 +Subject: [PATCH] Ticket 49287 - v3 extend csnpl handling to multiple backends + + The csn pending list mechanism failed if internal operation affected multiple backends + + This fix is an extension to the fix in ticket 49008, the thread local data now also contains + a list of all affected replicas. 
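+
+ As a rough sketch of the idea (illustrative Python only, mirroring the
+ CSNPL_CTX structure added below, not actual server code):
+
+     # One primary CSN plus every replica touched by the same operation.
+     class CsnplCtx(object):
+         def __init__(self, prim_csn, prim_repl):
+             self.prim_csn = prim_csn
+             self.prim_repl = prim_repl
+             self.sec_repl = []                # extra replicas, deduplicated
+
+         def add_replica(self, repl):
+             if repl is not self.prim_repl and repl not in self.sec_repl:
+                 self.sec_repl.append(repl)
+
+         def involves(self, repl):
+             return repl is self.prim_repl or repl in self.sec_repl
+
+     ctx = CsnplCtx("csn-123", "userRoot")
+     ctx.add_replica("ipaca")                  # internal op hit a 2nd backend
+     ctx.add_replica("ipaca")                  # duplicate is ignored
+     assert ctx.involves("ipaca") and ctx.involves("userRoot")
+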
+ + http://www.port389.org/docs/389ds/design/csn-pending-lists-and-ruv-update.html + + Reviewed by: William, Thierry - thanks +--- + ldap/servers/plugins/replication/csnpl.c | 85 ++++++++-- + ldap/servers/plugins/replication/csnpl.h | 8 +- + ldap/servers/plugins/replication/repl5.h | 22 ++- + ldap/servers/plugins/replication/repl5_init.c | 48 +++++- + ldap/servers/plugins/replication/repl5_plugins.c | 16 +- + ldap/servers/plugins/replication/repl5_replica.c | 18 ++- + ldap/servers/plugins/replication/repl5_ruv.c | 191 ++++++++++++++--------- + ldap/servers/plugins/replication/repl5_ruv.h | 6 +- + ldap/servers/slapd/slapi-private.h | 2 +- + 9 files changed, 283 insertions(+), 113 deletions(-) + +diff --git a/ldap/servers/plugins/replication/csnpl.c b/ldap/servers/plugins/replication/csnpl.c +index 4a0f5f5..12a0bb8 100644 +--- a/ldap/servers/plugins/replication/csnpl.c ++++ b/ldap/servers/plugins/replication/csnpl.c +@@ -14,7 +14,6 @@ + + #include "csnpl.h" + #include "llist.h" +-#include "repl_shared.h" + + struct csnpl + { +@@ -22,13 +21,17 @@ struct csnpl + Slapi_RWLock* csnLock; /* lock to serialize access to PL */ + }; + ++ + typedef struct _csnpldata + { + PRBool committed; /* True if CSN committed */ + CSN *csn; /* The actual CSN */ ++ Replica * prim_replica; /* The replica where the prom csn was generated */ + const CSN *prim_csn; /* The primary CSN of an operation consising of multiple sub ops*/ + } csnpldata; + ++static PRBool csn_primary_or_nested(csnpldata *csn_data, const CSNPL_CTX *csn_ctx); ++ + /* forward declarations */ + #ifdef DEBUG + static void _csnplDumpContentNoLock(CSNPL *csnpl, const char *caller); +@@ -104,7 +107,7 @@ void csnplFree (CSNPL **csnpl) + * 1 if the csn has already been seen + * -1 for any other kind of errors + */ +-int csnplInsert (CSNPL *csnpl, const CSN *csn, const CSN *prim_csn) ++int csnplInsert (CSNPL *csnpl, const CSN *csn, const CSNPL_CTX *prim_csn) + { + int rc; + csnpldata *csnplnode; +@@ -129,10 +132,13 @@ int csnplInsert (CSNPL *csnpl, const CSN *csn, const CSN *prim_csn) + return 1; + } + +- csnplnode = (csnpldata *)slapi_ch_malloc(sizeof(csnpldata)); ++ csnplnode = (csnpldata *)slapi_ch_calloc(1, sizeof(csnpldata)); + csnplnode->committed = PR_FALSE; + csnplnode->csn = csn_dup(csn); +- csnplnode->prim_csn = prim_csn; ++ if (prim_csn) { ++ csnplnode->prim_csn = prim_csn->prim_csn; ++ csnplnode->prim_replica = prim_csn->prim_repl; ++ } + csn_as_string(csn, PR_FALSE, csn_str); + rc = llistInsertTail (csnpl->csnList, csn_str, csnplnode); + +@@ -187,8 +193,58 @@ int csnplRemove (CSNPL *csnpl, const CSN *csn) + + return 0; + } ++PRBool csn_primary(Replica *replica, const CSN *csn, const CSNPL_CTX *csn_ctx) ++{ ++ if (csn_ctx == NULL) ++ return PR_FALSE; ++ ++ if (replica != csn_ctx->prim_repl) { ++ /* The CSNs are not from the same replication topology ++ * so even if the csn values are equal they are not related ++ * to the same operation ++ */ ++ return PR_FALSE; ++ } ++ ++ /* Here the two CSNs belong to the same replication topology */ ++ ++ /* check if the CSN identifies the primary update */ ++ if (csn_is_equal(csn, csn_ctx->prim_csn)) { ++ return PR_TRUE; ++ } ++ ++ return PR_FALSE; ++} ++ ++static PRBool csn_primary_or_nested(csnpldata *csn_data, const CSNPL_CTX *csn_ctx) ++{ ++ if ((csn_data == NULL) || (csn_ctx == NULL)) ++ return PR_FALSE; ++ ++ if (csn_data->prim_replica != csn_ctx->prim_repl) { ++ /* The CSNs are not from the same replication topology ++ * so even if the csn values are equal they are not related ++ * to the same 
operation ++ */ ++ return PR_FALSE; ++ } ++ ++ /* Here the two CSNs belong to the same replication topology */ ++ ++ /* First check if the CSN identifies the primary update */ ++ if (csn_is_equal(csn_data->csn, csn_ctx->prim_csn)) { ++ return PR_TRUE; ++ } ++ ++ /* Second check if the CSN identifies a nested update */ ++ if (csn_is_equal(csn_data->prim_csn, csn_ctx->prim_csn)) { ++ return PR_TRUE; ++ } ++ ++ return PR_FALSE; ++} + +-int csnplRemoveAll (CSNPL *csnpl, const CSN *csn) ++int csnplRemoveAll (CSNPL *csnpl, const CSNPL_CTX *csn_ctx) + { + csnpldata *data; + void *iterator; +@@ -197,8 +253,7 @@ int csnplRemoveAll (CSNPL *csnpl, const CSN *csn) + data = (csnpldata *)llistGetFirst(csnpl->csnList, &iterator); + while (NULL != data) + { +- if (csn_is_equal(data->csn, csn) || +- csn_is_equal(data->prim_csn, csn)) { ++ if (csn_primary_or_nested(data, csn_ctx)) { + csnpldata_free(&data); + data = (csnpldata *)llistRemoveCurrentAndGetNext(csnpl->csnList, &iterator); + } else { +@@ -213,13 +268,13 @@ int csnplRemoveAll (CSNPL *csnpl, const CSN *csn) + } + + +-int csnplCommitAll (CSNPL *csnpl, const CSN *csn) ++int csnplCommitAll (CSNPL *csnpl, const CSNPL_CTX *csn_ctx) + { + csnpldata *data; + void *iterator; + char csn_str[CSN_STRSIZE]; + +- csn_as_string(csn, PR_FALSE, csn_str); ++ csn_as_string(csn_ctx->prim_csn, PR_FALSE, csn_str); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "csnplCommitALL: committing all csns for csn %s\n", csn_str); + slapi_rwlock_wrlock (csnpl->csnLock); +@@ -229,8 +284,7 @@ int csnplCommitAll (CSNPL *csnpl, const CSN *csn) + csn_as_string(data->csn, PR_FALSE, csn_str); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "csnplCommitALL: processing data csn %s\n", csn_str); +- if (csn_is_equal(data->csn, csn) || +- csn_is_equal(data->prim_csn, csn)) { ++ if (csn_primary_or_nested(data, csn_ctx)) { + data->committed = PR_TRUE; + } + data = (csnpldata *)llistGetNext (csnpl->csnList, &iterator); +@@ -395,7 +449,12 @@ static void _csnplDumpContentNoLock(CSNPL *csnpl, const char *caller) + + /* wrapper around csn_free, to satisfy NSPR thread context API */ + void +-csnplFreeCSN (void *arg) ++csnplFreeCSNPL_CTX (void *arg) + { +- csn_free((CSN **)&arg); ++ CSNPL_CTX *csnpl_ctx = (CSNPL_CTX *)arg; ++ csn_free(&csnpl_ctx->prim_csn); ++ if (csnpl_ctx->sec_repl) { ++ slapi_ch_free((void **)&csnpl_ctx->sec_repl); ++ } ++ slapi_ch_free((void **)&csnpl_ctx); + } +diff --git a/ldap/servers/plugins/replication/csnpl.h b/ldap/servers/plugins/replication/csnpl.h +index 594c8f2..1036c62 100644 +--- a/ldap/servers/plugins/replication/csnpl.h ++++ b/ldap/servers/plugins/replication/csnpl.h +@@ -17,15 +17,17 @@ + #define CSNPL_H + + #include "slapi-private.h" ++#include "repl5.h" + + typedef struct csnpl CSNPL; + + CSNPL* csnplNew(void); + void csnplFree (CSNPL **csnpl); +-int csnplInsert (CSNPL *csnpl, const CSN *csn, const CSN *prim_csn); ++int csnplInsert (CSNPL *csnpl, const CSN *csn, const CSNPL_CTX *prim_csn); + int csnplRemove (CSNPL *csnpl, const CSN *csn); +-int csnplRemoveAll (CSNPL *csnpl, const CSN *csn); +-int csnplCommitAll (CSNPL *csnpl, const CSN *csn); ++int csnplRemoveAll (CSNPL *csnpl, const CSNPL_CTX *csn_ctx); ++int csnplCommitAll (CSNPL *csnpl, const CSNPL_CTX *csn_ctx); ++PRBool csn_primary(Replica *replica, const CSN *csn, const CSNPL_CTX *csn_ctx); + CSN* csnplGetMinCSN (CSNPL *csnpl, PRBool *committed); + int csnplCommit (CSNPL *csnpl, const CSN *csn); + CSN *csnplRollUp(CSNPL *csnpl, CSN ** first); +diff --git 
a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h +index 1d8989c..718f64e 100644 +--- a/ldap/servers/plugins/replication/repl5.h ++++ b/ldap/servers/plugins/replication/repl5.h +@@ -228,12 +228,27 @@ int multimaster_be_betxnpostop_delete (Slapi_PBlock *pb); + int multimaster_be_betxnpostop_add (Slapi_PBlock *pb); + int multimaster_be_betxnpostop_modify (Slapi_PBlock *pb); + ++/* In repl5_replica.c */ ++typedef struct replica Replica; ++ ++/* csn pending lists */ ++#define CSNPL_CTX_REPLCNT 4 ++typedef struct CSNPL_CTX ++{ ++ CSN *prim_csn; ++ size_t repl_alloc; /* max number of replicas */ ++ size_t repl_cnt; /* number of replicas affected by operation */ ++ Replica *prim_repl; /* pirmary replica */ ++ Replica **sec_repl; /* additional replicas affected */ ++} CSNPL_CTX; ++ + /* In repl5_init.c */ + extern int repl5_is_betxn; + char* get_thread_private_agmtname(void); + void set_thread_private_agmtname (const char *agmtname); +-void set_thread_primary_csn (const CSN *prim_csn); +-CSN* get_thread_primary_csn(void); ++void set_thread_primary_csn (const CSN *prim_csn, Replica *repl); ++void add_replica_to_primcsn(CSNPL_CTX *prim_csn, Replica *repl); ++CSNPL_CTX* get_thread_primary_csn(void); + void* get_thread_private_cache(void); + void set_thread_private_cache (void *buf); + char* get_repl_session_id (Slapi_PBlock *pb, char *id, CSN **opcsn); +@@ -302,7 +317,6 @@ typedef struct repl_bos Repl_Bos; + + /* In repl5_agmt.c */ + typedef struct repl5agmt Repl_Agmt; +-typedef struct replica Replica; + + #define TRANSPORT_FLAG_SSL 1 + #define TRANSPORT_FLAG_TLS 2 +@@ -629,6 +643,8 @@ PRUint64 replica_get_precise_purging(Replica *r); + void replica_set_precise_purging(Replica *r, PRUint64 on_off); + PRBool ignore_error_and_keep_going(int error); + void replica_check_release_timeout(Replica *r, Slapi_PBlock *pb); ++void replica_lock_replica(Replica *r); ++void replica_unlock_replica(Replica *r); + + /* The functions below handles the state flag */ + /* Current internal state flags */ +diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c +index edffb84..b0bc515 100644 +--- a/ldap/servers/plugins/replication/repl5_init.c ++++ b/ldap/servers/plugins/replication/repl5_init.c +@@ -154,26 +154,62 @@ set_thread_private_agmtname(const char *agmtname) + PR_SetThreadPrivate(thread_private_agmtname, (void *)agmtname); + } + +-CSN* ++CSNPL_CTX* + get_thread_primary_csn(void) + { +- CSN *prim_csn = NULL; ++ CSNPL_CTX *prim_csn = NULL; + if (thread_primary_csn) +- prim_csn = (CSN *)PR_GetThreadPrivate(thread_primary_csn); ++ prim_csn = (CSNPL_CTX *)PR_GetThreadPrivate(thread_primary_csn); ++ + return prim_csn; + } + void +-set_thread_primary_csn(const CSN *prim_csn) ++set_thread_primary_csn (const CSN *prim_csn, Replica *repl) + { + if (thread_primary_csn) { + if (prim_csn) { +- PR_SetThreadPrivate(thread_primary_csn, (void *)csn_dup(prim_csn)); ++ CSNPL_CTX *csnpl_ctx = (CSNPL_CTX *)slapi_ch_calloc(1,sizeof(CSNPL_CTX)); ++ csnpl_ctx->prim_csn = csn_dup(prim_csn); ++ /* repl_alloc, repl_cnt and sec_repl are 0 by calloc */ ++ csnpl_ctx->prim_repl = repl; ++ PR_SetThreadPrivate(thread_primary_csn, (void *)csnpl_ctx); + } else { + PR_SetThreadPrivate(thread_primary_csn, NULL); + } + } + } + ++void ++add_replica_to_primcsn(CSNPL_CTX *csnpl_ctx, Replica *repl) ++{ ++ size_t found = 0; ++ size_t it = 0; ++ ++ if (repl == csnpl_ctx->prim_repl) return; ++ ++ while (it < csnpl_ctx->repl_cnt) { ++ if (csnpl_ctx->sec_repl[it] == repl) { 
++ found = 1; ++ break; ++ } ++ it++; ++ } ++ if (found) return; ++ ++ if (csnpl_ctx->repl_cnt < csnpl_ctx->repl_alloc) { ++ csnpl_ctx->sec_repl[csnpl_ctx->repl_cnt++] = repl; ++ return; ++ } ++ csnpl_ctx->repl_alloc += CSNPL_CTX_REPLCNT; ++ if (csnpl_ctx->repl_cnt == 0) { ++ csnpl_ctx->sec_repl = (Replica **)slapi_ch_calloc(csnpl_ctx->repl_alloc, sizeof(Replica *)); ++ } else { ++ csnpl_ctx->sec_repl = (Replica **)slapi_ch_realloc((char *)csnpl_ctx->sec_repl, csnpl_ctx->repl_alloc * sizeof(Replica *)); ++ } ++ csnpl_ctx->sec_repl[csnpl_ctx->repl_cnt++] = repl; ++ return; ++} ++ + void* + get_thread_private_cache () + { +@@ -740,7 +776,7 @@ multimaster_start( Slapi_PBlock *pb ) + /* Initialize thread private data for logging. Ignore if fails */ + PR_NewThreadPrivateIndex (&thread_private_agmtname, NULL); + PR_NewThreadPrivateIndex (&thread_private_cache, NULL); +- PR_NewThreadPrivateIndex (&thread_primary_csn, csnplFreeCSN); ++ PR_NewThreadPrivateIndex (&thread_primary_csn, csnplFreeCSNPL_CTX); + + /* Decode the command line args to see if we're dumping to LDIF */ + is_ldif_dump = check_for_ldif_dump(pb); +diff --git a/ldap/servers/plugins/replication/repl5_plugins.c b/ldap/servers/plugins/replication/repl5_plugins.c +index 9ef06af..c31d9d5 100644 +--- a/ldap/servers/plugins/replication/repl5_plugins.c ++++ b/ldap/servers/plugins/replication/repl5_plugins.c +@@ -45,6 +45,7 @@ + #include "repl.h" + #include "cl5_api.h" + #include "urp.h" ++#include "csnpl.h" + + static char *local_purl = NULL; + static char *purl_attrs[] = {"nsslapd-localhost", "nsslapd-port", "nsslapd-secureport", NULL}; +@@ -1034,7 +1035,7 @@ write_changelog_and_ruv (Slapi_PBlock *pb) + { + Slapi_Operation *op = NULL; + CSN *opcsn; +- CSN *prim_csn; ++ CSNPL_CTX *prim_csn; + int rc; + slapi_operation_parameters *op_params = NULL; + Object *repl_obj = NULL; +@@ -1070,14 +1071,15 @@ write_changelog_and_ruv (Slapi_PBlock *pb) + if (repl_obj == NULL) + return return_value; + ++ r = (Replica*)object_get_data (repl_obj); ++ PR_ASSERT (r); ++ + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc); + if (rc) { /* op failed - just return */ + cancel_opcsn(pb); + goto common_return; + } + +- r = (Replica*)object_get_data (repl_obj); +- PR_ASSERT (r); + + replica_check_release_timeout(r, pb); + +@@ -1223,12 +1225,12 @@ write_changelog_and_ruv (Slapi_PBlock *pb) + common_return: + opcsn = operation_get_csn(op); + prim_csn = get_thread_primary_csn(); +- if (csn_is_equal(opcsn, prim_csn)) { ++ if (csn_primary(r, opcsn, prim_csn)) { + if (return_value == 0) { + /* the primary csn was succesfully committed + * unset it in the thread local data + */ +- set_thread_primary_csn(NULL); ++ set_thread_primary_csn(NULL, NULL); + } + } + if (repl_obj) { +@@ -1430,7 +1432,7 @@ cancel_opcsn (Slapi_PBlock *pb) + + ruv_obj = replica_get_ruv (r); + PR_ASSERT (ruv_obj); +- ruv_cancel_csn_inprogress ((RUV*)object_get_data (ruv_obj), opcsn, replica_get_rid(r)); ++ ruv_cancel_csn_inprogress (r, (RUV*)object_get_data (ruv_obj), opcsn, replica_get_rid(r)); + object_release (ruv_obj); + } + +@@ -1491,7 +1493,7 @@ process_operation (Slapi_PBlock *pb, const CSN *csn) + ruv = (RUV*)object_get_data (ruv_obj); + PR_ASSERT (ruv); + +- rc = ruv_add_csn_inprogress (ruv, csn); ++ rc = ruv_add_csn_inprogress (r, ruv, csn); + + object_release (ruv_obj); + object_release (r_obj); +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index 1bdc138..7927ac3 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c 
++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -923,7 +923,7 @@ replica_update_ruv(Replica *r, const CSN *updated_csn, const char *replica_purl) + } + } + /* Update max csn for local and remote replicas */ +- rc = ruv_update_ruv (ruv, updated_csn, replica_purl, r->repl_rid); ++ rc = ruv_update_ruv (ruv, updated_csn, replica_purl, r, r->repl_rid); + if (RUV_COVERS_CSN == rc) + { + slapi_log_err(SLAPI_LOG_REPL, +@@ -3663,7 +3663,7 @@ assign_csn_callback(const CSN *csn, void *data) + } + } + +- ruv_add_csn_inprogress (ruv, csn); ++ ruv_add_csn_inprogress (r, ruv, csn); + + replica_unlock(r->repl_lock); + +@@ -3692,13 +3692,13 @@ abort_csn_callback(const CSN *csn, void *data) + { + int rc = csnplRemove(r->min_csn_pl, csn); + if (rc) { +- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "abort_csn_callback - csnplRemove failed"); ++ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "abort_csn_callback - csnplRemove failed\n"); + replica_unlock(r->repl_lock); + return; + } + } + +- ruv_cancel_csn_inprogress (ruv, csn, replica_get_rid(r)); ++ ruv_cancel_csn_inprogress (r, ruv, csn, replica_get_rid(r)); + replica_unlock(r->repl_lock); + + object_release (ruv_obj); +@@ -4489,3 +4489,13 @@ replica_check_release_timeout(Replica *r, Slapi_PBlock *pb) + } + replica_unlock(r->repl_lock); + } ++void ++replica_lock_replica(Replica *r) ++{ ++ replica_lock(r->repl_lock); ++} ++void ++replica_unlock_replica(Replica *r) ++{ ++ replica_unlock(r->repl_lock); ++} +diff --git a/ldap/servers/plugins/replication/repl5_ruv.c b/ldap/servers/plugins/replication/repl5_ruv.c +index d59e6d2..39449b6 100644 +--- a/ldap/servers/plugins/replication/repl5_ruv.c ++++ b/ldap/servers/plugins/replication/repl5_ruv.c +@@ -77,7 +77,7 @@ static char *get_replgen_from_berval(const struct berval *bval); + static const char * const prefix_replicageneration = "{replicageneration}"; + static const char * const prefix_ruvcsn = "{replica "; /* intentionally missing '}' */ + +-static int ruv_update_ruv_element (RUV *ruv, RUVElement *replica, const CSN *csn, const char *replica_purl, PRBool isLocal); ++static int ruv_update_ruv_element (RUV *ruv, RUVElement *replica, const CSNPL_CTX *prim_csn, const char *replica_purl, PRBool isLocal); + + /* API implementation */ + +@@ -1599,13 +1599,13 @@ ruv_dump(const RUV *ruv, char *ruv_name, PRFileDesc *prFile) + + /* this function notifies the ruv that there are operations in progress so that + they can be added to the pending list for the appropriate client. 
*/ +-int ruv_add_csn_inprogress (RUV *ruv, const CSN *csn) ++int ruv_add_csn_inprogress (void *repl, RUV *ruv, const CSN *csn) + { + RUVElement* replica; + char csn_str[CSN_STRSIZE]; + int rc = RUV_SUCCESS; + int rid = csn_get_replicaid (csn); +- CSN *prim_csn; ++ CSNPL_CTX *prim_csn; + + PR_ASSERT (ruv && csn); + +@@ -1645,8 +1645,13 @@ int ruv_add_csn_inprogress (RUV *ruv, const CSN *csn) + } + prim_csn = get_thread_primary_csn(); + if (prim_csn == NULL) { +- set_thread_primary_csn(csn); ++ set_thread_primary_csn(csn, (Replica *)repl); + prim_csn = get_thread_primary_csn(); ++ } else { ++ /* the prim csn data already exist, need to check if ++ * current replica is already present ++ */ ++ add_replica_to_primcsn(prim_csn, (Replica *)repl); + } + rc = csnplInsert (replica->csnpl, csn, prim_csn); + if (rc == 1) /* we already seen this csn */ +@@ -1656,7 +1661,7 @@ int ruv_add_csn_inprogress (RUV *ruv, const CSN *csn) + "The csn %s has already be seen - ignoring\n", + csn_as_string (csn, PR_FALSE, csn_str)); + } +- set_thread_primary_csn(NULL); ++ set_thread_primary_csn(NULL, NULL); + rc = RUV_COVERS_CSN; + } + else if(rc != 0) +@@ -1681,11 +1686,13 @@ done: + return rc; + } + +-int ruv_cancel_csn_inprogress (RUV *ruv, const CSN *csn, ReplicaId local_rid) ++int ruv_cancel_csn_inprogress (void *repl, RUV *ruv, const CSN *csn, ReplicaId local_rid) + { +- RUVElement* replica; ++ RUVElement* repl_ruv; + int rc = RUV_SUCCESS; +- CSN *prim_csn = NULL; ++ CSNPL_CTX *prim_csn = NULL; ++ Replica *repl_it; ++ size_t it; + + + PR_ASSERT (ruv && csn); +@@ -1693,29 +1700,53 @@ int ruv_cancel_csn_inprogress (RUV *ruv, const CSN *csn, ReplicaId local_rid) + prim_csn = get_thread_primary_csn(); + /* locate ruvElement */ + slapi_rwlock_wrlock (ruv->lock); +- replica = ruvGetReplica (ruv, csn_get_replicaid (csn)); +- if (replica == NULL) { ++ repl_ruv = ruvGetReplica (ruv, csn_get_replicaid (csn)); ++ if (repl_ruv == NULL) { + /* ONREPL - log error */ + rc = RUV_NOTFOUND; + goto done; + } +- if (csn_is_equal(csn, prim_csn)) { +- /* the prim csn is cancelled, lets remove all dependent csns */ +- ReplicaId prim_rid = csn_get_replicaid (csn); +- replica = ruvGetReplica (ruv, prim_rid); +- rc = csnplRemoveAll (replica->csnpl, prim_csn); +- if (prim_rid != local_rid) { +- if( local_rid != READ_ONLY_REPLICA_ID) { +- replica = ruvGetReplica (ruv, local_rid); +- if (replica) { +- rc = csnplRemoveAll (replica->csnpl, prim_csn); +- } else { +- rc = RUV_NOTFOUND; +- } +- } +- } ++ if (csn_primary(repl, csn, prim_csn)) { ++ /* the prim csn is cancelled, lets remove all dependent csns */ ++ /* for the primary replica we can have modifications for two RIDS: ++ * - the local RID for direct or internal operations ++ * - a remote RID if the primary csn is for a replciated op. 
++ */ ++ ReplicaId prim_rid = csn_get_replicaid(csn); ++ repl_ruv = ruvGetReplica(ruv, prim_rid); ++ if (!repl_ruv) { ++ rc = RUV_NOTFOUND; ++ goto done; ++ } ++ rc = csnplRemoveAll(repl_ruv->csnpl, prim_csn); ++ ++ if (prim_rid != local_rid && local_rid != READ_ONLY_REPLICA_ID) { ++ repl_ruv = ruvGetReplica(ruv, local_rid); ++ if (!repl_ruv) { ++ rc = RUV_NOTFOUND; ++ goto done; ++ } ++ rc = csnplRemoveAll(repl_ruv->csnpl, prim_csn); ++ } ++ ++ for (it = 0; it < prim_csn->repl_cnt; it++) { ++ repl_it = prim_csn->sec_repl[it]; ++ replica_lock_replica(repl_it); ++ local_rid = replica_get_rid(repl_it); ++ if (local_rid != READ_ONLY_REPLICA_ID) { ++ Object *ruv_obj = replica_get_ruv(repl_it); ++ RUV *ruv_it = object_get_data(ruv_obj); ++ repl_ruv = ruvGetReplica(ruv_it, local_rid); ++ if (repl_ruv) { ++ rc = csnplRemoveAll(repl_ruv->csnpl, prim_csn); ++ } else { ++ rc = RUV_NOTFOUND; ++ } ++ } ++ replica_unlock_replica(repl_it); ++ } + } else { +- rc = csnplRemove (replica->csnpl, csn); ++ rc = csnplRemove (repl_ruv->csnpl, csn); + } + if (rc != 0) + rc = RUV_NOTFOUND; +@@ -1727,86 +1758,100 @@ done: + return rc; + } + +-int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, ReplicaId local_rid) ++int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, void *replica, ReplicaId local_rid) + { + int rc=RUV_SUCCESS; +- RUVElement *replica; ++ RUVElement *repl_ruv; + ReplicaId prim_rid; ++ Replica *repl_it = NULL; ++ size_t it = 0; + +- CSN *prim_csn = get_thread_primary_csn(); ++ CSNPL_CTX *prim_csn = get_thread_primary_csn(); + +- if (! csn_is_equal(csn, prim_csn)) { ++ if (! csn_primary(replica, csn, prim_csn)) { + /* not a primary csn, nothing to do */ + return rc; + } +- slapi_rwlock_wrlock (ruv->lock); ++ ++ /* first handle primary replica ++ * there can be two ruv elements affected ++ */ + prim_rid = csn_get_replicaid (csn); +- replica = ruvGetReplica (ruv, local_rid); +- rc = ruv_update_ruv_element(ruv, replica, csn, replica_purl, PR_TRUE); +- if ( rc || local_rid == prim_rid) goto done; +- replica = ruvGetReplica (ruv, prim_rid); +- rc = ruv_update_ruv_element(ruv, replica, csn, replica_purl, PR_FALSE); +-done: ++ slapi_rwlock_wrlock (ruv->lock); ++ if ( local_rid != prim_rid) { ++ repl_ruv = ruvGetReplica (ruv, prim_rid); ++ rc = ruv_update_ruv_element(ruv, repl_ruv, prim_csn, replica_purl, PR_FALSE); ++ } ++ repl_ruv = ruvGetReplica (ruv, local_rid); ++ rc = ruv_update_ruv_element(ruv, repl_ruv, prim_csn, replica_purl, PR_TRUE); + slapi_rwlock_unlock (ruv->lock); ++ if (rc) return rc; ++ ++ /* now handle secondary replicas */ ++ for (it=0; it<prim_csn->repl_cnt; it++) { ++ repl_it = prim_csn->sec_repl[it]; ++ replica_lock_replica(repl_it); ++ Object *ruv_obj = replica_get_ruv (repl_it); ++ RUV *ruv_it = object_get_data (ruv_obj); ++ slapi_rwlock_wrlock (ruv_it->lock); ++ repl_ruv = ruvGetReplica (ruv_it, replica_get_rid(repl_it)); ++ rc = ruv_update_ruv_element(ruv_it, repl_ruv, prim_csn, replica_purl, PR_TRUE); ++ slapi_rwlock_unlock (ruv_it->lock); ++ replica_unlock_replica(repl_it); ++ if (rc) break; ++ } + return rc; + } ++ + static int +-ruv_update_ruv_element (RUV *ruv, RUVElement *replica, const CSN *csn, const char *replica_purl, PRBool isLocal) ++ruv_update_ruv_element (RUV *ruv, RUVElement *replica, const CSNPL_CTX *prim_csn, const char *replica_purl, PRBool isLocal) + { + int rc=RUV_SUCCESS; + char csn_str[CSN_STRSIZE]; + CSN *max_csn; + CSN *first_csn = NULL; + +- if (replica == NULL) +- { ++ if (replica == NULL) { + /* we should have a ruv element at
this point because it would have + been added by ruv_add_inprogress function */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "ruv_update_ruv - " +- "Can't locate RUV element for replica %d\n", csn_get_replicaid (csn)); ++ "Can't locate RUV element for replica %d\n", csn_get_replicaid (prim_csn->prim_csn)); + goto done; + } + +- if (csnplCommitAll(replica->csnpl, csn) != 0) +- { +- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "ruv_update_ruv - Cannot commit csn %s\n", +- csn_as_string(csn, PR_FALSE, csn_str)); ++ if (csnplCommitAll(replica->csnpl, prim_csn) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "ruv_update_ruv - Cannot commit csn %s\n", ++ csn_as_string(prim_csn->prim_csn, PR_FALSE, csn_str)); + rc = RUV_CSNPL_ERROR; + goto done; +- } +- else +- { ++ } else { + if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "ruv_update_ruv - " +- "Successfully committed csn %s\n", csn_as_string(csn, PR_FALSE, csn_str)); ++ "Successfully committed csn %s\n", csn_as_string(prim_csn->prim_csn, PR_FALSE, csn_str)); + } + } + +- if ((max_csn = csnplRollUp(replica->csnpl, &first_csn)) != NULL) +- { +-#ifdef DEBUG +- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "ruv_update_ruv - Rolled up to csn %s\n", +- csn_as_string(max_csn, PR_FALSE, csn_str)); /* XXXggood remove debugging */ +-#endif ++ if ((max_csn = csnplRollUp(replica->csnpl, &first_csn)) != NULL) { ++ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "ruv_update_ruv - Rolled up to csn %s\n", ++ csn_as_string(max_csn, PR_FALSE, csn_str)); /* XXXggood remove debugging */ + /* replica object sets min csn for local replica */ +- if (!isLocal && replica->min_csn == NULL) { +- /* bug 559223 - it seems that, under huge stress, a server might pass +- * through this code when more than 1 change has already been sent and commited into +- * the pending lists... Therefore, as we are trying to set the min_csn ever +- * generated by this replica, we need to set the first_csn as the min csn in the +- * ruv */ +- set_min_csn_nolock(ruv, first_csn, replica_purl); +- } +- /* only update the max_csn in the RUV if it is greater than the existing one */ +- rc = set_max_csn_nolock_ext(ruv, max_csn, replica_purl, PR_TRUE /* must be greater */); +- /* It is possible that first_csn points to max_csn. +- We need to free it once */ +- if (max_csn != first_csn) { +- csn_free(&first_csn); +- } +- csn_free(&max_csn); +- } +- ++ if (!isLocal && replica->min_csn == NULL) { ++ /* bug 559223 - it seems that, under huge stress, a server might pass ++ * through this code when more than 1 change has already been sent and commited into ++ * the pending lists... Therefore, as we are trying to set the min_csn ever ++ * generated by this replica, we need to set the first_csn as the min csn in the ++ * ruv */ ++ set_min_csn_nolock(ruv, first_csn, replica_purl); ++ } ++ /* only update the max_csn in the RUV if it is greater than the existing one */ ++ rc = set_max_csn_nolock_ext(ruv, max_csn, replica_purl, PR_TRUE /* must be greater */); ++ /* It is possible that first_csn points to max_csn. 
++ We need to free it once */ ++ if (max_csn != first_csn) { ++ csn_free(&first_csn); ++ } ++ csn_free(&max_csn); ++ } + done: + + return rc; +diff --git a/ldap/servers/plugins/replication/repl5_ruv.h b/ldap/servers/plugins/replication/repl5_ruv.h +index c8960fd..f3cd38b 100644 +--- a/ldap/servers/plugins/replication/repl5_ruv.h ++++ b/ldap/servers/plugins/replication/repl5_ruv.h +@@ -108,9 +108,9 @@ int ruv_to_bervals(const RUV *ruv, struct berval ***bvals); + PRInt32 ruv_replica_count (const RUV *ruv); + char **ruv_get_referrals(const RUV *ruv); + void ruv_dump(const RUV *ruv, char *ruv_name, PRFileDesc *prFile); +-int ruv_add_csn_inprogress (RUV *ruv, const CSN *csn); +-int ruv_cancel_csn_inprogress (RUV *ruv, const CSN *csn, ReplicaId rid); +-int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, ReplicaId local_rid); ++int ruv_add_csn_inprogress (void *repl, RUV *ruv, const CSN *csn); ++int ruv_cancel_csn_inprogress (void *repl, RUV *ruv, const CSN *csn, ReplicaId rid); ++int ruv_update_ruv (RUV *ruv, const CSN *csn, const char *replica_purl, void *replica, ReplicaId local_rid); + int ruv_move_local_supplier_to_first(RUV *ruv, ReplicaId rid); + int ruv_get_first_id_and_purl(RUV *ruv, ReplicaId *rid, char **replica_purl ); + int ruv_local_contains_supplier(RUV *ruv, ReplicaId rid); +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index 0836d66..3910dbe 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -193,7 +193,7 @@ const CSN *csn_max(const CSN *csn1,const CSN *csn2); + a csn from the set.*/ + int csn_increment_subsequence (CSN *csn); + +-void csnplFreeCSN (void *arg); ++void csnplFreeCSNPL_CTX (void *arg); + /* + * csnset.c + */ +-- +2.9.4 + diff --git a/SOURCES/0058-Ticket-49336-SECURITY-Locked-account-provides-differ.patch b/SOURCES/0058-Ticket-49336-SECURITY-Locked-account-provides-differ.patch new file mode 100644 index 0000000..c110f0a --- /dev/null +++ b/SOURCES/0058-Ticket-49336-SECURITY-Locked-account-provides-differ.patch @@ -0,0 +1,201 @@ +From 95b39e29361812a62f2e038c89a88d717c82794e Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Mon, 31 Jul 2017 14:13:49 +1000 +Subject: [PATCH] Ticket 49336 - SECURITY: Locked account provides different + return code + +Bug Description: The directory server password lockout policy prevents binds + from operating once a threshold of failed passwords has been met. During + this lockout, if you bind with a successful password, a different error code + is returned. This means that an attacker has no ratelimit or penalty during + an account lock, and can continue to attempt passwords via bruteforce, using + the change in return code to ascertain a sucessful password auth. + +Fix Description: Move the account lock check *before* the password bind +check. If the account is locked, we do not mind disclosing this as the +attacker will either ignore it (and will not bind anyway), or they will +be forced to back off as the attack is not working preventing the +bruteforce. + +https://pagure.io/389-ds-base/issue/49336 + +Author: wibrown + +Review by: tbordaz (Thanks!) 
+ +Signed-off-by: Mark Reynolds +--- + .../suites/password/pwd_lockout_bypass_test.py | 55 ++++++++++++++++++++++ + ldap/servers/slapd/bind.c | 29 ++++++++---- + ldap/servers/slapd/pw_verify.c | 15 +++--- + 3 files changed, 84 insertions(+), 15 deletions(-) + create mode 100644 dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py + +diff --git a/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py b/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py +new file mode 100644 +index 0000000..e4add72 +--- /dev/null ++++ b/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py +@@ -0,0 +1,55 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2017 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import pytest ++from lib389.tasks import * ++from lib389.utils import * ++from lib389.topologies import topology_st ++from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES ++import ldap ++ ++# The irony of these names is not lost on me. ++GOOD_PASSWORD = 'password' ++BAD_PASSWORD = 'aontseunao' ++ ++logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++def test_lockout_bypass(topology_st): ++ inst = topology_st.standalone ++ ++ # Configure the lock policy ++ inst.config.set('passwordMaxFailure', '1') ++ inst.config.set('passwordLockoutDuration', '99999') ++ inst.config.set('passwordLockout', 'on') ++ ++ # Create the account ++ users = UserAccounts(inst, DEFAULT_SUFFIX) ++ testuser = users.create(properties=TEST_USER_PROPERTIES) ++ testuser.set('userPassword', GOOD_PASSWORD) ++ ++ conn = testuser.bind(GOOD_PASSWORD) ++ assert conn != None ++ conn.unbind_s() ++ ++ # Bind with bad creds twice ++ # This is the failure. ++ with pytest.raises(ldap.INVALID_CREDENTIALS): ++ conn = testuser.bind(BAD_PASSWORD) ++ # Now we should not be able to ATTEMPT the bind. It doesn't matter that ++ # we disclose that we have hit the rate limit here, what matters is that ++ # it exists. ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ conn = testuser.bind(BAD_PASSWORD) ++ ++ # now bind with good creds ++ # Should be error 19 still. ++ with pytest.raises(ldap.CONSTRAINT_VIOLATION): ++ conn = testuser.bind(GOOD_PASSWORD) ++ ++ +diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c +index 7f4414f..064ace1 100644 +--- a/ldap/servers/slapd/bind.c ++++ b/ldap/servers/slapd/bind.c +@@ -662,12 +662,14 @@ do_bind( Slapi_PBlock *pb ) + /* We could be serving multiple database backends. Select the appropriate one */ + /* pw_verify_be_dn will select the backend we need for us. */ + +- if (auto_bind) { +- /* We have no password material. We should just check who we are binding as. */ +- rc = pw_validate_be_dn(pb, &referral); +- } else { +- rc = pw_verify_be_dn(pb, &referral); +- } ++ /* ++ * WARNING: We have to validate *all* other conditions *first* before ++ * we attempt the bind! ++ * ++ * this is because ldbm_bind.c will SEND THE FAILURE. ++ */ ++ ++ rc = pw_validate_be_dn(pb, &referral); + + if (rc == SLAPI_BIND_NO_BACKEND) { + send_nobackend_ldap_result( pb ); +@@ -736,8 +738,18 @@ do_bind( Slapi_PBlock *pb ) + myrc = 0; + } + if (!auto_bind) { +- /* +- * There could be a race that bind_target_entry was not added ++ /* ++ * Okay, we've made it here. FINALLY check if the entry really ++ * can bind or not. THIS IS THE PASSWORD CHECK. 
++ */ ++ rc = pw_verify_be_dn(pb, &referral); ++ if (rc != SLAPI_BIND_SUCCESS) { ++ /* Invalid pass - lets bail ... */ ++ goto bind_failed; ++ } ++ ++ /* ++ * There could be a race that bind_target_entry was not added + * when bind_target_entry was retrieved before be_bind, but it + * was in be_bind. Since be_bind returned SLAPI_BIND_SUCCESS, + * the entry is in the DS. So, we need to retrieve it once more. +@@ -786,6 +798,7 @@ do_bind( Slapi_PBlock *pb ) + } + } + } else { /* if auto_bind || rc == slapi_bind_success | slapi_bind_anonymous */ ++ bind_failed: + if (rc == LDAP_OPERATIONS_ERROR) { + send_ldap_result( pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Function not implemented", 0, NULL ); + goto free_and_return; +diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c +index 852b027..cb182ed 100644 +--- a/ldap/servers/slapd/pw_verify.c ++++ b/ldap/servers/slapd/pw_verify.c +@@ -55,7 +55,7 @@ pw_verify_root_dn(const char *dn, const Slapi_Value *cred) + int + pw_verify_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + { +- int rc = 0; ++ int rc = SLAPI_BIND_SUCCESS; + Slapi_Backend *be = NULL; + + if (slapi_mapping_tree_select(pb, &be, referral, NULL, 0) != LDAP_SUCCESS) { +@@ -109,14 +109,10 @@ pw_validate_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + slapi_pblock_get(pb, SLAPI_BIND_CREDENTIALS, &cred); + slapi_pblock_get(pb, SLAPI_BIND_METHOD, &method); + +- if (pb_sdn != NULL || cred != NULL) { ++ if (pb_sdn == NULL) { + return LDAP_OPERATIONS_ERROR; + } + +- if (*referral) { +- return SLAPI_BIND_REFERRAL; +- } +- + /* We need a slapi_sdn_isanon? */ + if (method == LDAP_AUTH_SIMPLE && cred->bv_len == 0) { + return SLAPI_BIND_ANONYMOUS; +@@ -130,7 +126,11 @@ pw_validate_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + if (slapi_mapping_tree_select(pb, &be, referral, NULL, 0) != LDAP_SUCCESS) { + return SLAPI_BIND_NO_BACKEND; + } +- slapi_be_Unlock(be); ++ ++ if (*referral) { ++ slapi_be_Unlock(be); ++ return SLAPI_BIND_REFERRAL; ++ } + + slapi_pblock_set(pb, SLAPI_BACKEND, be); + slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database); +@@ -138,6 +138,7 @@ pw_validate_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + set_db_default_result_handlers(pb); + + /* The backend associated with this identity is real. */ ++ slapi_be_Unlock(be); + + return SLAPI_BIND_SUCCESS; + } +-- +2.9.4 + diff --git a/SOURCES/0059-Ticket-49298-force-sync-on-shutdown.patch b/SOURCES/0059-Ticket-49298-force-sync-on-shutdown.patch new file mode 100644 index 0000000..4d900fb --- /dev/null +++ b/SOURCES/0059-Ticket-49298-force-sync-on-shutdown.patch @@ -0,0 +1,177 @@ +From ba30cc562f5ebd58955502a19edbf9720a45b655 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 8 Aug 2017 13:02:53 -0400 +Subject: [PATCH] Ticket 49298 - force sync() on shutdown + + Bug Description: During shutdown on xfs we would occasionally + see a broke dse.ldif (specifically, empty). This happens due to + a bug in xfs where the directory isn't synced on rename(). + + Fix Description: As we shutdown call sync() to force all our + writes to disk - dse.ldif, logs, db, all of it. 
+ + https://pagure.io/389-ds-base/issue/49298 +--- + ldap/servers/slapd/dse.c | 59 +++++++++++++++++++++++++++++------------------ + ldap/servers/slapd/main.c | 9 ++++---- + 2 files changed, 42 insertions(+), 26 deletions(-) + +diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c +index 5715c83..fa1aacc 100644 +--- a/ldap/servers/slapd/dse.c ++++ b/ldap/servers/slapd/dse.c +@@ -40,6 +40,8 @@ + #include "slap.h" + #include + ++#include /* provides fsync/close */ ++ + /* #define SLAPI_DSE_DEBUG */ /* define this to force trace log */ + /* messages to always be logged */ + +@@ -72,11 +74,11 @@ + struct dse_callback + { + int operation; +- int flags; +- Slapi_DN *base; +- int scope; +- char *filter; /* NULL means match all entries */ +- Slapi_Filter *slapifilter; /* NULL means match all entries */ ++ int flags; ++ Slapi_DN *base; ++ int scope; ++ char *filter; /* NULL means match all entries */ ++ Slapi_Filter *slapifilter; /* NULL means match all entries */ + int (*fn)(Slapi_PBlock *,Slapi_Entry *,Slapi_Entry *,int*,char*,void *); + void *fn_arg; + struct slapdplugin *plugin; +@@ -89,13 +91,14 @@ struct dse + char *dse_tmpfile; /* and written to when changes are made via LDAP */ + char *dse_fileback; /* contain the latest info, just before a new change */ + char *dse_filestartOK; /* contain the latest info with which the server has successfully started */ ++ char *dse_configdir; /* The location of config files - allows us to fsync the dir post rename */ + Avlnode *dse_tree; + struct dse_callback *dse_callback; + Slapi_RWLock *dse_rwlock; /* a read-write lock to protect the whole dse backend */ +- char **dse_filelist; /* these are additional read only files used to */ +- /* initialize the dse */ +- int dse_is_updateable; /* if non-zero, this DSE can be written to */ +- int dse_readonly_error_reported; /* used to ensure that read-only errors are logged only once */ ++ char **dse_filelist; /* these are additional read only files used to */ ++ /* initialize the dse */ ++ int dse_is_updateable; /* if non-zero, this DSE can be written to */ ++ int dse_readonly_error_reported; /* used to ensure that read-only errors are logged only once */ + }; + + struct dse_node +@@ -361,37 +364,39 @@ dse_new( char *filename, char *tmpfilename, char *backfilename, char *startokfil + if (!strstr(filename, realconfigdir)) + { + pdse->dse_filename = slapi_ch_smprintf("%s/%s", realconfigdir, filename ); +- } +- else ++ } else { + pdse->dse_filename = slapi_ch_strdup(filename); ++ } + + if (!strstr(tmpfilename, realconfigdir)) { + pdse->dse_tmpfile = slapi_ch_smprintf("%s/%s", realconfigdir, tmpfilename ); +- } +- else ++ } else { + pdse->dse_tmpfile = slapi_ch_strdup(tmpfilename); ++ } ++ ++ pdse->dse_configdir = slapi_ch_strdup(realconfigdir); + + if ( backfilename != NULL ) + { + if (!strstr(backfilename, realconfigdir)) { + pdse->dse_fileback = slapi_ch_smprintf("%s/%s", realconfigdir, backfilename ); +- } +- else ++ } else { + pdse->dse_fileback = slapi_ch_strdup(backfilename); +- } +- else ++ } ++ } else { + pdse->dse_fileback = NULL; ++ } + + if ( startokfilename != NULL ) + { + if (!strstr(startokfilename, realconfigdir)) { + pdse->dse_filestartOK = slapi_ch_smprintf("%s/%s", realconfigdir, startokfilename ); +- } +- else ++ } else { + pdse->dse_filestartOK = slapi_ch_strdup(startokfilename); +- } +- else ++ } ++ } else { + pdse->dse_filestartOK = NULL; ++ } + + pdse->dse_tree= NULL; + pdse->dse_callback= NULL; +@@ -440,6 +445,7 @@ dse_destroy(struct dse *pdse) + slapi_ch_free((void 
**)&(pdse->dse_tmpfile)); + slapi_ch_free((void **)&(pdse->dse_fileback)); + slapi_ch_free((void **)&(pdse->dse_filestartOK)); ++ slapi_ch_free((void **)&(pdse->dse_configdir)); + dse_callback_deletelist(&pdse->dse_callback); + charray_free(pdse->dse_filelist); + nentries = avl_free(pdse->dse_tree, dse_internal_delete_entry); +@@ -991,8 +997,9 @@ dse_write_file_nolock(struct dse* pdse) + FPWrapper fpw; + int rc = 0; + +- if (dont_ever_write_dse_files) ++ if (dont_ever_write_dse_files) { + return rc; ++ } + + fpw.fpw_rc = 0; + fpw.fpw_prfd = NULL; +@@ -1042,6 +1049,14 @@ dse_write_file_nolock(struct dse* pdse) + pdse->dse_tmpfile, pdse->dse_filename, + rc, slapd_system_strerror( rc )); + } ++ /* ++ * We have now written to the tmp location, and renamed it ++ * we need to open and fsync the dir to make the rename stick. ++ */ ++ int fp_configdir = open(pdse->dse_configdir, O_PATH | O_DIRECTORY); ++ fsync(fp_configdir); ++ close(fp_configdir); ++ + } + } + if (fpw.fpw_prfd) +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index ba1f5e8..3351464 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -1154,11 +1154,12 @@ cleanup: + ndn_cache_destroy(); + NSS_Shutdown(); + PR_Cleanup(); +-#if defined( hpux ) +- exit( return_value ); +-#else ++ /* ++ * Server has stopped, lets force everything to disk: logs ++ * db, dse.ldif, all of it. ++ */ ++ sync(); + return return_value; +-#endif + } + + +-- +2.9.4 + diff --git a/SOURCES/0060-Ticket-49334-fix-backup-restore-if-changelog-exists.patch b/SOURCES/0060-Ticket-49334-fix-backup-restore-if-changelog-exists.patch new file mode 100644 index 0000000..25d4010 --- /dev/null +++ b/SOURCES/0060-Ticket-49334-fix-backup-restore-if-changelog-exists.patch @@ -0,0 +1,37 @@ +From c903f66194f04e97fc684f5a9654cedb27530931 Mon Sep 17 00:00:00 2001 +From: Ludwig Krispenz +Date: Mon, 31 Jul 2017 10:51:08 +0200 +Subject: [PATCH 1/3] Ticket 49334 - fix backup restore if changelog exists + +The corrcect flag to copy a directory in backup/restore must be passed for the changelog directory + +Reviewed by: William, thanks +--- + ldap/servers/slapd/back-ldbm/dblayer.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c +index ff97aa4..3a97f2f 100644 +--- a/ldap/servers/slapd/back-ldbm/dblayer.c ++++ b/ldap/servers/slapd/back-ldbm/dblayer.c +@@ -6143,7 +6143,7 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task) + return_value = dblayer_copy_directory(li, task, changelogdir, + changelog_destdir, + 0 /* backup */, +- &cnt, 1, 0, 0); ++ &cnt, 0, 0, 1); + if (return_value) { + slapi_log_err(SLAPI_LOG_ERR, + "dblayer_backup", "Error in copying directory " +@@ -6823,7 +6823,7 @@ int dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task, char * + *cldirname = '\0'; + return_value = dblayer_copy_directory(li, task, filename1, + changelogdir, 1 /* restore */, +- &cnt, 1, 0 ,0); ++ &cnt, 0, 0 ,1); + *cldirname = '/'; + if (return_value) { + slapi_log_err(SLAPI_LOG_ERR, +-- +2.9.4 + diff --git a/SOURCES/0061-Ticket-49356-mapping-tree-crash-can-occur-during-tot.patch b/SOURCES/0061-Ticket-49356-mapping-tree-crash-can-occur-during-tot.patch new file mode 100644 index 0000000..23540e5 --- /dev/null +++ b/SOURCES/0061-Ticket-49356-mapping-tree-crash-can-occur-during-tot.patch @@ -0,0 +1,502 @@ +From b0954a5df7841330732a5ab532c528a68cf380cf Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Fri, 18 Aug 2017 
13:00:46 +1000 +Subject: [PATCH] Ticket 49356 - mapping tree crash can occur during tot init + +Bug Description: Two faults were found in the handling of the mapping +tree of 389 directory server. The first fault was that the tree-free +check was not performed atomically and may cause an incorrect operations +error to be returned. The second was that during a total init the referral +would not lock the be, but the pw_verify code assumed a be was locked. +This caused a segfault. + +Fix Description: Fix the freed check to use atomics. Fix the pw_verify +to assert be is NULL (which is correct, there is no backend). + +https://pagure.io/389-ds-base/issue/49356 + +Author: wibrown + +Review by: mreynolds (THanks!) +--- + .../mapping_tree/referral_during_tot_init.py | 57 ++++++++ + ldap/servers/slapd/fedse.c | 10 ++ + ldap/servers/slapd/main.c | 10 -- + ldap/servers/slapd/mapping_tree.c | 150 +++++++++++---------- + ldap/servers/slapd/pw_verify.c | 8 +- + 5 files changed, 150 insertions(+), 85 deletions(-) + create mode 100644 dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init.py + +diff --git a/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init.py b/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init.py +new file mode 100644 +index 0000000..e5aee7d +--- /dev/null ++++ b/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init.py +@@ -0,0 +1,57 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2017 Red Hat, Inc. ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++# ++import ldap ++import pytest ++from lib389.topologies import topology_m2 ++from lib389._constants import (DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2, TASK_WAIT) ++ ++from lib389.idm.user import (TEST_USER_PROPERTIES, UserAccounts) ++ ++def test_referral_during_tot(topology_m2): ++ ++ master1 = topology_m2.ms["master1"] ++ master2 = topology_m2.ms["master2"] ++ ++ # Create a bunch of entries on master1 ++ ldif_dir = master1.get_ldif_dir() ++ import_ldif = ldif_dir + '/ref_during_tot_import.ldif' ++ master1.buildLDIF(10000, import_ldif) ++ ++ master1.stop() ++ try: ++ master1.ldif2db(bename=None, excludeSuffixes=None, encrypt=False, suffixes=[DEFAULT_SUFFIX], import_file=import_ldif) ++ except: ++ pass ++ # master1.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif, args={TASK_WAIT: True}) ++ master1.start() ++ users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou=Accounting') ++ ++ u = users.create(properties=TEST_USER_PROPERTIES) ++ u.set('userPassword', 'password') ++ ++ binddn = u.dn ++ bindpw = 'password' ++ ++ # Now export them to master2 ++ master1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) ++ ++ # While that's happening try to bind as a user to master 2 ++ # This should trigger the referral code. ++ for i in range(0, 100): ++ conn = ldap.initialize(master2.toLDAPURL()) ++ conn.set_option(ldap.OPT_REFERRALS, False) ++ try: ++ conn.simple_bind_s(binddn, bindpw) ++ conn.unbind_s() ++ except ldap.REFERRAL: ++ pass ++ ++ # Done. ++ ++ +diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c +index 13a3c74..c2a862b 100644 +--- a/ldap/servers/slapd/fedse.c ++++ b/ldap/servers/slapd/fedse.c +@@ -1853,6 +1853,16 @@ setup_internal_backends(char *configdir) + be_addsuffix(be,&monitor); + be_addsuffix(be,&config); + ++ /* ++ * Now that the be's are in place, we can ++ * setup the mapping tree. 
++ */ ++ ++ if (mapping_tree_init()) { ++ slapi_log_err(SLAPI_LOG_EMERG, "setup_internal_backends", "Failed to init mapping tree\n"); ++ exit(1); ++ } ++ + add_internal_entries(); + + add_easter_egg_entry(); +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index 552d54d..1d9afce 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -1034,16 +1034,6 @@ main( int argc, char **argv) + + ps_init_psearch_system(); /* must come before plugin_startall() */ + +- /* Initailize the mapping tree */ +- +- if (mapping_tree_init()) +- { +- slapi_log_err(SLAPI_LOG_EMERG, "main", "Failed to init mapping tree\n"); +- return_value = 1; +- goto cleanup; +- } +- +- + /* initialize UniqueID generator - must be done once backends are started + and event queue is initialized but before plugins are started */ + /* Note: This DN is no need to be normalized. */ +diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c +index 1b8d2d9..dfb6584 100644 +--- a/ldap/servers/slapd/mapping_tree.c ++++ b/ldap/servers/slapd/mapping_tree.c +@@ -88,13 +88,13 @@ struct mt_node + * release backend lock + * + */ +-static Slapi_RWLock *myLock; /* global lock on the mapping tree structures */ ++static Slapi_RWLock *myLock = NULL; /* global lock on the mapping tree structures */ + + + static mapping_tree_node *mapping_tree_root = NULL; +-static int mapping_tree_inited = 0; +-static int mapping_tree_freed = 0; +-static int extension_type = -1; /* type returned from the factory */ ++static int32_t mapping_tree_inited = 0; ++static int32_t mapping_tree_freed = 0; ++static int extension_type = -1; /* type returned from the factory */ + + /* The different states a mapping tree node can be in. */ + #define MTN_DISABLED 0 /* The server acts like the node isn't there. */ +@@ -1659,22 +1659,24 @@ add_internal_mapping_tree_node(const char *subtree, Slapi_Backend *be, mapping_t + { + Slapi_DN *dn; + mapping_tree_node *node; +- backend ** be_list = (backend **) slapi_ch_malloc(sizeof(backend *)); ++ backend **be_list = (backend **)slapi_ch_malloc(sizeof(backend *)); ++ int *be_states = (int *)slapi_ch_malloc(sizeof(int)); + + be_list[0] = be; ++ be_states[0] = SLAPI_BE_STATE_ON; + + dn = slapi_sdn_new_dn_byval(subtree); +- node= mapping_tree_node_new( +- dn, +- be_list, +- NULL, /* backend_name */ +- NULL, +- 1, /* number of backends at this node */ +- 1, /* size of backend list structure */ +- NULL, /* referral */ +- parent, +- MTN_BACKEND, +- 1, /* The config node is a private node. ++ node = mapping_tree_node_new( ++ dn, ++ be_list, ++ NULL, /* backend_name */ ++ be_states, /* be state */ ++ 1, /* number of backends at this node */ ++ 1, /* size of backend list structure */ ++ NULL, /* referral */ ++ parent, ++ MTN_BACKEND, ++ 1, /* The config node is a private node. + * People can't see or change it. 
*/ + NULL, NULL, NULL, 0); /* no distribution */ + return node; +@@ -1722,17 +1724,20 @@ mapping_tree_init() + + /* we call this function from a single thread, so it should be ok */ + +- if(mapping_tree_freed){ +- /* shutdown has been detected */ +- return 0; +- } +- +- if (mapping_tree_inited) ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { ++ /* shutdown has been detected */ + return 0; ++ } + +- /* ONREPL - I have moved this up because otherwise we can endup calling this ++ /* ONREPL - I have moved this up because otherwise we can endup calling this + * function recursively */ ++ if (myLock != NULL) { ++ return 0; ++ } ++ myLock = slapi_new_rwlock(); ++ slapi_rwlock_wrlock(myLock); + ++ /* Should be fenced by the rwlock. */ + mapping_tree_inited = 1; + + slapi_register_supported_control(MTN_CONTROL_USE_ONE_BACKEND_OID, +@@ -1740,10 +1745,8 @@ mapping_tree_init() + slapi_register_supported_control(MTN_CONTROL_USE_ONE_BACKEND_EXT_OID, + SLAPI_OPERATION_SEARCH); + +- myLock = slapi_new_rwlock(); +- +- be= slapi_be_select_by_instance_name(DSE_BACKEND); +- mapping_tree_root= add_internal_mapping_tree_node("", be, NULL); ++ be = slapi_be_select_by_instance_name(DSE_BACKEND); ++ mapping_tree_root = add_internal_mapping_tree_node("", be, NULL); + + /* We also need to add the config and schema backends to the mapping tree. + * They are special in that users will not know about it's node in the +@@ -1757,17 +1760,23 @@ mapping_tree_init() + node= add_internal_mapping_tree_node("cn=schema", be, mapping_tree_root); + mapping_tree_node_add_child(mapping_tree_root, node); + +- /* ++ slapi_rwlock_unlock(myLock); ++ ++ /* + * Now we need to look under cn=mapping tree, cn=config to find the rest + * of the mapping tree entries. + * Builds the mapping tree from entries in the DIT. This function just + * calls mapping_tree_node_get_children with the special case for the + * root node. 
+ */ +- if (mapping_tree_node_get_children(mapping_tree_root, 1)) ++ ++ if (mapping_tree_node_get_children(mapping_tree_root, 1)) { + return -1; ++ } + ++ slapi_rwlock_wrlock(myLock); + mtn_create_extension(mapping_tree_root); ++ slapi_rwlock_unlock(myLock); + + /* setup the dse callback functions for the ldbm instance config entry */ + { +@@ -1840,8 +1849,8 @@ mapping_tree_free () + */ + slapi_unregister_backend_state_change_all(); + /* recursively free tree nodes */ +- mtn_free_node (&mapping_tree_root); +- mapping_tree_freed = 1; ++ mtn_free_node(&mapping_tree_root); ++ __atomic_store_4(&mapping_tree_freed, 1, __ATOMIC_RELAXED); + } + + /* This function returns the first node to parse when a search is done +@@ -2083,14 +2092,12 @@ int slapi_dn_write_needs_referral(Slapi_DN *target_sdn, Slapi_Entry **referral) + mapping_tree_node *target_node = NULL; + int ret = 0; + +- if(mapping_tree_freed){ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shutdown detected */ + goto done; + } + +- if(!mapping_tree_inited) { +- mapping_tree_init(); +- } ++ PR_ASSERT(mapping_tree_inited == 1); + + if (target_sdn) { + mtn_lock(); +@@ -2157,8 +2164,8 @@ int slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry + int fixup = 0; + + +- if(mapping_tree_freed){ +- /* shutdown detected */ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { ++ /* shutdown detected */ + return LDAP_OPERATIONS_ERROR; + } + +@@ -2175,9 +2182,7 @@ int slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry + target_sdn = operation_get_target_spec (op); + fixup = operation_is_flag_set(op, OP_FLAG_TOMBSTONE_FIXUP); + +- if(!mapping_tree_inited) { +- mapping_tree_init(); +- } ++ PR_ASSERT(mapping_tree_inited == 1); + + be[0] = NULL; + if (referral) { +@@ -2188,8 +2193,9 @@ int slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry + + /* Get the mapping tree node that is the best match for the target dn. 
*/ + target_node = slapi_get_mapping_tree_node_by_dn(target_sdn); +- if (target_node == NULL) ++ if (target_node == NULL) { + target_node = mapping_tree_root; ++ } + + /* The processing of the base scope root DSE search and all other LDAP operations on "" + * will be transferred to the internal DSE backend +@@ -2266,8 +2272,8 @@ int slapi_mapping_tree_select_all(Slapi_PBlock *pb, Slapi_Backend **be_list, + Slapi_DN *sdn = NULL; + int flag_partial_result = 0; + int op_type; +- +- if(mapping_tree_freed){ ++ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + return LDAP_OPERATIONS_ERROR; + } + +@@ -2287,9 +2293,7 @@ int slapi_mapping_tree_select_all(Slapi_PBlock *pb, Slapi_Backend **be_list, + slapi_pblock_get(pb, SLAPI_OPERATION_TYPE, &op_type); + slapi_pblock_get(pb, SLAPI_SEARCH_SCOPE, &scope); + +- if(!mapping_tree_inited){ +- mapping_tree_init(); +- } ++ PR_ASSERT(mapping_tree_inited == 1); + + mtn_lock(); + +@@ -2448,8 +2452,8 @@ int slapi_mapping_tree_select_and_check(Slapi_PBlock *pb,char *newdn, Slapi_Back + Slapi_Operation *op; + int ret; + int need_unlock = 0; +- +- if(mapping_tree_freed){ ++ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + return LDAP_OPERATIONS_ERROR; + } + +@@ -2635,7 +2639,7 @@ static int mtn_get_be(mapping_tree_node *target_node, Slapi_PBlock *pb, + int flag_stop = 0; + struct slapi_componentid *cid = NULL; + +- if(mapping_tree_freed){ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shut down detected */ + return LDAP_OPERATIONS_ERROR; + } +@@ -2719,21 +2723,22 @@ static int mtn_get_be(mapping_tree_node *target_node, Slapi_PBlock *pb, + } else { + /* This MTN has not been linked to its backend + * instance yet. */ +- target_node->mtn_be[*index] = +- slapi_be_select_by_instance_name( +- target_node->mtn_backend_names[*index]); +- *be = target_node->mtn_be[*index]; +- if(*be==NULL) { +- slapi_log_err(SLAPI_LOG_BACKLDBM, "mtn_get_be", +- "Warning: Mapping tree node entry for %s " +- "point to an unknown backend : %s\n", +- slapi_sdn_get_dn(target_node->mtn_subtree), +- target_node->mtn_backend_names[*index]); +- /* Well there's still not backend instance for +- * this MTN, so let's have the default backend +- * deal with this. +- */ +- *be = defbackend_get_backend(); ++ /* WARNING: internal memory dse backends don't provide NAMES */ ++ if (target_node->mtn_backend_names != NULL) { ++ target_node->mtn_be[*index] = slapi_be_select_by_instance_name(target_node->mtn_backend_names[*index]); ++ *be = target_node->mtn_be[*index]; ++ if (*be == NULL) { ++ slapi_log_err(SLAPI_LOG_BACKLDBM, "mtn_get_be", ++ "Warning: Mapping tree node entry for %s " ++ "point to an unknown backend : %s\n", ++ slapi_sdn_get_dn(target_node->mtn_subtree), ++ target_node->mtn_backend_names[*index]); ++ /* Well there's still not backend instance for ++ * this MTN, so let's have the default backend ++ * deal with this. 
++ */ ++ *be = defbackend_get_backend(); ++ } + } + } + } +@@ -2745,10 +2750,11 @@ static int mtn_get_be(mapping_tree_node *target_node, Slapi_PBlock *pb, + result = LDAP_OPERATIONS_ERROR; + *be = defbackend_get_backend(); + } +- if (flag_stop) ++ if (flag_stop) { + *index = SLAPI_BE_NO_BACKEND; +- else ++ } else { + (*index)++; ++ } + } + } + } else { +@@ -2822,7 +2828,7 @@ static mapping_tree_node *best_matching_child(mapping_tree_node *parent, + mapping_tree_node *highest_match_node = NULL; + mapping_tree_node *current; + +- if(mapping_tree_freed){ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shutdown detected */ + return NULL; + } +@@ -2849,7 +2855,7 @@ mtn_get_mapping_tree_node_by_entry(mapping_tree_node* node, const Slapi_DN *dn) + { + mapping_tree_node *found_node = NULL; + +- if(mapping_tree_freed){ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shutdown detected */ + return NULL; + } +@@ -2895,7 +2901,7 @@ slapi_get_mapping_tree_node_by_dn(const Slapi_DN *dn) + mapping_tree_node *current_best_match = mapping_tree_root; + mapping_tree_node *next_best_match = mapping_tree_root; + +- if(mapping_tree_freed){ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shutdown detected */ + return NULL; + } +@@ -2929,7 +2935,7 @@ get_mapping_tree_node_by_name(mapping_tree_node * node, char * be_name) + int i; + mapping_tree_node *found_node = NULL; + +- if(mapping_tree_freed){ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shutdown detected */ + return NULL; + } +@@ -2980,7 +2986,7 @@ slapi_get_mapping_tree_node_configdn (const Slapi_DN *root) + { + char *dn = NULL; + +- if(mapping_tree_freed){ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shutdown detected */ + return NULL; + } +@@ -3007,7 +3013,7 @@ slapi_get_mapping_tree_node_configsdn (const Slapi_DN *root) + char *dn = NULL; + Slapi_DN *sdn = NULL; + +- if(mapping_tree_freed){ ++ if (__atomic_load_4(&mapping_tree_freed, __ATOMIC_RELAXED)) { + /* shutdown detected */ + return NULL; + } +diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c +index cb182ed..1f0c18a 100644 +--- a/ldap/servers/slapd/pw_verify.c ++++ b/ldap/servers/slapd/pw_verify.c +@@ -58,12 +58,14 @@ pw_verify_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + int rc = SLAPI_BIND_SUCCESS; + Slapi_Backend *be = NULL; + +- if (slapi_mapping_tree_select(pb, &be, referral, NULL, 0) != LDAP_SUCCESS) { ++ int mt_result = slapi_mapping_tree_select(pb, &be, referral, NULL, 0); ++ if (mt_result != LDAP_SUCCESS) { + return SLAPI_BIND_NO_BACKEND; + } + + if (*referral) { +- slapi_be_Unlock(be); ++ /* If we have a referral, this is NULL */ ++ PR_ASSERT(be == NULL); + return SLAPI_BIND_REFERRAL; + } + +@@ -128,7 +130,7 @@ pw_validate_be_dn(Slapi_PBlock *pb, Slapi_Entry **referral) + } + + if (*referral) { +- slapi_be_Unlock(be); ++ PR_ASSERT(be == NULL); + return SLAPI_BIND_REFERRAL; + } + +-- +2.9.4 + diff --git a/SOURCES/0062-Ticket-49330-Improve-ndn-cache-performance-1.3.6.patch b/SOURCES/0062-Ticket-49330-Improve-ndn-cache-performance-1.3.6.patch new file mode 100644 index 0000000..9b21655 --- /dev/null +++ b/SOURCES/0062-Ticket-49330-Improve-ndn-cache-performance-1.3.6.patch @@ -0,0 +1,1041 @@ +From 2975f68e139169ee2d2259cfbbb2a15b54dc3724 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Wed, 26 Jul 2017 11:01:49 +1000 +Subject: [PATCH] Ticket 49330 - Improve ndn cache performance 1.3.6 + +Backport from 1.3.7 master. 
+ +Bug Description: Normalised DN's are a costly process to update +and maintain. As a result, a normalised DN cache was created. Yet +it was never able to perform well. In some datasets with large sets +of dn attr types, the NDN cache actively hurt performance. + +The issue stemmed from 3 major issues in the design of the NDN +cache. + +First, it is a global cache which means it exists behind +a rwlock. This causes delay as threads wait behind the lock +to access or update the cache (especially on a miss). + +Second, the cache was limited to 4073 buckets. Despite the fact +that a prime number on a hash causes a skew in distribution, +this was in an NSPR hash - which does not grow dynamically, +rather devolving a bucket to a linked list. AS a result, once you +passed ~3000 your lookup performance would degrade rapidly to O(1) + +Finally, the cache's lru policy did not evict least used - it +evicted the 10,000 least used. So if you tuned your cache +to match the NSPR map, every inclusion that would trigger a +delete of old values would effectively empty your cache. ON bigger +set sizes, this has to walk the map (at O(1)) to clean 10,000 +elements. + +Premature optimisation strikes again .... + +Fix Description: Throw it out. Rewrite. We now use a hash +algo that has proper distribution across a set. The hash +sizes slots to a power of two. Finally, each thread has +a private cache rather than shared which completely eliminates +a lock contention and even NUMA performance issues. + +Interestingly this fix should have improvements for DB +imports, memberof and refint performance and more. + +Some testing has shown in simple search workloads a 10% +improvement in throughput, and on complex searches a 47x +improvement. + +https://pagure.io/389-ds-base/issue/49330 + +Author: wibrown + +Review by: lkrispen, tbordaz +--- + ldap/servers/slapd/back-ldbm/monitor.c | 11 +- + ldap/servers/slapd/dn.c | 809 +++++++++++++++++++++------------ + ldap/servers/slapd/slapi-private.h | 2 +- + 3 files changed, 527 insertions(+), 295 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/monitor.c b/ldap/servers/slapd/back-ldbm/monitor.c +index c58b069..aa7d709 100644 +--- a/ldap/servers/slapd/back-ldbm/monitor.c ++++ b/ldap/servers/slapd/back-ldbm/monitor.c +@@ -43,6 +43,9 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, + PRUint64 hits, tries; + long nentries, maxentries, count; + size_t size, maxsize; ++ size_t thread_size; ++ size_t evicts; ++ size_t slots; + /* NPCTE fix for bugid 544365, esc 0. <04-Jul-2001> */ + struct stat astat; + /* end of NPCTE fix for bugid 544365 */ +@@ -118,7 +121,7 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, + } + /* normalized dn cache stats */ + if(ndn_cache_started()){ +- ndn_cache_get_stats(&hits, &tries, &size, &maxsize, &count); ++ ndn_cache_get_stats(&hits, &tries, &size, &maxsize, &thread_size, &evicts, &slots, &count); + sprintf(buf, "%" PRIu64, tries); + MSET("normalizedDnCacheTries"); + sprintf(buf, "%" PRIu64, hits); +@@ -127,6 +130,8 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, + MSET("normalizedDnCacheMisses"); + sprintf(buf, "%lu", (unsigned long)(100.0*(double)hits / (double)(tries > 0 ? 
tries : 1))); + MSET("normalizedDnCacheHitRatio"); ++ sprintf(buf, "%"PRIu64, evicts); ++ MSET("NormalizedDnCacheEvictions"); + sprintf(buf, "%lu", (long unsigned int)size); + MSET("currentNormalizedDnCacheSize"); + if(maxsize == 0){ +@@ -135,6 +140,10 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, + sprintf(buf, "%lu", (long unsigned int)maxsize); + } + MSET("maxNormalizedDnCacheSize"); ++ sprintf(buf, "%"PRIu64, thread_size); ++ MSET("NormalizedDnCacheThreadSize"); ++ sprintf(buf, "%"PRIu64, slots); ++ MSET("NormalizedDnCacheThreadSlots"); + sprintf(buf, "%ld", count); + MSET("currentNormalizedDnCacheCount"); + } +diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c +index fa3909f..9cb3e7b 100644 +--- a/ldap/servers/slapd/dn.c ++++ b/ldap/servers/slapd/dn.c +@@ -22,6 +22,24 @@ + #include "slap.h" + #include + ++#include <inttypes.h> ++#include <stddef.h> /* for size_t */ ++ ++#if defined(HAVE_SYS_ENDIAN_H) ++#include <sys/endian.h> ++#elif defined(HAVE_ENDIAN_H) ++#include <endian.h> ++#else ++#error platform header for endian detection not found. ++#endif ++ ++/* See: http://sourceforge.net/p/predef/wiki/Endianness/ */ ++#if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN ++#define _le64toh(x) ((uint64_t)(x)) ++#else ++#define _le64toh(x) le64toh(x) ++#endif ++ + #undef SDN_DEBUG + + static void add_rdn_av( char *avstart, char *avend, int *rdn_av_countp, +@@ -33,52 +51,89 @@ static void rdn_av_swap( struct berval *av1, struct berval *av2, int escape ); + static int does_cn_uses_dn_syntax_in_dns(char *type, char *dn); + + /* normalized dn cache related definitions*/ +-struct +-ndn_cache_lru +-{ +- struct ndn_cache_lru *prev; +- struct ndn_cache_lru *next; +- char *key; +-}; +- +-struct +-ndn_cache_ctx +-{ +- struct ndn_cache_lru *head; +- struct ndn_cache_lru *tail; ++struct ndn_cache_stats { + Slapi_Counter *cache_hits; + Slapi_Counter *cache_tries; +- Slapi_Counter *cache_misses; +- size_t cache_size; +- size_t cache_max_size; +- long cache_count; ++ Slapi_Counter *cache_count; ++ Slapi_Counter *cache_size; ++ Slapi_Counter *cache_evicts; ++ size_t max_size; ++ size_t thread_max_size; ++ size_t slots; + }; + +-struct +-ndn_hash_val +-{ ++struct ndn_cache_value { ++ size_t size; ++ size_t slot; ++ char *dn; + char *ndn; +- size_t len; +- int size; +- struct ndn_cache_lru *lru_node; /* used to speed up lru shuffling */ ++ struct ndn_cache_value *next; ++ struct ndn_cache_value *prev; ++ struct ndn_cache_value *child; ++}; ++ ++/* ++ * This uses a similar alloc trick to IDList to keep ++ * The amount of derefs small. ++ */ ++struct ndn_cache { ++ /* ++ * We keep per thread stats and flush them occasionally ++ */ ++ size_t max_size; ++ /* Need to track this because we need to provide diffs to counter */ ++ size_t last_count; ++ size_t count; ++ /* Number of ops */ ++ size_t tries; ++ /* hit vs miss.
in theroy miss == tries - hits.*/ ++ size_t hits; ++ /* How many values we kicked out */ ++ size_t evicts; ++ /* Need to track this because we need to provide diffs to counter */ ++ size_t last_size; ++ size_t size; ++ ++ size_t slots; ++ /* ++ * This is used by siphash to prevent hash bugket attacks ++ */ ++ char key[16]; ++ ++ struct ndn_cache_value *head; ++ struct ndn_cache_value *tail; ++ struct ndn_cache_value *table[1]; + }; + +-#define NDN_FLUSH_COUNT 10000 /* number of DN's to remove when cache fills up */ +-#define NDN_MIN_COUNT 1000 /* the minimum number of DN's to keep in the cache */ +-#define NDN_CACHE_BUCKETS 2053 /* prime number */ ++/* ++ * This means we need 1 MB minimum per thread ++ * ++ */ ++#define NDN_CACHE_MINIMUM_CAPACITY 1048576 ++/* ++ * This helps us define the number of hashtable slots ++ * to create. We assume an average DN is 64 chars long ++ * This way we end up we a ht entry of: ++ * 8 bytes: from the table pointing to us. ++ * 8 bytes: next ptr ++ * 8 bytes: prev ptr ++ * 8 bytes + 64: dn ++ * 8 bytes + 64: ndn itself. ++ * This gives us 168 bytes. In theory this means ++ * 6241 entries, but we have to clamp this to a power of ++ * two, so we have 8192 slots. In reality, dns may be ++ * shorter *and* the dn may be the same as the ndn ++ * so we *may* store more ndns that this. Again, a good reason ++ * to round the ht size up! ++ */ ++#define NDN_ENTRY_AVG_SIZE 168 ++/* ++ * After how many operations do we sync our per-thread stats. ++ */ ++#define NDN_STAT_COMMIT_FREQUENCY 256 + +-static PLHashNumber ndn_hash_string(const void *key); + static int ndn_cache_lookup(char *dn, size_t dn_len, char **result, char **udn, int *rc); +-static void ndn_cache_update_lru(struct ndn_cache_lru **node); + static void ndn_cache_add(char *dn, size_t dn_len, char *ndn, size_t ndn_len); +-static void ndn_cache_delete(char *dn); +-static void ndn_cache_flush(void); +-static void ndn_cache_free(void); +-static int ndn_started = 0; +-static PRLock *lru_lock = NULL; +-static Slapi_RWLock *ndn_cache_lock = NULL; +-static struct ndn_cache_ctx *ndn_cache = NULL; +-static PLHashTable *ndn_cache_hashtable = NULL; + + #define ISBLANK(c) ((c) == ' ') + #define ISBLANKSTR(s) (((*(s)) == '2') && (*((s)+1) == '0')) +@@ -2768,166 +2823,408 @@ slapi_sdn_get_size(const Slapi_DN *sdn) + * + */ + ++/* ++ Copyright (c) 2013 Marek Majkowski ++ ++ Permission is hereby granted, free of charge, to any person obtaining a copy ++ of this software and associated documentation files (the "Software"), to deal ++ in the Software without restriction, including without limitation the rights ++ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ copies of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: ++ ++ The above copyright notice and this permission notice shall be included in ++ all copies or substantial portions of the Software. ++ ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++ THE SOFTWARE. 
++ ++ ++ Original location: ++ https://github.com/majek/csiphash/ ++ ++ Solution inspired by code from: ++ Samuel Neves (supercop/crypto_auth/siphash24/little) ++ djb (supercop/crypto_auth/siphash24/little2) ++ Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c) ++*/ ++ ++#define ROTATE(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b)))) ++ ++#define HALF_ROUND(a, b, c, d, s, t) \ ++ a += b; \ ++ c += d; \ ++ b = ROTATE(b, s) ^ a; \ ++ d = ROTATE(d, t) ^ c; \ ++ a = ROTATE(a, 32); ++ ++#define ROUND(v0, v1, v2, v3) \ ++ HALF_ROUND(v0, v1, v2, v3, 13, 16); \ ++ HALF_ROUND(v2, v1, v0, v3, 17, 21) ++ ++#define cROUND(v0, v1, v2, v3) \ ++ ROUND(v0, v1, v2, v3) ++ ++#define dROUND(v0, v1, v2, v3) \ ++ ROUND(v0, v1, v2, v3); \ ++ ROUND(v0, v1, v2, v3); \ ++ ROUND(v0, v1, v2, v3) ++ ++ ++static uint64_t ++sds_siphash13(const void *src, size_t src_sz, const char key[16]) ++{ ++ const uint64_t *_key = (uint64_t *)key; ++ uint64_t k0 = _le64toh(_key[0]); ++ uint64_t k1 = _le64toh(_key[1]); ++ uint64_t b = (uint64_t)src_sz << 56; ++ const uint64_t *in = (uint64_t *)src; ++ ++ uint64_t v0 = k0 ^ 0x736f6d6570736575ULL; ++ uint64_t v1 = k1 ^ 0x646f72616e646f6dULL; ++ uint64_t v2 = k0 ^ 0x6c7967656e657261ULL; ++ uint64_t v3 = k1 ^ 0x7465646279746573ULL; ++ ++ while (src_sz >= 8) { ++ uint64_t mi = _le64toh(*in); ++ in += 1; ++ src_sz -= 8; ++ v3 ^= mi; ++ // cround ++ cROUND(v0, v1, v2, v3); ++ v0 ^= mi; ++ } ++ ++ uint64_t t = 0; ++ uint8_t *pt = (uint8_t *)&t; ++ uint8_t *m = (uint8_t *)in; ++ ++ switch (src_sz) { ++ case 7: ++ pt[6] = m[6]; /* FALLTHRU */ ++ case 6: ++ pt[5] = m[5]; /* FALLTHRU */ ++ case 5: ++ pt[4] = m[4]; /* FALLTHRU */ ++ case 4: ++ *((uint32_t *)&pt[0]) = *((uint32_t *)&m[0]); ++ break; ++ case 3: ++ pt[2] = m[2]; /* FALLTHRU */ ++ case 2: ++ pt[1] = m[1]; /* FALLTHRU */ ++ case 1: ++ pt[0] = m[0]; /* FALLTHRU */ ++ } ++ b |= _le64toh(t); ++ ++ v3 ^= b; ++ // cround ++ cROUND(v0, v1, v2, v3); ++ v0 ^= b; ++ v2 ^= 0xff; ++ // dround ++ dROUND(v0, v1, v2, v3); ++ return (v0 ^ v1) ^ (v2 ^ v3); ++} ++ ++static pthread_key_t ndn_cache_key; ++static pthread_once_t ndn_cache_key_once = PTHREAD_ONCE_INIT; ++static struct ndn_cache_stats t_cache_stats = {0}; + /* +- * Hashing function using Bernstein's method ++ * WARNING: For some reason we try to use the NDN cache *before* ++ * we have a chance to configure it. As a result, we need to rely ++ * on a trick in the way we start, that we start in one thread ++ * so we can manipulate ints as though they were atomics, then ++ * we start in *one* thread, so it's set, then when threads ++ * fork the get barriers, so we can go from there. However we *CANNOT* ++ * change this at runtime without expensive atomics per op, so lets ++ * not bother until we improve libglobs to be COW. + */ +-static PLHashNumber +-ndn_hash_string(const void *key) +-{ +- PLHashNumber hash = 5381; +- unsigned char *x = (unsigned char *)key; +- int c; ++static int32_t ndn_enabled = 0; ++ ++static struct ndn_cache * ++ndn_thread_cache_create(size_t thread_max_size, size_t slots) { ++ size_t t_cache_size = sizeof(struct ndn_cache) + (slots * sizeof(struct ndn_cache_value *)); ++ struct ndn_cache *t_cache = (struct ndn_cache *)slapi_ch_calloc(1, t_cache_size); ++ ++ t_cache->max_size = thread_max_size; ++ t_cache->slots = slots; + +- while ((c = *x++)){ +- hash = ((hash << 5) + hash) ^ c; ++ return t_cache; ++} ++ ++static void ++ndn_thread_cache_commit_status(struct ndn_cache *t_cache) { ++ /* ++ * Every so often we commit these atomically. 
We do this infrequently ++ * to avoid the costly atomics. ++ */ ++ if (t_cache->tries % NDN_STAT_COMMIT_FREQUENCY == 0) { ++ /* We can just add tries and hits. */ ++ slapi_counter_add(t_cache_stats.cache_evicts, t_cache->evicts); ++ slapi_counter_add(t_cache_stats.cache_tries, t_cache->tries); ++ slapi_counter_add(t_cache_stats.cache_hits, t_cache->hits); ++ t_cache->hits = 0; ++ t_cache->tries = 0; ++ t_cache->evicts = 0; ++ /* Count and size need diff */ ++ int64_t diff = (t_cache->size - t_cache->last_size); ++ if (diff > 0) { ++ // We have more .... ++ slapi_counter_add(t_cache_stats.cache_size, (uint64_t)diff); ++ } else if (diff < 0) { ++ slapi_counter_subtract(t_cache_stats.cache_size, (uint64_t)llabs(diff)); ++ } ++ t_cache->last_size = t_cache->size; ++ ++ diff = (t_cache->count - t_cache->last_count); ++ if (diff > 0) { ++ // We have more .... ++ slapi_counter_add(t_cache_stats.cache_count, (uint64_t)diff); ++ } else if (diff < 0) { ++ slapi_counter_subtract(t_cache_stats.cache_count, (uint64_t)llabs(diff)); ++ } ++ t_cache->last_count = t_cache->count; ++ ++ } ++} ++ ++static void ++ndn_thread_cache_value_destroy(struct ndn_cache *t_cache, struct ndn_cache_value *v) { ++ /* Update stats */ ++ t_cache->size = t_cache->size - v->size; ++ t_cache->count--; ++ t_cache->evicts++; ++ ++ if (v == t_cache->head) { ++ t_cache->head = v->prev; ++ } ++ if (v == t_cache->tail) { ++ t_cache->tail = v->next; ++ } ++ ++ /* Cut the node out. */ ++ if (v->next != NULL) { ++ v->next->prev = v->prev; ++ } ++ if (v->prev != NULL) { ++ v->prev->next = v->next; ++ } ++ /* Set the pointer in the table to NULL */ ++ /* Now see if we were in a list */ ++ struct ndn_cache_value *slot_node = t_cache->table[v->slot]; ++ if (slot_node == v) { ++ t_cache->table[v->slot] = v->child; ++ } else { ++ struct ndn_cache_value *former_slot_node = NULL; ++ do { ++ former_slot_node = slot_node; ++ slot_node = slot_node->child; ++ } while(slot_node != v); ++ /* Okay, now slot_node is us, and former is our parent */ ++ former_slot_node->child = v->child; ++ } ++ ++ slapi_ch_free((void **)&(v->dn)); ++ slapi_ch_free((void **)&(v->ndn)); ++ slapi_ch_free((void **)&v); ++} ++ ++static void ++ndn_thread_cache_destroy(void *v_cache) { ++ struct ndn_cache *t_cache = (struct ndn_cache *)v_cache; ++ /* ++ * FREE ALL THE NODES!!! ++ */ ++ struct ndn_cache_value *node = t_cache->tail; ++ struct ndn_cache_value *next_node = NULL; ++ while (node) { ++ next_node = node->next; ++ ndn_thread_cache_value_destroy(t_cache, node); ++ node = next_node; ++ } ++ slapi_ch_free((void **)&t_cache); ++} ++ ++static void ++ndn_cache_key_init() { ++ if (pthread_key_create(&ndn_cache_key, ndn_thread_cache_destroy) != 0) { ++ /* Log a scary warning? */ ++ slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_init", "Failed to create pthread key, aborting.\n"); + } +- return hash; + } + + void + ndn_cache_init() + { +- if(!config_get_ndn_cache_enabled() || ndn_started){ ++ ndn_enabled = config_get_ndn_cache_enabled(); ++ if (ndn_enabled == 0) { ++ /* ++ * Don't configure the keys or anything, need a restart ++ * to enable. We'll just never use ndn cache in this ++ * run. 
++ */ + return; + } +- ndn_cache_hashtable = PL_NewHashTable( NDN_CACHE_BUCKETS, ndn_hash_string, PL_CompareStrings, PL_CompareValues, 0, 0); +- ndn_cache = (struct ndn_cache_ctx *)slapi_ch_malloc(sizeof(struct ndn_cache_ctx)); +- ndn_cache->cache_max_size = config_get_ndn_cache_size(); +- ndn_cache->cache_hits = slapi_counter_new(); +- ndn_cache->cache_tries = slapi_counter_new(); +- ndn_cache->cache_misses = slapi_counter_new(); +- ndn_cache->cache_count = 0; +- ndn_cache->cache_size = sizeof(struct ndn_cache_ctx) + sizeof(PLHashTable) + sizeof(PLHashTable); +- ndn_cache->head = NULL; +- ndn_cache->tail = NULL; +- ndn_started = 1; +- if ( NULL == ( lru_lock = PR_NewLock()) || NULL == ( ndn_cache_lock = slapi_new_rwlock())) { +- ndn_cache_destroy(); +- slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_init", "Failed to create locks. Disabling cache.\n" ); ++ ++ /* Create the pthread key */ ++ (void)pthread_once(&ndn_cache_key_once, ndn_cache_key_init); ++ ++ /* Create the global stats. */ ++ t_cache_stats.max_size = config_get_ndn_cache_size(); ++ t_cache_stats.cache_evicts = slapi_counter_new(); ++ t_cache_stats.cache_tries = slapi_counter_new(); ++ t_cache_stats.cache_hits = slapi_counter_new(); ++ t_cache_stats.cache_count = slapi_counter_new(); ++ t_cache_stats.cache_size = slapi_counter_new(); ++ /* Get thread numbers and calc the per thread size */ ++ int32_t maxthreads = (int32_t)config_get_threadnumber(); ++ size_t tentative_size = t_cache_stats.max_size / maxthreads; ++ if (tentative_size < NDN_CACHE_MINIMUM_CAPACITY) { ++ tentative_size = NDN_CACHE_MINIMUM_CAPACITY; ++ t_cache_stats.max_size = NDN_CACHE_MINIMUM_CAPACITY * maxthreads; ++ } ++ t_cache_stats.thread_max_size = tentative_size; ++ ++ /* ++ * Slots *must* be a power of two, even if the number of entries ++ * we store will be *less* than this. ++ */ ++ size_t possible_elements = tentative_size / NDN_ENTRY_AVG_SIZE; ++ /* ++ * So this is like 1048576 / 168, so we get 6241. Now we need to ++ * shift this to get the number of bits. ++ */ ++ size_t shifts = 0; ++ while (possible_elements > 0) { ++ shifts++; ++ possible_elements = possible_elements >> 1; + } ++ /* ++ * So now we can use this to make the slot count. ++ */ ++ t_cache_stats.slots = 1 << shifts; ++ /* Done? 
*/ ++ return; + } + + void + ndn_cache_destroy() + { +- if(!ndn_started){ ++ if (ndn_enabled == 0) { + return; + } +- if(lru_lock){ +- PR_DestroyLock(lru_lock); +- lru_lock = NULL; +- } +- if(ndn_cache_lock){ +- slapi_destroy_rwlock(ndn_cache_lock); +- ndn_cache_lock = NULL; +- } +- if(ndn_cache_hashtable){ +- ndn_cache_free(); +- PL_HashTableDestroy(ndn_cache_hashtable); +- ndn_cache_hashtable = NULL; +- } +- config_set_ndn_cache_enabled(CONFIG_NDN_CACHE, "off", NULL, 1 ); +- slapi_counter_destroy(&ndn_cache->cache_hits); +- slapi_counter_destroy(&ndn_cache->cache_tries); +- slapi_counter_destroy(&ndn_cache->cache_misses); +- slapi_ch_free((void **)&ndn_cache); +- +- ndn_started = 0; ++ slapi_counter_destroy(&(t_cache_stats.cache_tries)); ++ slapi_counter_destroy(&(t_cache_stats.cache_hits)); ++ slapi_counter_destroy(&(t_cache_stats.cache_count)); ++ slapi_counter_destroy(&(t_cache_stats.cache_size)); ++ slapi_counter_destroy(&(t_cache_stats.cache_evicts)); + } + + int + ndn_cache_started() + { +- return ndn_started; ++ return ndn_enabled; + } + + /* + * Look up this dn in the ndn cache + */ + static int +-ndn_cache_lookup(char *dn, size_t dn_len, char **result, char **udn, int *rc) ++ndn_cache_lookup(char *dn, size_t dn_len, char **ndn, char **udn, int *rc) + { +- struct ndn_hash_val *ndn_ht_val = NULL; +- char *ndn, *key; +- int rv = 0; +- +- if(NULL == udn){ +- return rv; ++ if (ndn_enabled == 0 || NULL == udn) { ++ return 0; + } + *udn = NULL; +- if(ndn_started == 0){ +- return rv; +- } +- if(dn_len == 0){ +- *result = dn; ++ ++ if (dn_len == 0) { ++ *ndn = dn; + *rc = 0; + return 1; + } +- slapi_counter_increment(ndn_cache->cache_tries); +- slapi_rwlock_rdlock(ndn_cache_lock); +- ndn_ht_val = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn); +- if(ndn_ht_val){ +- ndn_cache_update_lru(&ndn_ht_val->lru_node); +- slapi_counter_increment(ndn_cache->cache_hits); +- if ((ndn_ht_val->len != dn_len) || +- /* even if the lengths match, dn may not be normalized yet. +- * (e.g., 'cn="o=ABC",o=XYZ' vs. 'cn=o\3DABC,o=XYZ') */ +- (memcmp(dn, ndn_ht_val->ndn, dn_len))){ +- *rc = 1; /* free result */ +- ndn = slapi_ch_malloc(ndn_ht_val->len + 1); +- memcpy(ndn, ndn_ht_val->ndn, ndn_ht_val->len); +- ndn[ndn_ht_val->len] = '\0'; +- *result = ndn; +- } else { +- /* the dn was already normalized, just return the dn as the result */ +- *result = dn; +- *rc = 0; +- } +- rv = 1; +- } else { +- /* copy/preserve the udn, so we can use it as the key when we add dn's to the hashtable */ +- key = slapi_ch_malloc(dn_len + 1); +- memcpy(key, dn, dn_len); +- key[dn_len] = '\0'; +- *udn = key; ++ ++ struct ndn_cache *t_cache = pthread_getspecific(ndn_cache_key); ++ if (t_cache == NULL) { ++ t_cache = ndn_thread_cache_create(t_cache_stats.thread_max_size, t_cache_stats.slots); ++ pthread_setspecific(ndn_cache_key, t_cache); ++ /* If we have no cache, we can't look up ... */ ++ return 0; + } +- slapi_rwlock_unlock(ndn_cache_lock); + +- return rv; +-} ++ t_cache->tries++; + +-/* +- * Move this lru node to the top of the list +- */ +-static void +-ndn_cache_update_lru(struct ndn_cache_lru **node) +-{ +- struct ndn_cache_lru *prev, *next, *curr_node = *node; ++ /* ++ * Hash our DN ... ++ */ ++ uint64_t dn_hash = sds_siphash13(dn, dn_len, t_cache->key); ++ /* Where should it be? 
*/ ++ size_t expect_slot = dn_hash % t_cache->slots; + +- if(curr_node == NULL){ +- return; +- } +- PR_Lock(lru_lock); +- if(curr_node->prev == NULL){ +- /* already the top node */ +- PR_Unlock(lru_lock); +- return; +- } +- prev = curr_node->prev; +- next = curr_node->next; +- if(next){ +- next->prev = prev; +- prev->next = next; +- } else { +- /* this was the tail, so reset the tail */ +- ndn_cache->tail = prev; +- prev->next = NULL; ++ /* ++ * Is it there? ++ */ ++ if (t_cache->table[expect_slot] != NULL) { ++ /* ++ * Check it really matches, could be collision. ++ */ ++ struct ndn_cache_value *node = t_cache->table[expect_slot]; ++ while (node != NULL) { ++ if (strcmp(dn, node->dn) == 0) { ++ /* ++ * Update LRU ++ * Are we already the tail? If so, we can just skip. ++ * remember, this means in a set of 1, we will always be tail ++ */ ++ if (t_cache->tail != node) { ++ /* ++ * Okay, we are *not* the tail. We could be anywhere between ++ * tail -> ... -> x -> head ++ * or even, we are the head ourself. ++ */ ++ if (t_cache->head == node) { ++ /* We are the head, update head to our predecessor */ ++ t_cache->head = node->prev; ++ /* Remember, the head has no next. */ ++ t_cache->head->next = NULL; ++ } else { ++ /* Right, we aren't the head, so we have a next node. */ ++ node->next->prev = node->prev; ++ } ++ /* Because we must be in the middle somewhere, we can assume next and prev exist. */ ++ node->prev->next = node->next; ++ /* ++ * Tail can't be NULL if we have a value in the cache, so we can ++ * just deref this. ++ */ ++ node->next = t_cache->tail; ++ t_cache->tail->prev = node; ++ t_cache->tail = node; ++ node->prev = NULL; ++ } ++ /* Update that we have a hit.*/ ++ t_cache->hits++; ++ /* Cope the NDN to the caller. */ ++ *ndn = slapi_ch_strdup(node->ndn); ++ /* Indicate to the caller to free this. */ ++ *rc = 1; ++ ndn_thread_cache_commit_status(t_cache); ++ return 1; ++ } ++ node = node->child; ++ } + } +- curr_node->prev = NULL; +- curr_node->next = ndn_cache->head; +- ndn_cache->head->prev = curr_node; +- ndn_cache->head = curr_node; +- PR_Unlock(lru_lock); ++ /* If we miss, we need to duplicate dn to udn here. */ ++ *udn = slapi_ch_strdup(dn); ++ *rc = 0; ++ ndn_thread_cache_commit_status(t_cache); ++ return 0; + } + + /* +@@ -2936,176 +3233,102 @@ ndn_cache_update_lru(struct ndn_cache_lru **node) + static void + ndn_cache_add(char *dn, size_t dn_len, char *ndn, size_t ndn_len) + { +- struct ndn_hash_val *ht_entry; +- struct ndn_cache_lru *new_node = NULL; +- PLHashEntry *he; +- int size; +- +- if(ndn_started == 0 || dn_len == 0){ ++ if (ndn_enabled == 0) { + return; + } +- if(strlen(ndn) > ndn_len){ ++ if (dn_len == 0) { ++ return; ++ } ++ if (strlen(ndn) > ndn_len) { + /* we need to null terminate the ndn */ + *(ndn + ndn_len) = '\0'; + } + /* + * Calculate the approximate memory footprint of the hash entry, key, and lru entry. + */ +- size = (dn_len * 2) + ndn_len + sizeof(PLHashEntry) + sizeof(struct ndn_hash_val) + sizeof(struct ndn_cache_lru); ++ struct ndn_cache_value *new_value = (struct ndn_cache_value *)slapi_ch_calloc(1, sizeof(struct ndn_cache_value)); ++ new_value->size = sizeof(struct ndn_cache_value) + dn_len + ndn_len; ++ /* DN is alloc for us */ ++ new_value->dn = dn; ++ /* But we need to copy ndn */ ++ new_value->ndn = slapi_ch_strdup(ndn); ++ + /* +- * Create our LRU node ++ * Get our local cache out. 
+ */ +- new_node = (struct ndn_cache_lru *)slapi_ch_malloc(sizeof(struct ndn_cache_lru)); +- if(new_node == NULL){ +- slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_add", "Failed to allocate new lru node.\n"); +- return; ++ struct ndn_cache *t_cache = pthread_getspecific(ndn_cache_key); ++ if (t_cache == NULL) { ++ t_cache = ndn_thread_cache_create(t_cache_stats.thread_max_size, t_cache_stats.slots); ++ pthread_setspecific(ndn_cache_key, t_cache); + } +- new_node->prev = NULL; +- new_node->key = dn; /* dn has already been allocated */ + /* +- * Its possible this dn was added to the hash by another thread. ++ * Hash the DN + */ +- slapi_rwlock_wrlock(ndn_cache_lock); +- ht_entry = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn); +- if(ht_entry){ +- /* already exists, free the node and return */ +- slapi_rwlock_unlock(ndn_cache_lock); +- slapi_ch_free_string(&new_node->key); +- slapi_ch_free((void **)&new_node); +- return; +- } ++ uint64_t dn_hash = sds_siphash13(new_value->dn, dn_len, t_cache->key); + /* +- * Create the hash entry ++ * Get the insert slot: This works because the number spaces of dn_hash is ++ * a 64bit int, and slots is a power of two. As a result, we end up with ++ * even distribution of the values. + */ +- ht_entry = (struct ndn_hash_val *)slapi_ch_malloc(sizeof(struct ndn_hash_val)); +- if(ht_entry == NULL){ +- slapi_rwlock_unlock(ndn_cache_lock); +- slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_add", "Failed to allocate new hash entry.\n"); +- slapi_ch_free_string(&new_node->key); +- slapi_ch_free((void **)&new_node); +- return; +- } +- ht_entry->ndn = slapi_ch_malloc(ndn_len + 1); +- memcpy(ht_entry->ndn, ndn, ndn_len); +- ht_entry->ndn[ndn_len] = '\0'; +- ht_entry->len = ndn_len; +- ht_entry->size = size; +- ht_entry->lru_node = new_node; ++ size_t insert_slot = dn_hash % t_cache->slots; ++ /* Track this for free */ ++ new_value->slot = insert_slot; ++ + /* +- * Check if our cache is full ++ * Okay, check if we have space, else we need to trim nodes from ++ * the LRU + */ +- PR_Lock(lru_lock); /* grab the lru lock now, as ndn_cache_flush needs it */ +- if(ndn_cache->cache_max_size != 0 && ((ndn_cache->cache_size + size) > ndn_cache->cache_max_size)){ +- ndn_cache_flush(); ++ while (t_cache->head && (t_cache->size + new_value->size) > t_cache->max_size) { ++ struct ndn_cache_value *trim_node = t_cache->head; ++ ndn_thread_cache_value_destroy(t_cache, trim_node); + } ++ + /* +- * Set the ndn cache lru nodes ++ * Add it! + */ +- if(ndn_cache->head == NULL && ndn_cache->tail == NULL){ +- /* this is the first node */ +- ndn_cache->head = new_node; +- ndn_cache->tail = new_node; +- new_node->next = NULL; ++ if (t_cache->table[insert_slot] == NULL) { ++ t_cache->table[insert_slot] = new_value; + } else { +- new_node->next = ndn_cache->head; +- if(ndn_cache->head) +- ndn_cache->head->prev = new_node; ++ /* ++ * Hash collision! We need to replace the bucket then .... ++ * insert at the head of the slot to make this simpler. ++ */ ++ new_value->child = t_cache->table[insert_slot]; ++ t_cache->table[insert_slot] = new_value; + } +- ndn_cache->head = new_node; +- PR_Unlock(lru_lock); ++ + /* +- * Add the new object to the hashtable, and update our stats ++ * Finally, stick this onto the tail because it's the newest. 
+ */ +- he = PL_HashTableAdd(ndn_cache_hashtable, new_node->key, (void *)ht_entry); +- if(he == NULL){ +- slapi_log_err(SLAPI_LOG_ERR, "ndn_cache_add", "Failed to add new entry to hash(%s)\n",dn); +- } else { +- ndn_cache->cache_count++; +- ndn_cache->cache_size += size; ++ if (t_cache->head == NULL) { ++ t_cache->head = new_value; + } +- slapi_rwlock_unlock(ndn_cache_lock); +-} +- +-/* +- * cache is full, remove the least used dn's. lru_lock/ndn_cache write lock are already taken +- */ +-static void +-ndn_cache_flush(void) +-{ +- struct ndn_cache_lru *node, *next, *flush_node; +- int i; +- +- node = ndn_cache->tail; +- for(i = 0; node && i < NDN_FLUSH_COUNT && ndn_cache->cache_count > NDN_MIN_COUNT; i++){ +- flush_node = node; +- /* update the lru */ +- next = node->prev; +- next->next = NULL; +- ndn_cache->tail = next; +- node = next; +- /* now update the hash */ +- ndn_cache->cache_count--; +- ndn_cache_delete(flush_node->key); +- slapi_ch_free_string(&flush_node->key); +- slapi_ch_free((void **)&flush_node); ++ if (t_cache->tail != NULL) { ++ new_value->next = t_cache->tail; ++ t_cache->tail->prev = new_value; + } ++ t_cache->tail = new_value; + +- slapi_log_err(SLAPI_LOG_CACHE, "ndn_cache_flush","Flushed cache.\n"); +-} +- +-static void +-ndn_cache_free(void) +-{ +- struct ndn_cache_lru *node, *next, *flush_node; +- +- if(!ndn_cache){ +- return; +- } +- +- node = ndn_cache->tail; +- while(node && ndn_cache->cache_count){ +- flush_node = node; +- /* update the lru */ +- next = node->prev; +- if(next){ +- next->next = NULL; +- } +- ndn_cache->tail = next; +- node = next; +- /* now update the hash */ +- ndn_cache->cache_count--; +- ndn_cache_delete(flush_node->key); +- slapi_ch_free_string(&flush_node->key); +- slapi_ch_free((void **)&flush_node); +- } +-} +- +-/* this is already "write" locked from ndn_cache_add */ +-static void +-ndn_cache_delete(char *dn) +-{ +- struct ndn_hash_val *ht_entry; ++ /* ++ * And update the stats. ++ */ ++ t_cache->size = t_cache->size + new_value->size; ++ t_cache->count++; + +- ht_entry = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn); +- if(ht_entry){ +- ndn_cache->cache_size -= ht_entry->size; +- slapi_ch_free_string(&ht_entry->ndn); +- slapi_ch_free((void **)&ht_entry); +- PL_HashTableRemove(ndn_cache_hashtable, dn); +- } + } + + /* stats for monitor */ + void +-ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, long *count) +-{ +- slapi_rwlock_rdlock(ndn_cache_lock); +- *hits = slapi_counter_get_value(ndn_cache->cache_hits); +- *tries = slapi_counter_get_value(ndn_cache->cache_tries); +- *size = ndn_cache->cache_size; +- *max_size = ndn_cache->cache_max_size; +- *count = ndn_cache->cache_count; +- slapi_rwlock_unlock(ndn_cache_lock); ++ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, size_t *thread_size, size_t *evicts, size_t *slots, long *count) ++{ ++ *max_size = t_cache_stats.max_size; ++ *thread_size = t_cache_stats.thread_max_size; ++ *slots = t_cache_stats.slots; ++ *evicts = slapi_counter_get_value(t_cache_stats.cache_evicts); ++ *hits = slapi_counter_get_value(t_cache_stats.cache_hits); ++ *tries = slapi_counter_get_value(t_cache_stats.cache_tries); ++ *size = slapi_counter_get_value(t_cache_stats.cache_size); ++ *count = slapi_counter_get_value(t_cache_stats.cache_count); + } + + /* Common ancestor sdn is allocated. 
+diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index 3910dbe..68b59f3 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -380,7 +380,7 @@ char *slapi_dn_normalize_case_original( char *dn ); + void ndn_cache_init(void); + void ndn_cache_destroy(void); + int ndn_cache_started(void); +-void ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, long *count); ++void ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, size_t *thread_size, size_t *evicts, size_t *slots, long *count); + #define NDN_DEFAULT_SIZE 20971520 /* 20mb - size of normalized dn cache */ + + /* filter.c */ +-- +2.9.4 + diff --git a/SOURCES/0063-Ticket-49330-Add-endian-header-file-check-to-configu.patch b/SOURCES/0063-Ticket-49330-Add-endian-header-file-check-to-configu.patch new file mode 100644 index 0000000..f1dce6f --- /dev/null +++ b/SOURCES/0063-Ticket-49330-Add-endian-header-file-check-to-configu.patch @@ -0,0 +1,25 @@ +From c9817ebe42a97ac7df155582957927b4d1d08c5f Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 18 Sep 2017 14:55:14 -0400 +Subject: [PATCH] Ticket 49330 - Add endian header file + +--- + configure.ac | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/configure.ac b/configure.ac +index 67217af..163db7d 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -39,7 +39,7 @@ AC_PROG_LIBTOOL + AC_HEADER_DIRENT + AC_HEADER_STDC + AC_HEADER_SYS_WAIT +-AC_CHECK_HEADERS([arpa/inet.h errno.h fcntl.h malloc.h netdb.h netinet/in.h stdlib.h string.h strings.h sys/file.h sys/socket.h sys/time.h syslog.h unistd.h inttypes.h mntent.h sys/sysinfo.h]) ++AC_CHECK_HEADERS([arpa/inet.h errno.h fcntl.h malloc.h netdb.h netinet/in.h stdlib.h string.h strings.h sys/file.h sys/socket.h sys/time.h syslog.h unistd.h mntent.h sys/sysinfo.h sys/endian.h endian.h]) + + # Checks for typedefs, structures, and compiler characteristics. + AC_HEADER_STAT +-- +2.9.5 + diff --git a/SOURCES/0064-Ticket-49257-only-register-modify-callbacks.patch b/SOURCES/0064-Ticket-49257-only-register-modify-callbacks.patch new file mode 100644 index 0000000..5391136 --- /dev/null +++ b/SOURCES/0064-Ticket-49257-only-register-modify-callbacks.patch @@ -0,0 +1,87 @@ +From bbe3403a88f9adecbd5d4187ceeb080fb51d9d14 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 31 May 2017 11:15:13 -0400 +Subject: [PATCH] Ticket 49257 - only register modify callbacks + +Bug Description: Regression. In the previous fix we called + ldbm_instance_config_load_dse_info() to register all + the dse preop callbacks. Previously this was only done + when creating an instance. It was not designed to be + used outside of that context, and it caused error 53's + when trying to add a backend after instance creation. + +Fix Description: Just register the "modify" DSE preop callbacks. + +https://pagure.io/389-ds-base/issue/49257 + +Reviewed by: ? 
+ +(cherry picked from commit 75a32a8829297a5cab303590d049f581740cf87e) +--- + ldap/servers/slapd/back-ldbm/instance.c | 12 +++--------- + ldap/servers/slapd/back-ldbm/ldbm_config.h | 2 +- + ldap/servers/slapd/back-ldbm/ldbm_instance_config.c | 13 +++++++++++++ + 3 files changed, 17 insertions(+), 10 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c +index 8b38644..f067d22 100644 +--- a/ldap/servers/slapd/back-ldbm/instance.c ++++ b/ldap/servers/slapd/back-ldbm/instance.c +@@ -305,15 +305,9 @@ ldbm_instance_startall(struct ldbminfo *li) + if (rc1 != 0) { + rc = rc1; + } else { +- if(ldbm_instance_config_load_dse_info(inst) != 0){ +- slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_startall", +- "Loading database instance configuration failed for (%s)\n", +- inst->inst_name); +- rc = -1; +- } else { +- vlv_init(inst); +- slapi_mtn_be_started(inst->inst_be); +- } ++ ldbm_instance_register_modify_callback(inst); ++ vlv_init(inst); ++ slapi_mtn_be_started(inst->inst_be); + } + inst_obj = objset_next_obj(li->li_instance_set, inst_obj); + } +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h +index ddec3a8..ea59739 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h ++++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h +@@ -157,6 +157,6 @@ int + ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry* e); + int ldbm_instance_create_default_user_indexes(ldbm_instance *inst); + void ldbm_config_destroy(struct ldbminfo *li); +- ++void ldbm_instance_register_modify_callback(ldbm_instance *inst); + + #endif /* _LDBM_CONFIG_H_ */ +diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +index 49a6cac..8fb4119 100644 +--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c ++++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +@@ -554,6 +554,19 @@ static int ldbm_instance_deny_config(Slapi_PBlock *pb, Slapi_Entry *e, + return SLAPI_DSE_CALLBACK_ERROR; + } + ++void ++ldbm_instance_register_modify_callback(ldbm_instance *inst) ++{ ++ struct ldbminfo *li = inst->inst_li; ++ char *dn = NULL; ++ ++ dn = slapi_create_dn_string("cn=%s,cn=%s,cn=plugins,cn=config", ++ inst->inst_name, li->li_plugin->plg_name); ++ slapi_config_register_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, dn, ++ LDAP_SCOPE_BASE, "(objectclass=*)", ++ ldbm_instance_modify_config_entry_callback, (void *) inst); ++ slapi_ch_free_string(&dn); ++} + /* Reads in any config information held in the dse for the given + * entry. Creates dse entries used to configure the given instance + * if they don't already exist. Registers dse callback functions to +-- +2.9.5 + diff --git a/SOURCES/0065-Ticket-49291-slapi_search_internal_callback_pb-may-S.patch b/SOURCES/0065-Ticket-49291-slapi_search_internal_callback_pb-may-S.patch new file mode 100644 index 0000000..219ba58 --- /dev/null +++ b/SOURCES/0065-Ticket-49291-slapi_search_internal_callback_pb-may-S.patch @@ -0,0 +1,46 @@ +From 28529671057c95327a35c326ee99fcafccad9de9 Mon Sep 17 00:00:00 2001 +From: Thierry Bordaz +Date: Wed, 14 Jun 2017 18:36:55 +0200 +Subject: [PATCH] Ticket 49291 - slapi_search_internal_callback_pb may SIGSEV + if related pblock has not operation set + +Bug Description: + if slapi_search_internal_set_pb is called with an invalid (NULL) base, the pblock should not + be used to call send_ldap_result. 
If it is, the send_ldap_result trying to derefence the + operation pointer will crash + +Fix Description: + Check that the operation is set before derefencing it + +https://pagure.io/389-ds-base/issue/49291 + +Reviewed by: Mark Reynolds + +Platforms tested: F23 + +Flag Day: no + +Doc impact: no +--- + ldap/servers/slapd/result.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c +index 56257c3..f3016ca 100644 +--- a/ldap/servers/slapd/result.c ++++ b/ldap/servers/slapd/result.c +@@ -350,6 +350,11 @@ send_ldap_result_ext( + slapi_pblock_get (pb, SLAPI_BIND_METHOD, &bind_method); + slapi_pblock_get (pb, SLAPI_OPERATION, &operation); + ++ if (operation == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "send_ldap_result_ext", "No operation found: slapi_search_internal_set_pb was incomplete (invalid 'base' ?)\n"); ++ return; ++ } ++ + if (operation->o_status == SLAPI_OP_STATUS_RESULT_SENT) { + return; /* result already sent */ + } +-- +2.9.5 + diff --git a/SOURCES/0066-Ticket-49370-local-password-policies-should-use-the-.patch b/SOURCES/0066-Ticket-49370-local-password-policies-should-use-the-.patch new file mode 100644 index 0000000..2753f3b --- /dev/null +++ b/SOURCES/0066-Ticket-49370-local-password-policies-should-use-the-.patch @@ -0,0 +1,51 @@ +From 1ec56936d29985a55f9529c1ea3e71056557b3ff Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 1 Sep 2017 09:24:55 -0400 +Subject: [PATCH] Ticket 49370 - local password policies should use the same + defaults as the global policy + +Description: When a local password policy (subtree/user) is created it does not use + the same defaults as the global policy. This causes inconsistent behavior. + +https://pagure.io/389-ds-base/issue/49370 + +Reviewed by: firstyear(Thanks!) 
+--- + ldap/servers/slapd/pw.c | 21 +++++++++++++++++++++ + 1 file changed, 21 insertions(+) + +diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c +index 378d148..19a863a 100644 +--- a/ldap/servers/slapd/pw.c ++++ b/ldap/servers/slapd/pw.c +@@ -1768,6 +1768,27 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn) + goto done; + } + ++ /* Set the default values */ ++ pwdpolicy->pw_mintokenlength = SLAPD_DEFAULT_PW_MINTOKENLENGTH; ++ pwdpolicy->pw_minlength = SLAPD_DEFAULT_PW_MINLENGTH; ++ pwdpolicy->pw_mindigits = SLAPD_DEFAULT_PW_MINDIGITS; ++ pwdpolicy->pw_minalphas = SLAPD_DEFAULT_PW_MINALPHAS; ++ pwdpolicy->pw_minuppers = SLAPD_DEFAULT_PW_MINUPPERS; ++ pwdpolicy->pw_minlowers = SLAPD_DEFAULT_PW_MINLOWERS; ++ pwdpolicy->pw_minspecials = SLAPD_DEFAULT_PW_MINSPECIALS; ++ pwdpolicy->pw_min8bit = SLAPD_DEFAULT_PW_MIN8BIT; ++ pwdpolicy->pw_maxrepeats = SLAPD_DEFAULT_PW_MAXREPEATS; ++ pwdpolicy->pw_mincategories = SLAPD_DEFAULT_PW_MINCATEGORIES; ++ pwdpolicy->pw_mintokenlength = SLAPD_DEFAULT_PW_MINTOKENLENGTH; ++ pwdpolicy->pw_maxage = SLAPD_DEFAULT_PW_MAXAGE; ++ pwdpolicy->pw_minage = SLAPD_DEFAULT_PW_MINAGE; ++ pwdpolicy->pw_warning = SLAPD_DEFAULT_PW_WARNING; ++ pwdpolicy->pw_inhistory = SLAPD_DEFAULT_PW_INHISTORY; ++ pwdpolicy->pw_maxfailure = SLAPD_DEFAULT_PW_MAXFAILURE; ++ pwdpolicy->pw_lockduration = SLAPD_DEFAULT_PW_LOCKDURATION; ++ pwdpolicy->pw_resetfailurecount = SLAPD_DEFAULT_PW_RESETFAILURECOUNT; ++ pwdpolicy->pw_gracelimit = SLAPD_DEFAULT_PW_GRACELIMIT; ++ + /* set the default passwordLegacyPolicy setting */ + pwdpolicy->pw_is_legacy = 1; + +-- +2.9.5 + diff --git a/SOURCES/0067-Ticket-49380-Crash-when-adding-invalid-replication.patch b/SOURCES/0067-Ticket-49380-Crash-when-adding-invalid-replication.patch new file mode 100644 index 0000000..cb988c3 --- /dev/null +++ b/SOURCES/0067-Ticket-49380-Crash-when-adding-invalid-replication.patch @@ -0,0 +1,55 @@ +From af59afa03296160577e419257772d5319796a992 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 14 Sep 2017 08:32:11 -0400 +Subject: [PATCH] Ticket 49380 - Crash when adding invalid replication + agreement + + Bug Description: If you add a replication agreement with an invalid "replicaEnabled" value + the server crashes when freeing the replica schedule. This is because the + schedule never gets allocated before the rror conidtion is hit, and then + it get dereferenced. + + Fix Description: Check for a NULL schedule before trying to destroy it. + + https://pagure.io/389-ds-base/issue/49380 + + Reviewed by: tbordaz(Thanks!) 
+--- + ldap/servers/plugins/replication/repl5_schedule.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/replication/repl5_schedule.c b/ldap/servers/plugins/replication/repl5_schedule.c +index 60ee6f2..4572e63 100644 +--- a/ldap/servers/plugins/replication/repl5_schedule.c ++++ b/ldap/servers/plugins/replication/repl5_schedule.c +@@ -130,6 +130,10 @@ schedule_destroy(Schedule *s) + { + int i; + ++ if (s == NULL) { ++ return; ++ } ++ + /* unschedule update window event if exists */ + unschedule_window_state_change_event (s); + +@@ -177,11 +181,15 @@ free_schedule_list(schedule_item **schedule_list) + int + schedule_set(Schedule *sch, Slapi_Attr *attr) + { +- int return_value; ++ int return_value = -1; + schedule_item *si = NULL; + schedule_item *new_schedule_list = NULL; + int valid = 1; + ++ if (sch == NULL) { ++ return return_value; ++ } ++ + if (NULL != attr) + { + int ind; +-- +2.9.5 + diff --git a/SOURCES/0068-Ticket-49380-Add-CI-test.patch b/SOURCES/0068-Ticket-49380-Add-CI-test.patch new file mode 100644 index 0000000..d59e7bf --- /dev/null +++ b/SOURCES/0068-Ticket-49380-Add-CI-test.patch @@ -0,0 +1,81 @@ +From d336e3558655d44f8ba797392af882e33d492958 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 14 Sep 2017 14:15:25 -0400 +Subject: [PATCH] Ticket 49380 - Add CI test + +Description: Add test to verify invalid agreement is rejected, and it + does not cause a crash + +https://pagure.io/389-ds-base/issue/49380 + +Reviewed by: spichugi(Thanks!) + +(cherry picked from commit 02d76b61489f105f81d72d4f3848e2444463289b) +--- + .../tests/suites/replication/acceptance_test.py | 43 ++++++++++++++++++++++ + 1 file changed, 43 insertions(+) + +diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py +index e6f2ef7..2f8b180 100644 +--- a/dirsrvtests/tests/suites/replication/acceptance_test.py ++++ b/dirsrvtests/tests/suites/replication/acceptance_test.py +@@ -3,6 +3,12 @@ from lib389.tasks import * + from lib389.utils import * + from lib389.topologies import topology_m4 as topo + ++from lib389._constants import (BACKEND_NAME, DEFAULT_SUFFIX, LOG_REPLICA, REPLICA_RUV_FILTER, ++ ReplicaRole, REPLICATION_BIND_DN, REPLICATION_BIND_PW, ++ REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, defaultProperties, ++ RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, ++ DN_DM, PASSWORD, LOG_DEFAULT, RA_ENABLED, RA_SCHEDULE) ++ + TEST_ENTRY_NAME = 'mmrepl_test' + TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX) + +@@ -193,6 +199,43 @@ def test_modrdn_entry(topo, test_entry, delold): + topo.ms["master1"].delete_s(newrdn_dn) + + ++def test_invalid_agmt(topo_m4): ++ """Test adding that an invalid agreement is properly rejected and does not crash the server ++ ++ :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b ++ :setup: MMR with four masters ++ :steps: ++ 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value) ++ 2. Verify the server is still running ++ :expectedresults: ++ 1. Invalid repl agreement should be rejected ++ 2. 
Server should be still running ++ """ ++ m1 = topo_m4.ms["master1"] ++ ++ # Add invalid agreement (nsds5ReplicaEnabled set to invalid value) ++ AGMT_DN = 'cn=whatever,cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config' ++ try: ++ invalid_props = {RA_ENABLED: 'True', # Invalid value ++ RA_SCHEDULE: '0001-2359 0123456'} ++ m1.agreement.create(suffix=DEFAULT_SUFFIX, host='localhost', port=389, properties=invalid_props) ++ except ldap.UNWILLING_TO_PERFORM: ++ m1.log.info('Invalid repl agreement correctly rejected') ++ except ldap.LDAPError as e: ++ m1.log.fatal('Got unexpected error adding invalid agreement: ' + str(e)) ++ assert False ++ else: ++ m1.log.fatal('Invalid agreement was incorrectly accepted by the server') ++ assert False ++ ++ # Verify the server is still running ++ try: ++ m1.simple_bind_s(DN_DM, PASSWORD) ++ except ldap.LDAPError as e: ++ m1.log.fatal('Failed to bind: ' + str(e)) ++ assert False ++ ++ + if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode +-- +2.9.5 + diff --git a/SOURCES/0069-Ticket-49327-password-expired-control-not-sent-durin.patch b/SOURCES/0069-Ticket-49327-password-expired-control-not-sent-durin.patch new file mode 100644 index 0000000..e4b182a --- /dev/null +++ b/SOURCES/0069-Ticket-49327-password-expired-control-not-sent-durin.patch @@ -0,0 +1,610 @@ +From 3ab8a78cd27cc8d2ad7a2b322a4fe73c43a3db08 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 14 Sep 2017 15:47:53 -0400 +Subject: [PATCH] Ticket 49327 - password expired control not sent during grace + logins + +Bug Description: When a password is expired, but within the grace login limit, + we should still send the expired control even though we allowed + the bind. + +Fix Description: new_new_passwd() returned a variety of result codes that required + the caller to set the response controls. This was hard to read and + process. Instead I added all the controls inside the function, and + return success or failure to the caller. + +https://pagure.io/389-ds-base/issue/49327 + +Reviewed by: gparente & tbordaz (Thanks!!) 
+ +(cherry picked from commit fbd32c4e27af9f331ee3a42dec944895a6efe2ad) +--- + ldap/servers/plugins/replication/repl_extop.c | 5 +- + ldap/servers/slapd/bind.c | 18 +- + ldap/servers/slapd/proto-slap.h | 3 +- + ldap/servers/slapd/pw_mgmt.c | 453 +++++++++++++------------- + ldap/servers/slapd/saslbind.c | 20 +- + 5 files changed, 238 insertions(+), 261 deletions(-) + +diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c +index a39d918..96ad7dd 100644 +--- a/ldap/servers/plugins/replication/repl_extop.c ++++ b/ldap/servers/plugins/replication/repl_extop.c +@@ -1173,8 +1173,9 @@ send_response: + * On the supplier, we need to close the connection so + * that the RA will restart a new session in a clear state + */ +- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - " +- "already acquired replica: disconnect conn=%d\n", connid); ++ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, ++ "multimaster_extop_StartNSDS50ReplicationRequest - " ++ "already acquired replica: disconnect conn=%" PRIu64 "\n", connid); + slapi_disconnect_server(conn); + + } +diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c +index d6c7668..e6cad7f 100644 +--- a/ldap/servers/slapd/bind.c ++++ b/ldap/servers/slapd/bind.c +@@ -673,8 +673,7 @@ do_bind( Slapi_PBlock *pb ) + slapi_entry_free(referral); + goto free_and_return; + } else if (auto_bind || rc == SLAPI_BIND_SUCCESS || rc == SLAPI_BIND_ANONYMOUS) { +- long t; +- char* authtype = NULL; ++ char *authtype = NULL; + /* rc is SLAPI_BIND_SUCCESS or SLAPI_BIND_ANONYMOUS */ + if(auto_bind) { + rc = SLAPI_BIND_SUCCESS; +@@ -761,19 +760,8 @@ do_bind( Slapi_PBlock *pb ) + slapi_ch_strdup(slapi_sdn_get_ndn(sdn)), + NULL, NULL, NULL, bind_target_entry); + if (!slapi_be_is_flag_set(be, SLAPI_BE_FLAG_REMOTE_DATA)) { +- /* check if need new password before sending +- the bind success result */ +- myrc = need_new_pw(pb, &t, bind_target_entry, pw_response_requested); +- switch (myrc) { +- case 1: +- (void)slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRED, 0); +- break; +- case 2: +- (void)slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRING, t); +- break; +- default: +- break; +- } ++ /* check if need new password before sending the bind success result */ ++ myrc = need_new_pw(pb, bind_target_entry, pw_response_requested); + } + } + if (auth_response_requested) { +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index 9696ead..0ba61d7 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -972,7 +972,7 @@ int plugin_call_acl_verify_syntax ( Slapi_PBlock *pb, Slapi_Entry *e, char **err + * pw_mgmt.c + */ + void pw_init( void ); +-int need_new_pw( Slapi_PBlock *pb, long *t, Slapi_Entry *e, int pwresponse_req ); ++int need_new_pw(Slapi_PBlock *pb, Slapi_Entry *e, int pwresponse_req); + int update_pw_info( Slapi_PBlock *pb , char *old_pw ); + int check_pw_syntax( Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals, + char **old_pw, Slapi_Entry *e, int mod_op ); +@@ -982,7 +982,6 @@ void get_old_pw( Slapi_PBlock *pb, const Slapi_DN *sdn, char **old_pw); + int check_account_lock( Slapi_PBlock *pb, Slapi_Entry * bind_target_entry, int pwresponse_req, int account_inactivation_only /*no wire/no pw policy*/); + int check_pw_minage( Slapi_PBlock *pb, const Slapi_DN *sdn, struct berval **vals) ; + void add_password_attrs( Slapi_PBlock *pb, Operation *op, Slapi_Entry *e ); +- + int 
add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e); + + /* +diff --git a/ldap/servers/slapd/pw_mgmt.c b/ldap/servers/slapd/pw_mgmt.c +index 7252c08..b06e3f1 100644 +--- a/ldap/servers/slapd/pw_mgmt.c ++++ b/ldap/servers/slapd/pw_mgmt.c +@@ -22,234 +22,239 @@ + /* prototypes */ + /****************************************************************************/ + +-/* need_new_pw() is called when non rootdn bind operation succeeds with authentication */ ++/* ++ * need_new_pw() is called when non rootdn bind operation succeeds with authentication ++ * ++ * Return 0 - password is okay ++ * Return -1 - password is expired, abort bind ++ */ + int +-need_new_pw( Slapi_PBlock *pb, long *t, Slapi_Entry *e, int pwresponse_req ) ++need_new_pw(Slapi_PBlock *pb, Slapi_Entry *e, int pwresponse_req) + { +- time_t cur_time, pw_exp_date; +- Slapi_Mods smods; +- double diff_t = 0; +- char *cur_time_str = NULL; +- char *passwordExpirationTime = NULL; +- char *timestring; +- char *dn; +- const Slapi_DN *sdn; +- passwdPolicy *pwpolicy = NULL; +- int pwdGraceUserTime = 0; +- char graceUserTime[8]; +- +- if (NULL == e) { +- return (-1); +- } +- slapi_mods_init (&smods, 0); +- sdn = slapi_entry_get_sdn_const( e ); +- dn = slapi_entry_get_ndn( e ); +- pwpolicy = new_passwdPolicy(pb, dn); +- +- /* after the user binds with authentication, clear the retry count */ +- if ( pwpolicy->pw_lockout == 1) +- { +- if(slapi_entry_attr_get_int( e, "passwordRetryCount") > 0) +- { +- slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordRetryCount", "0"); +- } +- } +- +- cur_time = current_time(); +- +- /* get passwordExpirationTime attribute */ +- passwordExpirationTime= slapi_entry_attr_get_charptr(e, "passwordExpirationTime"); +- +- if (passwordExpirationTime == NULL) +- { +- /* password expiration date is not set. +- * This is ok for data that has been loaded via ldif2ldbm +- * Set expiration time if needed, +- * don't do further checking and return 0 */ +- if (pwpolicy->pw_exp == 1) { +- pw_exp_date = time_plus_sec(cur_time, pwpolicy->pw_maxage); +- +- timestring = format_genTime (pw_exp_date); +- slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpirationTime", timestring); +- slapi_ch_free_string(×tring); +- slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpWarned", "0"); +- +- pw_apply_mods(sdn, &smods); +- } else if (pwpolicy->pw_lockout == 1) { +- pw_apply_mods(sdn, &smods); +- } +- slapi_mods_done(&smods); +- return ( 0 ); +- } +- +- pw_exp_date = parse_genTime(passwordExpirationTime); +- +- slapi_ch_free_string(&passwordExpirationTime); +- +- /* Check if password has been reset */ +- if ( pw_exp_date == NO_TIME ) { +- +- /* check if changing password is required */ +- if ( pwpolicy->pw_must_change ) { +- /* set c_needpw for this connection to be true. this client +- now can only change its own password */ +- pb->pb_conn->c_needpw = 1; +- *t=0; +- /* We need to include "changeafterreset" error in +- * passwordpolicy response control. So, we will not be +- * done here. We remember this scenario by (c_needpw=1) +- * and check it before sending the control from various +- * places. We will also add LDAP_CONTROL_PWEXPIRED control +- * as the return value used to be (1). 
+- */ +- goto skip; +- } +- /* Mark that first login occured */ +- pw_exp_date = NOT_FIRST_TIME; +- timestring = format_genTime(pw_exp_date); +- slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpirationTime", timestring); +- slapi_ch_free_string(×tring); +- } ++ time_t cur_time, pw_exp_date; ++ Slapi_Mods smods; ++ double diff_t = 0; ++ char *cur_time_str = NULL; ++ char *passwordExpirationTime = NULL; ++ char *timestring; ++ char *dn; ++ const Slapi_DN *sdn; ++ passwdPolicy *pwpolicy = NULL; ++ int pwdGraceUserTime = 0; ++ char graceUserTime[16] = {0}; ++ Connection *pb_conn = NULL; ++ long t; ++ ++ if (NULL == e) { ++ return (-1); ++ } ++ slapi_mods_init(&smods, 0); ++ sdn = slapi_entry_get_sdn_const(e); ++ dn = slapi_entry_get_ndn(e); ++ pwpolicy = new_passwdPolicy(pb, dn); ++ ++ /* after the user binds with authentication, clear the retry count */ ++ if (pwpolicy->pw_lockout == 1) { ++ if (slapi_entry_attr_get_int(e, "passwordRetryCount") > 0) { ++ slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordRetryCount", "0"); ++ } ++ } ++ ++ cur_time = current_time(); ++ ++ /* get passwordExpirationTime attribute */ ++ passwordExpirationTime = slapi_entry_attr_get_charptr(e, "passwordExpirationTime"); ++ ++ if (passwordExpirationTime == NULL) { ++ /* password expiration date is not set. ++ * This is ok for data that has been loaded via ldif2ldbm ++ * Set expiration time if needed, ++ * don't do further checking and return 0 */ ++ if (pwpolicy->pw_exp == 1) { ++ pw_exp_date = time_plus_sec(cur_time, pwpolicy->pw_maxage); ++ ++ timestring = format_genTime(pw_exp_date); ++ slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpirationTime", timestring); ++ slapi_ch_free_string(×tring); ++ slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpWarned", "0"); ++ ++ pw_apply_mods(sdn, &smods); ++ } else if (pwpolicy->pw_lockout == 1) { ++ pw_apply_mods(sdn, &smods); ++ } ++ slapi_mods_done(&smods); ++ return (0); ++ } ++ ++ pw_exp_date = parse_genTime(passwordExpirationTime); ++ ++ slapi_ch_free_string(&passwordExpirationTime); ++ ++ slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); ++ ++ /* Check if password has been reset */ ++ if (pw_exp_date == NO_TIME) { ++ ++ /* check if changing password is required */ ++ if (pwpolicy->pw_must_change) { ++ /* set c_needpw for this connection to be true. this client ++ now can only change its own password */ ++ pb_conn->c_needpw = 1; ++ t = 0; ++ /* We need to include "changeafterreset" error in ++ * passwordpolicy response control. So, we will not be ++ * done here. We remember this scenario by (c_needpw=1) ++ * and check it before sending the control from various ++ * places. We will also add LDAP_CONTROL_PWEXPIRED control ++ * as the return value used to be (1). ++ */ ++ goto skip; ++ } ++ /* Mark that first login occured */ ++ pw_exp_date = NOT_FIRST_TIME; ++ timestring = format_genTime(pw_exp_date); ++ slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpirationTime", timestring); ++ slapi_ch_free_string(×tring); ++ } + + skip: +- /* if password never expires, don't need to go on; return 0 */ +- if ( pwpolicy->pw_exp == 0 ) { +- /* check for "changeafterreset" condition */ +- if (pb->pb_conn->c_needpw == 1) { +- if (pwresponse_req) { +- slapi_pwpolicy_make_response_control ( pb, -1, -1, LDAP_PWPOLICY_CHGAFTERRESET ); +- } +- slapi_add_pwd_control ( pb, LDAP_CONTROL_PWEXPIRED, 0); +- } +- pw_apply_mods(sdn, &smods); +- slapi_mods_done(&smods); +- return ( 0 ); +- } +- +- /* check if password expired. If so, abort bind. 
*/ +- cur_time_str = format_genTime ( cur_time ); +- if ((pw_exp_date != NO_TIME) && (pw_exp_date != NOT_FIRST_TIME) && +- (diff_t = difftime(pw_exp_date, parse_genTime(cur_time_str))) <= 0) { +- slapi_ch_free_string(&cur_time_str); /* only need this above */ +- /* password has expired. Check the value of +- * passwordGraceUserTime and compare it +- * against the value of passwordGraceLimit */ +- pwdGraceUserTime = slapi_entry_attr_get_int( e, "passwordGraceUserTime"); +- if ( pwpolicy->pw_gracelimit > pwdGraceUserTime ) { +- pwdGraceUserTime++; +- sprintf ( graceUserTime, "%d", pwdGraceUserTime ); +- slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, +- "passwordGraceUserTime", graceUserTime); +- pw_apply_mods(sdn, &smods); +- slapi_mods_done(&smods); +- if (pwresponse_req) { +- /* check for "changeafterreset" condition */ +- if (pb->pb_conn->c_needpw == 1) { +- slapi_pwpolicy_make_response_control( pb, -1, +- ((pwpolicy->pw_gracelimit) - pwdGraceUserTime), +- LDAP_PWPOLICY_CHGAFTERRESET); +- } else { +- slapi_pwpolicy_make_response_control( pb, -1, +- ((pwpolicy->pw_gracelimit) - pwdGraceUserTime), +- -1); +- } +- } +- +- if (pb->pb_conn->c_needpw == 1) { +- slapi_add_pwd_control ( pb, LDAP_CONTROL_PWEXPIRED, 0); +- } +- return ( 0 ); +- } +- +- /* password expired and user exceeded limit of grace attemps. +- * Send result and also the control */ +- slapi_add_pwd_control ( pb, LDAP_CONTROL_PWEXPIRED, 0); +- if (pwresponse_req) { +- slapi_pwpolicy_make_response_control ( pb, -1, -1, LDAP_PWPOLICY_PWDEXPIRED ); +- } +- slapi_send_ldap_result ( pb, LDAP_INVALID_CREDENTIALS, NULL, +- "password expired!", 0, NULL ); +- +- /* abort bind */ +- /* pass pb to do_unbind(). pb->pb_op->o_opid and +- pb->pb_op->o_tag are not right but I don't see +- do_unbind() checking for those. We might need to +- create a pb for unbind operation. Also do_unbind calls +- pre and post ops. Maybe we don't want to call them */ +- if (pb->pb_conn && (LDAP_VERSION2 == pb->pb_conn->c_ldapversion)) { +- /* We close the connection only with LDAPv2 connections */ +- disconnect_server( pb->pb_conn, pb->pb_op->o_connid, +- pb->pb_op->o_opid, SLAPD_DISCONNECT_UNBIND, 0); +- } +- /* Apply current modifications */ +- pw_apply_mods(sdn, &smods); +- slapi_mods_done(&smods); +- return (-1); +- } +- slapi_ch_free((void **) &cur_time_str ); +- +- /* check if password is going to expire within "passwordWarning" */ +- /* Note that if pw_exp_date is NO_TIME or NOT_FIRST_TIME, +- * we must send warning first and this changes the expiration time. 
+- * This is done just below since diff_t is 0 +- */ +- if ( diff_t <= pwpolicy->pw_warning ) { +- int pw_exp_warned = 0; +- +- pw_exp_warned = slapi_entry_attr_get_int( e, "passwordExpWarned"); +- if ( !pw_exp_warned ){ +- /* first time send out a warning */ +- /* reset the expiration time to current + warning time +- * and set passwordExpWarned to true +- */ +- if (pb->pb_conn->c_needpw != 1) { +- pw_exp_date = time_plus_sec(cur_time, pwpolicy->pw_warning); +- } +- +- timestring = format_genTime(pw_exp_date); +- slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpirationTime", timestring); +- slapi_ch_free_string(×tring); +- +- slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpWarned", "1"); +- +- *t = pwpolicy->pw_warning; +- +- } else { +- *t = (long)diff_t; /* jcm: had to cast double to long */ +- } +- +- pw_apply_mods(sdn, &smods); +- slapi_mods_done(&smods); +- if (pwresponse_req) { +- /* check for "changeafterreset" condition */ +- if (pb->pb_conn->c_needpw == 1) { +- slapi_pwpolicy_make_response_control( pb, *t, -1, +- LDAP_PWPOLICY_CHGAFTERRESET); +- } else { +- slapi_pwpolicy_make_response_control( pb, *t, -1, +- -1); +- } +- } +- +- if (pb->pb_conn->c_needpw == 1) { +- slapi_add_pwd_control ( pb, LDAP_CONTROL_PWEXPIRED, 0); +- } +- return (2); +- } else { +- if (pwresponse_req && pwpolicy->pw_send_expiring) { +- slapi_pwpolicy_make_response_control( pb, diff_t, -1, -1); +- slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRING, diff_t); +- } +- } +- +- pw_apply_mods(sdn, &smods); +- slapi_mods_done(&smods); +- /* Leftover from "changeafterreset" condition */ +- if (pb->pb_conn->c_needpw == 1) { +- slapi_add_pwd_control ( pb, LDAP_CONTROL_PWEXPIRED, 0); +- } +- /* passes checking, return 0 */ +- return( 0 ); ++ /* if password never expires, don't need to go on; return 0 */ ++ if (pwpolicy->pw_exp == 0) { ++ /* check for "changeafterreset" condition */ ++ if (pb_conn->c_needpw == 1) { ++ if (pwresponse_req) { ++ slapi_pwpolicy_make_response_control(pb, -1, -1, LDAP_PWPOLICY_CHGAFTERRESET); ++ } ++ slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRED, 0); ++ } ++ pw_apply_mods(sdn, &smods); ++ slapi_mods_done(&smods); ++ return (0); ++ } ++ ++ /* check if password expired. If so, abort bind. */ ++ cur_time_str = format_genTime(cur_time); ++ if ((pw_exp_date != NO_TIME) && (pw_exp_date != NOT_FIRST_TIME) && ++ (diff_t = difftime(pw_exp_date, parse_genTime(cur_time_str))) <= 0) { ++ slapi_ch_free_string(&cur_time_str); /* only need this above */ ++ /* password has expired. Check the value of ++ * passwordGraceUserTime and compare it ++ * against the value of passwordGraceLimit */ ++ pwdGraceUserTime = slapi_entry_attr_get_int(e, "passwordGraceUserTime"); ++ if (pwpolicy->pw_gracelimit > pwdGraceUserTime) { ++ pwdGraceUserTime++; ++ sprintf(graceUserTime, "%d", pwdGraceUserTime); ++ slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, ++ "passwordGraceUserTime", graceUserTime); ++ pw_apply_mods(sdn, &smods); ++ slapi_mods_done(&smods); ++ if (pwresponse_req) { ++ /* check for "changeafterreset" condition */ ++ if (pb_conn->c_needpw == 1) { ++ slapi_pwpolicy_make_response_control(pb, -1, ++ ((pwpolicy->pw_gracelimit) - pwdGraceUserTime), ++ LDAP_PWPOLICY_CHGAFTERRESET); ++ } else { ++ slapi_pwpolicy_make_response_control(pb, -1, ++ ((pwpolicy->pw_gracelimit) - pwdGraceUserTime), ++ -1); ++ } ++ } ++ slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRED, 0); ++ return (0); ++ } ++ ++ /* password expired and user exceeded limit of grace attemps. 
++ * Send result and also the control */ ++ slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRED, 0); ++ if (pwresponse_req) { ++ slapi_pwpolicy_make_response_control(pb, -1, -1, LDAP_PWPOLICY_PWDEXPIRED); ++ } ++ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, ++ "password expired!", 0, NULL); ++ ++ /* abort bind */ ++ /* pass pb to do_unbind(). pb->pb_op->o_opid and ++ pb->pb_op->o_tag are not right but I don't see ++ do_unbind() checking for those. We might need to ++ create a pb for unbind operation. Also do_unbind calls ++ pre and post ops. Maybe we don't want to call them */ ++ if (pb_conn && (LDAP_VERSION2 == pb_conn->c_ldapversion)) { ++ Operation *pb_op = NULL; ++ slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op); ++ /* We close the connection only with LDAPv2 connections */ ++ disconnect_server(pb_conn, pb_op->o_connid, ++ pb_op->o_opid, SLAPD_DISCONNECT_UNBIND, 0); ++ } ++ /* Apply current modifications */ ++ pw_apply_mods(sdn, &smods); ++ slapi_mods_done(&smods); ++ return (-1); ++ } ++ slapi_ch_free((void **)&cur_time_str); ++ ++ /* check if password is going to expire within "passwordWarning" */ ++ /* Note that if pw_exp_date is NO_TIME or NOT_FIRST_TIME, ++ * we must send warning first and this changes the expiration time. ++ * This is done just below since diff_t is 0 ++ */ ++ if (diff_t <= pwpolicy->pw_warning) { ++ int pw_exp_warned = 0; ++ ++ pw_exp_warned = slapi_entry_attr_get_int(e, "passwordExpWarned"); ++ if (!pw_exp_warned) { ++ /* first time send out a warning */ ++ /* reset the expiration time to current + warning time ++ * and set passwordExpWarned to true ++ */ ++ if (pb_conn->c_needpw != 1) { ++ pw_exp_date = time_plus_sec(cur_time, pwpolicy->pw_warning); ++ } ++ ++ timestring = format_genTime(pw_exp_date); ++ slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpirationTime", timestring); ++ slapi_ch_free_string(×tring); ++ ++ slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "passwordExpWarned", "1"); ++ ++ t = pwpolicy->pw_warning; ++ ++ } else { ++ t = (long)diff_t; /* jcm: had to cast double to long */ ++ } ++ ++ pw_apply_mods(sdn, &smods); ++ slapi_mods_done(&smods); ++ if (pwresponse_req) { ++ /* check for "changeafterreset" condition */ ++ if (pb_conn->c_needpw == 1) { ++ slapi_pwpolicy_make_response_control(pb, t, -1, LDAP_PWPOLICY_CHGAFTERRESET); ++ } else { ++ slapi_pwpolicy_make_response_control(pb, t, -1, -1); ++ } ++ } ++ ++ if (pb_conn->c_needpw == 1) { ++ slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRED, 0); ++ } else { ++ slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRING, t); ++ } ++ return (0); ++ } else { ++ if (pwresponse_req && pwpolicy->pw_send_expiring) { ++ slapi_pwpolicy_make_response_control(pb, diff_t, -1, -1); ++ slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRING, diff_t); ++ } ++ } ++ ++ pw_apply_mods(sdn, &smods); ++ slapi_mods_done(&smods); ++ /* Leftover from "changeafterreset" condition */ ++ if (pb_conn->c_needpw == 1) { ++ slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRED, 0); ++ } ++ /* passes checking, return 0 */ ++ return (0); + } + + /* Called once from main */ +diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c +index dd0c4fb..134f5aa 100644 +--- a/ldap/servers/slapd/saslbind.c ++++ b/ldap/servers/slapd/saslbind.c +@@ -859,7 +859,6 @@ ids_sasl_mech_supported(Slapi_PBlock *pb, const char *mech) + void ids_sasl_check_bind(Slapi_PBlock *pb) + { + int rc, isroot; +- long t; + sasl_conn_t *sasl_conn; + struct propctx *propctx; + sasl_ssf_t *ssfp; +@@ -1096,23 +1095,8 @@ sasl_check_result: + 
set_db_default_result_handlers(pb); + + /* check password expiry */ +- if (!isroot) { +- int pwrc; +- +- pwrc = need_new_pw(pb, &t, bind_target_entry, pwresponse_requested); +- +- switch (pwrc) { +- case 1: +- slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRED, 0); +- break; +- case 2: +- slapi_add_pwd_control(pb, LDAP_CONTROL_PWEXPIRING, t); +- break; +- case -1: +- goto out; +- default: +- break; +- } ++ if (!isroot && need_new_pw(pb, bind_target_entry, pwresponse_requested) == -1) { ++ goto out; + } + + /* attach the sasl data */ +-- +2.9.5 + diff --git a/SOURCES/0070-Ticket-49379-Allowed-sasl-mapping-requires-restart.patch b/SOURCES/0070-Ticket-49379-Allowed-sasl-mapping-requires-restart.patch new file mode 100644 index 0000000..c2e230b --- /dev/null +++ b/SOURCES/0070-Ticket-49379-Allowed-sasl-mapping-requires-restart.patch @@ -0,0 +1,439 @@ +From 8a7b47602acc910d2f64439b81af3299b60359c8 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 18 Sep 2017 10:35:20 -0400 +Subject: [PATCH] Ticket 49379 - Allowed sasl mapping requires restart + +Bug Description: If allowed sasl mechanisms are configured, and the server is + restarted, trying to add new sasl mechanisms does not get applied + until the server is restarted again. [1] + + We were also overwriting memory when we stripped the commas from + the allowed machanism list. THis lead to the allowed mechanisms + to get truncated,and permanently lose certain mechs. [2] + + A crash with PLAIN sasl mechanism was also found. [3] + +Fix Description: To address allowed sasl mechs, we no longer explicitly the mechanisms + during the sasl_init at server startup. Instead we check the allowed + list ourselves during a bind. [1] + + When setting the allowed sasl mechs, make a copy of the value to + apply the changes to(removing coamms), and do not change the original + value as it's still being used. [2] + + The crash when using sasl PLAIN was due to unlocking a rwlock that + was not locked. [3] + +https://pagure.io/389-ds-base/issue/49379 + +Reviewed by: tbordaz(Thanks!) + +(cherry picked from commit c78f41db31752a99aadd6abcbf7a1d852a8e7931) +--- + dirsrvtests/tests/suites/sasl/allowed_mechs.py | 114 ++++++++++++++++++++++-- + dirsrvtests/tests/suites/sasl/plain.py | 10 ++- + ldap/servers/slapd/libglobs.c | 23 ++--- + ldap/servers/slapd/saslbind.c | 116 +++++++++++++------------ + 4 files changed, 187 insertions(+), 76 deletions(-) + +diff --git a/dirsrvtests/tests/suites/sasl/allowed_mechs.py b/dirsrvtests/tests/suites/sasl/allowed_mechs.py +index 7958db4..5b1b92c 100644 +--- a/dirsrvtests/tests/suites/sasl/allowed_mechs.py ++++ b/dirsrvtests/tests/suites/sasl/allowed_mechs.py +@@ -8,45 +8,141 @@ + # + + import pytest +-import ldap +- +-import time +- ++import os + from lib389.topologies import topology_st + ++ + def test_sasl_allowed_mechs(topology_st): ++ """Test the alloweed sasl mechanism feature ++ ++ :ID: ab7d9f86-8cfe-48c3-8baa-739e599f006a ++ :feature: Allowed sasl mechanisms ++ :steps: 1. Get the default list of mechanisms ++ 2. Set allowed mechanism PLAIN, and verify it's correctly listed ++ 3. Restart server, and verify list is still correct ++ 4. Test EXTERNAL is properly listed ++ 5. Add GSSAPI to the existing list, and verify it's correctly listed ++ 6. Restart server and verify list is still correct ++ 7. Add ANONYMOUS to the existing list, and veirfy it's correctly listed ++ 8. Restart server and verify list is still correct ++ 9. Remove GSSAPI and verify it's correctly listed ++ 10. 
Restart server and verify list is still correct ++ 11. Reset allowed list to nothing, verify "all" the mechanisms are returned ++ 12. Restart server and verify list is still correct ++ ++ :expectedresults: The supported mechanisms supported what is set for the allowed ++ mechanisms ++ """ + standalone = topology_st.standalone + + # Get the supported mechs. This should contain PLAIN, GSSAPI, EXTERNAL at least ++ standalone.log.info("Test we have some of the default mechanisms") + orig_mechs = standalone.rootdse.supported_sasl() + print(orig_mechs) + assert('GSSAPI' in orig_mechs) + assert('PLAIN' in orig_mechs) + assert('EXTERNAL' in orig_mechs) + +- # Now edit the supported mechs. CHeck them again. ++ # Now edit the supported mechs. Check them again. ++ standalone.log.info("Edit mechanisms to allow just PLAIN") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN') ++ limit_mechs = standalone.rootdse.supported_sasl() ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) # Should always be in the allowed list, even if not set. ++ assert('GSSAPI' not in limit_mechs) # Should not be there! + ++ # Restart the server a few times and make sure nothing changes ++ standalone.log.info("Restart server and make sure we still have correct allowed mechs") ++ standalone.restart() ++ standalone.restart() + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) +- # Should always be in the allowed list, even if not set. + assert('EXTERNAL' in limit_mechs) +- # Should not be there! + assert('GSSAPI' not in limit_mechs) + ++ # Set EXTERNAL, even though its always supported ++ standalone.log.info("Edit mechanisms to allow just PLAIN and EXTERNAL") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, EXTERNAL') ++ limit_mechs = standalone.rootdse.supported_sasl() ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) ++ assert('GSSAPI' not in limit_mechs) ++ ++ # Now edit the supported mechs. Check them again. ++ standalone.log.info("Edit mechanisms to allow just PLAIN and GSSAPI") ++ standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, GSSAPI') ++ limit_mechs = standalone.rootdse.supported_sasl() ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) ++ assert('GSSAPI' in limit_mechs) ++ assert(len(limit_mechs) == 3) ++ ++ # Restart server twice and make sure the allowed list is the same ++ standalone.restart() ++ standalone.restart() # For ticket 49379 (test double restart) ++ limit_mechs = standalone.rootdse.supported_sasl() ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) ++ assert('GSSAPI' in limit_mechs) ++ assert(len(limit_mechs) == 3) ++ ++ # Add ANONYMOUS to the supported mechs and test again. 
++ standalone.log.info("Edit mechanisms to allow just PLAIN, GSSAPI, and ANONYMOUS") ++ standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, GSSAPI, ANONYMOUS') ++ limit_mechs = standalone.rootdse.supported_sasl() ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) ++ assert('GSSAPI' in limit_mechs) ++ assert('ANONYMOUS' in limit_mechs) ++ assert(len(limit_mechs) == 4) ++ ++ # Restart server and make sure the allowed list is the same ++ standalone.restart() ++ standalone.restart() # For ticket 49379 (test double restart) ++ limit_mechs = standalone.rootdse.supported_sasl() ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) ++ assert('GSSAPI' in limit_mechs) ++ assert('ANONYMOUS' in limit_mechs) ++ assert(len(limit_mechs) == 4) + ++ # Remove GSSAPI ++ standalone.log.info("Edit mechanisms to allow just PLAIN and ANONYMOUS") ++ standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, ANONYMOUS') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) +- # Should not be there! + assert('GSSAPI' not in limit_mechs) ++ assert('ANONYMOUS' in limit_mechs) ++ assert(len(limit_mechs) == 3) ++ ++ # Restart server and make sure the allowed list is the same ++ standalone.restart() ++ limit_mechs = standalone.rootdse.supported_sasl() ++ assert('PLAIN' in limit_mechs) ++ assert('EXTERNAL' in limit_mechs) ++ assert('GSSAPI' not in limit_mechs) ++ assert('ANONYMOUS' in limit_mechs) ++ assert(len(limit_mechs) == 3) + + # Do a config reset ++ standalone.log.info("Reset allowed mechaisms") + standalone.config.reset('nsslapd-allowed-sasl-mechanisms') + + # check the supported list is the same as our first check. ++ standalone.log.info("Check that we have the original set of mechanisms") + final_mechs = standalone.rootdse.supported_sasl() +- print(final_mechs) + assert(set(final_mechs) == set(orig_mechs)) + ++ # Check it after a restart ++ standalone.log.info("Check that we have the original set of mechanisms after a restart") ++ standalone.restart() ++ final_mechs = standalone.rootdse.supported_sasl() ++ assert(set(final_mechs) == set(orig_mechs)) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) +diff --git a/dirsrvtests/tests/suites/sasl/plain.py b/dirsrvtests/tests/suites/sasl/plain.py +index 91ccb02..6bf39a8 100644 +--- a/dirsrvtests/tests/suites/sasl/plain.py ++++ b/dirsrvtests/tests/suites/sasl/plain.py +@@ -15,9 +15,11 @@ from lib389.topologies import topology_st + from lib389.utils import * + from lib389.sasl import PlainSASL + from lib389.idm.services import ServiceAccounts ++from lib389._constants import (SECUREPORT_STANDALONE1, DEFAULT_SUFFIX) + + log = logging.getLogger(__name__) + ++ + def test_sasl_plain(topology_st): + + standalone = topology_st.standalone +@@ -38,7 +40,7 @@ def test_sasl_plain(topology_st): + standalone.rsa.create() + # Set the secure port and nsslapd-security + # Could this fail with selinux? +- standalone.config.set('nsslapd-secureport', '%s' % SECUREPORT_STANDALONE1 ) ++ standalone.config.set('nsslapd-secureport', '%s' % SECUREPORT_STANDALONE1) + standalone.config.set('nsslapd-security', 'on') + # Do we need to restart to allow starttls? + standalone.restart() +@@ -65,12 +67,14 @@ def test_sasl_plain(topology_st): + # I can not solve. I think it's leaking state across connections in start_tls_s? 
+ + # Check that it works with TLS +- conn = standalone.openConnection(saslmethod='PLAIN', sasltoken=auth_tokens, starttls=True, connOnly=True, certdir=standalone.get_cert_dir(), reqcert=ldap.OPT_X_TLS_NEVER) ++ conn = standalone.openConnection(saslmethod='PLAIN', sasltoken=auth_tokens, starttls=True, connOnly=True, ++ certdir=standalone.get_cert_dir(), reqcert=ldap.OPT_X_TLS_NEVER) + conn.close() + + # Check that it correct fails our bind if we don't have the password. + auth_tokens = PlainSASL("dn:%s" % sa.dn, 'password-wrong') + with pytest.raises(ldap.INVALID_CREDENTIALS): +- standalone.openConnection(saslmethod='PLAIN', sasltoken=auth_tokens, starttls=False, connOnly=True, certdir=standalone.get_cert_dir(), reqcert=ldap.OPT_X_TLS_NEVER) ++ standalone.openConnection(saslmethod='PLAIN', sasltoken=auth_tokens, starttls=True, connOnly=True, ++ certdir=standalone.get_cert_dir(), reqcert=ldap.OPT_X_TLS_NEVER) + + # Done! +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index bb51827..2fb4bab 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -7137,22 +7137,25 @@ config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf, + + /* During a reset, the value is "", so we have to handle this case. */ + if (strcmp(value, "") != 0) { +- /* cyrus sasl doesn't like comma separated lists */ +- remove_commas(value); ++ char *nval = slapi_ch_strdup(value); + +- if(invalid_sasl_mech(value)){ +- slapi_log_err(SLAPI_LOG_ERR,"config_set_allowed_sasl_mechs", +- "Invalid value/character for sasl mechanism (%s). Use ASCII " +- "characters, upto 20 characters, that are upper-case letters, " +- "digits, hyphens, or underscores\n", value); ++ /* cyrus sasl doesn't like comma separated lists */ ++ remove_commas(nval); ++ ++ if (invalid_sasl_mech(nval)) { ++ slapi_log_err(SLAPI_LOG_ERR, "config_set_allowed_sasl_mechs", ++ "Invalid value/character for sasl mechanism (%s). Use ASCII " ++ "characters, upto 20 characters, that are upper-case letters, " ++ "digits, hyphens, or underscores\n", ++ nval); ++ slapi_ch_free_string(&nval); + return LDAP_UNWILLING_TO_PERFORM; + } +- + CFG_LOCK_WRITE(slapdFrontendConfig); + slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs); + slapi_ch_array_free(slapdFrontendConfig->allowed_sasl_mechs_array); +- slapdFrontendConfig->allowed_sasl_mechs = slapi_ch_strdup(value); +- slapdFrontendConfig->allowed_sasl_mechs_array = slapi_str2charray_ext(value, " ", 0); ++ slapdFrontendConfig->allowed_sasl_mechs = nval; ++ slapdFrontendConfig->allowed_sasl_mechs_array = slapi_str2charray_ext(nval, " ", 0); + CFG_UNLOCK_WRITE(slapdFrontendConfig); + } else { + /* If this value is "", we need to set the list to *all* possible mechs */ +diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c +index 134f5aa..03e2a97 100644 +--- a/ldap/servers/slapd/saslbind.c ++++ b/ldap/servers/slapd/saslbind.c +@@ -169,8 +169,6 @@ static int ids_sasl_getopt( + } + } else if (strcasecmp(option, "auxprop_plugin") == 0) { + *result = "iDS"; +- } else if (strcasecmp(option, "mech_list") == 0){ +- *result = config_get_allowed_sasl_mechs(); + } + + if (*result) *len = strlen(*result); +@@ -572,12 +570,8 @@ static int ids_sasl_userdb_checkpass(sasl_conn_t *conn, void *context, const cha + slapi_pblock_set(pb, SLAPI_BIND_METHOD, &method); + /* Feed it to pw_verify_be_dn */ + bind_result = pw_verify_be_dn(pb, &referral); +- /* Now check the result, and unlock be if needed. 
*/ +- if (bind_result == SLAPI_BIND_SUCCESS || bind_result == SLAPI_BIND_ANONYMOUS) { +- Slapi_Backend *be = NULL; +- slapi_pblock_get(pb, SLAPI_BACKEND, &be); +- slapi_be_Unlock(be); +- } else if (bind_result == SLAPI_BIND_REFERRAL) { ++ /* Now check the result. */ ++ if (bind_result == SLAPI_BIND_REFERRAL) { + /* If we have a referral do we ignore it for sasl? */ + slapi_entry_free(referral); + } +@@ -760,22 +754,25 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + sup_ret = slapi_get_supported_saslmechanisms_copy(); + + /* If we have a connection, get the provided list from SASL */ +- if (pb->pb_conn != NULL) { +- sasl_conn = (sasl_conn_t*)pb->pb_conn->c_sasl_conn; +- +- /* sasl library mechanisms are connection dependent */ +- PR_EnterMonitor(pb->pb_conn->c_mutex); +- if (sasl_listmech(sasl_conn, +- NULL, /* username */ +- "", ",", "", +- &str, NULL, NULL) == SASL_OK) { +- slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "sasl library mechs: %s\n", str); +- /* merge into result set */ +- dupstr = slapi_ch_strdup(str); +- others = slapi_str2charray_ext(dupstr, ",", 0 /* don't list duplicate mechanisms */); +- charray_merge(&sup_ret, others, 1); +- charray_free(others); +- slapi_ch_free((void**)&dupstr); ++ if (pb_conn != NULL) { ++ sasl_conn = (sasl_conn_t*)pb_conn->c_sasl_conn; ++ if (sasl_conn != NULL) { ++ /* sasl library mechanisms are connection dependent */ ++ PR_EnterMonitor(pb_conn->c_mutex); ++ if (sasl_listmech(sasl_conn, ++ NULL, /* username */ ++ "", ",", "", ++ &str, NULL, NULL) == SASL_OK) { ++ slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "sasl library mechs: %s\n", str); ++ /* merge into result set */ ++ dupstr = slapi_ch_strdup(str); ++ others = slapi_str2charray_ext(dupstr, ",", 0 /* don't list duplicate mechanisms */); ++ ++ charray_merge(&sup_ret, others, 1); ++ charray_free(others); ++ slapi_ch_free((void**)&dupstr); ++ } ++ PR_ExitMonitor(pb_conn->c_mutex); + } + PR_ExitMonitor(pb->pb_conn->c_mutex); + } +@@ -785,7 +782,7 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + + /* Remove any content that isn't in the allowed list */ + if (config_ret != NULL) { +- /* Get the set of supported mechs in the insection of the two */ ++ /* Get the set of supported mechs in the intersection of the two */ + ret = charray_intersection(sup_ret, config_ret); + charray_free(sup_ret); + charray_free(config_ret); +@@ -816,41 +813,52 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + static int + ids_sasl_mech_supported(Slapi_PBlock *pb, const char *mech) + { +- int i, ret = 0; +- char **mechs; +- char *dupstr; +- const char *str; +- int sasl_result = 0; +- sasl_conn_t *sasl_conn = (sasl_conn_t *)pb->pb_conn->c_sasl_conn; +- +- slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_mech_supported", "=>\n"); +- +- +- /* sasl_listmech is not thread-safe - caller must lock pb_conn */ +- sasl_result = sasl_listmech(sasl_conn, +- NULL, /* username */ +- "", ",", "", +- &str, NULL, NULL); +- if (sasl_result != SASL_OK) { +- return 0; +- } ++ int i, ret = 0; ++ char **mechs; ++ char **allowed_mechs = NULL; ++ char *dupstr; ++ const char *str; ++ int sasl_result = 0; ++ Connection *pb_conn = NULL; ++ ++ slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); ++ sasl_conn_t *sasl_conn = (sasl_conn_t *)pb_conn->c_sasl_conn; ++ slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_mech_supported", "=>\n"); ++ ++ /* sasl_listmech is not thread-safe - caller must lock pb_conn */ ++ sasl_result = sasl_listmech(sasl_conn, ++ NULL, /* username */ ++ "", ",", "", ++ &str, NULL, NULL); ++ if (sasl_result != SASL_OK) { ++ return 0; ++ 
} + +- dupstr = slapi_ch_strdup(str); +- mechs = slapi_str2charray(dupstr, ","); ++ dupstr = slapi_ch_strdup(str); ++ mechs = slapi_str2charray(dupstr, ","); ++ allowed_mechs = config_get_allowed_sasl_mechs_array(); + +- for (i = 0; mechs[i] != NULL; i++) { +- if (strcasecmp(mech, mechs[i]) == 0) { +- ret = 1; +- break; ++ for (i = 0; mechs[i] != NULL; i++) { ++ if (strcasecmp(mech, mechs[i]) == 0) { ++ if (allowed_mechs) { ++ if (charray_inlist(allowed_mechs, (char *)mech) == 0) { ++ ret = 1; ++ } ++ break; ++ } else { ++ ret = 1; ++ break; ++ } ++ } + } +- } + +- charray_free(mechs); +- slapi_ch_free((void**)&dupstr); ++ charray_free(allowed_mechs); ++ charray_free(mechs); ++ slapi_ch_free((void **)&dupstr); + +- slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_mech_supported", "<=\n"); ++ slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_mech_supported", "<=\n"); + +- return ret; ++ return ret; + } + + /* +-- +2.9.5 + diff --git a/SOURCES/0071-Fix-cherry-pick-error-from-sasl-mech-commit.patch b/SOURCES/0071-Fix-cherry-pick-error-from-sasl-mech-commit.patch new file mode 100644 index 0000000..b32ef38 --- /dev/null +++ b/SOURCES/0071-Fix-cherry-pick-error-from-sasl-mech-commit.patch @@ -0,0 +1,31 @@ +From 4a51a17762fb4e7ce1beb0600916fed8b45a5483 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 18 Sep 2017 15:06:06 -0400 +Subject: [PATCH] Fix cherry-pick error from sasl mech commit + +--- + ldap/servers/slapd/saslbind.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c +index 03e2a97..8e94ee6 100644 +--- a/ldap/servers/slapd/saslbind.c ++++ b/ldap/servers/slapd/saslbind.c +@@ -745,11 +745,14 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) + const char *str; + char *dupstr; + sasl_conn_t *sasl_conn; ++ Connection *pb_conn = NULL; + + slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "=>\n"); + + PR_ASSERT(pb); + ++ slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); ++ + /* hard-wired mechanisms and slapi plugin registered mechanisms */ + sup_ret = slapi_get_supported_saslmechanisms_copy(); + +-- +2.9.5 + diff --git a/SOURCES/0072-Ticket-49389-unable-to-retrieve-specific-cosAttribut.patch b/SOURCES/0072-Ticket-49389-unable-to-retrieve-specific-cosAttribut.patch new file mode 100644 index 0000000..d97bb51 --- /dev/null +++ b/SOURCES/0072-Ticket-49389-unable-to-retrieve-specific-cosAttribut.patch @@ -0,0 +1,322 @@ +From 2741a6db134ad40662cfa0233c4542d2d4148997 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Tue, 3 Oct 2017 17:22:37 -0400 +Subject: [PATCH] Ticket 49389 - unable to retrieve specific cosAttribute when + subtree password policy is configured + +Bug Description: If indirect cos is being used and a subtree password + policy is added, th orignal COS attributes aren't always + returned. The issue is that when the subtree password + policy attribute was encountered during the virtual + attribute processing it set a flag that said the attribute + was operational (which is correct for the password policy + attr: pwdpolicysubentry). + + However, this flag was accidentally carried over to the + following virtual attributes that were being processed. + Which caused those attributes to be seen as operational + which is why it was no longer being returned to the client. + +Fix Description: Reset the prop flags before processing the next COS attribute + +https://pagure.io/389-ds-base/issue/49389 + +Reviewed by: firstyear(Thanks!) 
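As an aside, the flag-carryover pattern described in this fix is easy to demonstrate in isolation. The sketch below is illustrative only: PROP_OPERATIONAL and the small attribute table are hypothetical stand-ins for the per-attribute property word computed in cos_cache_vattr_types, not the plugin's real structures. Hoisting the flag outside the loop lets the operational bit set for pwdpolicysubentry leak onto the COS attributes processed after it; re-initialising it on every iteration, as the patch does below by declaring props inside the loop, keeps each attribute's properties independent.

    #include <stdio.h>

    /* Hypothetical property bit; stands in for the "operational" flag
     * returned while walking a list of virtual attributes. */
    #define PROP_OPERATIONAL 0x1

    int main(void)
    {
        const char *attrs[] = { "pwdpolicysubentry", "ou", "x-department" };
        int operational[]   = { 1, 0, 0 };  /* only the pwpolicy attr is operational */

        /* Buggy placement: the flag survives across iterations. */
        int props = 0;
        for (int i = 0; i < 3; i++) {
            if (operational[i]) {
                props |= PROP_OPERATIONAL;
            }
            /* once set, the bit is never cleared, so "ou" and "x-department"
             * are also reported as operational */
            printf("hoisted flag:       %-18s operational=%d\n",
                   attrs[i], (props & PROP_OPERATIONAL) != 0);
        }

        /* Fixed placement: a fresh flag for every attribute. */
        for (int i = 0; i < 3; i++) {
            int fresh = 0;                  /* reset per attribute, as in the fix */
            if (operational[i]) {
                fresh |= PROP_OPERATIONAL;
            }
            printf("per-iteration flag: %-18s operational=%d\n",
                   attrs[i], (fresh & PROP_OPERATIONAL) != 0);
        }
        return 0;
    }

Running the hoisted variant reports every attribute as operational once the first one sets the bit, which is the symptom described above: the non-operational COS attributes stop being returned to the client.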
+ +(cherry picked from commit 0953e6011368bc29300990e9493ac13e5aba9586) +--- + dirsrvtests/tests/suites/cos/__init__.py | 0 + dirsrvtests/tests/suites/cos/indirect_cos_test.py | 191 ++++++++++++++++++++++ + ldap/servers/plugins/cos/cos_cache.c | 68 ++++---- + 3 files changed, 223 insertions(+), 36 deletions(-) + create mode 100644 dirsrvtests/tests/suites/cos/__init__.py + create mode 100644 dirsrvtests/tests/suites/cos/indirect_cos_test.py + +diff --git a/dirsrvtests/tests/suites/cos/__init__.py b/dirsrvtests/tests/suites/cos/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/dirsrvtests/tests/suites/cos/indirect_cos_test.py b/dirsrvtests/tests/suites/cos/indirect_cos_test.py +new file mode 100644 +index 0000000..1aac6b8 +--- /dev/null ++++ b/dirsrvtests/tests/suites/cos/indirect_cos_test.py +@@ -0,0 +1,191 @@ ++import logging ++import pytest ++import os ++import ldap ++import time ++import subprocess ++ ++from lib389 import Entry ++from lib389.idm.user import UserAccounts ++from lib389.topologies import topology_st as topo ++from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD, HOST_STANDALONE, ++ SERVERID_STANDALONE, PORT_STANDALONE) ++ ++ ++DEBUGGING = os.getenv("DEBUGGING", default=False) ++if DEBUGGING: ++ logging.getLogger(__name__).setLevel(logging.DEBUG) ++else: ++ logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ ++TEST_USER_DN = "uid=test_user,ou=people,dc=example,dc=com" ++OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) ++ ++PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ ++ 'ou=people,dc=example,dc=com",' \ ++ 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' ++ ++PW_POLICY_CONT_PEOPLE2 = 'cn="cn=nsPwPolicyEntry,' \ ++ 'dc=example,dc=com",' \ ++ 'cn=nsPwPolicyContainerdc=example,dc=com' ++ ++ ++def check_user(inst): ++ """Search the test user and make sure it has the execpted attrs ++ """ ++ try: ++ entries = inst.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, "uid=test_user") ++ log.debug('user: \n' + str(entries[0])) ++ assert entries[0].hasAttr('ou'), "Entry is missing ou cos attribute" ++ assert entries[0].hasAttr('x-department'), "Entry is missing description cos attribute" ++ assert entries[0].hasAttr('x-en-ou'), "Entry is missing givenname cos attribute" ++ except ldap.LDAPError as e: ++ log.fatal('Failed to search for user: ' + str(e)) ++ raise e ++ ++ ++def setup_subtree_policy(topo): ++ """Set up subtree password policy ++ """ ++ try: ++ topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, ++ 'nsslapd-pwpolicy-local', ++ 'on')]) ++ except ldap.LDAPError as e: ++ log.error('Failed to set fine-grained policy: error {}'.format( ++ e.message['desc'])) ++ raise e ++ ++ log.info('Create password policy for subtree {}'.format(OU_PEOPLE)) ++ try: ++ subprocess.call(['%s/ns-newpwpolicy.pl' % topo.standalone.get_sbin_dir(), ++ '-D', DN_DM, '-w', PASSWORD, ++ '-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE, ++ '-S', DEFAULT_SUFFIX, '-Z', SERVERID_STANDALONE]) ++ except subprocess.CalledProcessError as e: ++ log.error('Failed to create pw policy policy for {}: error {}'.format( ++ OU_PEOPLE, e.message['desc'])) ++ raise e ++ ++ log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE)) ++ try: ++ topo.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, ++ 'pwdpolicysubentry', ++ PW_POLICY_CONT_PEOPLE2)]) ++ except ldap.LDAPError as e: ++ log.error('Failed to pwdpolicysubentry pw policy ' ++ 'policy for {}: error {}'.format(OU_PEOPLE, e.message['desc'])) ++ raise e ++ 
time.sleep(1) ++ ++ ++def setup_indirect_cos(topo): ++ """Setup indirect COS definition and template ++ """ ++ cosDef = Entry(('cn=cosDefinition,dc=example,dc=com', ++ {'objectclass': ['top', 'ldapsubentry', ++ 'cossuperdefinition', ++ 'cosIndirectDefinition'], ++ 'cosAttribute': ['ou merge-schemes', ++ 'x-department merge-schemes', ++ 'x-en-ou merge-schemes'], ++ 'cosIndirectSpecifier': 'seeAlso', ++ 'cn': 'cosDefinition'})) ++ ++ cosTemplate = Entry(('cn=cosTemplate,dc=example,dc=com', ++ {'objectclass': ['top', ++ 'extensibleObject', ++ 'cosTemplate'], ++ 'ou': 'My COS Org', ++ 'x-department': 'My COS x-department', ++ 'x-en-ou': 'my COS x-en-ou', ++ 'cn': 'cosTemplate'})) ++ try: ++ topo.standalone.add_s(cosDef) ++ topo.standalone.add_s(cosTemplate) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add cos: error ' + str(e)) ++ raise e ++ time.sleep(1) ++ ++ ++@pytest.fixture(scope="module") ++def setup(topo, request): ++ """Add schema, and test user ++ """ ++ log.info('Add custom schema...') ++ try: ++ ATTR_1 = ("( 1.3.6.1.4.1.409.389.2.189 NAME 'x-department' " + ++ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") ++ ATTR_2 = ("( 1.3.6.1.4.1.409.389.2.187 NAME 'x-en-ou' " + ++ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") ++ OC = ("( xPerson-oid NAME 'xPerson' DESC '' SUP person STRUCTURAL MAY " + ++ "( x-department $ x-en-ou ) X-ORIGIN 'user defined' )") ++ topo.standalone.modify_s("cn=schema", [(ldap.MOD_ADD, 'attributeTypes', ATTR_1), ++ (ldap.MOD_ADD, 'attributeTypes', ATTR_2), ++ (ldap.MOD_ADD, 'objectClasses', OC)]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add custom schema') ++ raise e ++ time.sleep(1) ++ ++ log.info('Add test user...') ++ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) ++ ++ user_properties = { ++ 'uid': 'test_user', ++ 'cn': 'test user', ++ 'sn': 'user', ++ 'uidNumber': '1000', ++ 'gidNumber': '2000', ++ 'homeDirectory': '/home/test_user', ++ 'seeAlso': 'cn=cosTemplate,dc=example,dc=com' ++ } ++ users.create(properties=user_properties) ++ try: ++ topo.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_ADD, ++ 'objectclass', ++ 'xPerson')]) ++ except ldap.LDAPError as e: ++ log.fatal('Failed to add objectclass to user') ++ raise e ++ ++ # Setup COS ++ log.info("Setup indirect COS...") ++ setup_indirect_cos(topo) ++ ++ ++def test_indirect_cos(topo, setup): ++ """Test indirect cos ++ ++ :id: 890d5929-7d52-4a56-956e-129611b4649a ++ :setup: standalone ++ :steps: ++ 1. Test cos is working for test user ++ 2. Add subtree password policy ++ 3. Test cos is working for test user ++ :expectedresults: ++ 1. User has expected cos attrs ++ 2. 
Substree password policy setup is successful ++ 3 User still has expected cos attrs ++ """ ++ ++ # Step 1 - Search user and see if the COS attrs are included ++ log.info('Checking user...') ++ check_user(topo.standalone) ++ ++ # Step 2 - Add subtree password policy (Second COS - operational attribute) ++ setup_subtree_policy(topo) ++ ++ # Step 3 - Check user again now hat we have a mix of vattrs ++ log.info('Checking user...') ++ check_user(topo.standalone) ++ ++ ++if __name__ == '__main__': ++ # Run isolated ++ # -s for DEBUG mode ++ CURRENT_FILE = os.path.realpath(__file__) ++ pytest.main("-s %s" % CURRENT_FILE) ++ +diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c +index 66c6c7f..87d4890 100644 +--- a/ldap/servers/plugins/cos/cos_cache.c ++++ b/ldap/servers/plugins/cos/cos_cache.c +@@ -2190,48 +2190,44 @@ bail: + static int cos_cache_vattr_types(vattr_sp_handle *handle,Slapi_Entry *e, + vattr_type_list_context *type_context,int flags) + { +- int ret = 0; +- int index = 0; +- cosCache *pCache; +- char *lastattr = "thisisfakeforcos"; +- int props = 0; +- +- slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_vattr_types\n"); +- +- if(cos_cache_getref((cos_cache **)&pCache) < 1) +- { +- /* problems we are hosed */ +- slapi_log_err(SLAPI_LOG_PLUGIN, COS_PLUGIN_SUBSYSTEM, "cos_cache_vattr_types - Failed to get class of service reference\n"); +- goto bail; +- } +- +- while(index < pCache->attrCount ) +- { +- if(slapi_utf8casecmp( +- (unsigned char *)pCache->ppAttrIndex[index]->pAttrName, +- (unsigned char *)lastattr)) +- { +- lastattr = pCache->ppAttrIndex[index]->pAttrName; ++ int ret = 0; ++ int index = 0; ++ cosCache *pCache; ++ char *lastattr = "thisisfakeforcos"; + +- if(1 == cos_cache_query_attr(pCache, NULL, e, lastattr, NULL, NULL, +- NULL, &props, NULL)) +- { +- /* entry contains this attr */ +- vattr_type_thang thang = {0}; ++ slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_vattr_types\n"); + +- thang.type_name = lastattr; +- thang.type_flags = props; ++ if (cos_cache_getref((cos_cache **)&pCache) < 1) { ++ /* problems we are hosed */ ++ slapi_log_err(SLAPI_LOG_PLUGIN, COS_PLUGIN_SUBSYSTEM, "cos_cache_vattr_types - Failed to get class of service reference\n"); ++ goto bail; ++ } + +- slapi_vattrspi_add_type(type_context,&thang,0); +- } +- } +- index++; +- } +- cos_cache_release(pCache); ++ while (index < pCache->attrCount) { ++ int props = 0; ++ if (slapi_utf8casecmp( ++ (unsigned char *)pCache->ppAttrIndex[index]->pAttrName, ++ (unsigned char *)lastattr)) { ++ lastattr = pCache->ppAttrIndex[index]->pAttrName; ++ ++ if (1 == cos_cache_query_attr(pCache, NULL, e, lastattr, NULL, NULL, ++ NULL, &props, NULL)) { ++ /* entry contains this attr */ ++ vattr_type_thang thang = {0}; ++ ++ thang.type_name = lastattr; ++ thang.type_flags = props; ++ ++ slapi_vattrspi_add_type(type_context, &thang, 0); ++ } ++ } ++ index++; ++ } ++ cos_cache_release(pCache); + + bail: + +-slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "<-- cos_cache_vattr_types\n"); ++ slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "<-- cos_cache_vattr_types\n"); + return ret; + } + +-- +2.9.5 + diff --git a/SOURCES/0073-Ticket-49180-backport-1.3.6-errors-log-filled-with-a.patch b/SOURCES/0073-Ticket-49180-backport-1.3.6-errors-log-filled-with-a.patch new file mode 100644 index 0000000..ae197fb --- /dev/null +++ b/SOURCES/0073-Ticket-49180-backport-1.3.6-errors-log-filled-with-a.patch @@ -0,0 +1,50 @@ +From 
1787e9ffda09f9ec8518ceaede5cf1ef014c5d17 Mon Sep 17 00:00:00 2001 +From: Ludwig Krispenz +Date: Wed, 27 Sep 2017 10:58:36 +0200 +Subject: [PATCH] Ticket: 49180 - backport 1.3.6 errors log filled with + attrlist_replace - attr_replace + + Bug: If a RUV contains the same URL with different replica IDs the created referrals contain duplicates + + Fix: check duplicate referrals + + Reviewed by: Mark, thanks +--- + ldap/servers/plugins/replication/repl5_ruv.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/replication/repl5_ruv.c b/ldap/servers/plugins/replication/repl5_ruv.c +index 39449b6..7f34059 100644 +--- a/ldap/servers/plugins/replication/repl5_ruv.c ++++ b/ldap/servers/plugins/replication/repl5_ruv.c +@@ -1502,7 +1502,17 @@ ruv_replica_count (const RUV *ruv) + * Extract all the referral URL's from the RUV (but self URL), + * returning them in an array of strings, that + * the caller must free. ++ * We also check and remove duplicates (caused by unclean RUVs) + */ ++static int ++ruv_referral_exists(unsigned char *purl, char **refs, int count) ++{ ++ for (size_t j=0; jreplica_purl!=NULL) && + (slapi_utf8casecmp((unsigned char *)replica->replica_purl, +- (unsigned char *)mypurl) != 0)) ++ (unsigned char *)mypurl) != 0) && ++ !ruv_referral_exists((unsigned char *)replica->replica_purl, r, i)) + { + r[i]= slapi_ch_strdup(replica->replica_purl); + i++; +-- +2.9.5 + diff --git a/SOURCES/0074-Ticket-48894-harden-valueset_array_to_sorted_quick-v.patch b/SOURCES/0074-Ticket-48894-harden-valueset_array_to_sorted_quick-v.patch new file mode 100644 index 0000000..36fab2f --- /dev/null +++ b/SOURCES/0074-Ticket-48894-harden-valueset_array_to_sorted_quick-v.patch @@ -0,0 +1,39 @@ +From 91c80c06affa3f4bfe106d2291efc360ab2b421d Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 26 Oct 2017 10:03:39 -0400 +Subject: [PATCH] Ticket 48894 - harden valueset_array_to_sorted_quick valueset + access + +Description: It's possible during the sorting of a valueset to access an + array element past the allocated size, and also go below the index 0. + +https://pagure.io/389-ds-base/issue/48894 + +Reviewed by: nweiderm (Thanks!) 
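For context, the out-of-bounds scans happen in the Hoare-style partition loop that keeps vs->sorted ordered. The sketch below is only an illustration of the guarded scan pattern under simplified assumptions: it sorts plain ints rather than Slapi_Value pointers and omits the vs->sorted indirection. The extra conditions on the two inner do/while loops correspond to the i < vs->max and j > 0 checks added in the hunk below; they stop the scans at the array bounds even if the comparison never changes sign.

    #include <stdio.h>

    /* Hoare-style partition with explicit bounds guards on the inner scans.
     * n is the allocated length of the array, mirroring vs->max. */
    static void quick_sort(int *a, int lo, int hi, int n)
    {
        if (lo >= hi)
            return;

        int pivot = a[(lo + hi) / 2];
        int i = lo - 1;
        int j = hi + 1;

        while (1) {
            do {
                i++;
            } while (i < n && a[i] < pivot);    /* guard: never read past the end */

            do {
                j--;
            } while (a[j] > pivot && j > 0);    /* guard: never step below index 0 */

            if (i >= j)
                break;

            int tmp = a[i];
            a[i] = a[j];
            a[j] = tmp;
        }

        quick_sort(a, lo, j, n);
        quick_sort(a, j + 1, hi, n);
    }

    int main(void)
    {
        int v[] = { 7, 2, 9, 4, 4, 1, 8 };
        int n = (int)(sizeof(v) / sizeof(v[0]));

        quick_sort(v, 0, n - 1, n);
        for (int k = 0; k < n; k++)
            printf("%d ", v[k]);
        printf("\n");
        return 0;
    }

With a well-behaved comparator the guards never fire, so sorting behaviour is unchanged; they only matter when the comparison fails to turn the scan around, which is exactly the access this patch hardens against.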
+ +(cherry picked from commit 2086d052e338ddcbcf6bd3222617991641573a12) +--- + ldap/servers/slapd/valueset.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c +index 8a824ac4a..e22bc9c39 100644 +--- a/ldap/servers/slapd/valueset.c ++++ b/ldap/servers/slapd/valueset.c +@@ -1054,11 +1054,11 @@ valueset_array_to_sorted_quick (const Slapi_Attr *a, Slapi_ValueSet *vs, size_t + while (1) { + do { + i++; +- } while ( valueset_value_cmp(a, vs->va[vs->sorted[i]], vs->va[pivot]) < 0); ++ } while (i < vs->max && valueset_value_cmp(a, vs->va[vs->sorted[i]], vs->va[pivot]) < 0); + + do { + j--; +- } while ( valueset_value_cmp(a, vs->va[vs->sorted[j]], vs->va[pivot]) > 0); ++ } while (valueset_value_cmp(a, vs->va[vs->sorted[j]], vs->va[pivot]) > 0 && j > 0); + + if (i >= j) { + break; +-- +2.13.6 + diff --git a/SOURCES/0075-Ticket-49401-improve-valueset-sorted-performance-on-.patch b/SOURCES/0075-Ticket-49401-improve-valueset-sorted-performance-on-.patch new file mode 100644 index 0000000..f385154 --- /dev/null +++ b/SOURCES/0075-Ticket-49401-improve-valueset-sorted-performance-on-.patch @@ -0,0 +1,248 @@ +From 64b9d015523b4ae379ff2d72fc73da173be8a712 Mon Sep 17 00:00:00 2001 +From: Mohammad Nweider +Date: Wed, 18 Oct 2017 13:02:15 +0000 +Subject: [PATCH] Ticket 49401 - improve valueset sorted performance on delete + +Bug Description: valueset sorted maintains a list of syntax sorted +references to the attributes of the entry. During addition these are +modified and added properly, so they stay sorted. + +However, in the past to maintain the sorted property, during a delete +we would need to remove the vs->sorted array, and recreate it via qsort, + +While this was an improvement from past (where we would removed +vs->sorted during an attr delete), it still has performance implications +on very very large datasets, IE 50,000 member groups with +addition/deletion, large entry caches and replication. + +Fix Description: Implement a new algorithm that is able to maintain +existing sort data in a near linear time. + +https://pagure.io/389-ds-base/issue/49401 + +Author: nweiderm, wibrown + +Review by: wibrown, lkrispen, tbordaz (Thanks nweiderm!) + +(cherry picked from commit a43a8efc7907116146b505ac40f18fac71f474b0) +--- + ldap/servers/slapd/valueset.c | 171 +++++++++++++++++++++++++----------------- + 1 file changed, 102 insertions(+), 69 deletions(-) + +diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c +index e22bc9c39..ae0a13fdc 100644 +--- a/ldap/servers/slapd/valueset.c ++++ b/ldap/servers/slapd/valueset.c +@@ -741,7 +741,10 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn) + size_t i = 0; + size_t j = 0; + int nextValue = 0; ++ int nv = 0; + int numValues = 0; ++ Slapi_Value **va2 = NULL; ++ int *sorted2 = NULL; + + /* Loop over all the values freeing the old ones. */ + for(i = 0; i < vs->num; i++) +@@ -752,91 +755,122 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn) + } else { + j = i; + } +- csnset_purge(&(vs->va[j]->v_csnset),csn); +- if (vs->va[j]->v_csnset == NULL) { +- slapi_value_free(&vs->va[j]); +- vs->va[j] = NULL; +- } else if (vs->va[j] != NULL) { +- /* This value survived, we should count it. 
*/ +- numValues++; ++ if (vs->va[j]) { ++ csnset_purge(&(vs->va[j]->v_csnset),csn); ++ if (vs->va[j]->v_csnset == NULL) { ++ slapi_value_free(&vs->va[j]); ++ /* Set the removed value to NULL so we know later to skip it */ ++ vs->va[j] = NULL; ++ if (vs->sorted) { ++ /* Mark the value in sorted for removal */ ++ vs->sorted[i] = -1; ++ } ++ } else { ++ /* This value survived, we should count it. */ ++ numValues++; ++ } + } + } + +- /* Now compact the value/sorted list. */ ++ /* Compact vs->va and vs->sorted only when there're ++ * remaining values ie: numValues is greater than 0 */ + /* +- * Because we want to preserve the sorted array, this is complicated. ++ * Algorithm explination: We start with a pair of arrays - the attrs, and the sorted array that provides ++ * a lookup into it: ++ * ++ * va: [d e a c b] sorted: [2 4 3 0 1] ++ * ++ * When we remove the element b, we NULL it, and we have to mark the place where it "was" as a -1 to ++ * flag it's removal. ++ * ++ * va: [d e a c NULL] sorted: [2 -1 3 0 1] ++ * ++ * Now a second va is created with the reduced allocation, ++ * ++ * va2: [ X X X X ] .... + * +- * We have an array of values: +- * [ b, a, c, NULL, e, NULL, NULL, d] +- * And an array of indicies that are sorted. +- * [ 1, 0, 2, 7, 4, 3, 5, 6 ] +- * Were we to iterate over the sorted array, we get refs to the values in +- * some order. +- * The issue is now we must *remove* from both the values *and* the sorted. ++ * Now we loop over sorted, skipping -1 that we find. In a new counter we create new sorted ++ * references, and move the values compacting them in the process. ++ * va: [d e a c NULL] ++ * va2: [a x x x] ++ * sorted: [_0 -1 3 0 1] + * +- * Previously, we just discarded this, because too hard. Now we try to keep +- * it. The issue is that this is surprisingly hard to actually keep in +- * sync. ++ * Looping a few more times would yield: + * +- * We can't just blindly move the values down: That breaks the sorted array +- * and we would need to iterate over the sorted array multiple times to +- * achieve this. ++ * va2: [a c x x] ++ * sorted: [_0 _1 3 0 1] ++ * ++ * va2: [a c d x] ++ * sorted: [_0 _1 _2 0 1] ++ * ++ * va2: [a c d e] ++ * sorted: [_0 _1 _2 _3 1] ++ * ++ * Not only does this sort va, but with sorted, we have a faster lookup, and it will benefit cache ++ * lookup. + * +- * It's actually going to be easier to just ditch the sorted, compact vs +- * and then qsort the array. + */ ++ if (numValues > 0) { ++ if(vs->sorted) { ++ /* Let's allocate va2 and sorted2 */ ++ va2 = (Slapi_Value **) slapi_ch_malloc( (numValues + 1) * sizeof(Slapi_Value *)); ++ sorted2 = (int *) slapi_ch_malloc( (numValues + 1)* sizeof(int)); ++ } + +- j = 0; +- while (nextValue < numValues && j < vs->num) +- { +- /* nextValue is what we are looking at now +- * j tracks along the array getting next elements. 
+- * +- * [ b, a, c, NULL, e, NULL, NULL, d] +- * ^nv ^j +- * [ b, a, c, e, NULL, NULL, NULL, d] +- * ^nv ^j +- * [ b, a, c, e, NULL, NULL, NULL, d] +- * ^nv ^j +- * [ b, a, c, e, NULL, NULL, NULL, d] +- * ^nv ^j +- * [ b, a, c, e, NULL, NULL, NULL, d] +- * ^nv ^j +- * [ b, a, c, e, d, NULL, NULL, NULL] +- * ^nv ^j +- */ +- if (vs->va[nextValue] == NULL) { +- /* Advance j till we find something */ +- while (vs->va[j] == NULL) { +- j++; ++ /* I is the index for the *new* va2 array */ ++ for(i=0; inum; i++) { ++ if (vs->sorted) { ++ /* Skip any removed values from the index */ ++ while((nv < vs->num) && (-1 == vs->sorted[nv])) { ++ nv++; ++ } ++ /* We have a remaining value, add it to the va */ ++ if(nv < vs->num) { ++ va2[i] = vs->va[vs->sorted[nv]]; ++ sorted2[i] = i; ++ nv++; ++ } ++ } else { ++ while((nextValue < vs->num) && (NULL == vs->va[nextValue])) { ++ nextValue++; ++ } ++ ++ if(nextValue < vs->num) { ++ vs->va[i] = vs->va[nextValue]; ++ nextValue++; ++ } else { ++ break; ++ } + } +- /* We have something! */ +- vs->va[nextValue] = vs->va[j]; ++ } ++ ++ if (vs->sorted) { ++ /* Finally replace the valuearray and adjust num, max */ ++ slapi_ch_free((void **)&vs->va); ++ slapi_ch_free((void **)&vs->sorted); ++ vs->va = va2; ++ vs->sorted = sorted2; ++ vs->num = numValues; ++ vs->max = vs->num + 1; ++ } else { ++ vs->num = numValues; ++ } ++ ++ for (j = vs->num; j < vs->max; j++) { + vs->va[j] = NULL; ++ if (vs->sorted) { ++ vs->sorted[j] = -1; ++ } + } +- nextValue++; +- } +- /* Fix up the number of values */ +- vs->num = numValues; +- /* Should we re-alloc values to be smaller? */ +- /* Other parts of DS are lazy. Lets clean our list */ +- for (j = vs->num; j < vs->max; j++) { +- vs->va[j] = NULL; ++ } else { ++ slapi_valueset_done(vs); + } + +- /* All the values were deleted, we can discard the whole array. */ +- if(vs->num == 0) { +- if(vs->sorted) { +- slapi_ch_free ((void **)&vs->sorted); +- } +- slapi_ch_free ((void **)&vs->va); +- vs->va = NULL; +- vs->max = 0; +- } else if (vs->sorted != NULL) { +- /* We still have values! rebuild the sorted array */ ++ /* We still have values but not sorted array! 
rebuild it */ ++ if(vs->num > VALUESET_ARRAY_SORT_THRESHOLD && vs->sorted == NULL) { ++ vs->sorted = (int *) slapi_ch_malloc( vs->max* sizeof(int)); + valueset_array_to_sorted(a, vs); + } +- + #ifdef DEBUG + PR_ASSERT(vs->num == 0 || (vs->num > 0 && vs->va[0] != NULL)); + size_t index = 0; +@@ -847,7 +881,6 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn) + PR_ASSERT(vs->va[index] == NULL); + } + #endif +- + /* return the number of remaining values */ + return numValues; + } +-- +2.13.6 + diff --git a/SOURCES/0076-Ticket-49401-Fix-compiler-incompatible-pointer-types.patch b/SOURCES/0076-Ticket-49401-Fix-compiler-incompatible-pointer-types.patch new file mode 100644 index 0000000..08fdf39 --- /dev/null +++ b/SOURCES/0076-Ticket-49401-Fix-compiler-incompatible-pointer-types.patch @@ -0,0 +1,54 @@ +From 43c73ca572af6a4bdc9b5994a9640f4d4e713cc2 Mon Sep 17 00:00:00 2001 +From: Mohammad Nweider +Date: Wed, 25 Oct 2017 16:26:54 +0000 +Subject: [PATCH] Ticket 49401 - Fix compiler incompatible-pointer-types + warnings + +Bug Description: vs->sorted was integer pointer in older versions, + but now it's size_t pointer, this is causing compiler warnings + during the build + +Fix Description: use size_t pointers instead of integer pointers for vs->sorted and sorted2 + +Review By: mreynolds + +Signed-off-by: Mark Reynolds +(cherry picked from commit 52ba2aba49935989152010aee0d40b01d7a78432) +--- + ldap/servers/slapd/valueset.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c +index ae0a13fdc..8730d9f56 100644 +--- a/ldap/servers/slapd/valueset.c ++++ b/ldap/servers/slapd/valueset.c +@@ -744,7 +744,7 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn) + int nv = 0; + int numValues = 0; + Slapi_Value **va2 = NULL; +- int *sorted2 = NULL; ++ size_t *sorted2 = NULL; + + /* Loop over all the values freeing the old ones. */ + for(i = 0; i < vs->num; i++) +@@ -814,7 +814,7 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn) + if(vs->sorted) { + /* Let's allocate va2 and sorted2 */ + va2 = (Slapi_Value **) slapi_ch_malloc( (numValues + 1) * sizeof(Slapi_Value *)); +- sorted2 = (int *) slapi_ch_malloc( (numValues + 1)* sizeof(int)); ++ sorted2 = (size_t *) slapi_ch_malloc( (numValues + 1)* sizeof(size_t)); + } + + /* I is the index for the *new* va2 array */ +@@ -868,7 +868,7 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn) + + /* We still have values but not sorted array! rebuild it */ + if(vs->num > VALUESET_ARRAY_SORT_THRESHOLD && vs->sorted == NULL) { +- vs->sorted = (int *) slapi_ch_malloc( vs->max* sizeof(int)); ++ vs->sorted = (size_t *) slapi_ch_malloc( vs->max* sizeof(size_t)); + valueset_array_to_sorted(a, vs); + } + #ifdef DEBUG +-- +2.13.6 + diff --git a/SOURCES/0077-Ticket-48235-Remove-memberOf-global-lock.patch b/SOURCES/0077-Ticket-48235-Remove-memberOf-global-lock.patch new file mode 100644 index 0000000..2dccc5c --- /dev/null +++ b/SOURCES/0077-Ticket-48235-Remove-memberOf-global-lock.patch @@ -0,0 +1,874 @@ +From 229f61f5f54aeb9e1a1756f731dfe7bcedbf148c Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 13 Oct 2017 07:09:08 -0400 +Subject: [PATCH 06/10] Ticket 48235 - Remove memberOf global lock + +Bug Description: The memberOf global lock no longer servers a purpose since + the plugin is BETXN. This was causing potential deadlocks + when multiple backends are used. 
+ +Fix Description: Remove the lock, and rework the fixup/ancestors caches/hashtables. + Instead of reusing a single cache, we create a fresh cache + when we copy the plugin config (which only happens at the start + of an operation). Then we destroy the caches when we free + the config. + +https://pagure.io/389-ds-base/issue/48235 + +Reviewed by: tbordaz & firstyear(Thanks!!) +--- + ldap/servers/plugins/memberof/memberof.c | 312 +++--------------------- + ldap/servers/plugins/memberof/memberof.h | 17 ++ + ldap/servers/plugins/memberof/memberof_config.c | 152 +++++++++++- + 3 files changed, 200 insertions(+), 281 deletions(-) + +diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c +index 9bbe13c9c..bbf47dd49 100644 +--- a/ldap/servers/plugins/memberof/memberof.c ++++ b/ldap/servers/plugins/memberof/memberof.c +@@ -49,13 +49,10 @@ static void* _PluginID = NULL; + static Slapi_DN* _ConfigAreaDN = NULL; + static Slapi_RWLock *config_rwlock = NULL; + static Slapi_DN* _pluginDN = NULL; +-static PRMonitor *memberof_operation_lock = 0; + MemberOfConfig *qsortConfig = 0; + static int usetxn = 0; + static int premodfn = 0; +-#define MEMBEROF_HASHTABLE_SIZE 1000 +-static PLHashTable *fixup_entry_hashtable = NULL; /* global hash table protected by memberof_lock (memberof_operation_lock) */ +-static PLHashTable *group_ancestors_hashtable = NULL; /* global hash table protected by memberof_lock (memberof_operation_lock) */ ++ + + typedef struct _memberofstringll + { +@@ -73,18 +70,7 @@ typedef struct _memberof_get_groups_data + PRBool use_cache; + } memberof_get_groups_data; + +-/* The key to access the hash table is the normalized DN +- * The normalized DN is stored in the value because: +- * - It is used in slapi_valueset_find +- * - It is used to fill the memberof_get_groups_data.group_norm_vals +- */ +-typedef struct _memberof_cached_value +-{ +- char *key; +- char *group_dn_val; +- char *group_ndn_val; +- int valid; +-} memberof_cached_value; ++ + struct cache_stat + { + int total_lookup; +@@ -189,14 +175,9 @@ static int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data); + static int memberof_entry_in_scope(MemberOfConfig *config, Slapi_DN *sdn); + static int memberof_add_objectclass(char *auto_add_oc, const char *dn); + static int memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc); +-static PLHashTable *hashtable_new(); +-static void fixup_hashtable_empty(char *msg); +-static PLHashTable *hashtable_new(); +-static void ancestor_hashtable_empty(char *msg); +-static void ancestor_hashtable_entry_free(memberof_cached_value *entry); +-static memberof_cached_value *ancestors_cache_lookup(const char *ndn); +-static PRBool ancestors_cache_remove(const char *ndn); +-static PLHashEntry *ancestors_cache_add(const void *key, void *value); ++static memberof_cached_value *ancestors_cache_lookup(MemberOfConfig *config, const char *ndn); ++static PRBool ancestors_cache_remove(MemberOfConfig *config, const char *ndn); ++static PLHashEntry *ancestors_cache_add(MemberOfConfig *config, const void *key, void *value); + + /*** implementation ***/ + +@@ -375,12 +356,6 @@ int memberof_postop_start(Slapi_PBlock *pb) + slapi_log_err(SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM, + "--> memberof_postop_start\n" ); + +- memberof_operation_lock = PR_NewMonitor(); +- if(0 == memberof_operation_lock) +- { +- rc = -1; +- goto bail; +- } + if(config_rwlock == NULL){ + if((config_rwlock = slapi_new_rwlock()) == NULL){ + rc = -1; +@@ -388,9 +363,6 @@ 
int memberof_postop_start(Slapi_PBlock *pb) + } + } + +- fixup_entry_hashtable = hashtable_new(); +- group_ancestors_hashtable = hashtable_new(); +- + /* Set the alternate config area if one is defined. */ + slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_AREA, &config_area); + if (config_area) +@@ -482,18 +454,7 @@ int memberof_postop_close(Slapi_PBlock *pb) + slapi_sdn_free(&_pluginDN); + slapi_destroy_rwlock(config_rwlock); + config_rwlock = NULL; +- PR_DestroyMonitor(memberof_operation_lock); +- memberof_operation_lock = NULL; +- +- if (fixup_entry_hashtable) { +- fixup_hashtable_empty("memberof_postop_close empty fixup_entry_hastable"); +- PL_HashTableDestroy(fixup_entry_hashtable); +- } + +- if (group_ancestors_hashtable) { +- ancestor_hashtable_empty("memberof_postop_close empty group_ancestors_hashtable"); +- PL_HashTableDestroy(group_ancestors_hashtable); +- } + slapi_log_err(SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM, + "<-- memberof_postop_close\n" ); + return 0; +@@ -554,7 +515,7 @@ int memberof_postop_del(Slapi_PBlock *pb) + { + int ret = SLAPI_PLUGIN_SUCCESS; + MemberOfConfig *mainConfig = NULL; +- MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ++ MemberOfConfig configCopy = {0}; + Slapi_DN *sdn; + void *caller_id = NULL; + +@@ -583,9 +544,6 @@ int memberof_postop_del(Slapi_PBlock *pb) + } + memberof_copy_config(&configCopy, memberof_get_config()); + memberof_unlock_config(); +- +- /* get the memberOf operation lock */ +- memberof_lock(); + + /* remove this DN from the + * membership lists of groups +@@ -594,7 +552,6 @@ int memberof_postop_del(Slapi_PBlock *pb) + slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, + "memberof_postop_del - Error deleting dn (%s) from group. Error (%d)\n", + slapi_sdn_get_dn(sdn),ret); +- memberof_unlock(); + goto bail; + } + +@@ -618,7 +575,6 @@ int memberof_postop_del(Slapi_PBlock *pb) + } + } + } +- memberof_unlock(); + bail: + memberof_free_config(&configCopy); + } +@@ -813,7 +769,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn + memberof_cached_value *ht_grp = NULL; + const char *ndn = slapi_sdn_get_ndn(sdn); + +- ht_grp = ancestors_cache_lookup((const void *) ndn); ++ ht_grp = ancestors_cache_lookup(config, (const void *) ndn); + if (ht_grp) { + #if MEMBEROF_CACHE_DEBUG + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp); +@@ -960,7 +916,7 @@ int memberof_postop_modrdn(Slapi_PBlock *pb) + if(memberof_oktodo(pb)) + { + MemberOfConfig *mainConfig = 0; +- MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ++ MemberOfConfig configCopy = {0}; + struct slapi_entry *pre_e = NULL; + struct slapi_entry *post_e = NULL; + Slapi_DN *pre_sdn = 0; +@@ -988,8 +944,6 @@ int memberof_postop_modrdn(Slapi_PBlock *pb) + goto bail; + } + +- memberof_lock(); +- + /* update any downstream members */ + if(pre_sdn && post_sdn && configCopy.group_filter && + 0 == slapi_filter_test_simple(post_e, configCopy.group_filter)) +@@ -1060,7 +1014,6 @@ int memberof_postop_modrdn(Slapi_PBlock *pb) + } + } + } +- memberof_unlock(); + bail: + memberof_free_config(&configCopy); + } +@@ -1220,7 +1173,7 @@ int memberof_postop_modify(Slapi_PBlock *pb) + { + int config_copied = 0; + MemberOfConfig *mainConfig = 0; +- MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ++ MemberOfConfig configCopy = {0}; + + /* get the mod set */ + slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods); +@@ -1267,8 +1220,6 @@ int 
memberof_postop_modify(Slapi_PBlock *pb) + { + int op = slapi_mod_get_operation(smod); + +- memberof_lock(); +- + /* the modify op decides the function */ + switch(op & ~LDAP_MOD_BVALUES) + { +@@ -1280,7 +1231,6 @@ int memberof_postop_modify(Slapi_PBlock *pb) + "memberof_postop_modify - Failed to add dn (%s) to target. " + "Error (%d)\n", slapi_sdn_get_dn(sdn), ret ); + slapi_mod_done(next_mod); +- memberof_unlock(); + goto bail; + } + break; +@@ -1299,7 +1249,6 @@ int memberof_postop_modify(Slapi_PBlock *pb) + "memberof_postop_modify - Failed to replace list (%s). " + "Error (%d)\n", slapi_sdn_get_dn(sdn), ret ); + slapi_mod_done(next_mod); +- memberof_unlock(); + goto bail; + } + } +@@ -1311,7 +1260,6 @@ int memberof_postop_modify(Slapi_PBlock *pb) + "memberof_postop_modify: failed to remove dn (%s). " + "Error (%d)\n", slapi_sdn_get_dn(sdn), ret ); + slapi_mod_done(next_mod); +- memberof_unlock(); + goto bail; + } + } +@@ -1326,7 +1274,6 @@ int memberof_postop_modify(Slapi_PBlock *pb) + "memberof_postop_modify - Failed to replace values in dn (%s). " + "Error (%d)\n", slapi_sdn_get_dn(sdn), ret ); + slapi_mod_done(next_mod); +- memberof_unlock(); + goto bail; + } + break; +@@ -1342,8 +1289,6 @@ int memberof_postop_modify(Slapi_PBlock *pb) + break; + } + } +- +- memberof_unlock(); + } + + slapi_mod_done(next_mod); +@@ -1398,7 +1343,7 @@ int memberof_postop_add(Slapi_PBlock *pb) + if(memberof_oktodo(pb) && (sdn = memberof_getsdn(pb))) + { + struct slapi_entry *e = NULL; +- MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ++ MemberOfConfig configCopy = {0}; + MemberOfConfig *mainConfig; + slapi_pblock_get( pb, SLAPI_ENTRY_POST_OP, &e ); + +@@ -1424,8 +1369,6 @@ int memberof_postop_add(Slapi_PBlock *pb) + int i = 0; + Slapi_Attr *attr = 0; + +- memberof_lock(); +- + for (i = 0; configCopy.groupattrs && configCopy.groupattrs[i]; i++) + { + if(0 == slapi_entry_attr_find(e, configCopy.groupattrs[i], &attr)) +@@ -1438,8 +1381,6 @@ int memberof_postop_add(Slapi_PBlock *pb) + } + } + } +- +- memberof_unlock(); + memberof_free_config(&configCopy); + } + } +@@ -2201,7 +2142,7 @@ dump_cache_entry(memberof_cached_value *double_check, const char *msg) + * the firsts elements of the array has 'valid=1' and the dn/ndn of group it belong to + */ + static void +-cache_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *groups) ++cache_ancestors(MemberOfConfig *config, Slapi_Value **member_ndn_val, memberof_get_groups_data *groups) + { + Slapi_ValueSet *groupvals = *((memberof_get_groups_data*)groups)->groupvals; + Slapi_Value *sval; +@@ -2298,14 +2239,14 @@ cache_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *groups) + #if MEMBEROF_CACHE_DEBUG + dump_cache_entry(cache_entry, key); + #endif +- if (ancestors_cache_add((const void*) key_copy, (void *) cache_entry) == NULL) { ++ if (ancestors_cache_add(config, (const void*) key_copy, (void *) cache_entry) == NULL) { + slapi_log_err( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, "cache_ancestors: Failed to cache ancestor of %s\n", key); + ancestor_hashtable_entry_free(cache_entry); + slapi_ch_free ((void**)&cache_entry); + return; + } + #if MEMBEROF_CACHE_DEBUG +- if (double_check = ancestors_cache_lookup((const void*) key)) { ++ if (double_check = ancestors_cache_lookup(config, (const void*) key)) { + dump_cache_entry(double_check, "read back"); + } + #endif +@@ -2390,9 +2331,9 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, + memberof_get_groups_callback, &member_data, &cached, 
member_data.use_cache); + + merge_ancestors(&member_ndn_val, &member_data, data); +- if (!cached && member_data.use_cache) +- cache_ancestors(&member_ndn_val, &member_data); +- ++ if (!cached && member_data.use_cache) { ++ cache_ancestors(config, &member_ndn_val, &member_data); ++ } + + slapi_value_free(&member_ndn_val); + slapi_valueset_free(groupvals); +@@ -2969,46 +2910,9 @@ int memberof_qsort_compare(const void *a, const void *b) + val1, val2); + } + +-/* betxn: This locking mechanism is necessary to guarantee the memberof +- * consistency */ +-void memberof_lock() +-{ +- if (usetxn) { +- PR_EnterMonitor(memberof_operation_lock); +- } +- if (fixup_entry_hashtable) { +- fixup_hashtable_empty("memberof_lock"); +- } +- if (group_ancestors_hashtable) { +- ancestor_hashtable_empty("memberof_lock empty group_ancestors_hashtable"); +- memset(&cache_stat, 0, sizeof(cache_stat)); +- } +-} +- +-void memberof_unlock() +-{ +- if (group_ancestors_hashtable) { +- ancestor_hashtable_empty("memberof_unlock empty group_ancestors_hashtable"); +-#if MEMBEROF_CACHE_DEBUG +- slapi_log_err(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, "cache statistics: total lookup %d (success %d), add %d, remove %d, enum %d\n", +- cache_stat.total_lookup, cache_stat.successfull_lookup, +- cache_stat.total_add, cache_stat.total_remove, cache_stat.total_enumerate); +- slapi_log_err(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, "cache statistics duration: lookup %ld, add %ld, remove %ld, enum %ld\n", +- cache_stat.cumul_duration_lookup, cache_stat.cumul_duration_add, +- cache_stat.cumul_duration_remove, cache_stat.cumul_duration_enumerate); +-#endif +- } +- if (fixup_entry_hashtable) { +- fixup_hashtable_empty("memberof_lock"); +- } +- if (usetxn) { +- PR_ExitMonitor(memberof_operation_lock); +- } +-} +- + void memberof_fixup_task_thread(void *arg) + { +- MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ++ MemberOfConfig configCopy = {0}; + Slapi_Task *task = (Slapi_Task *)arg; + task_data *td = NULL; + int rc = 0; +@@ -3068,14 +2972,8 @@ void memberof_fixup_task_thread(void *arg) + } + } + +- /* get the memberOf operation lock */ +- memberof_lock(); +- + /* do real work */ + rc = memberof_fix_memberof(&configCopy, task, td); +- +- /* release the memberOf operation lock */ +- memberof_unlock(); + + done: + if (usetxn && fixup_pb) { +@@ -3240,7 +3138,7 @@ int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *t + } + + static memberof_cached_value * +-ancestors_cache_lookup(const char *ndn) ++ancestors_cache_lookup(MemberOfConfig *config, const char *ndn) + { + memberof_cached_value *e; + #if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME) +@@ -3258,7 +3156,7 @@ ancestors_cache_lookup(const char *ndn) + } + #endif + +- e = (memberof_cached_value *) PL_HashTableLookupConst(group_ancestors_hashtable, (const void *) ndn); ++ e = (memberof_cached_value *) PL_HashTableLookupConst(config->ancestors_cache, (const void *) ndn); + + #if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME) + if (start) { +@@ -3274,7 +3172,7 @@ ancestors_cache_lookup(const char *ndn) + + } + static PRBool +-ancestors_cache_remove(const char *ndn) ++ancestors_cache_remove(MemberOfConfig *config, const char *ndn) + { + PRBool rc; + #if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME) +@@ -3292,7 +3190,7 @@ ancestors_cache_remove(const char *ndn) + } + #endif + +- rc = PL_HashTableRemove(group_ancestors_hashtable, (const void *) ndn); ++ rc = PL_HashTableRemove(config->ancestors_cache, (const void *) ndn); + + #if defined(DEBUG) && 
defined(HAVE_CLOCK_GETTIME) + if (start) { +@@ -3305,7 +3203,7 @@ ancestors_cache_remove(const char *ndn) + } + + static PLHashEntry * +-ancestors_cache_add(const void *key, void *value) ++ancestors_cache_add(MemberOfConfig *config, const void *key, void *value) + { + PLHashEntry *e; + #if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME) +@@ -3322,7 +3220,7 @@ ancestors_cache_add(const void *key, void *value) + } + #endif + +- e = PL_HashTableAdd(group_ancestors_hashtable, key, value); ++ e = PL_HashTableAdd(config->ancestors_cache, key, value); + + #if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME) + if (start) { +@@ -3360,10 +3258,11 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) + goto bail; + } + +- /* Check if the entry has not already been fixed */ ++ /* Check if the entry has not already been fixed */ + ndn = slapi_sdn_get_ndn(sdn); +- if (ndn && fixup_entry_hashtable && PL_HashTableLookupConst(fixup_entry_hashtable, (void*) ndn)) { +- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_fix_memberof_callback: Entry %s already fixed up\n", ndn); ++ if (ndn && config->fixup_cache && PL_HashTableLookupConst(config->fixup_cache, (void*) ndn)) { ++ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, ++ "memberof_fix_memberof_callback: Entry %s already fixed up\n", ndn); + goto bail; + } + +@@ -3383,9 +3282,9 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) + #if MEMBEROF_CACHE_DEBUG + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_fix_memberof_callback: This is NOT a group %s\n", ndn); + #endif +- ht_grp = ancestors_cache_lookup((const void *) ndn); ++ ht_grp = ancestors_cache_lookup(config, (const void *) ndn); + if (ht_grp) { +- if (ancestors_cache_remove((const void *) ndn)) { ++ if (ancestors_cache_remove(config, (const void *) ndn)) { + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_fix_memberof_callback: free cached values for %s\n", ndn); + ancestor_hashtable_entry_free(ht_grp); + slapi_ch_free((void **) &ht_grp); +@@ -3400,6 +3299,7 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) + } + } + } ++ + /* If we found some groups, replace the existing memberOf attribute + * with the found values. 
*/ + if (groups && slapi_valueset_count(groups)) +@@ -3439,9 +3339,9 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) + slapi_valueset_free(groups); + + /* records that this entry has been fixed up */ +- if (fixup_entry_hashtable) { ++ if (config->fixup_cache) { + dn_copy = slapi_ch_strdup(ndn); +- if (PL_HashTableAdd(fixup_entry_hashtable, dn_copy, dn_copy) == NULL) { ++ if (PL_HashTableAdd(config->fixup_cache, dn_copy, dn_copy) == NULL) { + slapi_log_err(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_fix_memberof_callback: " + "failed to add dn (%s) in the fixup hashtable; NSPR error - %d\n", + dn_copy, PR_GetError()); +@@ -3539,150 +3439,8 @@ memberof_add_objectclass(char *auto_add_oc, const char *dn) + return rc; + } + +-static PRIntn memberof_hash_compare_keys(const void *v1, const void *v2) +-{ +- PRIntn rc; +- if (0 == strcasecmp((const char *) v1, (const char *) v2)) { +- rc = 1; +- } else { +- rc = 0; +- } +- return rc; +-} +- +-static PRIntn memberof_hash_compare_values(const void *v1, const void *v2) +-{ +- PRIntn rc; +- if ((char *) v1 == (char *) v2) { +- rc = 1; +- } else { +- rc = 0; +- } +- return rc; +-} +- +-/* +- * Hashing function using Bernstein's method +- */ +-static PLHashNumber memberof_hash_fn(const void *key) +-{ +- PLHashNumber hash = 5381; +- unsigned char *x = (unsigned char *)key; +- int c; +- +- while ((c = *x++)){ +- hash = ((hash << 5) + hash) ^ c; +- } +- return hash; +-} +- +-/* allocates the plugin hashtable +- * This hash table is used by operation and is protected from +- * concurrent operations with the memberof_lock (if not usetxn, memberof_lock +- * is not implemented and the hash table will be not used. +- * +- * The hash table contains all the DN of the entries for which the memberof +- * attribute has been computed/updated during the current operation +- * +- * hash table should be empty at the beginning and end of the plugin callback +- */ +-static PLHashTable *hashtable_new() +-{ +- if (!usetxn) { +- return NULL; +- } +- +- return PL_NewHashTable(MEMBEROF_HASHTABLE_SIZE, +- memberof_hash_fn, +- memberof_hash_compare_keys, +- memberof_hash_compare_values, NULL, NULL); +-} +-/* this function called for each hash node during hash destruction */ +-static PRIntn fixup_hashtable_remove(PLHashEntry *he, PRIntn index, void *arg) +-{ +- char *dn_copy; +- +- if (he == NULL) { +- return HT_ENUMERATE_NEXT; +- } +- dn_copy = (char*) he->value; +- slapi_ch_free_string(&dn_copy); +- +- return HT_ENUMERATE_REMOVE; +-} +- +-static void fixup_hashtable_empty(char *msg) +-{ +- if (fixup_entry_hashtable) { +- PL_HashTableEnumerateEntries(fixup_entry_hashtable, fixup_hashtable_remove, msg); +- } +-} +- +- +-/* allocates the plugin hashtable +- * This hash table is used by operation and is protected from +- * concurrent operations with the memberof_lock (if not usetxn, memberof_lock +- * is not implemented and the hash table will be not used. 
+- * +- * The hash table contains all the DN of the entries for which the memberof +- * attribute has been computed/updated during the current operation +- * +- * hash table should be empty at the beginning and end of the plugin callback +- */ +- +-static +-void ancestor_hashtable_entry_free(memberof_cached_value *entry) +-{ +- int i; +- for (i = 0; entry[i].valid; i++) { +- slapi_ch_free((void **) &entry[i].group_dn_val); +- slapi_ch_free((void **) &entry[i].group_ndn_val); +- } +- /* Here we are at the ending element containing the key */ +- slapi_ch_free((void**) &entry[i].key); +-} +-/* this function called for each hash node during hash destruction */ +-static PRIntn ancestor_hashtable_remove(PLHashEntry *he, PRIntn index, void *arg) ++int ++memberof_use_txn() + { +- memberof_cached_value *group_ancestor_array; +- +- if (he == NULL) +- return HT_ENUMERATE_NEXT; +- +- +- group_ancestor_array = (memberof_cached_value *) he->value; +- ancestor_hashtable_entry_free(group_ancestor_array); +- slapi_ch_free((void **)&group_ancestor_array); +- +- return HT_ENUMERATE_REMOVE; ++ return usetxn; + } +- +-static void ancestor_hashtable_empty(char *msg) +-{ +-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME) +- long int start; +- struct timespec tsnow; +-#endif +- +- if (group_ancestors_hashtable) { +- cache_stat.total_enumerate++; +-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME) +- if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) { +- start = 0; +- } else { +- start = tsnow.tv_nsec; +- } +-#endif +- PL_HashTableEnumerateEntries(group_ancestors_hashtable, ancestor_hashtable_remove, msg); +- +-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME) +- if (start) { +- if (clock_gettime(CLOCK_REALTIME, &tsnow) == 0) { +- cache_stat.cumul_duration_enumerate += (tsnow.tv_nsec - start); +- } +- } +-#endif +- } +- +-} +- +diff --git a/ldap/servers/plugins/memberof/memberof.h b/ldap/servers/plugins/memberof/memberof.h +index 9a3a6a25d..a01c4d247 100644 +--- a/ldap/servers/plugins/memberof/memberof.h ++++ b/ldap/servers/plugins/memberof/memberof.h +@@ -62,8 +62,22 @@ typedef struct memberofconfig { + int skip_nested; + int fixup_task; + char *auto_add_oc; ++ PLHashTable *ancestors_cache; ++ PLHashTable *fixup_cache; + } MemberOfConfig; + ++/* The key to access the hash table is the normalized DN ++ * The normalized DN is stored in the value because: ++ * - It is used in slapi_valueset_find ++ * - It is used to fill the memberof_get_groups_data.group_norm_vals ++ */ ++typedef struct _memberof_cached_value ++{ ++ char *key; ++ char *group_dn_val; ++ char *group_ndn_val; ++ int valid; ++} memberof_cached_value; + + /* + * functions +@@ -88,5 +102,8 @@ int memberof_apply_config (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Ent + void *memberof_get_plugin_id(void); + void memberof_release_config(void); + PRUint64 get_plugin_started(void); ++void ancestor_hashtable_entry_free(memberof_cached_value *entry); ++PLHashTable *hashtable_new(); ++int memberof_use_txn(); + + #endif /* _MEMBEROF_H_ */ +diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c +index c3474bf2c..3cc7c4d9c 100644 +--- a/ldap/servers/plugins/memberof/memberof_config.c ++++ b/ldap/servers/plugins/memberof/memberof_config.c +@@ -14,12 +14,12 @@ + * memberof_config.c - configuration-related code for memberOf plug-in + * + */ +- ++#include "plhash.h" + #include +- + #include "memberof.h" + + #define MEMBEROF_CONFIG_FILTER "(objectclass=*)" ++#define MEMBEROF_HASHTABLE_SIZE 1000 + + /* + * The 
configuration attributes are contained in the plugin entry e.g. +@@ -33,7 +33,9 @@ + + /* + * function prototypes +- */ ++ */ ++static void fixup_hashtable_empty( MemberOfConfig *config, char *msg); ++static void ancestor_hashtable_empty(MemberOfConfig *config, char *msg); + static int memberof_validate_config (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* e, + int *returncode, char *returntext, void *arg); + static int memberof_search (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* e, +@@ -48,7 +50,7 @@ static int memberof_search (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_En + /* This is the main configuration which is updated from dse.ldif. The + * config will be copied when it is used by the plug-in to prevent it + * being changed out from under a running memberOf operation. */ +-static MemberOfConfig theConfig = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ++static MemberOfConfig theConfig = {0}; + static Slapi_RWLock *memberof_config_lock = 0; + static int inited = 0; + +@@ -696,6 +698,12 @@ memberof_copy_config(MemberOfConfig *dest, MemberOfConfig *src) + { + if (dest && src) + { ++ /* Allocate our caches here since we only copy the config at the start of an op */ ++ if (memberof_use_txn() == 1){ ++ dest->ancestors_cache = hashtable_new(); ++ dest->fixup_cache = hashtable_new(); ++ } ++ + /* Check if the copy is already up to date */ + if (src->groupattrs) + { +@@ -799,6 +807,14 @@ memberof_free_config(MemberOfConfig *config) + slapi_ch_free_string(&config->memberof_attr); + memberof_free_scope(config->entryScopes, &config->entryScopeCount); + memberof_free_scope(config->entryScopeExcludeSubtrees, &config->entryExcludeScopeCount); ++ if (config->fixup_cache) { ++ fixup_hashtable_empty(config, "memberof_free_config empty fixup_entry_hastable"); ++ PL_HashTableDestroy(config->fixup_cache); ++ } ++ if (config->ancestors_cache) { ++ ancestor_hashtable_empty(config, "memberof_free_config empty group_ancestors_hashtable"); ++ PL_HashTableDestroy(config->ancestors_cache); ++ } + } + } + +@@ -1001,3 +1017,131 @@ bail: + + return ret; + } ++ ++ ++static PRIntn memberof_hash_compare_keys(const void *v1, const void *v2) ++{ ++ PRIntn rc; ++ if (0 == strcasecmp((const char *) v1, (const char *) v2)) { ++ rc = 1; ++ } else { ++ rc = 0; ++ } ++ return rc; ++} ++ ++static PRIntn memberof_hash_compare_values(const void *v1, const void *v2) ++{ ++ PRIntn rc; ++ if ((char *) v1 == (char *) v2) { ++ rc = 1; ++ } else { ++ rc = 0; ++ } ++ return rc; ++} ++ ++/* ++ * Hashing function using Bernstein's method ++ */ ++static PLHashNumber memberof_hash_fn(const void *key) ++{ ++ PLHashNumber hash = 5381; ++ unsigned char *x = (unsigned char *)key; ++ int c; ++ ++ while ((c = *x++)){ ++ hash = ((hash << 5) + hash) ^ c; ++ } ++ return hash; ++} ++ ++/* allocates the plugin hashtable ++ * This hash table is used by operation and is protected from ++ * concurrent operations with the memberof_lock (if not usetxn, memberof_lock ++ * is not implemented and the hash table will be not used. 
++ * ++ * The hash table contains all the DN of the entries for which the memberof ++ * attribute has been computed/updated during the current operation ++ * ++ * hash table should be empty at the beginning and end of the plugin callback ++ */ ++PLHashTable *hashtable_new(int usetxn) ++{ ++ if (!usetxn) { ++ return NULL; ++ } ++ ++ return PL_NewHashTable(MEMBEROF_HASHTABLE_SIZE, ++ memberof_hash_fn, ++ memberof_hash_compare_keys, ++ memberof_hash_compare_values, NULL, NULL); ++} ++ ++/* this function called for each hash node during hash destruction */ ++static PRIntn fixup_hashtable_remove(PLHashEntry *he, PRIntn index __attribute__((unused)), void *arg __attribute__((unused))) ++{ ++ char *dn_copy; ++ ++ if (he == NULL) { ++ return HT_ENUMERATE_NEXT; ++ } ++ dn_copy = (char*) he->value; ++ slapi_ch_free_string(&dn_copy); ++ ++ return HT_ENUMERATE_REMOVE; ++} ++ ++static void fixup_hashtable_empty(MemberOfConfig *config, char *msg) ++{ ++ if (config->fixup_cache) { ++ PL_HashTableEnumerateEntries(config->fixup_cache, fixup_hashtable_remove, msg); ++ } ++} ++ ++ ++/* allocates the plugin hashtable ++ * This hash table is used by operation and is protected from ++ * concurrent operations with the memberof_lock (if not usetxn, memberof_lock ++ * is not implemented and the hash table will be not used. ++ * ++ * The hash table contains all the DN of the entries for which the memberof ++ * attribute has been computed/updated during the current operation ++ * ++ * hash table should be empty at the beginning and end of the plugin callback ++ */ ++ ++void ancestor_hashtable_entry_free(memberof_cached_value *entry) ++{ ++ int i; ++ ++ for (i = 0; entry[i].valid; i++) { ++ slapi_ch_free((void **) &entry[i].group_dn_val); ++ slapi_ch_free((void **) &entry[i].group_ndn_val); ++ } ++ /* Here we are at the ending element containing the key */ ++ slapi_ch_free((void**) &entry[i].key); ++} ++ ++/* this function called for each hash node during hash destruction */ ++static PRIntn ancestor_hashtable_remove(PLHashEntry *he, PRIntn index __attribute__((unused)), void *arg __attribute__((unused))) ++{ ++ memberof_cached_value *group_ancestor_array; ++ ++ if (he == NULL) { ++ return HT_ENUMERATE_NEXT; ++ } ++ group_ancestor_array = (memberof_cached_value *) he->value; ++ ancestor_hashtable_entry_free(group_ancestor_array); ++ slapi_ch_free((void **)&group_ancestor_array); ++ ++ return HT_ENUMERATE_REMOVE; ++} ++ ++static void ancestor_hashtable_empty(MemberOfConfig *config, char *msg) ++{ ++ if (config->ancestors_cache) { ++ PL_HashTableEnumerateEntries(config->ancestors_cache, ancestor_hashtable_remove, msg); ++ } ++ ++} +-- +2.13.6 + diff --git a/SOURCES/0078-Ticket-49402-Adding-a-database-entry-with-the-same-d.patch b/SOURCES/0078-Ticket-49402-Adding-a-database-entry-with-the-same-d.patch new file mode 100644 index 0000000..a0a24dc --- /dev/null +++ b/SOURCES/0078-Ticket-49402-Adding-a-database-entry-with-the-same-d.patch @@ -0,0 +1,41 @@ +From bc190eeaaffbb514f69664b4d033dc593a78683b Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 16 Oct 2017 12:52:46 -0400 +Subject: [PATCH] Ticket 49402 - Adding a database entry with the same database + name that was deleted hangs server at shutdown + +Bug Description: At shutdown, after a backend was deleted, which also had a import + task run, the server hangs at shutdown. 
The issue is that the + import task destructor used the ldbm inst struct to see if it was + busy, but the inst was freed and the destructor was checking invalid + memory which caused a false positive on the "busy" check. + +Fix Description: Do not check if the instance is busy to tell if it's okay to remove + the task, instead just check the task's state. + +https://pagure.io/389-ds-base/issue/49402 + +Reviewed by: lkrispen(Thanks!) + +(cherry picked from commit bc6dbf15c160ac7e6c553133b2b936a981cfb7b6) +(cherry picked from commit 2ef4e813b8f6b92908ff553a704808cbbd425b5d) +--- + ldap/servers/slapd/back-ldbm/import.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c +index 7161bace1..f60df194b 100644 +--- a/ldap/servers/slapd/back-ldbm/import.c ++++ b/ldap/servers/slapd/back-ldbm/import.c +@@ -234,7 +234,7 @@ static void import_task_destroy(Slapi_Task *task) + return; + } + +- while(is_instance_busy(job->inst)){ ++ while (task->task_state == SLAPI_TASK_RUNNING) { + /* wait for the job to finish before freeing it */ + DS_Sleep(PR_SecondsToInterval(1)); + } +-- +2.13.6 + diff --git a/SOURCES/0079-Ticket-49439-cleanallruv-is-not-logging-information.patch b/SOURCES/0079-Ticket-49439-cleanallruv-is-not-logging-information.patch new file mode 100644 index 0000000..da266a6 --- /dev/null +++ b/SOURCES/0079-Ticket-49439-cleanallruv-is-not-logging-information.patch @@ -0,0 +1,239 @@ +From 8031684255007b42df3d08b80e674aefb0ebfb2d Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 2 Nov 2017 12:55:11 -0400 +Subject: [PATCH] Ticket 49439 - cleanallruv is not logging information + +Bug Description: During the logging refector effro from ticket 48978 + a mistake was made and cleanruv_log() was using + LOG_NOTICE (which is not a true log level), it was + supposed to be SLAPI_LOG_NOTICE. + + We also use DEBUG defines to contorl the logging for + debug builds + +Fix Description: Remove the LDAP_DEBUG defines in cleanruv_log, and set + the correct logging severity level. + +https://pagure.io/389-ds-base/issue/49439 + +Reviewed by: firstyear(Thanks!) 
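+
+[Editor's note, not part of the original commit: a minimal, self-contained
+sketch of the mix-up described above. LOG_NOTICE is a syslog(3) priority,
+while cleanruv_log()/slapi_log_err() expect the server's own severity
+constants (SLAPI_LOG_* in slapi-plugin.h); both are plain ints, so the wrong
+constant compiles cleanly and the message can end up filtered or
+mis-levelled. The logger, enum and names below are hypothetical stand-ins,
+not 389-ds APIs.]
+
+    #include <stdio.h>
+    #include <syslog.h>                /* defines LOG_NOTICE for syslog(3) */
+
+    enum my_sev { MY_SEV_ERR = 1, MY_SEV_NOTICE = 2 };   /* hypothetical levels */
+
+    static void my_log(int sev, const char *msg)
+    {
+        if (sev <= MY_SEV_NOTICE)      /* severity-filtered logger */
+            printf("[%d] %s\n", sev, msg);
+    }
+
+    int main(void)
+    {
+        my_log(MY_SEV_NOTICE, "logged as intended");
+        my_log(LOG_NOTICE, "compiles, but the constant is from the wrong namespace");
+        return 0;
+    }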
+ +(cherry picked from commit e1f866a5e3ccce8e061e361c0e3dd11175a8acf2) +--- + .../plugins/replication/repl5_replica_config.c | 101 +++++++++++---------- + 1 file changed, 51 insertions(+), 50 deletions(-) + +diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c +index 053103bd0..814f1cac0 100644 +--- a/ldap/servers/plugins/replication/repl5_replica_config.c ++++ b/ldap/servers/plugins/replication/repl5_replica_config.c +@@ -1911,12 +1911,13 @@ replica_cleanallruv_thread(void *arg) + /* + * need to sleep between passes + */ +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, LOG_NOTICE, "Not all replicas have received the " +- "cleanallruv extended op, retrying in %d seconds",interval); +- if(!slapi_is_shutting_down()){ +- PR_Lock( notify_lock ); +- PR_WaitCondVar( notify_cvar, PR_SecondsToInterval(interval) ); +- PR_Unlock( notify_lock ); ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Not all replicas have received the " ++ "cleanallruv extended op, retrying in %d seconds", ++ interval); ++ if (!slapi_is_shutting_down()) { ++ PR_Lock(notify_lock); ++ PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); ++ PR_Unlock(notify_lock); + } + if(interval < 14400){ /* 4 hour max */ + interval = interval * 2; +@@ -1952,8 +1953,8 @@ replica_cleanallruv_thread(void *arg) + found_dirty_rid = 0; + } else { + found_dirty_rid = 1; +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, LOG_NOTICE, "Replica is not cleaned yet (%s)", +- agmt_get_long_name(agmt)); ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Replica is not cleaned yet (%s)", ++ agmt_get_long_name(agmt)); + break; + } + agmt_obj = agmtlist_get_next_agreement_for_replica (data->replica, agmt_obj); +@@ -1969,12 +1970,13 @@ replica_cleanallruv_thread(void *arg) + /* + * Need to sleep between passes unless we are shutting down + */ +- if (!slapi_is_shutting_down()){ +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, LOG_NOTICE, "Replicas have not been cleaned yet, " +- "retrying in %d seconds", interval); +- PR_Lock( notify_lock ); +- PR_WaitCondVar( notify_cvar, PR_SecondsToInterval(interval) ); +- PR_Unlock( notify_lock ); ++ if (!slapi_is_shutting_down()) { ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Replicas have not been cleaned yet, " ++ "retrying in %d seconds", ++ interval); ++ PR_Lock(notify_lock); ++ PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); ++ PR_Unlock(notify_lock); + } + + if(interval < 14400){ /* 4 hour max */ +@@ -2008,11 +2010,11 @@ done: + /* + * Shutdown or abort + */ +- if(!is_task_aborted(data->rid) || slapi_is_shutting_down()){ +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, LOG_NOTICE, +- "Server shutting down. Process will resume at server startup"); ++ if (!is_task_aborted(data->rid) || slapi_is_shutting_down()) { ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, ++ "Server shutting down. 
Process will resume at server startup"); + } else { +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, LOG_NOTICE, "Task aborted for rid(%d).",data->rid); ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Task aborted for rid(%d).", data->rid); + delete_cleaned_rid_config(data); + remove_cleaned_rid(data->rid); + } +@@ -2180,7 +2182,7 @@ check_replicas_are_done_cleaning(cleanruv_data *data ) + break; + } + +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, LOG_NOTICE, ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, + "Not all replicas finished cleaning, retrying in %d seconds", + interval); + if(!slapi_is_shutting_down()){ +@@ -2289,12 +2291,12 @@ check_replicas_are_done_aborting(cleanruv_data *data ) + if(not_all_aborted == 0){ + break; + } +- cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, LOG_NOTICE, +- "Not all replicas finished aborting, retrying in %d seconds",interval); +- if(!slapi_is_shutting_down()){ +- PR_Lock( notify_lock ); +- PR_WaitCondVar( notify_cvar, PR_SecondsToInterval(interval) ); +- PR_Unlock( notify_lock ); ++ cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, ++ "Not all replicas finished aborting, retrying in %d seconds", interval); ++ if (!slapi_is_shutting_down()) { ++ PR_Lock(notify_lock); ++ PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); ++ PR_Unlock(notify_lock); + } + if(interval < 14400){ /* 4 hour max */ + interval = interval * 2; +@@ -2336,8 +2338,8 @@ check_agmts_are_caught_up(cleanruv_data *data, char *maxcsn) + not_all_caughtup = 0; + } else { + not_all_caughtup = 1; +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, LOG_NOTICE, +- "Replica not caught up (%s)",agmt_get_long_name(agmt)); ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, ++ "Replica not caught up (%s)", agmt_get_long_name(agmt)); + break; + } + agmt_obj = agmtlist_get_next_agreement_for_replica (data->replica, agmt_obj); +@@ -2346,12 +2348,12 @@ check_agmts_are_caught_up(cleanruv_data *data, char *maxcsn) + if(not_all_caughtup == 0 || is_task_aborted(data->rid) ){ + break; + } +- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, LOG_NOTICE, +- "Not all replicas caught up, retrying in %d seconds",interval); +- if(!slapi_is_shutting_down()){ +- PR_Lock( notify_lock ); +- PR_WaitCondVar( notify_cvar, PR_SecondsToInterval(interval) ); +- PR_Unlock( notify_lock ); ++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, ++ "Not all replicas caught up, retrying in %d seconds", interval); ++ if (!slapi_is_shutting_down()) { ++ PR_Lock(notify_lock); ++ PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); ++ PR_Unlock(notify_lock); + } + if(interval < 14400){ /* 4 hour max */ + interval = interval * 2; +@@ -2396,8 +2398,8 @@ check_agmts_are_alive(Replica *replica, ReplicaId rid, Slapi_Task *task) + not_all_alive = 0; + } else { + not_all_alive = 1; +- cleanruv_log(task, rid, CLEANALLRUV_ID, LOG_NOTICE, "Replica not online (%s)", +- agmt_get_long_name(agmt)); ++ cleanruv_log(task, rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Replica not online (%s)", ++ agmt_get_long_name(agmt)); + break; + } + agmt_obj = agmtlist_get_next_agreement_for_replica (replica, agmt_obj); +@@ -2406,8 +2408,8 @@ check_agmts_are_alive(Replica *replica, ReplicaId rid, Slapi_Task *task) + if(not_all_alive == 0 || is_task_aborted(rid)){ + break; + } +- cleanruv_log(task, rid, CLEANALLRUV_ID, LOG_NOTICE, "Not all replicas online, retrying in %d seconds...", +- interval); ++ 
cleanruv_log(task, rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Not all replicas online, retrying in %d seconds...", ++ interval); + + if(!slapi_is_shutting_down()){ + PR_Lock( notify_lock ); +@@ -3174,11 +3176,11 @@ replica_abort_task_thread(void *arg) + /* + * Need to sleep between passes. unless we are shutting down + */ +- if (!slapi_is_shutting_down()){ +- cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, LOG_NOTICE, "Retrying in %d seconds",interval); +- PR_Lock( notify_lock ); +- PR_WaitCondVar( notify_cvar, PR_SecondsToInterval(interval) ); +- PR_Unlock( notify_lock ); ++ if (!slapi_is_shutting_down()) { ++ cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Retrying in %d seconds", interval); ++ PR_Lock(notify_lock); ++ PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval)); ++ PR_Unlock(notify_lock); + } + + if(interval < 14400){ /* 4 hour max */ +@@ -3296,9 +3298,10 @@ replica_cleanallruv_send_extop(Repl_Agmt *ra, cleanruv_data *clean_data, int che + /* extop was accepted */ + rc = 0; + } else { +- cleanruv_log(clean_data->task, clean_data->rid, CLEANALLRUV_ID, LOG_NOTICE, +- "Replica %s does not support the CLEANALLRUV task. " +- "Sending replica CLEANRUV task...", slapi_sdn_get_dn(agmt_get_dn_byref(ra))); ++ cleanruv_log(clean_data->task, clean_data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, ++ "Replica %s does not support the CLEANALLRUV task. " ++ "Sending replica CLEANRUV task...", ++ slapi_sdn_get_dn(agmt_get_dn_byref(ra))); + /* + * Ok, this replica doesn't know about CLEANALLRUV, so just manually + * add the CLEANRUV task to the replica. +@@ -3463,9 +3466,9 @@ replica_cleanallruv_check_maxcsn(Repl_Agmt *agmt, char *basedn, char *rid_text, + csn_init_by_string(repl_max, remote_maxcsn); + if(csn_compare (repl_max, max) < 0){ + /* we are not caught up yet, free, and return */ +- cleanruv_log(task, atoi(rid_text), CLEANALLRUV_ID, LOG_NOTICE, +- "Replica maxcsn (%s) is not caught up with deleted replica's maxcsn(%s)", +- remote_maxcsn, maxcsn); ++ cleanruv_log(task, atoi(rid_text), CLEANALLRUV_ID, SLAPI_LOG_NOTICE, ++ "Replica maxcsn (%s) is not caught up with deleted replica's maxcsn(%s)", ++ remote_maxcsn, maxcsn); + rc = -1; + } else { + /* ok this replica is caught up */ +@@ -3636,7 +3639,6 @@ stop_ruv_cleaning() + void + cleanruv_log(Slapi_Task *task, int rid, char *task_type, int sev_level, char *fmt, ...) + { +-#ifdef LDAP_DEBUG + va_list ap1; + va_list ap2; + va_list ap3; +@@ -3661,7 +3663,6 @@ cleanruv_log(Slapi_Task *task, int rid, char *task_type, int sev_level, char *fm + va_end(ap2); + va_end(ap3); + va_end(ap4); +-#endif + } + + char * +-- +2.13.6 + diff --git a/SOURCES/0080-Ticket-49436-double-free-in-COS-in-some-conditions.patch b/SOURCES/0080-Ticket-49436-double-free-in-COS-in-some-conditions.patch new file mode 100644 index 0000000..fc70af1 --- /dev/null +++ b/SOURCES/0080-Ticket-49436-double-free-in-COS-in-some-conditions.patch @@ -0,0 +1,1008 @@ +From fc9a206c294fb5ea2401a9365f01ef2565799478 Mon Sep 17 00:00:00 2001 +From: William Brown +Date: Thu, 2 Nov 2017 13:32:41 +1000 +Subject: [PATCH] Ticket 49436 - double free in COS in some conditions + +Bug Description: virtualattrs and COS have some serious memory +ownership issues. What was happening is that COS with multiple +attributes using the same sp_handle would cause a structure +to be registered twice. During shutdown we would then trigger +a double free in the process. 
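+
+[Editor's note, not part of the original commit: a minimal, self-contained
+sketch of the ownership problem, using made-up names. In the real code the
+handle is a vattr_sp_handle obtained from slapi_vattrspi_register() and the
+per-entry teardown is vattr_map_entry_free(), shown in the diff below.]
+
+    #include <stdlib.h>
+
+    typedef struct sp_handle { int dummy; } sp_handle;
+    typedef struct map_entry { sp_handle *sp; } map_entry;   /* entry owns sp */
+
+    int main(void)
+    {
+        /* Before the fix one handle could be referenced from two map
+         * entries, so freeing it once per entry was a double free.
+         * Allocating one handle per registration, as the fix does, lets
+         * per-entry teardown free every pointer exactly once. */
+        map_entry a = { malloc(sizeof(sp_handle)) };
+        map_entry b = { malloc(sizeof(sp_handle)) };
+
+        free(a.sp);
+        free(b.sp);
+        return 0;
+    }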
+ +Fix Description: Change the behaviour of sp_handles to use a +handle *per* attribute we register to guarantee the assocation +between them. + +https://pagure.io/389-ds-base/issue/49436 + +Author: wibrown + +Review by: mreynolds, vashirov (Thanks!) + +(cherry pick from commit ee4428a3f5d2d8e37a7107c7dce9d622fc17d41c) +--- + dirsrvtests/tests/suites/cos/indirect_cos_test.py | 43 +- + ldap/servers/plugins/cos/cos_cache.c | 723 +++++++++++----------- + ldap/servers/plugins/roles/roles_cache.c | 50 +- + ldap/servers/slapd/vattr.c | 34 +- + 4 files changed, 406 insertions(+), 444 deletions(-) + +diff --git a/dirsrvtests/tests/suites/cos/indirect_cos_test.py b/dirsrvtests/tests/suites/cos/indirect_cos_test.py +index 1aac6b8ed..452edcdf8 100644 +--- a/dirsrvtests/tests/suites/cos/indirect_cos_test.py ++++ b/dirsrvtests/tests/suites/cos/indirect_cos_test.py +@@ -7,6 +7,7 @@ import subprocess + + from lib389 import Entry + from lib389.idm.user import UserAccounts ++from lib389.idm.domain import Domain + from lib389.topologies import topology_st as topo + from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD, HOST_STANDALONE, + SERVERID_STANDALONE, PORT_STANDALONE) +@@ -48,14 +49,8 @@ def check_user(inst): + def setup_subtree_policy(topo): + """Set up subtree password policy + """ +- try: +- topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, +- 'nsslapd-pwpolicy-local', +- 'on')]) +- except ldap.LDAPError as e: +- log.error('Failed to set fine-grained policy: error {}'.format( +- e.message['desc'])) +- raise e ++ ++ topo.standalone.config.set('nsslapd-pwpolicy-local', 'on') + + log.info('Create password policy for subtree {}'.format(OU_PEOPLE)) + try: +@@ -68,15 +63,9 @@ def setup_subtree_policy(topo): + OU_PEOPLE, e.message['desc'])) + raise e + +- log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE)) +- try: +- topo.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, +- 'pwdpolicysubentry', +- PW_POLICY_CONT_PEOPLE2)]) +- except ldap.LDAPError as e: +- log.error('Failed to pwdpolicysubentry pw policy ' +- 'policy for {}: error {}'.format(OU_PEOPLE, e.message['desc'])) +- raise e ++ domain = Domain(topo.standalone, DEFAULT_SUFFIX) ++ domain.replace('pwdpolicysubentry', PW_POLICY_CONT_PEOPLE2) ++ + time.sleep(1) + + +@@ -116,12 +105,9 @@ def setup(topo, request): + """ + log.info('Add custom schema...') + try: +- ATTR_1 = ("( 1.3.6.1.4.1.409.389.2.189 NAME 'x-department' " + +- "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") +- ATTR_2 = ("( 1.3.6.1.4.1.409.389.2.187 NAME 'x-en-ou' " + +- "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") +- OC = ("( xPerson-oid NAME 'xPerson' DESC '' SUP person STRUCTURAL MAY " + +- "( x-department $ x-en-ou ) X-ORIGIN 'user defined' )") ++ ATTR_1 = (b"( 1.3.6.1.4.1.409.389.2.189 NAME 'x-department' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") ++ ATTR_2 = (b"( 1.3.6.1.4.1.409.389.2.187 NAME 'x-en-ou' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") ++ OC = (b"( xPerson-oid NAME 'xPerson' DESC '' SUP person STRUCTURAL MAY ( x-department $ x-en-ou ) X-ORIGIN 'user defined' )") + topo.standalone.modify_s("cn=schema", [(ldap.MOD_ADD, 'attributeTypes', ATTR_1), + (ldap.MOD_ADD, 'attributeTypes', ATTR_2), + (ldap.MOD_ADD, 'objectClasses', OC)]) +@@ -142,14 +128,9 @@ def setup(topo, request): + 'homeDirectory': '/home/test_user', + 'seeAlso': 'cn=cosTemplate,dc=example,dc=com' + } +- users.create(properties=user_properties) +- try: +- 
topo.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_ADD, +- 'objectclass', +- 'xPerson')]) +- except ldap.LDAPError as e: +- log.fatal('Failed to add objectclass to user') +- raise e ++ user = users.create(properties=user_properties) ++ ++ user.add('objectClass', 'xPerson') + + # Setup COS + log.info("Setup indirect COS...") +diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c +index 87d48908c..0e93183d2 100644 +--- a/ldap/servers/plugins/cos/cos_cache.c ++++ b/ldap/servers/plugins/cos/cos_cache.c +@@ -108,9 +108,6 @@ void * cos_get_plugin_identity(void); + #define COSTYPE_INDIRECT 3 + #define COS_DEF_ERROR_NO_TEMPLATES -2 + +-/* the global plugin handle */ +-static volatile vattr_sp_handle *vattr_handle = NULL; +- + /* both variables are protected by change_lock */ + static int cos_cache_notify_flag = 0; + static PRBool cos_cache_at_work = PR_FALSE; +@@ -289,75 +286,61 @@ static Slapi_CondVar *start_cond = NULL; + */ + int cos_cache_init(void) + { +- int ret = 0; +- +- slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_init\n"); +- +- slapi_vattrcache_cache_none(); +- cache_lock = slapi_new_mutex(); +- change_lock = slapi_new_mutex(); +- stop_lock = slapi_new_mutex(); +- something_changed = slapi_new_condvar(change_lock); +- keeprunning =1; +- start_lock = slapi_new_mutex(); +- start_cond = slapi_new_condvar(start_lock); +- started = 0; +- +- if (stop_lock == NULL || +- change_lock == NULL || +- cache_lock == NULL || +- stop_lock == NULL || +- start_lock == NULL || +- start_cond == NULL || +- something_changed == NULL) +- { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, +- "cos_cache_init - Cannot create mutexes\n" ); +- ret = -1; +- goto out; +- } +- +- /* grab the views interface */ +- if(slapi_apib_get_interface(Views_v1_0_GUID, &views_api)) +- { +- /* lets be tolerant if views is disabled */ +- views_api = 0; +- } ++ int ret = 0; + +- if (slapi_vattrspi_register((vattr_sp_handle **)&vattr_handle, +- cos_cache_vattr_get, +- cos_cache_vattr_compare, +- cos_cache_vattr_types) != 0) +- { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, +- "cos_cache_init - Cannot register as service provider\n" ); +- ret = -1; +- goto out; +- } ++ slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_init\n"); ++ ++ slapi_vattrcache_cache_none(); ++ cache_lock = slapi_new_mutex(); ++ change_lock = slapi_new_mutex(); ++ stop_lock = slapi_new_mutex(); ++ something_changed = slapi_new_condvar(change_lock); ++ keeprunning = 1; ++ start_lock = slapi_new_mutex(); ++ start_cond = slapi_new_condvar(start_lock); ++ started = 0; ++ ++ if (stop_lock == NULL || ++ change_lock == NULL || ++ cache_lock == NULL || ++ stop_lock == NULL || ++ start_lock == NULL || ++ start_cond == NULL || ++ something_changed == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, ++ "cos_cache_init - Cannot create mutexes\n"); ++ ret = -1; ++ goto out; ++ } + +- if ( PR_CreateThread (PR_USER_THREAD, +- cos_cache_wait_on_change, +- NULL, +- PR_PRIORITY_NORMAL, +- PR_GLOBAL_THREAD, +- PR_UNJOINABLE_THREAD, +- SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL ) +- { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, +- "cos_cache_init - PR_CreateThread failed\n" ); +- ret = -1; +- goto out; +- } ++ /* grab the views interface */ ++ if (slapi_apib_get_interface(Views_v1_0_GUID, &views_api)) { ++ /* lets be tolerant if views is disabled */ ++ views_api = 0; ++ } + +- /* wait for that thread to get started */ +- if (ret == 0) { +- slapi_lock_mutex(start_lock); +- 
while (!started) { +- while (slapi_wait_condvar(start_cond, NULL) == 0); +- } +- slapi_unlock_mutex(start_lock); +- } ++ if (PR_CreateThread(PR_USER_THREAD, ++ cos_cache_wait_on_change, ++ NULL, ++ PR_PRIORITY_NORMAL, ++ PR_GLOBAL_THREAD, ++ PR_UNJOINABLE_THREAD, ++ SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, ++ "cos_cache_init - PR_CreateThread failed\n"); ++ ret = -1; ++ goto out; ++ } + ++ /* wait for that thread to get started */ ++ if (ret == 0) { ++ slapi_lock_mutex(start_lock); ++ while (!started) { ++ while (slapi_wait_condvar(start_cond, NULL) == 0) ++ ; ++ } ++ slapi_unlock_mutex(start_lock); ++ } + + out: + slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "<-- cos_cache_init\n"); +@@ -752,321 +735,311 @@ struct dn_defs_info { + static int + cos_dn_defs_cb (Slapi_Entry* e, void *callback_data) + { +- struct dn_defs_info *info; +- cosAttrValue **pSneakyVal = 0; +- cosAttrValue *pObjectclass = 0; +- cosAttrValue *pCosTargetTree = 0; +- cosAttrValue *pCosTemplateDn = 0; +- cosAttrValue *pCosSpecifier = 0; +- cosAttrValue *pCosAttribute = 0; +- cosAttrValue *pCosOverrides = 0; +- cosAttrValue *pCosOperational = 0; +- cosAttrValue *pCosOpDefault = 0; +- cosAttrValue *pCosMerge = 0; +- cosAttrValue *pDn = 0; +- struct berval **dnVals; +- int cosType = 0; +- int valIndex = 0; +- Slapi_Attr *dnAttr; +- char *attrType = 0; +- char *norm_dn = NULL; +- info=(struct dn_defs_info *)callback_data; +- +- cos_cache_add_attrval(&pDn, slapi_entry_get_dn(e)); +- if(slapi_entry_first_attr(e, &dnAttr)) { +- goto bail; +- } ++ struct dn_defs_info *info; ++ cosAttrValue **pSneakyVal = 0; ++ cosAttrValue *pObjectclass = 0; ++ cosAttrValue *pCosTargetTree = 0; ++ cosAttrValue *pCosTemplateDn = 0; ++ cosAttrValue *pCosSpecifier = 0; ++ cosAttrValue *pCosAttribute = 0; ++ cosAttrValue *pCosOverrides = 0; ++ cosAttrValue *pCosOperational = 0; ++ cosAttrValue *pCosOpDefault = 0; ++ cosAttrValue *pCosMerge = 0; ++ cosAttrValue *pDn = 0; ++ struct berval **dnVals; ++ int cosType = 0; ++ int valIndex = 0; ++ Slapi_Attr *dnAttr; ++ char *attrType = 0; ++ char *norm_dn = NULL; ++ info = (struct dn_defs_info *)callback_data; ++ ++ cos_cache_add_attrval(&pDn, slapi_entry_get_dn(e)); ++ if (slapi_entry_first_attr(e, &dnAttr)) { ++ goto bail; ++ } + +- do { +- attrType = 0; +- /* we need to fill in the details of the definition now */ +- slapi_attr_get_type(dnAttr, &attrType); +- if(!attrType) { +- continue; +- } +- pSneakyVal = 0; +- if(!slapi_utf8casecmp((unsigned char*)attrType, (unsigned char*)"objectclass")) +- pSneakyVal = &pObjectclass; +- else if(!slapi_utf8casecmp((unsigned char*)attrType, (unsigned char*)"cosTargetTree")){ +- if(pCosTargetTree){ +- norm_dn = slapi_create_dn_string("%s", pCosTargetTree->val); +- if(norm_dn){ +- slapi_ch_free_string(&pCosTargetTree->val); +- pCosTargetTree->val = norm_dn; +- } +- } +- pSneakyVal = &pCosTargetTree; +- } else if(!slapi_utf8casecmp((unsigned char*)attrType, (unsigned char*)"cosTemplateDn")) +- pSneakyVal = &pCosTemplateDn; +- else if(!slapi_utf8casecmp((unsigned char*)attrType, (unsigned char*)"cosSpecifier")) +- pSneakyVal = &pCosSpecifier; +- else if(!slapi_utf8casecmp((unsigned char*)attrType, (unsigned char*)"cosAttribute")) +- pSneakyVal = &pCosAttribute; +- else if(!slapi_utf8casecmp((unsigned char*)attrType, (unsigned char*)"cosIndirectSpecifier")) +- pSneakyVal = &pCosSpecifier; +- if(!pSneakyVal) { +- continue; +- } +- /* It's a type we're interested in */ +- if(slapi_attr_get_bervals_copy(dnAttr, 
&dnVals)) { +- continue; +- } +- valIndex = 0; +- if(!dnVals) { +- continue; +- } +- for (valIndex = 0; dnVals[valIndex]; valIndex++) +- { +- if(!dnVals[valIndex]->bv_val) { +- continue; +- } +- /* +- parse any overide or default values +- and deal with them +- */ +- if(pSneakyVal == &pCosAttribute) +- { +- int qualifier_hit = 0; +- int op_qualifier_hit = 0; +- int merge_schemes_qualifier_hit = 0; +- int override_qualifier_hit =0; +- int default_qualifier_hit = 0; +- int operational_default_qualifier_hit = 0; +- do +- { +- qualifier_hit = 0; ++ do { ++ attrType = 0; ++ /* we need to fill in the details of the definition now */ ++ slapi_attr_get_type(dnAttr, &attrType); ++ if (!attrType) { ++ continue; ++ } ++ pSneakyVal = 0; ++ if (!slapi_utf8casecmp((unsigned char *)attrType, (unsigned char *)"objectclass")) ++ pSneakyVal = &pObjectclass; ++ else if (!slapi_utf8casecmp((unsigned char *)attrType, (unsigned char *)"cosTargetTree")) { ++ if (pCosTargetTree) { ++ norm_dn = slapi_create_dn_string("%s", pCosTargetTree->val); ++ if (norm_dn) { ++ slapi_ch_free_string(&pCosTargetTree->val); ++ pCosTargetTree->val = norm_dn; ++ } ++ } ++ pSneakyVal = &pCosTargetTree; ++ } else if (!slapi_utf8casecmp((unsigned char *)attrType, (unsigned char *)"cosTemplateDn")) ++ pSneakyVal = &pCosTemplateDn; ++ else if (!slapi_utf8casecmp((unsigned char *)attrType, (unsigned char *)"cosSpecifier")) ++ pSneakyVal = &pCosSpecifier; ++ else if (!slapi_utf8casecmp((unsigned char *)attrType, (unsigned char *)"cosAttribute")) ++ pSneakyVal = &pCosAttribute; ++ else if (!slapi_utf8casecmp((unsigned char *)attrType, (unsigned char *)"cosIndirectSpecifier")) ++ pSneakyVal = &pCosSpecifier; ++ if (!pSneakyVal) { ++ continue; ++ } ++ /* It's a type we're interested in */ ++ if (slapi_attr_get_bervals_copy(dnAttr, &dnVals)) { ++ continue; ++ } ++ valIndex = 0; ++ if (!dnVals) { ++ continue; ++ } ++ for (valIndex = 0; dnVals[valIndex]; valIndex++) { ++ if (!dnVals[valIndex]->bv_val) { ++ continue; ++ } ++ /* ++ parse any overide or default values ++ and deal with them ++ */ ++ if (pSneakyVal == &pCosAttribute) { ++ int qualifier_hit = 0; ++ int op_qualifier_hit = 0; ++ int merge_schemes_qualifier_hit = 0; ++ int override_qualifier_hit = 0; ++ int default_qualifier_hit = 0; ++ int operational_default_qualifier_hit = 0; ++ do { ++ qualifier_hit = 0; ++ ++ if (cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " operational")) { ++ /* matched */ ++ op_qualifier_hit = 1; ++ qualifier_hit = 1; ++ } ++ ++ if (cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " merge-schemes")) { ++ /* matched */ ++ merge_schemes_qualifier_hit = 1; ++ qualifier_hit = 1; ++ } ++ ++ if (cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " override")) { ++ /* matched */ ++ override_qualifier_hit = 1; ++ qualifier_hit = 1; ++ } ++ ++ if (cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " default")) { ++ default_qualifier_hit = 1; ++ qualifier_hit = 1; ++ } ++ ++ if (cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " operational-default")) { ++ operational_default_qualifier_hit = 1; ++ qualifier_hit = 1; ++ } ++ } while (qualifier_hit == 1); ++ ++ /* ++ * At this point, dnVals[valIndex]->bv_val ++ * is the value of cosAttribute, stripped of ++ * any qualifiers, so add this pure attribute type to ++ * the appropriate lists. 
++ */ ++ ++ if (op_qualifier_hit) { ++ cos_cache_add_attrval(&pCosOperational, ++ dnVals[valIndex]->bv_val); ++ } ++ if (merge_schemes_qualifier_hit) { ++ cos_cache_add_attrval(&pCosMerge, dnVals[valIndex]->bv_val); ++ } ++ if (override_qualifier_hit) { ++ cos_cache_add_attrval(&pCosOverrides, ++ dnVals[valIndex]->bv_val); ++ } ++ if (default_qualifier_hit) { ++ /* attr is added below in pSneakyVal, in any case */ ++ } ++ ++ if (operational_default_qualifier_hit) { ++ cos_cache_add_attrval(&pCosOpDefault, ++ dnVals[valIndex]->bv_val); ++ } ++ ++ /* ++ * Each SP_handle is associated to one and only one vattr. ++ * We could consider making this a single function rather ++ * than the double-call. ++ */ ++ ++ vattr_sp_handle *vattr_handle = NULL; ++ ++ if (slapi_vattrspi_register((vattr_sp_handle **)&vattr_handle, ++ cos_cache_vattr_get, ++ cos_cache_vattr_compare, ++ cos_cache_vattr_types) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_init - Cannot register as service provider for %s\n", dnVals[valIndex]->bv_val); ++ } else { ++ slapi_vattrspi_regattr((vattr_sp_handle *)vattr_handle, dnVals[valIndex]->bv_val, NULL, NULL); ++ } ++ ++ } /* if(attrType is cosAttribute) */ ++ ++ /* ++ * Add the attributetype to the appropriate ++ * list. ++ */ ++ cos_cache_add_attrval(pSneakyVal, dnVals[valIndex]->bv_val); ++ ++ } /* for (valIndex = 0; dnVals[valIndex]; valIndex++) */ ++ ++ ber_bvecfree(dnVals); ++ dnVals = NULL; ++ } while (!slapi_entry_next_attr(e, dnAttr, &dnAttr)); ++ ++ if (pCosAttribute && (!pCosTargetTree || !pCosTemplateDn)) { ++ /* get the parent of the definition */ ++ char *orig = slapi_dn_parent(pDn->val); ++ char *parent = NULL; ++ if (orig) { ++ parent = slapi_create_dn_string("%s", orig); ++ if (!parent) { ++ parent = orig; ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, ++ "cos_dn_defs_cb - " ++ "Failed to normalize parent dn %s. 
" ++ "Adding the pre normalized dn.\n", ++ parent); ++ } ++ if (!pCosTargetTree) { ++ cos_cache_add_attrval(&pCosTargetTree, parent); ++ } ++ if (!pCosTemplateDn) { ++ cos_cache_add_attrval(&pCosTemplateDn, parent); ++ } ++ if (parent != orig) { ++ slapi_ch_free_string(&parent); ++ } ++ slapi_ch_free_string(&orig); ++ } else { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, ++ "cos_dn_defs_cb - " ++ "Failed to get parent dn of cos definition %s.\n", ++ pDn->val); ++ if (!pCosTemplateDn) { ++ if (!pCosTargetTree) { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - cosTargetTree and cosTemplateDn are not set.\n"); ++ } else { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - cosTemplateDn is not set.\n"); ++ } ++ } else if (!pCosTargetTree) { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - cosTargetTree is not set.\n"); ++ } ++ } ++ } + +- if(cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " operational")) +- { +- /* matched */ +- op_qualifier_hit = 1; +- qualifier_hit = 1; +- } +- +- if(cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " merge-schemes")) +- { +- /* matched */ +- merge_schemes_qualifier_hit = 1; +- qualifier_hit = 1; +- } ++ /* ++ determine the type of class of service scheme ++ */ + +- if(cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " override")) +- { +- /* matched */ +- override_qualifier_hit = 1; +- qualifier_hit = 1; +- } +- +- if(cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " default")) { +- default_qualifier_hit = 1; +- qualifier_hit = 1; +- } ++ if (pObjectclass) { ++ if (cos_cache_attrval_exists(pObjectclass, "cosDefinition")) { ++ cosType = COSTYPE_CLASSIC; ++ } else if (cos_cache_attrval_exists(pObjectclass, "cosClassicDefinition")) { ++ cosType = COSTYPE_CLASSIC; + +- if(cos_cache_backwards_stricmp_and_clip(dnVals[valIndex]->bv_val, " operational-default")) { +- operational_default_qualifier_hit = 1; +- qualifier_hit = 1; +- } +- } +- while(qualifier_hit == 1); ++ } else if (cos_cache_attrval_exists(pObjectclass, "cosPointerDefinition")) { ++ cosType = COSTYPE_POINTER; + +- /* +- * At this point, dnVals[valIndex]->bv_val +- * is the value of cosAttribute, stripped of +- * any qualifiers, so add this pure attribute type to +- * the appropriate lists. 
+- */ +- +- if ( op_qualifier_hit ) { +- cos_cache_add_attrval(&pCosOperational, +- dnVals[valIndex]->bv_val); +- } +- if ( merge_schemes_qualifier_hit ) { +- cos_cache_add_attrval(&pCosMerge, dnVals[valIndex]->bv_val); +- } +- if ( override_qualifier_hit ) { +- cos_cache_add_attrval(&pCosOverrides, +- dnVals[valIndex]->bv_val); +- } +- if ( default_qualifier_hit ) { +- /* attr is added below in pSneakyVal, in any case */ +- } ++ } else if (cos_cache_attrval_exists(pObjectclass, "cosIndirectDefinition")) { ++ cosType = COSTYPE_INDIRECT; + +- if ( operational_default_qualifier_hit ) { +- cos_cache_add_attrval(&pCosOpDefault, +- dnVals[valIndex]->bv_val); +- } ++ } else ++ cosType = COSTYPE_BADTYPE; ++ } + +- slapi_vattrspi_regattr((vattr_sp_handle *)vattr_handle, +- dnVals[valIndex]->bv_val, NULL, NULL); +- } /* if(attrType is cosAttribute) */ ++ /* ++ we should now have a full definition, ++ do some sanity checks because we don't ++ want bogus entries in the cache ++ then ship it ++ */ ++ ++ /* these must exist */ ++ if (pDn && pObjectclass && ++ ((cosType == COSTYPE_CLASSIC && ++ pCosTemplateDn && ++ pCosSpecifier && ++ pCosAttribute) || ++ (cosType == COSTYPE_POINTER && ++ pCosTemplateDn && ++ pCosAttribute) || ++ (cosType == COSTYPE_INDIRECT && ++ pCosSpecifier && ++ pCosAttribute))) { ++ int rc = 0; ++ /* ++ we'll leave the referential integrity stuff ++ up to the referint plug-in and assume all ++ is good - if it's not then we just have a ++ useless definition and we'll nag copiously later. ++ */ ++ char *pTmpDn = slapi_ch_strdup(pDn->val); /* because dn gets hosed on error */ ++ ++ if (!(rc = cos_cache_add_defn(info->pDefs, &pDn, cosType, ++ &pCosTargetTree, &pCosTemplateDn, ++ &pCosSpecifier, &pCosAttribute, ++ &pCosOverrides, &pCosOperational, ++ &pCosMerge, &pCosOpDefault))) { ++ info->ret = 0; /* we have succeeded to add the defn*/ ++ } else { ++ /* ++ * Failed but we will continue the search for other defs ++ * Don't reset info->ret....it keeps track of any success ++ */ ++ if (rc == COS_DEF_ERROR_NO_TEMPLATES) { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - Skipping CoS Definition %s" ++ "--no CoS Templates found, which should be added before the CoS Definition.\n", ++ pTmpDn); ++ } else { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - Skipping CoS Definition %s\n" ++ "--error(%d)\n", ++ pTmpDn, rc); ++ } ++ } + +- /* +- * Add the attributetype to the appropriate +- * list. +- */ +- cos_cache_add_attrval(pSneakyVal, dnVals[valIndex]->bv_val); +- +- }/* for (valIndex = 0; dnVals[valIndex]; valIndex++) */ +- +- ber_bvecfree( dnVals ); +- dnVals = NULL; +- } while(!slapi_entry_next_attr(e, dnAttr, &dnAttr)); +- +- if (pCosAttribute && (!pCosTargetTree || !pCosTemplateDn)) { +- /* get the parent of the definition */ +- char *orig = slapi_dn_parent(pDn->val); +- char *parent = NULL; +- if (orig) { +- parent = slapi_create_dn_string("%s", orig); +- if (!parent) { +- parent = orig; +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, +- "cos_dn_defs_cb - " +- "Failed to normalize parent dn %s. 
" +- "Adding the pre normalized dn.\n", +- parent); +- } +- if (!pCosTargetTree) { +- cos_cache_add_attrval(&pCosTargetTree, parent); +- } +- if (!pCosTemplateDn) { +- cos_cache_add_attrval(&pCosTemplateDn, parent); +- } +- if (parent != orig) { +- slapi_ch_free_string(&parent); +- } +- slapi_ch_free_string(&orig); +- } else { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, +- "cos_dn_defs_cb - " +- "Failed to get parent dn of cos definition %s.\n", +- pDn->val); +- if (!pCosTemplateDn) { +- if (!pCosTargetTree) { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - cosTargetTree and cosTemplateDn are not set.\n"); +- } else { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - cosTemplateDn is not set.\n"); +- } +- } else if (!pCosTargetTree) { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - cosTargetTree is not set.\n"); +- } +- } +- } +- +- /* +- determine the type of class of service scheme +- */ +- +- if(pObjectclass) +- { +- if(cos_cache_attrval_exists(pObjectclass, "cosDefinition")) +- { +- cosType = COSTYPE_CLASSIC; +- } +- else if(cos_cache_attrval_exists(pObjectclass, "cosClassicDefinition")) +- { +- cosType = COSTYPE_CLASSIC; +- +- } +- else if(cos_cache_attrval_exists(pObjectclass, "cosPointerDefinition")) +- { +- cosType = COSTYPE_POINTER; +- +- } +- else if(cos_cache_attrval_exists(pObjectclass, "cosIndirectDefinition")) +- { +- cosType = COSTYPE_INDIRECT; +- +- } +- else +- cosType = COSTYPE_BADTYPE; +- } +- +- /* +- we should now have a full definition, +- do some sanity checks because we don't +- want bogus entries in the cache +- then ship it +- */ +- +- /* these must exist */ +- if(pDn && pObjectclass && +- ( +- (cosType == COSTYPE_CLASSIC && +- pCosTemplateDn && +- pCosSpecifier && +- pCosAttribute ) +- || +- (cosType == COSTYPE_POINTER && +- pCosTemplateDn && +- pCosAttribute ) +- || +- (cosType == COSTYPE_INDIRECT && +- pCosSpecifier && +- pCosAttribute ) +- ) +- ) +- { +- int rc = 0; +- /* +- we'll leave the referential integrity stuff +- up to the referint plug-in and assume all +- is good - if it's not then we just have a +- useless definition and we'll nag copiously later. 
+- */ +- char *pTmpDn = slapi_ch_strdup(pDn->val); /* because dn gets hosed on error */ +- +- if(!(rc = cos_cache_add_defn(info->pDefs, &pDn, cosType, +- &pCosTargetTree, &pCosTemplateDn, +- &pCosSpecifier, &pCosAttribute, +- &pCosOverrides, &pCosOperational, +- &pCosMerge, &pCosOpDefault))) { +- info->ret = 0; /* we have succeeded to add the defn*/ +- } else { +- /* +- * Failed but we will continue the search for other defs +- * Don't reset info->ret....it keeps track of any success +- */ +- if ( rc == COS_DEF_ERROR_NO_TEMPLATES) { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - Skipping CoS Definition %s" +- "--no CoS Templates found, which should be added before the CoS Definition.\n", +- pTmpDn); +- } else { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - Skipping CoS Definition %s\n" +- "--error(%d)\n", +- pTmpDn, rc); +- } +- } +- +- slapi_ch_free_string(&pTmpDn); +- } +- else +- { +- /* +- this definition is brain dead - bail +- if we have a dn use it to report, if not then *really* bad +- things are going on +- */ +- if(pDn) +- { +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - " +- "Incomplete cos definition detected in %s, discarding from cache.\n",pDn->val); +- } +- else +- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - " +- "Incomplete cos definition detected, no DN to report, discarding from cache.\n"); +- +- if(pCosTargetTree) +- cos_cache_del_attrval_list(&pCosTargetTree); +- if(pCosTemplateDn) +- cos_cache_del_attrval_list(&pCosTemplateDn); +- if(pCosSpecifier) +- cos_cache_del_attrval_list(&pCosSpecifier); +- if(pCosAttribute) +- cos_cache_del_attrval_list(&pCosAttribute); +- if(pDn) +- cos_cache_del_attrval_list(&pDn); +- } ++ slapi_ch_free_string(&pTmpDn); ++ } else { ++ /* ++ this definition is brain dead - bail ++ if we have a dn use it to report, if not then *really* bad ++ things are going on ++ */ ++ if (pDn) { ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - " ++ "Incomplete cos definition detected in %s, discarding from cache.\n", ++ pDn->val); ++ } else ++ slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_dn_defs_cb - " ++ "Incomplete cos definition detected, no DN to report, discarding from cache.\n"); ++ ++ if (pCosTargetTree) ++ cos_cache_del_attrval_list(&pCosTargetTree); ++ if (pCosTemplateDn) ++ cos_cache_del_attrval_list(&pCosTemplateDn); ++ if (pCosSpecifier) ++ cos_cache_del_attrval_list(&pCosSpecifier); ++ if (pCosAttribute) ++ cos_cache_del_attrval_list(&pCosAttribute); ++ if (pDn) ++ cos_cache_del_attrval_list(&pDn); ++ } + bail: + /* we don't keep the objectclasses, so lets free them */ + if(pObjectclass) { +diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c +index 3697eaa97..3e1724963 100644 +--- a/ldap/servers/plugins/roles/roles_cache.c ++++ b/ldap/servers/plugins/roles/roles_cache.c +@@ -48,9 +48,6 @@ static char *allUserAttributes[] = { + /* views scoping */ + static void **views_api; + +-/* Service provider handler */ +-static vattr_sp_handle *vattr_handle = NULL; +- + /* List of nested roles */ + typedef struct _role_object_nested { + Slapi_DN *dn; /* value of attribute nsroledn in a nested role definition */ +@@ -224,13 +221,16 @@ int roles_cache_init() + + /* Register a callback on backends creation|modification|deletion, + so that we update the corresponding cache */ +- slapi_register_backend_state_change(NULL, roles_cache_trigger_update_suffix); +- +- if ( 
slapi_vattrspi_register((vattr_sp_handle **)&vattr_handle, +- roles_sp_get_value, +- roles_sp_compare_value, +- roles_sp_list_types) ) +- { ++ slapi_register_backend_state_change(NULL, roles_cache_trigger_update_suffix); ++ ++ /* Service provider handler - only used once! and freed by vattr! */ ++ vattr_sp_handle *vattr_handle = NULL; ++ ++ ++ if (slapi_vattrspi_register((vattr_sp_handle **)&vattr_handle, ++ roles_sp_get_value, ++ roles_sp_compare_value, ++ roles_sp_list_types)) { + slapi_log_err(SLAPI_LOG_ERR, ROLES_PLUGIN_SUBSYSTEM, + "roles_cache_init - slapi_vattrspi_register failed\n"); + +@@ -648,22 +648,20 @@ void roles_cache_stop() + + slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "--> roles_cache_stop\n"); + +- /* Go through all the roles list and trigger the associated structure */ +- slapi_rwlock_wrlock(global_lock); +- current_role = roles_list; +- while ( current_role ) +- { +- slapi_lock_mutex(current_role->change_lock); +- current_role->keeprunning = 0; +- next_role = current_role->next; +- slapi_notify_condvar(current_role->something_changed, 1 ); +- slapi_unlock_mutex(current_role->change_lock); +- +- current_role = next_role; +- } +- slapi_rwlock_unlock(global_lock); +- slapi_ch_free((void **)&vattr_handle); +- roles_list = NULL; ++ /* Go through all the roles list and trigger the associated structure */ ++ slapi_rwlock_wrlock(global_lock); ++ current_role = roles_list; ++ while (current_role) { ++ slapi_lock_mutex(current_role->change_lock); ++ current_role->keeprunning = 0; ++ next_role = current_role->next; ++ slapi_notify_condvar(current_role->something_changed, 1); ++ slapi_unlock_mutex(current_role->change_lock); ++ ++ current_role = next_role; ++ } ++ slapi_rwlock_unlock(global_lock); ++ roles_list = NULL; + + slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "<-- roles_cache_stop\n"); + } +diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c +index ef4d7f279..84e01cd62 100644 +--- a/ldap/servers/slapd/vattr.c ++++ b/ldap/servers/slapd/vattr.c +@@ -1843,8 +1843,15 @@ static int vattr_map_create(void) + return 0; + } + +-void vattr_map_entry_free(vattr_map_entry *vae) { +- slapi_ch_free((void **)&(vae->sp_list)); ++void ++vattr_map_entry_free(vattr_map_entry *vae) ++{ ++ vattr_sp_handle *list_entry = vae->sp_list; ++ while (list_entry != NULL) { ++ vattr_sp_handle *next_entry = list_entry->next; ++ slapi_ch_free((void **)&list_entry); ++ list_entry = next_entry; ++ } + slapi_ch_free_string(&(vae->type_name)); + slapi_ch_free((void **)&vae); + } +@@ -2134,16 +2141,9 @@ int slapi_vattr_schema_check_type(Slapi_Entry *e, char *type) + + vattr_map_entry *vattr_map_entry_new(char *type_name, vattr_sp_handle *sph, void* hint) + { +- vattr_map_entry *result = NULL; +- vattr_sp_handle *sp_copy = NULL; +- +- sp_copy = (vattr_sp_handle*)slapi_ch_calloc(1, sizeof (vattr_sp_handle)); +- sp_copy->sp = sph->sp; +- sp_copy->hint = hint; +- +- result = (vattr_map_entry*)slapi_ch_calloc(1, sizeof (vattr_map_entry)); +- result->type_name = slapi_ch_strdup(type_name); +- result->sp_list = sp_copy; ++ vattr_map_entry *result = (vattr_map_entry *)slapi_ch_calloc(1, sizeof(vattr_map_entry)); ++ result->type_name = slapi_ch_strdup(type_name); ++ result->sp_list = sph; + + /* go get schema */ + result->objectclasses = vattr_map_entry_build_schema(type_name); +@@ -2259,6 +2259,16 @@ we'd need to hold a lock on the read path, which we don't want to do. 
+ So any SP which relinquishes its need to handle a type needs to continue + to handle the calls on it, but return nothing */ + /* DBDB need to sort out memory ownership here, it's not quite right */ ++/* ++ * This function was inconsistent. We would allocated and "kind of", ++ * copy the sp_handle here for the vattr_map_entry_new path. But we ++ * would "take ownership" for the existing entry and the list addition ++ * path. Instead now, EVERY sp_handle we take, we take ownership of ++ * and the CALLER must allocate a new one each time. ++ * ++ * Better idea, is that regattr should just take the fn pointers ++ * and callers never *see* the sp_handle structure at all. ++ */ + + int vattr_map_sp_insert(char *type_to_add, vattr_sp_handle *sp, void *hint) + { +-- +2.13.6 + diff --git a/SOURCES/0081-Ticket-49441-Import-crashes-with-large-indexed-binar.patch b/SOURCES/0081-Ticket-49441-Import-crashes-with-large-indexed-binar.patch new file mode 100644 index 0000000..3e65c49 --- /dev/null +++ b/SOURCES/0081-Ticket-49441-Import-crashes-with-large-indexed-binar.patch @@ -0,0 +1,96 @@ +From df5000efced2d00aa0fc6546fcf6fc7b02e27256 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 6 Nov 2017 22:30:55 -0500 +Subject: [PATCH] Ticket 49441 - Import crashes with large indexed binary + attributes + +Bug Description: Importing an ldif file that contains entries with large + binary attributes that are indexed crashes the server. + The crash occurs when "encoding" the binary value to a + string for debug logging, where we "underflow" the buffer + space index which then allows the string buffer to overflow. + +Fix Description: While filling the string buffer with the encoded binary + value we need to make sure if the buffer space is greater + than zero before decrementing it. + + Also check if trace logging is being used before we actually + call the logging function which calls the "encoded" function + first. This way we avoid this costly "encoding" on every + index call we make. + +https://pagure.io/389-ds-base/issue/49441 + +Reviewed by: firstyear(Thanks!) 
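As a rough illustration of the fix described above (a sketch only, not the server's encode() routine): decrement the remaining-space counter only while it is known to be large enough, so it can never go below zero and silently defeat the truncation check that is supposed to stop the copy. The hypothetical helper and names below are illustrative, not taken from the patch.

#include <stdio.h>
#include <string.h>

static void
escape_to_buf(const unsigned char *data, size_t len, char *buf, size_t bufsize)
{
    char *out = buf;
    size_t space = bufsize - 1;          /* reserve room for the trailing NUL */

    for (size_t i = 0; i < len; i++) {
        if (space < 4) {                 /* not enough room for "\xx" plus NUL: stop copying */
            break;
        }
        int n = snprintf(out, space + 1, "\\%02x", data[i]);
        out += n;
        space -= (size_t)n;              /* n is 3 and space >= 4 here, so this cannot wrap */
    }
    *out = '\0';
}

int
main(void)
{
    unsigned char blob[] = {0xde, 0xad, 0xbe, 0xef};
    char buf[8];                         /* deliberately small: output is truncated safely */

    escape_to_buf(blob, sizeof(blob), buf, sizeof(buf));
    printf("%s\n", buf);                 /* prints "\de\ad" */
    return 0;
}

The patch pairs this with a cheaper guard: the encoding is only performed at all when trace logging is enabled, so the cost is not paid on every index call.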
+--- + ldap/servers/slapd/back-ldbm/index.c | 21 ++++++++++----------- + 1 file changed, 10 insertions(+), 11 deletions(-) + +diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c +index d4de28ca3..d62052a22 100644 +--- a/ldap/servers/slapd/back-ldbm/index.c ++++ b/ldap/servers/slapd/back-ldbm/index.c +@@ -808,7 +808,10 @@ encode (const struct berval* data, char buf[BUFSIZ]) + bufSpace -= (s - first); + } + do { +- *bufNext++ = '\\'; --bufSpace; ++ if (bufSpace) { ++ *bufNext++ = '\\'; ++ --bufSpace; ++ } + if (bufSpace < 2) { + memcpy (bufNext, "..", 2); + bufNext += 2; +@@ -903,8 +906,10 @@ index_read_ext_allids( + slapi_log_err(SLAPI_LOG_ERR, "index_read_ext_allids", "NULL prefix\n"); + return NULL; + } +- slapi_log_err(SLAPI_LOG_TRACE, "index_read_ext_allids", "=> ( \"%s\" %s \"%s\" )\n", +- type, prefix, encode (val, buf)); ++ if (slapi_is_loglevel_set(LDAP_DEBUG_TRACE)) { ++ slapi_log_err(SLAPI_LOG_TRACE, "index_read_ext_allids", "=> ( \"%s\" %s \"%s\" )\n", ++ type, prefix, encode (val, buf)); ++ } + + basetype = typebuf; + if ( (basetmp = slapi_attr_basetype( type, typebuf, sizeof(typebuf) )) +@@ -1737,16 +1742,13 @@ addordel_values( + */ + key.flags = DB_DBT_USERMEM; + key.ulen = tmpbuflen; +-#ifdef LDAP_ERROR_LOGGING +- /* XXX if ( slapd_ldap_debug & LDAP_DEBUG_TRACE ) XXX */ +- { ++ if (slapi_is_loglevel_set(LDAP_DEBUG_TRACE)) { + char encbuf[BUFSIZ]; + + slapi_log_err(SLAPI_LOG_TRACE, "addordel_values", "%s_value(\"%s\")\n", + (flags & BE_INDEX_ADD) ? "add" : "del", + encoded (&key, encbuf)); + } +-#endif + + if (NULL != txn) { + db_txn = txn->back_txn_txn; +@@ -1907,16 +1909,13 @@ addordel_values_sv( + */ + key.flags = DB_DBT_USERMEM; + key.ulen = tmpbuflen; +-#ifdef LDAP_ERROR_LOGGING +- /* XXX if ( slapd_ldap_debug & LDAP_DEBUG_TRACE ) XXX */ +- { ++ if (slapi_is_loglevel_set(LDAP_DEBUG_TRACE)) { + char encbuf[BUFSIZ]; + + slapi_log_err(SLAPI_LOG_TRACE, "addordel_values_sv", "%s_value(\"%s\")\n", + (flags & BE_INDEX_ADD) ? 
"add" : "del", + encoded (&key, encbuf)); + } +-#endif + + if (NULL != txn) { + db_txn = txn->back_txn_txn; +-- +2.13.6 + diff --git a/SOURCES/0082-Ticket-49431-replicated-MODRDN-fails-breaking-replic.patch b/SOURCES/0082-Ticket-49431-replicated-MODRDN-fails-breaking-replic.patch new file mode 100644 index 0000000..502dda1 --- /dev/null +++ b/SOURCES/0082-Ticket-49431-replicated-MODRDN-fails-breaking-replic.patch @@ -0,0 +1,38 @@ +From 557f4d4ed5e37f09691d383dd8189b642ade8f2b Mon Sep 17 00:00:00 2001 +From: Ludwig Krispenz +Date: Sun, 29 Oct 2017 16:32:11 +0100 +Subject: [PATCH] Ticket 49431 - replicated MODRDN fails breaking replication + +Bug: in urp modrdn_operation not a full dn was passed to generate the conflict rdn passed + and so the sufix test failed + +Fix: provide full dn of new entry + +Reviewed by: Mark, Thanks +--- + ldap/servers/plugins/replication/urp.c | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c +index 64810e9d4..21319d9f7 100644 +--- a/ldap/servers/plugins/replication/urp.c ++++ b/ldap/servers/plugins/replication/urp.c +@@ -433,7 +433,14 @@ urp_modrdn_operation( Slapi_PBlock *pb ) + /* The target entry is a loser */ + + char *newrdn_with_uniqueid; +- newrdn_with_uniqueid= get_rdn_plus_uniqueid (sessionid, newrdn, op_uniqueid); ++ char *newdn = NULL; ++ if (new_parent_entry) { ++ newdn = slapi_ch_smprintf("%s,%s", newrdn, slapi_entry_get_dn(new_parent_entry)); ++ } else { ++ newdn = slapi_ch_smprintf("%s,%s", newrdn, slapi_entry_get_dn(parent_entry)); ++ } ++ newrdn_with_uniqueid= get_rdn_plus_uniqueid (sessionid, newdn, op_uniqueid); ++ slapi_ch_free_string(&newdn); + if(newrdn_with_uniqueid==NULL) + { + op_result= LDAP_OPERATIONS_ERROR; +-- +2.13.6 + diff --git a/SOURCES/389-ds-base-devel.README b/SOURCES/389-ds-base-devel.README new file mode 100644 index 0000000..190c874 --- /dev/null +++ b/SOURCES/389-ds-base-devel.README @@ -0,0 +1,4 @@ +For detailed information on developing plugins for +389 Directory Server visit. + +http://port389/wiki/Plugins diff --git a/SOURCES/389-ds-base-git.sh b/SOURCES/389-ds-base-git.sh new file mode 100644 index 0000000..784e7b6 --- /dev/null +++ b/SOURCES/389-ds-base-git.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +DATE=`date +%Y%m%d` +# use a real tag name here +VERSION=1.3.1.6 +PKGNAME=389-ds-base +TAG=${TAG:-$PKGNAME-$VERSION} +URL="http://git.fedorahosted.org/git/?p=389/ds.git;a=snapshot;h=$TAG;sf=tgz" +SRCNAME=$PKGNAME-$VERSION + +wget -O $SRCNAME.tar.gz "$URL" + +echo convert tgz format to tar.bz2 format + +gunzip $PKGNAME-$VERSION.tar.gz +bzip2 $PKGNAME-$VERSION.tar diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec new file mode 100644 index 0000000..ed028d0 --- /dev/null +++ b/SPECS/389-ds-base.spec @@ -0,0 +1,2859 @@ + +%global pkgname dirsrv +# for a pre-release, define the prerel field e.g. .a1 .rc2 - comment out for official release +# also remove the space between % and global - this space is needed because +# fedpkg verrel stupidly ignores comment lines +#% global prerel .rc3 +# also need the relprefix field for a pre-release e.g. .0 - also comment out for official release +#% global relprefix 0. + +# If perl-Socket-2.000 or newer is available, set 0 to use_Socket6. 
+%global use_Socket6 0 +%global use_nunc_stans 1 + +%if %{_arch} != "s390x" && %{_arch} != "s390" +%global use_tcmalloc 1 +%else +%global use_tcmalloc 0 +%endif + +# fedora 15 and later uses tmpfiles.d +# otherwise, comment this out +%{!?with_tmpfiles_d: %global with_tmpfiles_d %{_sysconfdir}/tmpfiles.d} + +# systemd support +%global groupname %{pkgname}.target + +# set PIE flag +%global _hardened_build 1 + +Summary: 389 Directory Server (base) +Name: 389-ds-base +Version: 1.3.6.1 +Release: %{?relprefix}24%{?prerel}%{?dist} +License: GPLv3+ +URL: https://www.port389.org/ +Group: System Environment/Daemons +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +Conflicts: selinux-policy-base < 3.9.8 +Requires: %{name}-libs = %{version}-%{release} +Provides: ldif2ldbm >= 0 + +BuildRequires: nspr-devel +BuildRequires: nss-devel +BuildRequires: svrcore-devel >= 4.1.3 +BuildRequires: openldap-devel +BuildRequires: libdb-devel +BuildRequires: cyrus-sasl-devel +BuildRequires: icu +BuildRequires: libicu-devel +BuildRequires: pcre-devel +BuildRequires: gcc-c++ +# The following are needed to build the snmp ldap-agent +BuildRequires: net-snmp-devel +%ifnarch sparc sparc64 ppc ppc64 s390 s390x +BuildRequires: lm_sensors-devel +%endif +BuildRequires: bzip2-devel +BuildRequires: zlib-devel +BuildRequires: openssl-devel +BuildRequires: tcp_wrappers +# the following is for the pam passthru auth plug-in +BuildRequires: pam-devel +BuildRequires: systemd-units +BuildRequires: systemd-devel +# Needed to support regeneration of the autotool artifacts. +BuildRequires: autoconf +BuildRequires: automake +BuildRequires: libtool +%if %{use_nunc_stans} +BuildRequires: libevent-devel +BuildRequires: libtalloc-devel +BuildRequires: libtevent-devel +%endif +# For tests! 
+#BuildRequires: libcmocka-devel +BuildRequires: doxygen + +# this is needed for using semanage from our setup scripts +Requires: policycoreutils-python +Requires: /usr/sbin/semanage +Requires: libsemanage-python + +Requires: selinux-policy >= 3.13.1-137 + +# the following are needed for some of our scripts +Requires: openldap-clients +# use_openldap assumes perl-Mozilla-LDAP is built with openldap support +Requires: perl-Mozilla-LDAP + +# this is needed to setup SSL if you are not using the +# administration server package +Requires: nss-tools + +# these are not found by the auto-dependency method +# they are required to support the mandatory LDAP SASL mechs +Requires: cyrus-sasl-gssapi +Requires: cyrus-sasl-md5 +Requires: cyrus-sasl-plain + +# this is needed for verify-db.pl +Requires: libdb-utils + +# This picks up libperl.so as a Requires, so we add this versioned one +Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version)) + +# for the init script +Requires(post): systemd-units +Requires(preun): systemd-units +Requires(postun): systemd-units + +# for setup-ds.pl +Requires: bind-utils + +# for setup-ds.pl to support ipv6 +%if %{use_Socket6} +Requires: perl-Socket6 +%else +Requires: perl-Socket +%endif +Requires: perl-NetAddr-IP +Requires: systemd-libs +Requires: svrcore >= 4.1.3 + +# upgrade path from monolithic % {name} (including -libs & -devel) to % {name} + % {name}-snmp +Obsoletes: %{name} <= 1.3.5.4 + +%if %{use_tcmalloc} +BuildRequires: gperftools-devel +Requires: gperftools-libs +%endif + +Source0: http://www.port389.org/binaries/%{name}-%{version}%{?prerel}.tar.bz2 +# 389-ds-git.sh should be used to generate the source tarball from git +Source1: %{name}-git.sh +Source2: %{name}-devel.README +Patch0: 0000-Ticket-49164-Change-NS-to-acq-rel-semantics-for-atom.patch +Patch1: 0001-Issue-49170-sync-plugin-thread-count-not-handled-cor.patch +Patch2: 0002-Ticket-49165-pw_verify-did-not-handle-external-auth.patch +Patch3: 0003-Issue-49169-Fix-covscan-errors.patch +Patch4: 0004-Ticket-49171-Nunc-Stans-incorrectly-reports-a-timeou.patch +Patch5: 0005-Issue-49169-Fix-covscan-errors-regression.patch +Patch6: 0006-Issue-49062-Reset-agmt-update-staus-and-total-init +Patch7: 0007-Issue-49065-dbmon.sh-fails-if-you-have-nsslapd-requi.patch +Patch8: 0008-Issue-49095-targetattr-wildcard-evaluation-is-incorr.patch +Patch9: 0009-Issue-49157-ds-logpipe.py-crashes-for-non-existing-u.patch +Patch10: 0010-Fix-double-free-in-_cl5NewDBFile-error-path.patch +Patch11: 0011-Issue-49188-retrocl-can-crash-server-at-shutdown.patch +Patch12: 0012-Ticket-49177-rpm-would-not-create-valid-pkgconfig-fi.patch +Patch13: 0013-Ticket-49076-To-debug-DB_DEADLOCK-condition-allow-to.patch +Patch14: 0014-Issue-49192-Deleting-suffix-can-hang-server.patch +Patch15: 0015-Ticket-49174-nunc-stans-can-not-use-negative-timeout.patch +Patch16: 0016-Issue-48989-Integer-overflow.patch +Patch17: 0017-Issue-49035-dbmon.sh-shows-pages-in-use-that-exceeds.patch +Patch18: 0018-Issue-49177-Fix-pkg-config-file.patch +Patch19: 0019-Issue-49205-Fix-logconv.pl-man-page.patch +Patch20: 0020-Issue-49039-password-min-age-should-be-ignored-if-pa.patch +Patch21: 0021-fix-for-cve-2017-2668-simple-return-text-if-suffix-n.patch +Patch22: 0022-Issue-47662-CLI-args-get-removed.patch +Patch23: 0023-Issue-49210-Fix-regression-when-checking-is-password.patch +Patch24: 0024-Ticket-49209-Hang-due-to-omitted-replica-lock-releas.patch +Patch25: 0025-Ticket-49184-Overflow-in-memberof.patch +Patch26: 
0026-Ticket-49196-Autotune-generates-crit-messages.patch +Patch27: 0027-Issue-49221-During-an-upgrade-the-provided-localhost.patch +Patch28: 0028-Ticket-48864-Add-cgroup-memory-limit-detection-to-38.patch +Patch29: 0029-Ticket-49204-Fix-lower-bounds-on-import-autosize-On-.patch +Patch30: 0030-Ticket-49231-fix-sasl-mech-handling.patch +Patch31: 0031-Ticket-49230-slapi_register_plugin-creates-config-en.patch +Patch32: 0032-49227-ldapsearch-for-nsslapd-errorlog-level-re.patch +Patch33: 0033-Ticket-48989-fix-perf-counters.patch +Patch34: 0034-Ticket-48681-logconv.pl-fix-sasl-bind-stats.patch +Patch35: 0035-Ticket-49241-Update-man-page-and-usage-for-db2bak.pl.patch +Patch36: 0036-Ticket-7662-db2index-not-properly-evalauating-argume.patch +Patch37: 0037-Ticket-49075-Adjust-logging-severity-levels.patch +Patch38: 0038-Ticket-49231-Fix-backport-issue.patch +Patch39: 0039-Ticket-49231-Fix-backport-issue-part2.patch +Patch40: 0040-Ticket-48681-logconv.pl-Fix-SASL-Bind-stats-and-rewo.patch +Patch41: 0041-Ticket-49157-ds-logpipe.py-crashes-for-non-existing-.patch +Patch42: 0042-Ticket-49249-cos_cache-is-erroneously-logging-schema.patch +Patch43: 0043-Ticket-49238-AddressSanitizer-heap-use-after-free-in.patch +Patch44: 0044-Ticket-49246-ns-slapd-crashes-in-role-cache-creation.patch +Patch45: 0045-Ticket-49258-Allow-nsslapd-cache-autosize-to-be-modi.patch +Patch46: 0046-Ticket-49261-Fix-script-usage-and-man-pages.patch +Patch47: 0047-Ticket-48864-Fix-FreeIPA-build.patch +Patch48: 0048-Ticket-49157-fix-error-in-ds-logpipe.py.patch +Patch49: 0049-Ticket-49267-autosize-split-of-0-results-in-dbcache-.patch +Patch50: 0050-Ticket-49231-force-EXTERNAL-always.patch +Patch51: 0051-Ticket-48538-Failed-to-delete-old-semaphore.patch +Patch52: 0052-Ticket-49257-Reject-nsslapd-cachememsize-nsslapd-cac.patch +Patch53: 0053-Ticket-49257-Reject-dbcachesize-updates-while-auto-c.patch +Patch54: 0054-Ticket-49184-adjust-logging-level-in-MO-plugin.patch +Patch55: 0055-Ticket-49241-add-symblic-link-location-to-db2bak.pl-.patch +Patch56: 0056-Ticket-49313-Change-the-retrochangelog-default-cache.patch +Patch57: 0057-Ticket-49287-v3-extend-csnpl-handling-to-multiple-ba.patch +Patch58: 0058-Ticket-49336-SECURITY-Locked-account-provides-differ.patch +Patch59: 0059-Ticket-49298-force-sync-on-shutdown.patch +Patch60: 0060-Ticket-49334-fix-backup-restore-if-changelog-exists.patch +Patch61: 0061-Ticket-49356-mapping-tree-crash-can-occur-during-tot.patch +Patch62: 0062-Ticket-49330-Improve-ndn-cache-performance-1.3.6.patch +Patch63: 0063-Ticket-49330-Add-endian-header-file-check-to-configu.patch +Patch64: 0064-Ticket-49257-only-register-modify-callbacks.patch +Patch65: 0065-Ticket-49291-slapi_search_internal_callback_pb-may-S.patch +Patch66: 0066-Ticket-49370-local-password-policies-should-use-the-.patch +Patch67: 0067-Ticket-49380-Crash-when-adding-invalid-replication.patch +Patch68: 0068-Ticket-49380-Add-CI-test.patch +Patch69: 0069-Ticket-49327-password-expired-control-not-sent-durin.patch +Patch70: 0070-Ticket-49379-Allowed-sasl-mapping-requires-restart.patch +Patch71: 0071-Fix-cherry-pick-error-from-sasl-mech-commit.patch +Patch72: 0072-Ticket-49389-unable-to-retrieve-specific-cosAttribut.patch +Patch73: 0073-Ticket-49180-backport-1.3.6-errors-log-filled-with-a.patch +Patch74: 0074-Ticket-48894-harden-valueset_array_to_sorted_quick-v.patch +Patch75: 0075-Ticket-49401-improve-valueset-sorted-performance-on-.patch +Patch76: 0076-Ticket-49401-Fix-compiler-incompatible-pointer-types.patch +Patch77: 
0077-Ticket-48235-Remove-memberOf-global-lock.patch +Patch78: 0078-Ticket-49402-Adding-a-database-entry-with-the-same-d.patch +Patch79: 0079-Ticket-49439-cleanallruv-is-not-logging-information.patch +Patch80: 0080-Ticket-49436-double-free-in-COS-in-some-conditions.patch +Patch81: 0081-Ticket-49441-Import-crashes-with-large-indexed-binar.patch +Patch82: 0082-Ticket-49431-replicated-MODRDN-fails-breaking-replic.patch + +%description +389 Directory Server is an LDAPv3 compliant server. The base package includes +the LDAP server and command line utilities for server administration. + +%package libs +Summary: Core libraries for 389 Directory Server +Group: System Environment/Daemons +BuildRequires: nspr-devel +BuildRequires: nss-devel +BuildRequires: svrcore-devel >= 4.1.3 +BuildRequires: openldap-devel +BuildRequires: libdb-devel +BuildRequires: cyrus-sasl-devel +BuildRequires: libicu-devel +BuildRequires: pcre-devel +%if %{use_nunc_stans} +BuildRequires: libtalloc-devel +BuildRequires: libevent-devel +BuildRequires: libtevent-devel +%endif +BuildRequires: systemd-devel + +%description libs +Core libraries for the 389 Directory Server base package. These libraries +are used by the main package and the -devel package. This allows the -devel +package to be installed with just the -libs package and without the main package. + +%package devel +Summary: Development libraries for 389 Directory Server +Group: Development/Libraries +Requires: %{name}-libs = %{version}-%{release} +Requires: pkgconfig +Requires: nspr-devel +Requires: nss-devel +Requires: svrcore-devel >= 4.1.3 +Requires: openldap-devel +%if %{use_nunc_stans} +Requires: libtalloc +Requires: libevent +Requires: libtevent +%endif +Requires: systemd-libs + +%description devel +Development Libraries and headers for the 389 Directory Server base package. + +%package snmp +Summary: SNMP Agent for 389 Directory Server +Group: System Environment/Daemons +Requires: %{name} = %{version}-%{release} +# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp +Obsoletes: %{name} <= 1.3.6.0 + +%description snmp +SNMP Agent for the 389 Directory Server base package. + +%package tests +Summary: The lib389 Continuous Integration Tests +Group: Development/Libraries +Requires: python-lib389 +BuildArch: noarch + +%description tests +The lib389 CI tests that can be run against the Directory Server. 
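As background for the -devel subpackage described above (headers for out-of-tree plugin development; see README.devel), a minimal, hypothetical plugin registration function might look like the sketch below. The include path assumes the headers are installed under %{_includedir}/dirsrv as listed in %files devel, and every name here (example_plugin_init, the description strings) is illustrative only, not part of this package.

/*
 * Hypothetical out-of-tree plugin skeleton, for illustration only.
 */
#include <dirsrv/slapi-plugin.h>

static Slapi_PluginDesc pdesc = {"example-plugin", "example vendor", "0.1",
                                 "illustrative no-op plugin"};

int
example_plugin_init(Slapi_PBlock *pb)
{
    /* Advertise the plugin API version and description to the server. */
    if (slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01) != 0 ||
        slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&pdesc) != 0) {
        slapi_log_err(SLAPI_LOG_ERR, "example-plugin",
                      "example_plugin_init - registration failed\n");
        return -1;
    }
    return 0;
}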
+ +%prep +%setup -q -n %{name}-%{version}%{?prerel} +cp %{SOURCE2} README.devel +%patch0 -p1 +%patch1 -p1 +%patch2 -p1 +%patch3 -p1 +%patch4 -p1 +%patch5 -p1 +%patch6 -p1 +%patch7 -p1 +%patch8 -p1 +%patch9 -p1 +%patch10 -p1 +%patch11 -p1 +%patch12 -p1 +%patch13 -p1 +%patch14 -p1 +%patch15 -p1 +%patch16 -p1 +%patch17 -p1 +%patch18 -p1 +%patch19 -p1 +%patch20 -p1 +%patch21 -p1 +%patch22 -p1 +%patch23 -p1 +%patch24 -p1 +%patch25 -p1 +%patch26 -p1 +%patch27 -p1 +%patch28 -p1 +%patch29 -p1 +%patch30 -p1 +%patch31 -p1 +%patch32 -p1 +%patch33 -p1 +%patch34 -p1 +%patch35 -p1 +%patch36 -p1 +%patch37 -p1 +%patch38 -p1 +%patch39 -p1 +%patch40 -p1 +%patch41 -p1 +%patch42 -p1 +%patch43 -p1 +%patch44 -p1 +%patch45 -p1 +%patch46 -p1 +%patch47 -p1 +%patch48 -p1 +%patch49 -p1 +%patch50 -p1 +%patch51 -p1 +%patch52 -p1 +%patch53 -p1 +%patch54 -p1 +%patch55 -p1 +%patch56 -p1 +%patch57 -p1 +%patch58 -p1 +%patch59 -p1 +%patch60 -p1 +%patch61 -p1 +%patch62 -p1 +%patch63 -p1 +%patch64 -p1 +%patch65 -p1 +%patch66 -p1 +%patch67 -p1 +%patch68 -p1 +%patch69 -p1 +%patch70 -p1 +%patch71 -p1 +%patch72 -p1 +%patch73 -p1 +%patch74 -p1 +%patch75 -p1 +%patch76 -p1 +%patch77 -p1 +%patch78 -p1 +%patch79 -p1 +%patch80 -p1 +%patch81 -p1 +%patch82 -p1 + +%build + +OPENLDAP_FLAG="--with-openldap" +%{?with_tmpfiles_d: TMPFILES_FLAG="--with-tmpfiles-d=%{with_tmpfiles_d}"} +# hack hack hack https://bugzilla.redhat.com/show_bug.cgi?id=833529 +NSSARGS="--with-svrcore-inc=%{_includedir} --with-svrcore-lib=%{_libdir} --with-nss-lib=%{_libdir} --with-nss-inc=%{_includedir}/nss3" +%if %{use_nunc_stans} +NUNC_STANS_FLAGS="--enable-nunc-stans" +%endif +%if %{use_tcmalloc} +TCMALLOC_FLAGS="--enable-tcmalloc" +%endif + +# Rebuild the autotool artifacts now. +autoreconf -fiv + +%configure --enable-autobind --with-selinux $OPENLDAP_FLAG $TMPFILES_FLAG \ + --with-systemdsystemunitdir=%{_unitdir} \ + --with-systemdsystemconfdir=%{_sysconfdir}/systemd/system \ + --with-perldir=/usr/bin \ + --with-systemdgroupname=%{groupname} $NSSARGS $NUNC_STANS_FLAGS \ + --with-systemd $TCMALLOC_FLAGS + +# Generate symbolic info for debuggers +export XCFLAGS=$RPM_OPT_FLAGS + +%ifarch x86_64 ppc64 ia64 s390x sparc64 +export USE_64=1 +%endif + +make %{?_smp_mflags} + + +%install +rm -rf $RPM_BUILD_ROOT + +make DESTDIR="$RPM_BUILD_ROOT" install + +# Copy in our docs from doxygen. +cp -r %{_builddir}/%{name}-%{version}%{?prerel}/man/man3 $RPM_BUILD_ROOT/%{_mandir}/man3 + +mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname} +mkdir -p $RPM_BUILD_ROOT/var/lib/%{pkgname} +mkdir -p $RPM_BUILD_ROOT/var/lock/%{pkgname} + +# for systemd +mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/systemd/system/%{groupname}.wants + +#remove libtool archives and static libs +find %{buildroot} -type f -name "*.la" -delete +find %{buildroot} -type f -name "*.a" -delete +#rm -f $RPM_BUILD_ROOT%{_libdir}/%{pkgname}/*.a +#rm -f $RPM_BUILD_ROOT%{_libdir}/%{pkgname}/*.la +#rm -f $RPM_BUILD_ROOT%{_libdir}/%{pkgname}/plugins/*.a +#rm -f $RPM_BUILD_ROOT%{_libdir}/%{pkgname}/plugins/*.la + +# Why are we not making this a proper python package? 
+pushd ../%{name}-%{version}%{?prerel} +cp -r dirsrvtests $RPM_BUILD_ROOT/%{_sysconfdir}/%{pkgname} +find $RPM_BUILD_ROOT/%{_sysconfdir}/%{pkgname}/dirsrvtests -type f -name '*.pyc' -delete +find $RPM_BUILD_ROOT/%{_sysconfdir}/%{pkgname}/dirsrvtests -type f -name '*.pyo' -delete +find $RPM_BUILD_ROOT/%{_sysconfdir}/%{pkgname}/dirsrvtests -type d -name '__pycache__' -delete +popd + +# make sure perl scripts have a proper shebang +sed -i -e 's|#{{PERL-EXEC}}|#!/usr/bin/perl|' $RPM_BUILD_ROOT%{_datadir}/%{pkgname}/script-templates/template-*.pl + +%clean +rm -rf $RPM_BUILD_ROOT + +%post +output=/dev/null +output2=/dev/null +# reload to pick up any changes to systemd files +/bin/systemctl daemon-reload >$output 2>&1 || : +# reload to pick up any shared lib changes +/sbin/ldconfig +# find all instances +instances="" # instances that require a restart after upgrade +ninst=0 # number of instances found in total +if [ -n "$DEBUGPOSTTRANS" ] ; then + output=$DEBUGPOSTTRANS + output2=${DEBUGPOSTTRANS}.upgrade +fi + +# Soft static allocation for UID and GID +USERNAME="dirsrv" +ALLOCATED_UID=389 +GROUPNAME="dirsrv" +ALLOCATED_GID=389 +HOMEDIR="/usr/share/dirsrv" + +getent group $GROUPNAME >/dev/null || /usr/sbin/groupadd -f -g $ALLOCATED_GID -r $GROUPNAME +if ! getent passwd $USERNAME >/dev/null ; then + /usr/sbin/useradd -r -u $ALLOCATED_UID -g $GROUPNAME -d $HOMEDIR -s /sbin/nologin -c "user for 389-ds-base" $USERNAME +fi + +echo looking for instances in %{_sysconfdir}/%{pkgname} > $output 2>&1 || : +instbase="%{_sysconfdir}/%{pkgname}" +for dir in $instbase/slapd-* ; do + echo dir = $dir >> $output 2>&1 || : + if [ ! -d "$dir" ] ; then continue ; fi + case "$dir" in *.removed) continue ;; esac + basename=`basename $dir` + inst="%{pkgname}@`echo $basename | sed -e 's/slapd-//g'`" + echo found instance $inst - getting status >> $output 2>&1 || : + if /bin/systemctl -q is-active $inst ; then + echo instance $inst is running >> $output 2>&1 || : + instances="$instances $inst" + else + echo instance $inst is not running >> $output 2>&1 || : + fi + ninst=`expr $ninst + 1` +done +if [ $ninst -eq 0 ] ; then + echo no instances to upgrade >> $output 2>&1 || : + exit 0 # have no instances to upgrade - just skip the rest +fi +# shutdown all instances +echo shutting down all instances . . . >> $output 2>&1 || : +for inst in $instances ; do + echo stopping instance $inst >> $output 2>&1 || : + /bin/systemctl stop $inst >> $output 2>&1 || : +done +echo remove pid files . . . >> $output 2>&1 || : +/bin/rm -f /var/run/%{pkgname}*.pid /var/run/%{pkgname}*.startpid +# do the upgrade +echo upgrading instances . . . 
>> $output 2>&1 || : +DEBUGPOSTSETUPOPT=`/usr/bin/echo $DEBUGPOSTSETUP | /usr/bin/sed -e "s/[^d]//g"` +if [ -n "$DEBUGPOSTSETUPOPT" ] ; then + %{_sbindir}/setup-ds.pl -$DEBUGPOSTSETUPOPT -u -s General.UpdateMode=offline >> $output 2>&1 || : +else + %{_sbindir}/setup-ds.pl -u -s General.UpdateMode=offline >> $output 2>&1 || : +fi + +# restart instances that require it +for inst in $instances ; do + echo restarting instance $inst >> $output 2>&1 || : + /bin/systemctl start $inst >> $output 2>&1 || : +done +exit 0 + +%preun +if [ $1 -eq 0 ]; then # Final removal + # remove instance specific service files/links + rm -rf %{_sysconfdir}/systemd/system/%{groupname}.wants/* > /dev/null 2>&1 || : +fi + +%postun +/sbin/ldconfig +if [ $1 = 0 ]; then # Final removal + rm -rf /var/run/%{pkgname} +fi + +%post snmp +%systemd_post %{pkgname}-snmp.service + +%preun snmp +%systemd_preun %{pkgname}-snmp.service %{groupname} + +%postun snmp +%systemd_postun_with_restart %{pkgname}-snmp.service + +%files +%defattr(-,root,root,-) +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl +%dir %{_sysconfdir}/%{pkgname} +%dir %{_sysconfdir}/%{pkgname}/schema +%config(noreplace)%{_sysconfdir}/%{pkgname}/schema/*.ldif +%dir %{_sysconfdir}/%{pkgname}/config +%dir %{_sysconfdir}/systemd/system/%{groupname}.wants +%config(noreplace)%{_sysconfdir}/%{pkgname}/config/slapd-collations.conf +%config(noreplace)%{_sysconfdir}/%{pkgname}/config/certmap.conf +%config(noreplace)%{_sysconfdir}/%{pkgname}/config/template-initconfig +%config(noreplace)%{_sysconfdir}/sysconfig/%{pkgname} +%config(noreplace)%{_sysconfdir}/sysconfig/%{pkgname}.systemd +%{_datadir}/%{pkgname} +%{_unitdir} +%{_bindir}/* +%{_sbindir}/* +%{_libdir}/%{pkgname}/perl +%{_libdir}/%{pkgname}/python +%dir %{_libdir}/%{pkgname}/plugins +%{_libdir}/%{pkgname}/plugins/*.so +%dir %{_localstatedir}/lib/%{pkgname} +%dir %{_localstatedir}/log/%{pkgname} +%ghost %dir %{_localstatedir}/lock/%{pkgname} +%{_mandir}/man1/* +%{_mandir}/man8/* +%exclude %{_sbindir}/ldap-agent* +%exclude %{_mandir}/man1/ldap-agent.1.gz +%exclude %{_unitdir}/%{pkgname}-snmp.service + +%files devel +%defattr(-,root,root,-) +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel +%{_includedir}/%{pkgname} +%{_libdir}/%{pkgname}/libslapd.so +%{_libdir}/%{pkgname}/libns-dshttpd.so +%{_mandir}/man3/* +%if %{use_nunc_stans} +%{_libdir}/%{pkgname}/libnunc-stans.so +%{_libdir}/%{pkgname}/libsds.so +%endif +%{_libdir}/pkgconfig/* + +%files libs +%defattr(-,root,root,-) +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel +%dir %{_libdir}/%{pkgname} +%{_libdir}/%{pkgname}/libslapd.so.* +%{_libdir}/%{pkgname}/libns-dshttpd-*.so +%if %{use_nunc_stans} +%{_libdir}/%{pkgname}/libnunc-stans.so.* +%{_libdir}/%{pkgname}/libsds.so.* +%endif + +%files snmp +%defattr(-,root,root,-) +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel +%config(noreplace)%{_sysconfdir}/%{pkgname}/config/ldap-agent.conf +%{_sbindir}/ldap-agent* +%{_mandir}/man1/ldap-agent.1.gz +%{_unitdir}/%{pkgname}-snmp.service + +%files tests +%defattr(-,root,root,-) +%doc LICENSE LICENSE.GPLv3+ +%{_sysconfdir}/%{pkgname}/dirsrvtests + +%changelog +* Fri Nov 10 2017 Mark Reynolds - 1.3.6.1-24 +- Bump version to 1.3.6.1-24 +- Resolves: Bug 1508978 - replicated MODRDN fails breaking replication +- Resolves: Bug 1511940 - heap corruption during import +- Resolves: Bug 1510319 - [abrt] 389-ds-base: SLL_Next(): ns-slapd killed by SIGSEGV +- Resolves: Bug 1509347 - cleanallruv task is not logging any information + +* Fri Oct 27 2017 Mark Reynolds - 
1.3.6.1-23 +- Bump version to 1.3.6.1-23 +- Resolves: Bug 1504536 - [memberOf Plugin] bulk deleting users causes deadlock when there are multiple backends +- Resolves: Bug 1503001 - Adding a database entry fails if the same database was deleted after an import +- Resolves: Bug 1506912 - Improve valueset sort performance during valueset purging + +* Mon Oct 9 2017 Mark Reynolds - 1.3.6.1-22 +- Bump version to 1.3.6.1-22 +- Resolves: Bug 1499668 - Errors log filled with attrlist_replace + +* Thu Oct 5 2017 Mark Reynolds - 1.3.6.1-21 +- Bump verions to 1.3.6.1-21 +- Resolves: Bug 1498958 - unable to retrieve specific cosAttribute when subtree password policy is configured + +* Mon Sep 18 2017 Mark Reynolds - 1.3.6.1-20 +- Bump verions to 1.3.6.1-20 +- Resolves: Bug 1489693 - PasswordCheckSyntax attribute fails to validate cn, sn, uid +- Resovles: Bug 1492829 - patch should of been applied to 7.4 but got missed +- Resolves: Bug 1486128 - Performance issues with RHDS 10 - NDN cache investigation +- Resolves: Bug 1489694 - crash in send_ldap_result +- Resolves: Bug 1491778 - crash when adding invalid repl agmt +- Resolves: Bug 1492830 - password expired control not sent +- Resolves: Bug 1492833 - sasl-mechanisms removed during upgrade + +* Mon Aug 21 2017 Mark Reynolds - 1.3.6.1-19 +- Bump version to 1.3.6.1-19 +- Remove old mozldap and db4 requirements +- Resolves: Bug 1483865 - Crash while binding to a server during replication online init + +* Tue Aug 8 2017 Mark Reynolds - 1.3.6.1-18 +- Bump version to 1.3.6.1-18 +- Require srvcore 4.1.3 +- Resolves: Bug 1479757 - dse.ldif and fsync +- Resolves: Bug 1479755 - backup fails if changelog is enabled +- Resolves: Bug 1479756 - Locked account provides different return code if password is correct + +* Mon Jul 31 2017 Mark Reynolds - 1.3.6.1-17 +- Bump version to 1.3.6.1-17 +- Resolves: Bug 1476161 - replication halt - pending list first CSN not committed, pending list increasing +- Resolves: Bug 1476162 - Change the retrochangelog default cache size + +* Tue Jun 6 2017 Mark Reynolds - 1.3.6.1-16 +- Bump version to 1.3.6.1-16 +- Resolves: Bug 1444938 - nsslapd-allowed-sasl-mechanisms doesn't reset to default values without a restart +- Resolves: Bug 1447015 - Adjust db2bak.pl help and man page to reflect changes introduced to the script +- Resolves: Bug 1450896 - Manual resetting of nsslapd-dbcachesize using ldapmodify +- Resolves: Bug 1454921 - Fixup memberof task throws error "memberof_fix_memberof_callback: Weird +- Resolves: Bug 1456774 - ipa-replica server fails to upgrade + +* Tue May 23 2017 Mark Reynolds - 1.3.6.1-15 +- Bump version to 1.3.6.1-15 +- Resolves: Bug 1429770 - ds-logpipe.py crashes for non-existing users +- Resolves: Bug 1444938 - nsslapd-allowed-sasl-mechanisms doesn't reset to default values without a restart +- Resolves: Bug 1450896 - Manual resetting of nsslapd-dbcachesize using ldapmodify +- Resolves: Bug 1357682 - RHDS fails to start with message: "Failed to delete old semaphore for stats file" +- Resolves: Bug 1452739 - Zero value of nsslapd-cache-autosize-split makes dbcache to be equal 0 + +* Fri May 19 2017 Mark Reynolds - 1.3.6.1-14 +- Bump version to 1.3.6.1-14 +- Resolves: Bug 1450910 - Modifying "nsslapd-cache-autosize" parameter using ldapmodify command is failing. 
+- Resolves: Bug 1450893 - When nsslapd-cache-autosize is not set in dse.ldif, ldapsearch does not show the default value +- Resolves: Bug 1449098 - ns-slapd crashes in role cache creation +- Resolves: Bug 1441522 - AddressSanitizer: heap-use-after-free in libreplication-plugin.so +- Resolves: Bug 1437492 - "ERR - cos-plugin - cos_cache_query_attr - cos attribute krbPwdPolicyReference failed schema check" in error log +- Resolves: Bug 1429770 - ds-logpipe.py crashes for non-existing users +- Resolves: Bug 1451657 - -v option is not working for db2ldif.pl + +* Fri May 5 2017 Mark Reynolds - 1.3.6.1-13 +- Bump version to 1.3.6.1-13 +- Resolves: Bug 1444938 - Fix backport issue from build 1.3.6.1-10 (part 2) + +* Fri May 5 2017 Mark Reynolds - 1.3.6.1-12 +- Bump version to 1.3.6.1-12 +- Resolves: Bug 1444938 - Fix backport issue from build 1.3.6.1-10 + +* Fri May 5 2017 Mark Reynolds - 1.3.6.1-11 +- Bump version to 1.3.6.1-11 +- Resolves: Bug 1410207 - Utility command had better use INFO log level for the output +- Resolves: Bug 1049190 - Better input argument validation and error messages for db2index and db2index.pl + +* Fri May 5 2017 Mark Reynolds - 1.3.6.1-10 +- Bump version to 1.3.6.1-10 +- Resolves: Bug 1444938 - nsslapd-allowed-sasl-mechanisms doesn't reset to default val +- Resolves: Bug 1111400 - logconv.pl lists sasl binds with no dn as anonymous +- Resolves: Bug 1377452 - Integer overflow in performance counters +- Resolves: Bug 1441790 - ldapserch for nsslapd-errorlog-level returns incorrect values +- Resolves: Bug 1444431 - ERR - symload_report_error - Netscape Portable Runtime error -5975 +- Resolves: Bug 1447015 - Adjust db2bak.pl help and man page to reflect changes introduced to the script + +* Wed Apr 19 2017 Mark Reynolds - 1.3.6.1-9 +- Bump version to 1.3.6.1-9 +- Resolves: Bug 1442880 - setup-ds-admin.pl -u with nsslapd-localhost changed +- Resolves: Bug 1443682 - util_info_sys_pages should be able to detect memory restrictions in a cgroup + +* Wed Apr 19 2017 Mark Reynolds - 1.3.6.1-8 +- Bump version to 1.3.6.1-8 +- Resolves: Bug 1432016 - Possible deadlock while installing an ipa replica +- Resolves: Bug 1438029 - Overflow in memberof + +* Tue Apr 11 2017 Mark Reynolds - 1.3.6.1-7 +- Bump version to 1.3.6.1-7 +- Resolves: bug 1394899 - RHDS should ignore passwordMinAge if "password must reset" is set(fix crash regression) +- Resolves: bug 1381326 - dirsrv-snmp.service is provided by 389-ds-base instead of 389-ds-base-snmp +- Resolves: bug 1049190 - Better input argument validation and error messages for db2index and db2index.pl. 
+ +* Mon Apr 3 2017 Mark Reynolds - 1.3.6.1-6 +- Bump version to 1.3.6.1-6 +- Resolves: bug 1437006 - EMBARGOED CVE-2017-2668 389-ds-base: Remote crash via crafted LDAP messages +- Resolves: bug 1341689 - dbmon.sh / cn=monitor] nsslapd-db-pages-in-use is increasing +- Resolves: bug 1394899 - RHDS should ignore passwordMinAge if "password must reset" is set +- Resolves: bug 1397288 - typo in logconv.pl man page +- Resolves: bug 1436994 - incorrect pathes in pkg-config files +- Resolves: bug 1396448 - Add a hard dependency for >=selinux-policy-3.13.1-75 + +* Tue Mar 28 2017 Mark Reynolds - 1.3.6.1-5 +- Bump version to 1.3.6.1-5 +- Resolves: bug 1377452 - Integer overflow in counters and monitor +- Resolves: bug 1425907 - Harden password storage scheme +- Resolves: bug 1431207 - ns-slapd killed by SIGABRT + +* Mon Mar 27 2017 Mark Reynolds - 1.3.6.1-4 +- Bump version to 1.3.6.1-4 +- Resolves: bug 1379424 - Reset-agmt-update-staus-and-total-init +- Resolves: bug 1394000 - dbmon.sh-fails-if-you-have-nsslapd-requi.patch +- Resolves: bug 1417344 - targetattr-wildcard-evaluation-is-incorr.patch +- Resolves: bug 1429770 - ds-logpipe.py-crashes-for-non-existing-u.patch +- Resolves: bug 1433697 - Fix-double-free-in-_cl5NewDBFile-error-path.patch +- Resolves: bug 1433996 - retrocl-can-crash-server-at-shutdown.patch +- Resolves: bug 1434967 - rpm-would-not-create-valid-pkgconfig-fi.patch +- Resolves: bug 1417338 - To-debug-DB_DEADLOCK-condition-allow-to.patch +- Resolves: bug 1433850 - Deleting-suffix-can-hang-server.patch + +* Tue Mar 14 2017 Mark Reynolds - 1.3.6.1-3 +- Bump version to 1.3.6.1-3 +- Fix spec file to include the tests + +* Tue Mar 14 2017 Mark Reynolds - 1.3.6.1-2 +- Bump version to 1.3.6.1-2 +- Resolves: bug 1431877 - 389-1.3.6.1-1.el7 covscan errors +- Resolves: bug 1432206 - content sync plugin can hang server shutdown +- Resolves: bug 1432149 - sasl external binds fail in 1.3.6.1 + +* Wed Mar 8 2017 Mark Reynolds - 1.3.6.1-1 +- Bump version to 1.3.6.1-1 +- Resolves: bug 1388567 - Rebase 389-ds-base to 1.3.6 in RHEL-7.4 + +* Mon Oct 31 2016 Noriko Hosoi - 1.3.5.10-12 +- Release 1.3.5.10-12 +- Resolves: bug 1384785 - Replica install fails with old IPA master sometimes during replication process (DS 48992) +- Resolves: bug 1388501 - 389-ds-base is missing runtime dependency - bind-utils (DS 48328) +- Resolves: bug 1388581 - Replication stops working only when fips mode is set to true (DS 48909) +- Resolves: bug 1390342 - ns-accountstatus.pl shows wrong status for accounts inactivated by Account policy plugin (DS 49014) +- Resolves: bug 1390343 - trace args debug logging must be more restrictive (DS 49009) + +* Tue Sep 13 2016 Noriko Hosoi - 1.3.5.10-11 +- Release 1.3.5.10-11 +- Resolves: bug 1321124 - Replication changelog can incorrectly skip over updates + +* Thu Sep 1 2016 Noriko Hosoi - 1.3.5.10-10 +- Release 1.3.5.10-10 +- Resolves: bug 1370300 - set proper update status to replication agreement in case of failure (DS 48957) +- Resolves: bug 1209094 - Allow logging of rejected changes (DS 48969) + +* Tue Aug 30 2016 Noriko Hosoi - 1.3.5.10-9 +- Release 1.3.5.10-9 +- Resolves: bug 1364190 - Change example in /etc/sysconfig/dirsrv to use tcmalloc (DS 48950) +- Resolves: bug 1366828 - audit on failure doesn't work if attribute nsslapd-auditlog-logging-enabled is NOT enabled (DS 48958) +- Resolves: bug 1368520 - Crash in import_wait_for_space_in_fifo() (DS 48960) +- Resolves: bug 1368956 - man page of ns-accountstatus.pl shows redundant entries for -p port option +- Resolves: bug 
1369537 - passwordMinAge attribute doesn't limit the minimum age of the password (DS 48967) +- Resolves: bug 1369570 - cleanallruv changelog cleaning incorrectly impacts all backends (DS 48964) +- Resolves: bug 1369425 - ACI behaves erratically (DS 48972) +- Resolves: bug 1370300 - set proper update status to replication agreement in case of failure (DS 48957) +- Resolves: bug 1209094 - Allow logging of rejected changes (DS 48969) +- Resolves: bug 1371283 - Server Side Sorting crashes the server. (DS 48970) +- Resolves: bug 1371284 - Disabling CLEAR password storage scheme will crash server when setting a password (DS 48975) + +* Thu Aug 18 2016 Noriko Hosoi - 1.3.5.10-8 +- Release 1.3.5.10-8 +- Resolves: bug 1321124 - Replication changelog can incorrectly skip over updates (DS 48954) +- Resolves: bug 1364190 - Change example in /etc/sysconfig/dirsrv to use tcmalloc (DS 48950) +- Resolves: bug 1366561 - ns-accountstatus.pl giving error even "No such object (32)" (DS 48956) + +* Mon Aug 8 2016 Noriko Hosoi - 1.3.5.10-7 +- Release 1.3.5.10-7 +- Resolves: bug 1316580 - dirsrv service doesn't ask for pin when pin.txt is missing (DS 48450) +- Resolves: bug 1360976 - fixing a compiler warning + +* Thu Aug 4 2016 Noriko Hosoi - 1.3.5.10-6 +- Release 1.3.5.10-6 +- Resolves: bug 1326077 - Page result search should return empty cookie if there is no returned entry (DS 48928) +- Resolves: bug 1360447 - nsslapd-workingdir is empty when ns-slapd is started by systemd (DS 48939) +- Resolves: bug 1360327 - remove-ds.pl deletes an instance even if wrong prefix was specified (DS 48934) +- Resolves: bug 1349815 - DS logs have warning:ancestorid not indexed for all CS subsystems (DS 48940) +- Resolves: bug 1329061 - 389-ds-base-1.3.4.0-29.el7_2 "hang" (DS 48882) +- Resolves: bug 1360976 - EMBARGOED CVE-2016-5405 389-ds-base: Password verification vulnerable to timing attack +- Resolves: bug 1361134 - When fine-grained policy is applied, a sub-tree has a priority over a user while changing password (DS 48943) +- Resolves: bug 1361321 - Duplicate collation entries (DS 48936) +- Resolves: bug 1316580 - dirsrv service doesn't ask for pin when pin.txt is missing (DS 48450) +- Resolves: bug 1350799 - CVE-2016-4992 389-ds-base: Information disclosure via repeat + +* Thu Jul 14 2016 Noriko Hosoi - 1.3.5.10-5 +- Release 1.3.5.10-5 +- Resolves: bug 1333184 - (389-ds-base-1.3.5) Fixing coverity issues. (DS 48919) + +* Thu Jul 14 2016 Noriko Hosoi - 1.3.5.10-4 +- Release 1.3.5.10-4 +- Resolves: bug 1209128 - [RFE] Add a utility to get the status of Directory Server instances (DS 48144) +- Resolves: bug 1333184 - (389-ds-base-1.3.5) Fixing coverity issues. (DS 48919) +- Resolves: bug 1350799 - CVE-2016-4992 389-ds-base: Information disclosure via repeat +- Resolves: bug 1354660 - flow control in replication also blocks receiving results (DS 48767) +- Resolves: bug 1356261 - Fixup tombstone task needs to set proper flag when updating (DS 48924) +- Resolves: bug 1355760 - ns-slapd crashes during the deletion of backend (DS 48922) +- Resolves: bug 1353629 - DS shuts down automatically if dnaThreshold is set to 0 in a MMR setup (DS 48916) +- Resolves: bug 1355879 - nunc-stans: ns-slapd crashes during startup with SIGILL on AMD Opteron 280 (DS 48925) + +* Mon Jul 11 2016 Noriko Hosoi - 1.3.5.10-3 +- Release 1.3.5.10-3 +- Resolves: bug 1354374 - Fixing the tarball version in the sources file. 
+ +* Mon Jul 11 2016 Noriko Hosoi - 1.3.5.10-2 +- Release 1.3.5.10-2 +- Resolves: bug 1353714 - If a cipher is disabled do not attempt to look it up (DS 48743) +- Resolves: bug 1353592 - Setup-ds.pl --update fails - regression (DS 48755) +- Resolves: bug 1353544 - db2bak.pl task enters infinitive loop when bak fs is almost full (DS 48914) +- Resolves: bug 1354374 - Upgrade to 389-ds-base >= 1.3.5.5 doesn't install 389-ds-base-snmp (DS 48918) + +* Wed Jun 29 2016 Noriko Hosoi - 1.3.5.10-1 +- Release 1.3.5.10-1 +- Resolves: bug 1333184 - (389-ds-base-1.3.5) Fixing coverity issues. (DS 48905) + +* Wed Jun 29 2016 Noriko Hosoi - 1.3.5.9-1 +- Release 1.3.5.9-1 +- Resolves: bug 1349571 - Improve MMR replication convergence (DS 48636) +- Resolves: bug 1304682 - "stale" automember rule (associated to a removed group) causes discrepancies in the database (DS 48637) +- Resolves: bug 1314956 - moving an entry cause next on-line init to skip entry has no parent, ending at line 0 of file "(bulk import)" (DS 48755) +- Resolves: bug 1316731 - syncrepl search returning error 329; plugin sending a bad error code (DS 48904) +- Resolves: bug 1346741 - ns-slapd crashes during the shutdown after adding attribute with a matching rule (DS 48891) +- Resolves: bug 1349577 - Values of dbcachetries/dbcachehits in cn=monitor could overflow. (DS 48899) +- Resolves: bug 1272682 - nunc-stans: ns-slapd killed by SIGABRT (DS 48898) +- Resolves: bug 1346043 - repl-monitor displays colors incorrectly for the time lag > 60 min (DS 47538) +- Resolves: bug 1350632 - ns-slapd shutdown crashes if pwdstorageschema name is from stack. (DS 48902) + +* Tue Jun 21 2016 Noriko Hosoi - 1.3.5.8-1 +- Release 1.3.5.8-1 +- Resolves: bug 1290101 - proxyauth support does not work when bound as directory manager (DS 48366) + +* Tue Jun 21 2016 Noriko Hosoi - 1.3.5.7-1 +- Release 1.3.5.7-1 +- Resolves: bug 1196282 - substring index with nssubstrbegin: 1 is not being used with filters like (attr=x*) (DS 48109) +- Resolves: bug 1303794 - Import readNSState.py from RichM's repo (DS 48449) +- Resolves: bug 1290101 - proxyauth support does not work when bound as directory manager (DS 48366) +- Resolves: bug 1338872 - Wrong result code display in audit-failure log (DS 48892) +- Resolves: bug 1346043 - repl-monitor displays colors incorrectly for the time lag > 60 min (DS 47538) +- Resolves: bug 1346741 - ns-slapd crashes during the shutdown after adding attribute with a matching rule (DS 48891) +- Resolves: bug 1347407 - By default aci can be read by anonymous (DS 48354) +- Resolves: bug 1347412 - cn=SNMP,cn=config entry can be read by anonymous (DS 48893) + +* Tue Jun 14 2016 Noriko Hosoi - 1.3.5.6-1 +- Release 1.3.5.6-1 +- Resolves: bug 1273549 - [RFE] Improve timestamp resolution in logs (DS 47982) +- Resolves: bug 1321124 - Replication changelog can incorrectly skip over updates (DS 48766, DS 48636) +- Resolves: bug 1233926 - "matching rules" in ACI's "bind rules not fully evaluated (DS 48234) +- Resolves: bug 1346165 - 389-ds-base-1.3.5.5-1.el7.x86_64 requires policycoreutils-py + +* Mon Jun 13 2016 Noriko Hosoi - 1.3.5.5-1 +- Release 1.3.5.5-1 +- Resolves: bug 1018944 - [RFE] Enhance password change tracking (DS 48833) +- Resolves: bug 1344414 - [RFE] adding pre/post extop ability (DS 48880) +- Resolves: bug 1303794 - Import readNSState.py from RichM's repo (DS 48449) +- Resolves: bug 1257568 - /usr/lib64/dirsrv/libnunc-stans.so is owned by both -libs and -devel (DS 48404) +- Resolves: bug 1314956 - moving an entry cause next on-line init to 
skip entry has no parent, ending at line 0 of file "(bulk import)" (DS 48755) +- Resolves: bug 1342609 - At startup DES to AES password conversion causes timeout in start script (DS 48862) +- Resolves: bug 1316328 - search returns no entry when OR filter component contains non readable attribute (DS 48275) +- Resolves: bug 1280456 - setup-ds should detect if port is already defined (DS 48336) +- Resolves: bug 1312557 - dirsrv service fails to start when nsslapd-listenhost is configured (DS 48747) +- Resolves: bug 1326077 - Page result search should return empty cookie if there is no returned entry (DS 48752) +- Resolves: bug 1340307 - Running db2index with no options breaks replication (DS 48854) +- Resolves: bug 1337195 - Regression introduced in matching rules by DS 48746 (DS 48844) +- Resolves: bug 1335492 - Modifier's name is not recorded in the audit log with modrdn and moddn operations (DS 48834) +- Resolves: bug 1316741 - ldctl should support -H with ldap uris (DS 48754) + +* Wed May 18 2016 Noriko Hosoi - 1.3.5.4-1 +- release 1.3.5.4-1 +- Resolves: bug 1334455 - db2ldif is not taking into account multiple suffixes or backends (DS 48828) +- Resolves: bug 1241563 - The "repl-monitor" web page does not display "year" in date. (DS 48220) +- Resolves: bug 1335618 - Server ram sanity checks work in isolation (DS 48617) +- Resolves: bug 1333184 - (389-ds-base-1.3.5) Fixing coverity issues. (DS 48837) + +* Sat May 7 2016 Noriko Hosoi - 1.3.5.3-1 +- release 1.3.5.3-1 +- Resolves: bug 1209128 - [RFE] Add a utility to get the status of Directory Server instances (DS 48144) +- Resolves: bug 1332533 - ns-accountstatus.pl gives error message on execution along with results. (DS 48815) +- Resolves: bug 1332709 - password history is not updated when an admin resets the password (DS 48813) +- Resolves: bug 1333184 - (389-ds-base-1.3.5) Fixing coverity issues. 
(DS 48822) +- Resolves: bug 1333515 - Enable DS to offer weaker DH params in NSS (DS 48798) + +* Tue May 3 2016 Noriko Hosoi - 1.3.5.2-1 +- release 1.3.5.2-1 +- Resolves: bug 1270020 - Rebase 389-ds-base to 1.3.5 in RHEL-7.3 +- Resolves: bug 1288229 - many attrlist_replace errors in connection with cleanallruv (DS 48283) +- Resolves: bug 1315893 - License tag does not match actual license of code (DS 48757) +- Resolves: bug 1320715 - DES to AES password conversion fails if a backend is empty (DS 48777) +- Resolves: bug 190862 - [RFE] Default password syntax settings don't work with fine-grained policies (DS 142) +- Resolves: bug 1018944 - [RFE] Enhance password change tracking (DS 548) +- Resolves: bug 1143066 - The dirsrv user/group should be created in rpm %pre, and ideally with fixed uid/gid (DS 48285) +- Resolves: bug 1153758 - [RFE] Support SASL/GSSAPI when ns-slapd is behind a load-balancer (DS 48332) +- Resolves: bug 1160902 - search, matching rules and filter error "unsupported type 0xA9" (DS 48016) +- Resolves: bug 1186512 - High memory fragmentation observed in ns-slapd; OOM-Killer invoked (DS 48377, 48129) +- Resolves: bug 1196282 - substring index with nssubstrbegin: 1 is not being used with filters like (attr=x*) (DS 48109) +- Resolves: bug 1209094 - [RFE] Allow logging of rejected changes (DS 48145, 48280) +- Resolves: bug 1209128 - [RFE] Add a utility to get the status of Directory Server instances (DS 48144) +- Resolves: bug 1210842 - [RFE] Add PIDFile option to systemd service file (DS 47951) +- Resolves: bug 1223510 - [RFE] it could be nice to have nsslapd-maxbersize default to bigger than 2Mb (DS 48326) +- Resolves: bug 1229799 - ldclt-bin killed by SIGSEGV (DS 48289) +- Resolves: bug 1249908 - No validation check for the value for nsslapd-db-locks. (DS 48244) +- Resolves: bug 1254887 - No man page entry for - option '-u' of dbgen.pl for adding group entries with uniquemembers (DS 48290) +- Resolves: bug 1255557 - db2index creates index entry from deleted records (DS 48252) +- Resolves: bug 1258610 - total update request must not be lost (DS 48255) +- Resolves: bug 1258611 - dna plugin needs to handle binddn groups for authorization (DS 48258) +- Resolves: bug 1259624 - [RFE] Provide a utility to detect accounts locked due to inactivity (DS 48269) +- Resolves: bug 1259950 - Add config setting to MemberOf Plugin to add required objectclass got memberOf attribute (DS 48267) +- Resolves: bug 1266510 - Linked Attributes plug-in - wrong behaviour when adding valid and broken links (DS 48295) +- Resolves: bug 1266532 - Linked Attributes plug-in - won't update links after MODRDN operation (DS 48294) +- Resolves: bug 1267750 - pagedresults - when timed out, search results could have been already freed. 
(DS 48299) +- Resolves: bug 1269378 - ds-logpipe.py with wrong arguments - python exception in the output (DS 48302) +- Resolves: bug 1271330 - nunc-stans: Attempt to release connection that is not acquired (DS 48311) +- Resolves: bug 1272677 - nunc stans: ns-slapd killed by SIGTERM +- Resolves: bug 1272682 - nunc-stans: ns-slapd killed by SIGABRT +- Resolves: bug 1273142 - crash in Managed Entry plugin (DS 48312) +- Resolves: bug 1273549 - [RFE] Improve timestamp resolution in logs (DS 47982) +- Resolves: bug 1273550 - Deadlock between two MODs on the same entry between entry cache and backend lock (DS 47978) +- Resolves: bug 1273555 - deadlock in mep delete post op (DS 47976) +- Resolves: bug 1273584 - lower password history minimum to 1 (DS 48394) +- Resolves: bug 1275763 - [RFE] add setup-ds.pl option to disable instance specific scripts (DS 47840) +- Resolves: bug 1276072 - [RFE] Allow RHDS to be setup using a DNS CNAME alias for General.FullMachineName (DS 48328) +- Resolves: bug 1278567 - SimplePagedResults -- abandon could happen between the abandon check and sending results (DS 48338) +- Resolves: bug 1278584 - Share nsslapd-threadnumber in the case nunc-stans is enabled, as well. (DS 48339) +- Resolves: bug 1278755 - deadlock on connection mutex (DS 48341) +- Resolves: bug 1278987 - Cannot upgrade a consumer to supplier in a multimaster environment (DS 48325) +- Resolves: bug 1280123 - acl - regression - trailing ', (comma)' in macro matched value is not removed. (DS 48344) +- Resolves: bug 1290111 - [RFE] Support for rfc3673 '+' to return operational attributes (DS 48363) +- Resolves: bug 1290141 - With exhausted range, part of DNA shared configuration is deleted after server restart (DS 48362) +- Resolves: bug 1290242 - SimplePagedResults -- in the search error case, simple paged results slot was not released. (DS 48375) +- Resolves: bug 1290600 - The 'eq' index does not get updated properly when deleting and re-adding attributes in the same ldapmodify operation (DS 48370) +- Resolves: bug 1295947 - 389-ds hanging after a few minutes of operation (DS 48406, revert 48338) +- Resolves: bug 1296310 - ldclt - segmentation fault error while binding (DS 48400) +- Resolves: bug 1299758 - CVE-2016-0741 389-ds-base: Worker threads do not detect abnormally closed connections causing DoS [rhel-7.3] +- Resolves: bug 1301097 - logconv.pl displays negative operation speeds (DS 48446) +- Resolves: bug 1302823 - Crash in slapi_get_object_extension (DS 48536) +- Resolves: bug 1303641 - heap corruption at schema replication. (DS 48492) +- Resolves: bug 1307151 - keep alive entries can break replication (DS 48445) +- Resolves: bug 1310848 - Supplier can skip a failing update, although it should retry. (DS 47788) +- Resolves: bug 1314557 - change severity of some messages related to "keep alive" enties (DS 48420) +- Resolves: bug 1316580 - dirsrv service doesn't ask for pin when pin.txt is missing (DS 48450) +- Resolves: bug 1316742 - no plugin calls in tombstone purging (DS 48759) +- Resolves: bug 1319329 - [RFE] add nsslapd-auditlog-logging-enabled: off to template-dse.ldif (DS 48145) +- Resolves: bug 1320295 - If nsSSL3 is on, even if SSL v3 is not really enabled, a confusing message is logged. 
(DS 48775)
+- Resolves: bug 1326520 - db2index uses a buffer size derived from dbcachesize (DS 48383)
+- Resolves: bug 1328936 - objectclass values could be dropped on the consumer (DS 48799)
+- Resolves: bug 1287475 - [RFE] response control for password age should be sent by default by RHDS (DS 48369)
+- Resolves: bug 1331343 - Paged results search returns the blank list of entries (DS 48808)
+
+* Mon Oct 5 2015 Noriko Hosoi - 1.3.4.0-19
+- release 1.3.4.0-19
+- Resolves: bug 1228823 - async simple paged results issue (DS 48299, DS 48192)
+- Resolves: bug 1266944 - ns-slapd crash during ipa-replica-manage del (DS 48298)
+
+* Tue Sep 22 2015 Noriko Hosoi - 1.3.4.0-18
+- release 1.3.4.0-18
+- Resolves: bug 1259949 - Fractional replication evaluates several times the same CSN (DS 48266, DS 48284)
+
+* Fri Sep 18 2015 Noriko Hosoi - 1.3.4.0-17
+- release 1.3.4.0-17
+- Resolves: bug 1259949 - A backport error (coverity -- unused variable 'init_retry')
+
+* Fri Sep 18 2015 Noriko Hosoi - 1.3.4.0-16
+- release 1.3.4.0-16
+- Resolves: bug 1243970 - In MMR, double free could occur under some special condition (DS 48276, DS 48226)
+- Resolves: bug 1259949 - Fractional replication evaluates several times the same CSN (DS 48266)
+- Resolves: bug 1241723 - cleanallruv - fix regression with server shutdown (DS 48217)
+- Resolves: bug 1264224 - segfault in ns-slapd due to accessing Slapi_DN freed in pre bind plug-in (DS 48188)
+
+* Fri Sep 4 2015 Noriko Hosoi - 1.3.4.0-15
+- release 1.3.4.0-15
+- Resolves: bug 1258996 - Complex filter in a search request doesn't work as expected. (regression) (DS 48265)
+- Resolves: bug 1179370 - COS cache doesn't properly mark vattr cache as invalid when there are multiple suffixes (DS 47981)
+
+* Tue Aug 25 2015 Noriko Hosoi - 1.3.4.0-14
+- release 1.3.4.0-14
+- Resolves: bug 1246389 - wrong password check if passwordInHistory is decreased. (DS 48228)
+- Resolves: bug 1255851 - Shell CLI fails with usage errors if an argument containing white spaces is given (DS 48254)
+- Resolves: bug 1256938 - Unable to dereference uniquemember attribute because it is dn [#UID] not dn syntax (DS 47757)
+
+* Wed Aug 19 2015 Noriko Hosoi - 1.3.4.0-13
+- release 1.3.4.0-13
+- Resolves: bug 1245519 - remove debug logging from retro cl (DS 47831)
+
+* Tue Aug 18 2015 Noriko Hosoi - 1.3.4.0-12
+- release 1.3.4.0-12
+- Resolves: bug 1252133 - replica upgrade failed in starting dirsrv service (DS 48243)
+- Resolves: bug 1254344 - Server crashes in ACL_LasFindFlush during shutdown if ACIs contain IP address restrictions (DS 48233)
+
+* Fri Aug 14 2015 Noriko Hosoi - 1.3.4.0-11
+- release 1.3.4.0-11
+- Resolves: bug 1249784 - ipa-dnskeysyncd unhandled exception on named-pkcs11 start (DS 48249)
+- Resolves: bug 1252082 - removing chaining database links trigger valgrind read error (DS 47686)
+- Resolves: bug 1252207 - bashisms in 389-ds-base admin scripts (DS 47511)
+- Resolves: bug 1252533 - Man pages and help for remove-ds.pl doesn't display "-a" option (DS 48245)
+- Resolves: bug 1252781 - Slapd crashes reported from latest builds (DS 48250)
+
+* Mon Aug 10 2015 Noriko Hosoi - 1.3.4.0-10
+- release 1.3.4.0-10
+- Resolves: bug 1245519 - Fix coverity issues (DS 47931)
+
+* Fri Aug 7 2015 Noriko Hosoi - 1.3.4.0-9
+- release 1.3.4.0-9
+- Resolves: bug 1240876 - verify_db.pl doesn't verify DB specified by -a option. (DS 48215)
+- Resolves: bug 1245235 - winsync lastlogon attribute not syncing between IPA & Windows 2008. (DS 48232)
+- Resolves: bug 1245519 - Deadlock with retrochangelog, memberof plugin (DS 47931)
+- Resolves: bug 1246389 - wrong password check if passwordInHistory is decreased. (DS 48228)
+- Resolves: bug 1247811 - logconv autobind handling regression caused by 47446 (DS 48231)
+- Resolves: bug 1250177 - Investigate betxn plugins to ensure they return the correct error code (DS 47810)
+
+* Thu Jul 23 2015 Noriko Hosoi - 1.3.4.0-8
+- release 1.3.4.0-8
+- Resolves: bug 1160243 - [RFE] allow logconv.pl -S/-E switches to work even when exact/same timestamps are not present in access log file (DS 47910)
+- Resolves: bug 1172037 - winsync range retrieval gets only 5000 values upon initialization (DS 48010)
+- Resolves: bug 1242531 - logconv.pl should handle *.tar.xz, *.txz, *.xz log files (DS 48224)
+- Resolves: bug 1243950 - When starting a replica agreement a deadlock can occur with an op updating nsuniqueid index (DS 48179)
+- Resolves: bug 1243970 - In MMR, double free could occur under some special condition (DS 48226)
+- Resolves: bug 1244926 - Crash while trimming the retro changelog (DS 48206)
+
+* Thu Jul 16 2015 Noriko Hosoi - 1.3.4.0-7
+- release 1.3.4.0-7
+- Resolves: bug 1235060 - Fix coverity issues - 07/14/2015 (DS 48203)
+- Resolves: bug 1242531 - redux - logconv.pl should handle *.tar.xz, *.txz, *.xz log files (DS 48224)
+
+* Tue Jul 14 2015 Noriko Hosoi - 1.3.4.0-6
+- release 1.3.4.0-6
+- Resolves: bug 1240845 - cleanallruv should completely clean changelog (DS 48208)
+- Resolves: bug 1095603 - Any negative LDAP error code number reported as Illegal error by ldclt. (DS 47799)
+- Resolves: bug 1168675 - Inconsistent behaviour of DS when LDAP Sync is used with an invalid cookie (DS 48013)
+- Resolves: bug 1241723 - cleanAllRUV hangs shutdown if not all of the replicas are online (DS 48217)
+- Resolves: bug 1241497 - crash in ns-slapd when deleting winSyncSubtreePair from sync agreement (DS 48216)
+- Resolves: bug 1240404 - Silent install needs to properly exit when INF file is missing (DS 48119)
+- Resolves: bug 1240406 - Remove warning suppression in 1.3.4 (DS 47878)
+- Resolves: bug 1242683 - Winsync fails when AD users have multiple spaces (two) inside the value of the rdn attribute (DS 48223)
+- Resolves: bug 1160243 - logconv.pl - validate start and end time args (DS 47910)
+- Resolves: bug 1242531 - logconv.pl should handle *.tar.xz, *.txz, *.xz log files (DS 48224)
+- Resolves: bug 1230996 - CI test: fixing test cases for ticket 48194 (DS 48194)
+
+* Tue Jul 7 2015 Noriko Hosoi - 1.3.4.0-5
+- release 1.3.4.0-5
+- Resolves: bug 1235060 - Fix coverity issues (DS 48203)
+
+* Tue Jul 7 2015 Noriko Hosoi - 1.3.4.0-4
+- release 1.3.4.0-4
+- Resolves: bug 1240404 - setup-ds.pl does not log invalid --file path errors the same (DS 48119)
+- Resolves: bug 1240406 - setup -u stops after first failure (DS 47878)
+
+* Mon Jul 6 2015 Noriko Hosoi - 1.3.4.0-3
+- release 1.3.4.0-3
+- Resolves: bug 1228823 - async simple paged results issue (DS 48192)
+- Resolves: bug 1237325 - reindex off-line twice could provoke index corruption (DS 48212)
+- Resolves: bug 1238790 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value (DS 48214)
+
+* Wed Jun 24 2015 Noriko Hosoi - 1.3.4.0-2
+- release 1.3.4.0-2
+- Resolves: bug 1235060 - Fix coverity issues
+- Resolves: bug 1235387 - Slow replication when deleting large quantities of multi-valued attributes (DS 48195)
+
+* Fri Jun 19 2015 Noriko Hosoi - 1.3.4.0-1
+- Release 1.3.4.0-1 (rebase)
+- Enable nunc-stans for x86_64.
+- Resolves: bug 1034325 - Linked attributes betxnpreoperation - transaction not aborted when linked entry does not exist (DS 47640)
+- Resolves: bug 1052755 - Retro Changelog Plugin accepts invalid value in nsslapd-changelogmaxage attribute (DS 47669)
+- Resolves: bug 1096409 - RHDS keeps on logging write_changelog_and_ruv: failed to update RUV for unknown (DS 47801)
+- Resolves: bug 1145378 - Adding an entry with an invalid password as rootDN is incorrectly rejected (DS 47900)
+- Resolves: bug 1145382 - Bad manipulation of passwordhistory (DS 47905)
+- Resolves: bug 1154147 - Uniqueness plugin: should allow to exclude some subtrees from its scope (DS 47927)
+- Resolves: bug 1171358 - Make ReplicaWaitForAsyncResults configurable (DS 47957)
+- Resolves: bug 1171663 - MODDN fails when entry doesn't have memberOf attribute and new DN is in the scope of memberOfExcludeSubtree (DS 47526)
+- Resolves: bug 1174457 - [RFE] memberOf - add option to skip nested group lookups during delete operations (DS 47963)
+- Resolves: bug 1178640 - db2bak.pl man page should be improved. (DS 48008)
+- Resolves: bug 1179370 - COS cache doesn't properly mark vattr cache as invalid when there are multiple suffixes (DS 47981)
+- Resolves: bug 1180331 - Local Password Policies for Nested OU's not honoured (DS 47980)
+- Resolves: bug 1180776 - nsslapd-db-locks modify not taking into account (DS 47934)
+- Resolves: bug 1181341 - nsslapd-changelogtrim-interval and nsslapd-changelogcompactdb-interval are not validated (DS 47617)
+- Resolves: bug 1185882 - ns-activate.pl fails to activate account if it was disabled on AD (DS 48001)
+- Resolves: bug 1186548 - ns-slapd crash in shutdown phase (DS 48005)
+- Resolves: bug 1189154 - DNS errors after IPA upgrade due to broken ReplSync (DS 48030)
+- Resolves: bug 1206309 - winsync sets AccountUserControl in AD to 544 (DS 47723)
+- Resolves: bug 1210845 - slapd crashes during Dogtag clone reinstallation (DS 47966)
+- Resolves: bug 1210850 - add an option '-u' to dbgen.pl for adding group entries with (DS 48025)
+- Resolves: bug 1210852 - aci with wildcard and macro not correctly evaluated (DS 48141)
+
+* Fri Jun 12 2015 Noriko Hosoi - 1.3.3.1-19
+- release 1.3.3.1-19
+- Resolves: bug 1230996 - nsSSL3Ciphers preference not enforced server side (DS 48194)
+
+* Fri Jun 5 2015 Noriko Hosoi - 1.3.3.1-18
+- release 1.3.3.1-18
+- Resolves: bug 1228823 - async simple paged results issue (DS 48146, DS 48192)
+
+* Tue Jun 2 2015 Noriko Hosoi - 1.3.3.1-17
+- release 1.3.3.1-17
+- Resolves: bug 1226510 - idm/ipa 389-ds-base entry cache converges to 500 KB in dblayer_is_cachesize_sane (DS 48190)
+
+* Tue Apr 21 2015 Noriko Hosoi - 1.3.3.1-16
+- release 1.3.3.1-16
+- Resolves: bug 1212894 - CVE-2015-1854 389ds-base: access control bypass with modrdn
+
+* Mon Feb 23 2015 Noriko Hosoi - 1.3.3.1-15
+- release 1.3.3.1-15
+- Setting correct build tag 'rhel-7.1-z-candidate'
+
+* Mon Feb 23 2015 Noriko Hosoi - 1.3.3.1-14
+- release 1.3.3.1-14
+- Resolves: bug 1189154 - DNS errors after IPA upgrade due to broken ReplSync (DS 48030)
+ Fixes spec file to make sure all the server instances are stopped before upgrade
+- Resolves: bug 1186548 - ns-slapd crash in shutdown phase (DS 48005)
+
+* Sun Jan 25 2015 Noriko Hosoi - 1.3.3.1-13
+- release 1.3.3.1-13
+- Resolves: bug 1183655 - Fixed Covscan FORWARD_NULL defects (DS 47988)
+
+* Sun Jan 25 2015 Noriko Hosoi - 1.3.3.1-12
+- release 1.3.3.1-12
+- Resolves: bug 1182477 - Windows Sync accidentally cleared raw_entry (DS 47989)
+- Resolves: bug 1180325 -
upgrade script fails if /etc and /var are on different file systems (DS 47991 ) +- Resolves: bug 1183655 - Schema learning mechanism, in replication, unable to extend an existing definition (DS 47988) + +* Mon Jan 5 2015 Noriko Hosoi - 1.3.3.1-11 +- release 1.3.3.1-11 +- Resolves: bug 1080186 - During delete operation do not refresh cache entry if it is a tombstone (DS 47750) + +* Wed Dec 17 2014 Noriko Hosoi - 1.3.3.1-10 +- release 1.3.3.1-10 +- Resolves: bug 1172731 - CVE-2014-8112 password hashing bypassed when "nsslapd-unhashed-pw-switch" is set to off +- Resolves: bug 1166265 - DS hangs during online total update (DS 47942) +- Resolves: bug 1168151 - CVE-2014-8105 information disclosure through 'cn=changelog' subtree +- Resolves: bug 1044170 - Allow memberOf suffixes to be configurable (DS 47526) +- Resolves: bug 1171356 - Bind DN tracking unable to write to internalModifiersName without special permissions (DS 47950) +- Resolves: bug 1153737 - logconv.pl -- support parsing/showing/reporting different protocol versions (DS 47949) +- Resolves: bug 1171355 - start dirsrv after chrony on RHEL7 and Fedora (DS 47947) +- Resolves: bug 1170707 - cos_cache_build_definition_list does not stop during server shutdown (DS 47967) +- Resolves: bug 1170708 - COS memory leak when rebuilding the cache (DS - Ticket 47969) +- Resolves: bug 1170709 - Account lockout attributes incorrectly updated after failed SASL Bind (DS 47970) +- Resolves: bug 1166260 - cookie_change_info returns random negative number if there was no change in a tree (DS 47960) +- Resolves: bug 1012991 - Error log levels not displayed correctly (DS 47636) +- Resolves: bug 1108881 - rsearch filter error on any search filter (DS 47722) +- Resolves: bug 994690 - Allow dynamically adding/enabling/disabling/removing plugins without requiring a server restart (DS 47451) +- Resolves: bug 1162997 - Running a plugin task can crash the server (DS 47451) +- Resolves: bug 1166252 - RHEL7.1 ns-slapd segfault when ipa-replica-install restarts (DS 47451) +- Resolves: bug 1172597 - Crash if setting invalid plugin config area for MemberOf Plugin (DS 47525) +- Resolves: bug 1139882 - coverity defects found in 1.3.3.x (DS 47965) + +* Thu Nov 13 2014 Noriko Hosoi - 1.3.3.1-9 +- release 1.3.3.1-9 +- Resolves: bug 1153737 - Disable SSL v3, by default. (DS 47928) +- Resolves: bug 1163461 - Should not check aci syntax when deleting an aci (DS 47953) + +* Mon Nov 10 2014 Noriko Hosoi - 1.3.3.1-8 +- release 1.3.3.1-8 +- Resolves: bug 1156607 - Crash in entry_add_present_values_wsi_multi_valued (DS 47937) +- Resolves: bug 1153737 - Disable SSL v3, by default (DS 47928, DS 47945, DS 47948) +- Resolves: bug 1158804 - Malformed cookie for LDAP Sync makes DS crash (DS 47939) + +* Thu Oct 23 2014 Noriko Hosoi - 1.3.3.1-7 +- release 1.3.3.1-7 +- Resolves: bug 1153737 - Disable SSL v3, by default (DS 47928) + +* Fri Oct 10 2014 Noriko Hosoi - 1.3.3.1-6 +- release 1.3.3.1-6 +- Resolves: bug 1151287 - dynamically added macro aci is not evaluated on the fly (DS 47922) +- Resolves: bug 1080186 - Need to move slapi_pblock_set(pb, SLAPI_MODRDN_EXISTING_ENTRY, original_entry->ep_entry) prior to original_entry overwritten (DS 47897) +- Resolves: bug 1150694 - Encoding of SearchResultEntry is missing tag (DS 47920) +- Resolves: bug 1150695 - ldbm_back_modify SLAPI_PLUGIN_BE_PRE_MODIFY_FN does not return even if one of the preop plugins fails. 
(DS 47919)
+- Resolves: bug 1139882 - Fix remaining compiler warnings (DS 47892)
+- Resolves: bug 1150206 - result of dna_dn_is_shared_config is incorrectly used (DS 47918)
+
+* Wed Oct 1 2014 Noriko Hosoi - 1.3.3.1-5
+- release 1.3.3.1-5
+- Resolves: bug 1139882 - coverity defects found in 1.3.3.x (DS 47892)
+
+* Wed Oct 1 2014 Noriko Hosoi - 1.3.3.1-4
+- release 1.3.3.1-4
+- Resolves: bug 1080186 - Creating a glue fails if one above level is a conflict or missing (DS 47750)
+- Resolves: bug 1145846 - 389-ds 1.3.3.0 does not adjust cipher suite configuration on upgrade, breaks itself and pki-server (DS 47908)
+- Resolves: bug 1117979 - harden the list of ciphers available by default (phase 2) (DS 47838)
+ - provide enabled ciphers as search result (DS 47880)
+
+* Fri Sep 12 2014 Rich Megginson - 1.3.3.1-3
+- release 1.3.3.1-3
+- Resolves: bug 1139882 - coverity defects found in 1.3.3.1
+
+* Thu Sep 11 2014 Noriko Hosoi - 1.3.3.1-2
+- release 1.3.3.1-2
+- Resolves: bug 1079099 - Simultaneous adding a user and binding as the user could fail in the password policy check (DS 47748)
+- Resolves: bug 1080186 - Creating a glue fails if one above level is a conflict or missing (DS 47834)
+- Resolves: bug 1139882 - coverity defects found in 1.3.3.1 (DS 47890)
+- Resolves: bug 1112702 - Broken dereference control with the FreeIPA 4.0 ACIs (DS 47885 - deref plugin should not return references with no access rights)
+- Resolves: bug 1117979 - harden the list of ciphers available by default (DS 47838, DS 47895)
+- Resolves: bug 1080186 - Creating a glue fails if one above level is a conflict or missing (DS 47889 - DS crashed during ipa-server-install on test_ava_filter)
+
+* Fri Sep 5 2014 Noriko Hosoi - 1.3.3.1-1
+- release 1.3.3.1-1
+- Resolves: bug 746646 - RFE: easy way to configure which users and groups to sync with winsync
+- Resolves: bug 881372 - nsDS5BeginReplicaRefresh attribute accepts any value and it doesn't throw any error when server restarts.
+- Resolves: bug 920597 - Possible to add invalid ACI value
+- Resolves: bug 921162 - Possible to add nonexistent target to ACI
+- Resolves: bug 923799 - if nsslapd-cachememsize set to the number larger than the RAM available, should result in proper error message.
+- Resolves: bug 924937 - Attribute "dsOnlyMemberUid" not allowed when syncing nested posix groups from AD with posixWinsync +- Resolves: bug 951754 - Self entry access ACI not working properly +- Resolves: bug 952517 - Dirsrv instance failed to start with Segmentation fault (core dump) after modifying 7-bit check plugin +- Resolves: bug 952682 - nsslapd-db-transaction-batch-val turns to -1 +- Resolves: bug 966443 - Plugin library path validation +- Resolves: bug 975176 - Non-directory manager can change the individual userPassword's storage scheme +- Resolves: bug 979465 - IPA replica's - "SASL encrypted packet length exceeds maximum allowed limit" +- Resolves: bug 982597 - Some attributes in cn=config should not be multivalued +- Resolves: bug 987009 - 389-ds-base - shebang with /usr/bin/env +- Resolves: bug 994690 - RFE: Allow dynamically adding/enabling/disabling/removing plugins without requiring a server restart +- Resolves: bug 1012991 - errorlog-level 16384 is listed as 0 in cn=config +- Resolves: bug 1013736 - Enabling/Disabling DNA plug-in throws "ldap_modify: Server Unwilling to Perform (53)" error +- Resolves: bug 1014380 - setup-ds.pl doesn't lookup the "root" group correctly +- Resolves: bug 1020459 - rsa_null_sha should not be enabled by default +- Resolves: bug 1024541 - start dirsrv after ntpd +- Resolves: bug 1029959 - Managed Entries betxnpreoperation - transaction not aborted upon failure to create managed entry +- Resolves: bug 1031216 - add dbmon.sh +- Resolves: bug 1044133 - Indexed search with filter containing '&' and "!" with attribute subtypes gives wrong result +- Resolves: bug 1044134 - should set LDAP_OPT_X_SASL_NOCANON to LDAP_OPT_ON by default +- Resolves: bug 1044135 - make connection buffer size adjustable +- Resolves: bug 1044137 - posix winsync should support ADD user/group entries from DS to AD +- Resolves: bug 1044138 - mep_pre_op: Unable to fetch origin entry +- Resolves: bug 1044139 - [RFE] Support RFC 4527 Read Entry Controls +- Resolves: bug 1044140 - Allow search to look up 'in memory RUV' +- Resolves: bug 1044141 - MMR stress test with dna enabled causes a deadlock +- Resolves: bug 1044142 - winsync doesn't sync DN valued attributes if DS DN value doesn't exist +- Resolves: bug 1044143 - modrdn + NSMMReplicationPlugin - Consumer failed to replay change +- Resolves: bug 1044144 - resurrected entry is not correctly indexed +- Resolves: bug 1044146 - Add a warning message when a connection hits the max number of threads +- Resolves: bug 1044147 - 7-bit check plugin does not work for userpassword attribute +- Resolves: bug 1044148 - The backend name provided to bak2db is not validated +- Resolves: bug 1044149 - Winsync should support range retrieval +- Resolves: bug 1044150 - 7-bit checking is not necessary for userPassword +- Resolves: bug 1044151 - With SeLinux, ports can be labelled per range. setup-ds.pl or setup-ds-admin.pl fail to detect already ranged labelled ports +- Resolves: bug 1044152 - ChainOnUpdate: "cn=directory manager" can modify userRoot on consumer without changes being chained or replicated. Directory integrity compromised. 
+- Resolves: bug 1044153 - mods optimizer
+- Resolves: bug 1044154 - multi master replication allows schema violation
+- Resolves: bug 1044156 - DS crashes with some 7-bit check plugin configurations
+- Resolves: bug 1044157 - Some updates of "passwordgraceusertime" are useless when updating "userpassword"
+- Resolves: bug 1044159 - [RFE] Support 'Content Synchronization Operation' (SyncRepl) - RFC 4533
+- Resolves: bug 1044160 - remove-ds.pl should remove /var/lock/dirsrv
+- Resolves: bug 1044162 - enhance retro changelog
+- Resolves: bug 1044163 - updates to ruv entry are written to retro changelog
+- Resolves: bug 1044164 - Password administrators should be able to violate password policy
+- Resolves: bug 1044168 - Schema replication between DS versions may overwrite newer base schema
+- Resolves: bug 1044169 - ACIs do not allow attribute subtypes in targetattr keyword
+- Resolves: bug 1044170 - Allow memberOf suffixes to be configurable
+- Resolves: bug 1044171 - Allow referential integrity suffixes to be configurable
+- Resolves: bug 1044172 - Plugin library path validation prevents intentional loading of out-of-tree modules
+- Resolves: bug 1044173 - make referential integrity configuration more flexible
+- Resolves: bug 1044177 - allow configuring changelog trim interval
+- Resolves: bug 1044179 - objectclass may, must lists skip rest of objectclass once first is found in sup
+- Resolves: bug 1044180 - memberOf on a user is converted to lowercase
+- Resolves: bug 1044181 - report unindexed internal searches
+- Resolves: bug 1044183 - With 1.3.04 and subtree-renaming OFF, when a user is deleted after restarting the server, the same entry can't be added
+- Resolves: bug 1044185 - dbscan on entryrdn should show all matching values
+- Resolves: bug 1044187 - logconv.pl - RFE - add an option for a minimum etime for unindexed search stats
+- Resolves: bug 1044188 - Recognize compressed log files
+- Resolves: bug 1044191 - support TLSv1.1 and TLSv1.2, if supported by NSS
+- Resolves: bug 1044193 - default nsslapd-sasl-max-buffer-size should be 2MB
+- Resolves: bug 1044194 - Complex filter in a search request doesn't work as expected.
+- Resolves: bug 1044196 - Automember plug-in should treat MODRDN operations as ADD operations +- Resolves: bug 1044198 - Replication of the schema may overwrite consumer 'attributetypes' even if consumer definition is a superset +- Resolves: bug 1044202 - db2bak.pl issue when specifying non-default directory +- Resolves: bug 1044203 - Allow referint plugin to use an alternate config area +- Resolves: bug 1044205 - Allow memberOf to use an alternate config area +- Resolves: bug 1044210 - idl switch does not work +- Resolves: bug 1044211 - make old-idl tunable +- Resolves: bug 1044212 - IDL-style can become mismatched during partial restoration +- Resolves: bug 1044213 - backend performance - introduce optimization levels +- Resolves: bug 1044215 - using transaction batchval violates durability +- Resolves: bug 1044216 - examine replication code to reduce amount of stored state information +- Resolves: bug 1048980 - 7-bit check plugin not checking MODRDN operation +- Resolves: bug 1049030 - Windows Sync group issues +- Resolves: bug 1052751 - Page control does not work if effective rights control is specified +- Resolves: bug 1052754 - Allow nsDS5ReplicaBindDN to be a group DN +- Resolves: bug 1057803 - logconv errors when search has invalid bind dn +- Resolves: bug 1060032 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime. +- Resolves: bug 1061060 - betxn: retro changelog broken after cancelled transaction +- Resolves: bug 1061572 - improve dbgen rdn generation, output and man page. +- Resolves: bug 1063990 - single valued attribute replicated ADD does not work +- Resolves: bug 1064006 - Size returned by slapi_entry_size is not accurate +- Resolves: bug 1064986 - Replication retry time attributes cannot be added +- Resolves: bug 1067090 - Missing warning for invalid replica backoff configuration +- Resolves: bug 1072032 - Updating nsds5ReplicaHost attribute in a replication agreement fails with error 53 +- Resolves: bug 1074306 - Under heavy stress, failure of turning a tombstone into glue makes the server hung +- Resolves: bug 1074447 - Part of DNA shared configuration is deleted after server restart +- Resolves: bug 1076729 - Continuous add/delete of an entry in MMR setup causes entryrdn-index conflict +- Resolves: bug 1077884 - ldap/servers/slapd/back-ldbm/dblayer.c: possible minor problem with sscanf +- Resolves: bug 1077897 - Memory leak with proxy auth control +- Resolves: bug 1079099 - Simultaneous adding a user and binding as the user could fail in the password policy check +- Resolves: bug 1080186 - Creating a glue fails if one above level is a conflict or missing +- Resolves: bug 1082967 - attribute uniqueness plugin fails when set as a chaining component +- Resolves: bug 1085011 - Directory Server crash reported from reliab15 execution +- Resolves: bug 1086890 - empty modify returns LDAP_INVALID_DN_SYNTAX +- Resolves: bug 1086902 - mem leak in do_bind when there is an error +- Resolves: bug 1086904 - mem leak in do_search - rawbase not freed upon certain errors +- Resolves: bug 1086908 - Performing deletes during tombstone purging results in operation errors +- Resolves: bug 1090178 - #481 breaks possibility to reassemble memberuid list +- Resolves: bug 1092099 - A replicated MOD fails (Unwilling to perform) if it targets a tombstone +- Resolves: bug 1092342 - nsslapd-ndn-cache-max-size accepts any invalid value. 
+- Resolves: bug 1092648 - Negative value of nsSaslMapPriority is not reset to lowest priority
+- Resolves: bug 1097004 - Problem with deletion while replicated
+- Resolves: bug 1098654 - db2bak.pl error with changelogdb
+- Resolves: bug 1099654 - Normalization from old DN format to New DN format doesn't handle condition properly when there is space in a suffix after the separator operator.
+- Resolves: bug 1108405 - find a way to remove replication plugin error messages "changelog iteration code returned a dummy entry with csn %s, skipping ..."
+- Resolves: bug 1108407 - managed entry plugin fails to update managed entry pointer on modrdn operation
+- Resolves: bug 1108865 - memory leak in ldapsearch filter objectclass=*
+- Resolves: bug 1108870 - ACI warnings in error log
+- Resolves: bug 1108872 - Logconv.pl with an empty access log gives lots of errors
+- Resolves: bug 1108874 - logconv.pl memory continually grows
+- Resolves: bug 1108881 - rsearch filter error on any search filter
+- Resolves: bug 1108895 - [RFE - RHDS9] CLI report to monitor replication
+- Resolves: bug 1108902 - rhds91 389-ds-base-1.2.11.15-31.el6_5.x86_64 crash in db4 __dbc_get_pp env = 0x0 ?
+- Resolves: bug 1108909 - single valued attribute replicated ADD does not work
+- Resolves: bug 1109334 - 389 Server crashes if uniqueMember is invalid syntax and memberOf plugin is enabled.
+- Resolves: bug 1109336 - Parent numsubordinate count can be incorrectly updated if an error occurs
+- Resolves: bug 1109339 - Nested tombstones become orphaned after purge
+- Resolves: bug 1109354 - Tombstone purging can crash the server if the backend is stopped/disabled
+- Resolves: bug 1109357 - Coverity issue in 1.3.3
+- Resolves: bug 1109364 - valgrind - value mem leaks, uninit mem usage
+- Resolves: bug 1109375 - provide default syntax plugin
+- Resolves: bug 1109378 - Environment variables are not passed when DS is started via service
+- Resolves: bug 1111364 - Updating winsync one-way sync does not affect the behaviour dynamically
+- Resolves: bug 1112824 - Broken dereference control with the FreeIPA 4.0 ACIs
+- Resolves: bug 1113605 - server restart wipes out index config if there is a default index
+- Resolves: bug 1115177 - attrcrypt_generate_key calls slapd_pk11_TokenKeyGenWithFlags with improper macro
+- Resolves: bug 1117021 - Server deadlock if online import started while server is under load
+- Resolves: bug 1117975 - paged results control is not working in some cases when we have a subsuffix.
+- Resolves: bug 1117979 - harden the list of ciphers available by default +- Resolves: bug 1117981 - Fix various typos in manpages & code +- Resolves: bug 1117982 - Fix hyphens used as minus signed and other manpage mistakes +- Resolves: bug 1118002 - server crashes deleting a replication agreement +- Resolves: bug 1118006 - RFE - forcing passwordmustchange attribute by non-cn=directory manager +- Resolves: bug 1118007 - [RFE] Make it possible for privileges to be provided to an admin user to import an LDIF file containing hashed passwords +- Resolves: bug 1118014 - Enhance ACIs to have more control over MODRDN operations +- Resolves: bug 1118021 - Return all attributes in rootdse without explicit request +- Resolves: bug 1118025 - Slow ldapmodify operation time for large quantities of multi-valued attribute values +- Resolves: bug 1118032 - Schema Replication Issue +- Resolves: bug 1118034 - 389 DS Server crashes and dies while handles paged searches from clients +- Resolves: bug 1118043 - Failed deletion of aci: no such attribute +- Resolves: bug 1118048 - If be_txn plugin fails in ldbm_back_add, adding entry is double freed. +- Resolves: bug 1118051 - Add switch to disable pre-hashed password checking +- Resolves: bug 1118054 - Make ldbm_back_seq independently support transactions +- Resolves: bug 1118055 - Add operations rejected by betxn plugins remain in cache +- Resolves: bug 1118057 - online import crashes server if using verbose error logging +- Resolves: bug 1118059 - add fixup-memberuid.pl script +- Resolves: bug 1118060 - winsync plugin modify is broken +- Resolves: bug 1118066 - memberof scope: allow to exclude subtrees +- Resolves: bug 1118069 - 389-ds production segfault: __memcpy_sse2_unaligned () at ../sysdeps/x86_64/multiarch/memcpy-sse2-unaligned.S:144 +- Resolves: bug 1118074_DELETE_FN - plugin returned error" messages +- Resolves: bug 1118076 - ds logs many "Operation error fetching Null DN" messages +- Resolves: bug 1118077 - Improve import logging and abort handling +- Resolves: bug 1118079 - Multi master replication initialization incomplete after restore of one master +- Resolves: bug 1118080 - Don't add unhashed password mod if we don't have an unhashed value +- Resolves: bug 1118081 - Investigate betxn plugins to ensure they return the correct error code +- Resolves: bug 1118082 - The error result text message should be obtained just prior to sending result +- Resolves: bug 1123865 - CVE-2014-3562 389-ds-base: 389-ds: unauthenticated information disclosure [rhel-7.1] + +* Fri May 2 2014 Noriko Hosoi - 1.3.1.6-26 +- release 1.3.1.6-26 +- Resolves: bug 1085011 - Directory Server crash reported from reliab15 execution (Ticket 346) + +* Mon Mar 31 2014 Noriko Hosoi - 1.3.1.6-25 +- release 1.3.1.6-25 +- Resolves: bug 1082740 - ns-slapd crash in reliability 15 + +* Thu Mar 13 2014 Noriko Hosoi - 1.3.1.6-24 +- release 1.3.1.6-24 +- Resolves: bug 1074084 - e_uniqueid fails to set if an entry is a conflict entry (Ticket 47735); regression - sub-type length in attribute type was mistakenly subtracted. 
+
+* Tue Mar 11 2014 Noriko Hosoi - 1.3.1.6-23
+- Resolves: bug 1074850 - EMBARGOED CVE-2014-0132 389-ds-base: 389-ds: flaw in parsing authzid can lead to privilege escalation [rhel-7.0] (Ticket 47739 - directory server is insecurely misinterpreting authzid on a SASL/GSSAPI bind) (Added 0095-Ticket-47739-directory-server-is-insecurely-misinter.patch)
+
+* Tue Mar 11 2014 Noriko Hosoi - 1.3.1.6-23
+- release 1.3.1.6-22
+- Resolves: bug 1074850 - EMBARGOED CVE-2014-0132 389-ds-base: 389-ds: flaw in parsing authzid can lead to privilege escalation [rhel-7.0] (Ticket 47739 - directory server is insecurely misinterpreting authzid on a SASL/GSSAPI bind)
+
+* Mon Mar 10 2014 Noriko Hosoi - 1.3.1.6-22
+- release 1.3.1.6-22
+- Resolves: bug 1074084 - e_uniqueid fails to set if an entry is a conflict entry (Ticket 47735)
+
+* Tue Feb 25 2014 Noriko Hosoi - 1.3.1.6-21
+- release 1.3.1.6-21
+- Resolves: bug 918694 - Fix covscan defect FORWARD_NULL (Ticket 408)
+- Resolves: bug 918717 - Fix covscan defect COMPILER WARNINGS (Ticket 571)
+
+* Tue Feb 25 2014 Noriko Hosoi - 1.3.1.6-20
+- release 1.3.1.6-20
+- Resolves: bug 1065242 - 389-ds-base, conflict occurs at yum installation if multilib_policy=all. (Ticket 47709)
+
+* Tue Feb 18 2014 Noriko Hosoi - 1.3.1.6-19
+- release 1.3.1.6-19
+- Resolves: bug 1065971 - Enrolling a host into IdM/IPA always takes two attempts (Ticket 47704)
+
+* Mon Feb 3 2014 Noriko Hosoi - 1.3.1.6-18
+- release 1.3.1.6-18
+- Resolves: bug 838656 - logconv.pl tool removes the access logs contents if "-M" is not correctly used (Ticket 471)
+- Resolves: bug 922538 - improve dbgen rdn generation, output (Ticket 47374)
+- Resolves: bug 970750 - flush.pl is not included in perl5 (Ticket 47374)
+- Resolves: bug 1013898 - Fix various issues with logconv.pl (Ticket 471)
+
+* Wed Jan 29 2014 Noriko Hosoi - 1.3.1.6-17
+- release 1.3.1.6-17
+- Resolves: bug 853106 - Deleting attribute present in nsslapd-allowed-to-delete-attrs returns Operations error (Ticket 443)
+- Resolves: bug 1049525 - Server hangs in cos_cache when adding a user entry (Ticket 47649)
+
+* Wed Jan 29 2014 Daniel Mach - 1.3.1.6-16
+- Mass rebuild 2014-01-24
+
+* Tue Jan 21 2014 Noriko Hosoi - 1.3.1.6-15
+- release 1.3.1.6-15
+- Resolves: bug 918702 - better error message when cache overflows (Ticket 342)
+- Resolves: bug 1009679 - replication stops with excessive clock skew (Ticket 47516)
+- Resolves: bug 1042855 - Unable to delete protocol timeout attribute (Ticket 47620)
+- Resolves: bug 918694 - Fix crash when disabling/enabling the setting (Ticket 408)
+- Resolves: bug 853355 - config_set_allowed_to_delete_attrs: Valgrind reports Invalid read (Ticket 47660)
+
+* Wed Jan 8 2014 Noriko Hosoi - 1.3.1.6-14
+- release 1.3.1.6-14
+- Resolves: bug 853355 - Possible to add invalid attribute to nsslapd-allowed-to-delete-attrs (Ticket 447)
+- Resolves: bug 1034739 - Impossible to configure nsslapd-allowed-sasl-mechanisms (Ticket 47613)
+- Resolves: bug 1038639 - 389-ds rejects nsds5ReplicaProtocolTimeout attribute; Fix logically dead code; Fix dereferenced NULL pointer in agmtlist_modify_callback(); Fix missing left bracket (Ticket 47620)
+- Resolves: bug 1042855 - nsds5ReplicaProtocolTimeout attribute is not validated when added to replication agreement; Config value validation improvement (Ticket 47620)
+- Resolves: bug 918717 - server does not accept 0 length LDAP Control sequence (Ticket 571)
+- Resolves: bug 1034902 - replica init/bulk import errors should be more verbose (Ticket 47606)
+- Resolves: bug 1044219 - fix
memleak caused by 47347 (Ticket 47623) +- Resolves: bug 1049522 - Crash after replica is installed; Fix cherry-pick error for 1.3.2 and 1.3.1 (Ticket 47620) +- Resolves: bug 1049568 - changelog iteration should ignore cleaned rids when getting the minCSN (Ticket 47627) + +* Fri Dec 27 2013 Daniel Mach - 1.3.1.6-13 +- Mass rebuild 2013-12-27 + +* Tue Dec 10 2013 Noriko Hosoi - 1.3.1.6-12 +- release 1.3.1.6-12 +- Resolves: bug 1038639 - 389-ds rejects nsds5ReplicaProtocolTimeout attribute (Ticket 47620) +- Resolves: bug 1034898 - automember plugin task memory leaks (Ticket 47592) +- Resolves: bug 1034451 - Possible to specify invalid SASL mechanism in nsslapd-allowed-sasl-mechanisms (Ticket 47614) +- Resolves: bug 1032318 - entries with empty objectclass attribute value can be hidden (Ticket 47591) +- Resolves: bug 1032316 - attrcrypt fails to find unlocked key (Ticket 47596) +- Resolves: bug 1031227 - Reduce lock scope in retro changelog plug-in (Ticket 47599) +- Resolves: bug 1031226 - Convert ldbm_back_seq code to be transaction aware (Ticket 47598) +- Resolves: bug 1031225 - Convert retro changelog plug-in to betxn (Ticket 47597) +- Resolves: bug 1031223 - hard coded limit of 64 masters in agreement and changelog code (Ticket 47587) +- Resolves: bug 1034739 - Impossible to configure nsslapd-allowed-sasl-mechanisms (Ticket 47613) +- Resolves: bug 1035824 - Automember betxnpreoperation - transaction not aborted when group entry does not exist (Ticket 47622) + +* Thu Nov 21 2013 Rich Megginson - 1.3.1.6-11 +- Resolves: bug 1024979 - CVE-2013-4485 389-ds-base: DoS due to improper handling of ger attr searches + +* Tue Nov 12 2013 Rich Megginson - 1.3.1.6-10 +- release 1.3.1.6-10 +- Resolves: bug 1018893 DS91: ns-slapd stuck in DS_Sleep +- had to revert earlier change - does not work and breaks ipa + +* Tue Nov 12 2013 Noriko Hosoi - 1.3.1.6-9 +- release 1.3.1.6-9 +- Resolves: bug 1028440 - Winsync replica initialization and incremental updates from DS to AD fails on RHEL +- Resolves: bug 1027502 - Replication Failures related to skipped entries due to cleaned rids +- Resolves: bug 1027047 - Winsync plugin segfault during incremental backoff + +* Wed Nov 6 2013 Noriko Hosoi - 1.3.1.6-8 +- release 1.3.1.6-8 +- Resolves: bug 971111 - DNA plugin failed to fetch replication agreement +- Resolves: bug 1026931 - 1.2.11.29 crash when removing entries from cache + +* Mon Oct 21 2013 Rich Megginson - 1.3.1.6-7 +- Resolves: bug 1018893 DS91: ns-slapd stuck in DS_Sleep +- Resolves: bug 1018914 fixup memberof task does not work: task entry not added + +* Fri Oct 11 2013 Rich Megginson - 1.3.1.6-6 +- Resolves: bug 1013900 - logconv: some stats do not work across server restarts +- previous patch introduced regressions +- fixed by c2eced0 ticket #47550 and e2a880b Ticket #47550 and 8b10f83 Ticket #47551 +- Resolves: bug 1008610 - tmpfiles.d references /var/lock when they should reference /run/lock +- previous patch not complete, fixed by a11be5c Ticket 47513 +- Resolves: bug 1016749 - DS crashes when "cn=Directory Manager" is changing it's password +- cherry picked upstream f786600 Ticket 47329 and b67e230 Coverity Fixes +- Resolves: bug 1015252 locale "nl" not supported by collation plugin +- Resolves: bug 1016317 Need to update supported locales +- Resolves: bug 1016722 memory leak in range searches + +* Tue Oct 1 2013 Rich Megginson - 1.3.1.6-5 +- Resolves: bug 1013896 - logconv.pl - Use of comma-less variable list is deprecated +- Resolves: bug 1008256 - backend txn plugin fixup tasks should be done in 
a txn
+- Resolves: bug 1013738 - CLEANALLRUV doesn't run across all replicas
+- Resolves: bug 1011220 - PassSync removes User must change password flag on the Windows side
+- Resolves: bug 1008610 - tmpfiles.d references /var/lock when they should reference /run/lock
+- Resolves: bug 1012125 - Set up replication/agreement before initializing the sub suffix, the sub suffix is not found by ldapsearch
+- Resolves: bug 1013063 - RUV tombstone search with scope "one" doesn't work
+- Resolves: bug 1013893 - Indexed search are logged with 'notes=U' in the access logs
+- Resolves: bug 1013894 - improve logconv.pl performance with large access logs
+- Resolves: bug 1013898 - Fix various issues with logconv.pl
+- Resolves: bug 1013897 - logconv.pl uses /var/tmp for BDB temp files
+- Resolves: bug 1013900 - logconv: some stats do not work across server restarts
+- Resolves: bug 1014354 - Coverity fixes - 12023, 12024, and 12025
+
+* Fri Sep 13 2013 Noriko Hosoi - 1.3.1.6-4
+- bump version to 1.3.1.6-4
+- Resolves Bug 1007988 - Under specific values of nsDS5ReplicaName, replication may get broken or updates missing (Ticket 47489)
+- Resolves Bug 853931 - Allow macro aci keywords to be case-insensitive (Ticket 449)
+- Resolves Bug 1006563 - automember rebuild task not working as expected (Ticket 47507)
+
+* Fri Sep 6 2013 Rich Megginson - 1.3.1.6-3
+- Ticket #47455 - valgrind - value mem leaks, uninit mem usage
+- Ticket 47500 - start-dirsrv/restart-dirsrv/stop-dirsrv do not register with systemd correctly
+
+* Mon Aug 26 2013 Noriko Hosoi - 1.3.1.6-2
+- bump version to 1.3.1.6-2
+- Resolves Bug 1000633 - ns-slapd crash due to bogus DN
+- Ticket #47488 - Users from AD sub OU does not sync to IPA
+
+* Thu Aug 01 2013 Noriko Hosoi - 1.3.1.6-1
+- bump version to 1.3.1.6
+- Ticket 47455 - valgrind - value mem leaks, uninit mem usage
+- fix coverity 11915 - dead code - introduced with fix for ticket 346
+- fix coverity 11895 - null deref - caused by fix to ticket 47392
+- fix compiler warning in posix winsync code for posix_group_del_memberuid_callback
+- Fix compiler warnings for Ticket 47395 and 47397
+- fix compiler warning (cherry picked from commit 904416f4631d842a105851b4a9931ae17822a107)
+- Ticket 47450 - Fix compiler formatting warning errors for 32/64 bit arch
+- fix compiler warnings
+- Fix compiler warning (cherry picked from commit ec6ebc0b0f085a82041d993ab2450a3922ef5502)
+
+* Tue Jul 30 2013 Noriko Hosoi - 1.3.1.5-1
+- bump version to 1.3.1.5
+- Ticket 47456 - delete present values should append values to deleted values
+- Ticket 47455 - valgrind - value mem leaks, uninit mem usage
+- Ticket 47448 - Segfault in 389-ds-base-1.3.1.4-1.fc19 when setting up FreeIPA replication
+- Ticket 47440 - Fix runtime errors caused by last patch.
+- Ticket 47440 - Fix compilation warnings and header files
+- Ticket 47405 - CVE-2013-2219 ACLs inoperative in some search scenarios
+- Ticket 47447 - logconv.pl man page missing -m,-M,-B,-D
+- Ticket 47378 - fix recent compiler warnings
+- Ticket 47427 - Overflow in nsslapd-disk-monitoring-threshold
+- Ticket 47449 - deadlock after adding and deleting entries
+- Ticket 47441 - Disk Monitoring not checking filesystem with logs
+- Ticket 47427 - Overflow in nsslapd-disk-monitoring-threshold
+
+* Fri Jul 19 2013 Noriko Hosoi - 1.3.1.4-1
+- bump version to 1.3.1.4
+- Ticket 47435 - Very large entryusn values after enabling the USN plugin and the lastusn value is negative
+- Ticket 47424 - Replication problem with add-delete requests on single-valued attributes
+- Ticket 47367 - (phase 2) ldapdelete returns non-leaf entry error while trying to remove a leaf entry
+- Ticket 47367 - (phase 1) ldapdelete returns non-leaf entry error while trying to remove a leaf entry
+- Ticket 47421 - memory leaks in set_krb5_creds
+- Ticket 346 - version 4 Slow ldapmodify operation time for large quantities of multi-valued attribute values
+- Ticket 47369 version2 - provide default syntax plugin
+- Ticket 47427 - Overflow in nsslapd-disk-monitoring-threshold
+- Ticket 47339 - RHDS denies MODRDN access if ACI list contains any DENY rule
+- Ticket 47427 - Overflow in nsslapd-disk-monitoring-threshold
+- Ticket 47428 - Memory leak in 389-ds-base 1.2.11.15
+- Ticket 47392 - ldbm errors when adding/modifying/deleting entries
+- Ticket 47385 - Disk Monitoring is not triggered as expected.
+- Ticket 47410 - changelog db deadlocks with DNA and replication
+
+* Fri Jul 19 2013 Rich Megginson - 1.3.1.3-1
+- bump version to 1.3.1.3
+- Ticket 47374 - flush.pl is not included in perl5
+- Ticket 47391 - deleting and adding userpassword fails to update the password (additional fix)
+- Ticket 47393 - Attribute are not encrypted on a consumer after a full initialization
+- Ticket 47395 47397 - v2 correct behaviour of account policy if only stateattr is configured or no alternate attr is configured
+- Ticket 47396 - crash on modrdn of tombstone
+- Ticket 47400 - MMR stress test with dna enabled causes a deadlock
+- Ticket 47409 - allow setting db deadlock rejection policy
+- Ticket 47419 - Unhashed userpassword can accidentally get removed from mods
+- Ticket 47420 - An upgrade script 80upgradednformat.pl fails to handle a server instance name including '-'
+
+* Fri Jul 12 2013 Jan Safranek - 1.3.1.2-2
+- Rebuilt for new net-snmp
+
+* Sat Jun 15 2013 Noriko Hosoi - 1.3.1.2-1
+- bump version to 1.3.1.2
+- Ticket 47391 - deleting and adding userpassword fails to update the password
+- Coverity Fixes (Part 7)
+
+* Fri Jun 14 2013 Noriko Hosoi - 1.3.1.1-1
+- bump version to 1.3.1.1
+- Ticket 402 - unhashed#user#password in entry extension
+- Ticket 511 - Revision - allow turning off vattr lookup in search entry return
+- Ticket 580 - Wrong error code return when using EXTERNAL SASL and no client certificate
+- Ticket 47327 - error syncing group if group member user is not synced
+- Ticket 47355 - dse.ldif doesn't replicate update to nsslapd-sasl-mapping-fallback
+- Ticket 47359 - new ldap connections can block ldaps and ldapi connections
+- Ticket 47362 - ipa upgrade selinuxusermap data not replicating
+- Ticket 47375 - flush_ber error sending back start_tls response will deadlock
+- Ticket 47376 - DESC should not be empty as per RFC 2252 (ldapv3)
+- Ticket 47377 - make listen backlog size configurable
+- Ticket 47378 - fix recent compiler warnings
+- Ticket 47383 - connections attribute in cn=snmp,cn=monitor is counted twice
+- Ticket 47385 - DS not shutting down when disk monitoring threshold is reached
+- Coverity Fixes (part 1)
+- Coverity Fixes (Part 2)
+- Coverity Fixes (Part 3)
+- Coverity Fixes (Part 4)
+- Coverity Fixes (Part 5)
+
+* Thu May 02 2013 Noriko Hosoi - 1.3.1.0-1
+- bump version to 1.3.1.0
+- Ticket 332 - Command line perl scripts should attempt most secure connection type first
+- Ticket 342 - better error message when cache overflows
+- Ticket 417 - RFE - forcing passwordmustchange attribute by non-cn=directory manager
+- Ticket 419 - logconv.pl - improve memory management
+- Ticket 422 - 389-ds-base - Can't call method "getText"
+- Ticket 433 - multiple bugs in start-dirsrv, stop-dirsrv, restart-dirsrv scripts
+- Ticket 458 - RFE - Make it possible for privileges to be provided to an admin user to import an LDIF file containing hashed passwords
+- Ticket 471 - logconv.pl tool removes the access logs contents if "-M" is not correctly used
+- Ticket 487 - Possible to add invalid attribute values to PAM PTA plugin configuration
+- Ticket 502 - setup-ds.pl script should wait if "semanage.trans.LOCK" present
+- Ticket 505 - use lock-free access name2asi and oid2asi tables (additional)
+- Ticket 508 - lock-free access to FrontendConfig structure
+- Ticket 511 - allow turning off vattr lookup in search entry return
+- Ticket 525 - Introducing a user visible configuration variable for controlling replication retry time
+- Ticket 528 - RFE - get rid of instance specific scripts
+- Ticket 529 - dn normalization must handle multiple space characters in attributes
+- Ticket 532 - RUV is not getting updated for both Master and consumer
+- Ticket 533 - only scan for attributes to decrypt if there are encrypted attrs configured
+- Ticket 534 - RFE: Add SASL mappings fallback
+- Ticket 537 - Improvement of range search
+- Ticket 539 - logconv.pl should handle microsecond timing
+- Ticket 543 - Sorting with attributes in ldapsearch gives incorrect result
+- Ticket 545 - Segfault during initial LDIF import: str2entry_dupcheck()
+- Ticket 547 - Incorrect assumption in ndn cache
+- Ticket 550 - posix winsync will not create memberuid values if group entry become posix group in the same sync interval
+- Ticket 551 - Multivalued rootdn-days-allowed in RootDN Access Control plugin always results in access control violation
+- Ticket 552 - Adding rootdn-open-time without rootdn-close-time to RootDN Access Control results in inconsistent configuration
+- Ticket 558 - Replication - make timeout for protocol shutdown configurable
+- Ticket 561 - disable writing unhashed#user#password to changelog
+- Ticket 563 - DSCreate.pm: Error messages cannot be used in the if expression since they could be localized.
+- Ticket 565 - turbo mode and replication - allow disable of turbo mode
+- Ticket 571 - server does not accept 0 length LDAP Control sequence
+- Ticket 574 - problems with dbcachesize disk space calculation
+- Ticket 583 - dirsrv fails to start on reboot due to /var/run/dirsrv permissions
+- Ticket 585 - Behaviours of "db2ldif -a " and "db2ldif.pl -a " are inconsistent
+- Ticket 587 - Replication error messages in the DS error logs
+- Ticket 588 - Create MAN pages for command line scripts
+- Ticket 600 - Server should return unavailableCriticalExtension when processing a badly formed critical control
+- Ticket 603 - A logic error in str2simple
+- Ticket 604 - Required attribute not checked during search operation
+- Ticket 608 - Posix Winsync plugin throws "posix_winsync_end_update_cb: failed to add task entry" error message
+- Ticket 611 - logconv.pl missing stats for StartTLS, LDAPI, and AUTOBIND
+- Ticket 612 - improve dbgen rdn generation, output
+- Ticket 613 - ldclt: add timestamp, interval, nozeropad, other improvements
+- Ticket 616 - High contention on computed attribute lock
+- Ticket 618 - Crash at shutdown while stopping replica agreements
+- Ticket 620 - Better logging of error messages for 389-ds-base
+- Ticket 621 - modify operations without values need to be written to the changelog
+- Ticket 622 - DS logging errors "libdb: BDB0171 seek: 2147483648: (262144 * 8192) + 0: No such file or directory
+- Ticket 631 - Replication: "Incremental update started" status message without consumer initialized
+- Ticket 633 - allow nsslapd-nagle to be disabled, and also tcp cork
+- Ticket 47299 - allow cmdline scripts to work with non-root user
+- Ticket 47302 - get rid of sbindir start/stop/restart slapd scripts
+- Ticket 47303 - start/stop/restart dirsrv scripts should report an error if no instances
+- Ticket 47304 - reinitialization of a master with a disabled agreement hangs
+- Ticket 47311 - segfault in db2ldif (triggered by a cleanallruv task)
+- Ticket 47312 - replace PR_GetFileInfo with PR_GetFileInfo64
+- Ticket 47315 - filter option in fixup-memberof requires more clarification
+- Ticket 47325 - Crash at shutdown on a replica agreement
+- Ticket 47330 - changelog db extension / upgrade is obsolete
+- Ticket 47336 - logconv.pl -m not working for all stats
+- Ticket 47341 - logconv.pl -m time calculation is wrong
+- Ticket 47343 - 389-ds-base: Does not support aarch64 in f19 and rawhide
+- Ticket 47347 - Simple paged results should support async search
+- Ticket 47348 - add etimes to per second/minute stats
+- Ticket 47349 - DS instance crashes under a high load
+
+* Thu Mar 28 2013 Noriko Hosoi - 1.3.0.5-1
+- bump version to 1.3.0.5
+- Ticket 47308 - unintended information exposure when anonymous access is set to rootdse
+- Ticket 628 - crash in aci evaluation
+- Ticket 627 - ns-slapd crashes sporadically with segmentation fault in libslapd.so
+- Ticket 634 - Deadlock in DNA plug-in
+- Ticket #576 - DNA: use event queue for config update only at the start up
+- Ticket 632 - 389-ds-base cannot handle Kerberos tickets with PAC
+- Ticket 623 - cleanAllRUV task fails to cleanup config upon completion
+
+* Mon Mar 11 2013 Mark Reynolds - 1.3.0.4-1
+- e53d691 bump version to 1.3.0.4
+- Bug 912964 - CVE-2013-0312 389-ds: unauthenticated denial of service vulnerability in handling of LDAPv3 control data
+- Ticket 570 - DS returns error 20 when replacing values of a multi-valued attribute (only when replication is enabled)
+- Ticket 490 - Slow role performance when using a lot of roles
+- Ticket 590 - ns-slapd segfaults while trying to delete a tombstone entry
+
+* Wed Feb 13 2013 Noriko Hosoi - 1.3.0.3-1
+- bump version to 1.3.0.3
+- Ticket #584 - Existence of an entry is not checked when its password is to be deleted
+- Ticket 562 - Crash when deleting suffix
+
+* Fri Feb 01 2013 Parag Nemade - 1.3.0.2-2
+- Rebuild for icu 50
+
+* Wed Jan 16 2013 Noriko Hosoi - 1.3.0.2-1
+- bump version to 1.3.0.2
+- Ticket #542 - Cannot dynamically set nsslapd-maxbersize
+
+* Wed Jan 16 2013 Noriko Hosoi - 1.3.0.1-1
+- bump version to 1.3.0.1
+- Ticket 556 - Don't overwrite certmap.conf during upgrade
+
+* Tue Jan 08 2013 Noriko Hosoi - 1.3.0.0-1
+- bump version to 1.3.0.0
+
+* Tue Jan 08 2013 Noriko Hosoi - 1.3.0-0.3.rc3
+- bump version to 1.3.0.rc3
+- Ticket 549 - DNA plugin no longer reports additional info when range is depleted
+- Ticket 541 - need to set plugin as off in ldif template
+- Ticket 541 - RootDN Access Control plugin is missing after upgrade
+
+* Fri Dec 14 2012 Noriko Hosoi - 1.3.0-0.2.rc2
+- bump version to 1.3.0.rc2
+- Trac Ticket #497 - Escaped character cannot be used in the substring search filter
+- Ticket 509 - lock-free access to be->be_suffixlock
+- Trac Ticket #522 - betxn: upgrade is not implemented yet
+
+* Tue Dec 11 2012 Noriko Hosoi - 1.3.0-0.1.rc1
+- bump version to 1.3.0.rc1
+- Ticket #322 - Create DOAP description for the 389 Directory Server project
+- Trac Ticket #499 - Handling URP results is not correct
+- Ticket 509 - lock-free access to be->be_suffixlock
+- Ticket 456 - improve entry cache sizing
+- Trac Ticket #531 - loading an entry from the database should use str2entry_f
+- Trac Ticket #536 - Clean up compiler warnings for 1.3
+- Trac Ticket #531 - loading an entry from the database should use str2entry_fast
+- Ticket 509 - lock-free access to be->be_suffixlock
+- Ticket 527 - ns-slapd segfaults if it cannot rename the logs
+- Ticket 395 - RFE: 389-ds shouldn't advertise in the rootDSE that we can handle a sasl mech if we really can't
+- Ticket 216 - disable replication agreements
+- Ticket 518 - dse.ldif is 0 length after server kill or machine kill
+- Ticket 393 - Change in winSyncInterval does not take immediate effect
+- Ticket 20 - Allow automember to work on entries that have already been added
+- Coverity Fixes
+- Ticket 349 - nsViewFilter syntax issue in 389DS 1.2.5
+- Ticket 337 - improve CLEANRUV functionality
+- Fix for ticket 504
+- Ticket 394 - modify-delete userpassword
+- minor fixes for bdb 4.2/4.3 and mozldap
+- Trac Ticket #276 - Multiple threads simultaneously working on connection's private buffer causes ns-slapd to abort
+- Fix for ticket 465: cn=monitor showing stats for other db instances
+- Ticket 507 - use mutex for FrontendConfig lock instead of rwlock
+- Fix for ticket 510 Avoid creating an attribute just to determine the syntax for a type, look up the syntax directly by type
+- Coverity defect: Resource leak 13110
+- Ticket 517 - crash in DNA if no dnaMagicRegen is specified
+- Trac Ticket #520 - RedHat Directory Server crashes (segfaults) when moving ldap entry
+- Trac Ticket #519 - Search with a complex filter including range search is slow
+- Trac Ticket #500 - Newly created users with organizationalPerson objectClass fails to sync from AD to DS with missing attribute error
+- Trac Ticket #311 - IP lookup failing with multiple DNS entries
+- Trac Ticket #447 - Possible to add invalid attribute to nsslapd-allowed-to-delete-attrs
+- Trac Ticket #443 - Deleting attribute present in nsslapd-allowed-to-delete-attrs returns Operations error
+- Ticket #503 - Improve AD version in winsync log message
+- Trac Ticket #190 - Un-resolvable server in replication agreement produces unclear error message
+- Coverity fixes
+- Trac Ticket #391 - Slapd crashes when deleting backends while operations are still in progress
+- Trac Ticket #448 - Possible to set invalid macros in Macro ACIs
+- Trac Ticket #498 - Cannot abandon simple paged result search
+- Coverity defects
+- Trac Ticket #494 - slapd entered to infinite loop during new index addition
+- Fixing compiler warnings in the posix-winsync plugin
+- Coverity defects
+- Ticket 147 - Internal Password Policy usage very inefficient
+- Ticket 495 - internalModifiersname not updated by DNA plugin
+- Revert "Ticket 495 - internalModifiersname not updated by DNA plugin"
+- Ticket 495 - internalModifiersname not updated by DNA plugin
+- Ticket 468 - if pam_passthru is enabled, need to AC_CHECK_HEADERS([security/pam_appl.h])
+- Ticket 486 - nsslapd-enablePlugin should not be multivalued
+- Ticket 488 - Doc: DS error log messages with typo
+- Trac Ticket #451 - Allow db2ldif to be quiet
+- Ticket #491 - multimaster_extop_cleanruv returns wrong error codes
+- Ticket #481 - expand nested posix groups
+- Trac Ticket #455 - Insufficient rights to unhashed#user#password when user deletes his password
+- Ticket #446 - anonymous limits are being applied to directory manager
+
+* Tue Oct 9 2012 Mark Reynolds - 1.3.0.a1-1
+Ticket #28 MOD operations with chained delete/add get back error 53 on backend config
+Ticket #173 ds-logpipe.py script's man page and script help should be updated for -t option.
+Ticket #196 RFE: Interpret IPV6 addresses for ACIs, replication, and chaining
+Ticket #218 RFE - Make RIP working with Replicated Entries
+Ticket #328 make sure all internal search filters are properly escaped
+Ticket #329 389-admin build fails on F-18 with new apache
+Ticket #344 deadlock in replica_write_ruv
+Ticket #351 use betxn plugins by default
+Ticket #352 make cos, roles, views betxn aware
+Ticket #356 logconv.pl - RFE - track bind info
+Ticket #365 Audit log - clear text password in user changes
+Ticket #370 Opening merge qualifier CoS entry using RHDS console changes the entry.
+Ticket #372 Setting nsslapd-listenhost or nsslapd-securelistenhost breaks ACI processing
+Ticket #386 Overconsumption of memory with large cachememsize and heavy use of ldapmodify
+Ticket #402 unhashed#user#password in entry extension
+Ticket #408 Create a normalized dn cache
+Ticket #453 db2index with -tattrname:type,type fails
+Ticket #461 fix build problem with mozldap c sdk
+Ticket #462 add test for include file mntent.h
+Ticket #463 different parameters of getmntent in Solaris
+
+* Tue Sep 25 2012 Rich Megginson - 1.2.11.15-1
+- Trac Ticket #470 - 389 prevents from adding a posixaccount with userpassword after schema reload
+- Ticket 477 - CLEANALLRUV if there are only winsync agmts task will hang
+- Ticket 457 - dirsrv init script returns 0 even when few or all instances fail to start
+- Ticket 473 - change VERSION.sh to have console version be major.minor
+- Ticket 475 - Root DN Access Control - improve value checking for config
+- Trac Ticket #466 - entry_apply_mod - ADD: Failed to set unhashed#user#password to extension
+- Ticket 474 - Root DN Access Control - days allowed not working correctly
+- Ticket 467 - CLEANALLRUV abort task should be able to ignore down replicas
+- 0b79915 fix compiler warnings in ticket 374 code
+- Ticket 452 - automember rebuild task adds users to groups that do not match the configuration scope
+
+* Fri Sep 7 2012 Rich Megginson - 1.2.11.14-1
+- Ticket 450 - CLEANALLRUV task gets stuck on winsync replication agreement
+- Ticket 386 - large memory growth with ldapmodify (heap fragmentation)
+- this patch doesn't fix the bug - it allows us to experiment with
+- different values of mxfast
+- Ticket #374 - consumer can go into total update mode for no reason
+
+* Tue Sep 4 2012 Rich Megginson - 1.2.11.13-1
+- Ticket #426 - support posix schema for user and group sync
+- 1) plugin config ldif must contain pluginid, etc. during upgrade or it
+- will fail due to schema errors
+- 2) posix winsync should have a lower precedence (25) than the default (50)
+- so that it will be run first
+- 3) posix winsync should support the Winsync API v3 - the v2 functions are
+- just stubs for now - but the precedence cb is active
+
+* Thu Aug 30 2012 Rich Megginson - 1.2.11.12-1
+- 8e5087a Coverity defects - 13089: Dereference after null check ldbm_back_delete
+- Trac Ticket #437 - variable dn should not be used in ldbm_back_delete
+- ba1f5b2 fix coverity resource leak in windows_plugin_add
+- e3e81db Simplify program flow: change while loops to for
+- a0d5dc0 Fix logic errors: del_mod should be latched (might not be last mod), and avoid skipping add-mods (int value 0)
+- 0808f7e Simplify program flow: make adduids/moduids/deluids action blocks all similar
+- 77eb760 Simplify program flow: eliminate unnecessary continue
+- c9e9db7 Memory leaks: unmatched slapi_attr_get_valueset and slapi_value_new
+- a4ca0cc Change "return"s in modGroupMembership to "break"s to avoid leaking
+- d49035c Factorize into new isPosixGroup function
+- 3b61c03 coverity - posix winsync mem leaks, null check, deadcode, null ref, use after free
+- 33ce2a9 fix mem leaks with parent dn log message, setting winsync windows domain
+- Ticket #440 - periodic dirsync timed event causes server to loop repeatedly
+- Ticket #355 - winsync should not delete entry that appears to be out of scope
+- Ticket 436 - nsds5ReplicaEnabled can be set with any invalid values.
+- 487932d coverity - mbo dead code - winsync leaks, deadcode, null check, test code
+- 2734a71 CLEANALLRUV coverity fixes
+- Ticket #426 - support posix schema for user and group sync
+- Ticket #430 - server to server ssl client auth broken with latest openldap
+
+* Mon Aug 20 2012 Mark Reynolds - 1.2.11.11-1
+6c0778f bumped version to 1.2.11.11
+Ticket 429 - added nsslapd-readonly to DS schema
+Ticket 403 - fix CLEANALLRUV regression from last commit
+Trac Ticket #346 - Slow ldapmodify operation time for large quantities of multi-valued attribute values
+
+* Wed Aug 15 2012 Mark Reynolds - 1.2.11.10-1
+db6b354 bumped version to 1.2.11.10
+Ticket 403 - CLEANALLRUV revisions
+
+* Tue Aug 7 2012 Mark Reynolds - 1.2.11.9-1
+ea05e69 Bumped version to 1.2.11.9
+Ticket 407 - dna memory leak - fix crash from prev fix
+
+* Fri Aug 3 2012 Mark Reynolds - 1.2.11.8-1
+ddcf669 bump version to 1.2.11.8 for official release
+Ticket #425 - support multiple winsync plugins
+Ticket 403 - cleanallruv coverity fixes
+Ticket 407 - memory leak in dna plugin
+Ticket 403 - CLEANALLRUV feature
+Ticket 413 - "Server is unwilling to perform" when running ldapmodify on nsds5ReplicaStripAttrs
+3168f04 Coverity defects
+5ff0a02 COVERITY FIXES
+Ticket #388 - Improve replication agreement status messages
+0760116 Update the slapi-plugin documentation on new slapi functions, and added a slapi function for checking on shutdowns
+Ticket #369 - restore of replica ldif file on second master after deleting two records shows only 1 deletion
+Ticket #409 - Report during startup if nsslapd-cachememsize is too small
+Ticket #412 - memberof performance enhancement
+12813: Uninitialized pointer read string_values2keys
+Ticket #346 - Slow ldapmodify operation time for large quantities of multi-valued attribute values
+Ticket #346 - Slow ldapmodify operation time for large quantities of multi-valued attribute values
+Ticket #410 - Referential integrity plug-in does not work when update interval is not zero
+Ticket #406 - Impossible to rename entry (modrdn) with Attribute Uniqueness plugin enabled
+Ticket #405 - referint modrdn not working if case is different
+Ticket 399 - slapi_ldap_bind() doesn't check bind results
+
+* Wed Jul 18 2012 Fedora Release Engineering - 1.2.11.7-2.2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Thu Jun 28 2012 Petr Pisar - 1.2.11.7-2.1
+- Perl 5.16 rebuild
+
+* Wed Jun 27 2012 Rich Megginson - 1.2.11.7-2
+- Ticket 378 - unhashed#user#password visible after changing password
+- fix func declaration from previous patch
+- Ticket 366 - Change DS to purge ticket from krb cache in case of authentication error
+
+* Wed Jun 27 2012 Rich Megginson - 1.2.11.7-1
+- Trac Ticket 396 - Account Usability Control Not Working
+
+* Thu Jun 21 2012 Rich Megginson - 1.2.11.6-1
+- Ticket #378 - audit log does not log unhashed password: enabled, by default.
+- Ticket #378 - unhashed#user#password visible after changing password +- Ticket #365 - passwords in clear text in the audit log + +* Tue Jun 19 2012 Rich Megginson - 1.2.11.5-2 +- workaround for https://bugzilla.redhat.com/show_bug.cgi?id=833529 + +* Mon Jun 18 2012 Rich Megginson - 1.2.11.5-1 +- Ticket #387 - managed entry sometimes doesn't delete the managed entry +- 5903815 improve txn test index handling +- Ticket #360 - ldapmodify returns Operations error - fix delete caching +- bcfa9e3 Coverity Fix for CLEANALLRUV +- Trac Ticket #335 - transaction retries need to be cache aware +- Ticket #389 - ADD operations not in audit log +- 44cdc84 fix coverity issues with uninit vals, no return checking +- Ticket 368 - Make the cleanAllRUV task one step +- Ticket #110 - RFE limiting root DN by host, IP, time of day, day of week + +* Mon Jun 11 2012 Petr Pisar - 1.2.11.4-1.1 +- Perl 5.16 rebuild + +* Tue May 22 2012 Rich Megginson - 1.2.11.4-1 +- Ticket #360 - ldapmodify returns Operations error +- Ticket #321 - krbExtraData is being null modified and replicated on each ssh login +- Trac Ticket #359 - Database RUV could mismatch the one in changelog under the stress +- Ticket #361: Bad DNs in ACIs can segfault ns-slapd +- Trac Ticket #338 - letters in object's cn get converted to lowercase when renaming object +- Ticket #337 - Improve CLEANRUV task + +* Sat May 5 2012 Rich Megginson - 1.2.11.3-1 +- Ticket #358 - managed entry doesn't delete linked entry + +* Fri May 4 2012 Rich Megginson - 1.2.11.2-1 +- Ticket #351 - use betxn plugins by default +- revert - make no plugins betxn by default - too great a risk +- for deadlocks until we can test this better +- Ticket #348 - crash in ldap_initialize with multiple threads +- fixes PR_Init problem in ldclt + +* Wed May 2 2012 Rich Megginson - 1.2.11.1-1 +- f227f11 Suppress alert on unavailable port with forced setup +- Ticket #353 - coverity 12625-12629 - leaks, dead code, unchecked return +- Ticket #351 - use betxn plugins by default +- Trac Ticket #345 - db deadlock return should not log error +- Ticket #348 - crash in ldap_initialize with multiple threads +- Ticket #214 - Adding Replication agreement should complain if required nsds5ReplicaCredentials not supplied +- Ticket #207 - [RFE] enable attribute that tracks when a password was last set +- Ticket #216 - RFE - Disable replication agreements +- Ticket #337 - RFE - Improve CLEANRUV functionality +- Ticket #326 - MemberOf plugin should work on all backends +- Trac Ticket #19 - Convert entryUSN plugin to transaction aware type +- Ticket #347 - IPA dirsvr seg-fault during system longevity test +- Trac Ticket #310 - Avoid calling escape_string() for logged DNs +- Trac Ticket #338 - letters in object's cn get converted to lowercase when renaming object +- Ticket #183 - passwordMaxFailure should lockout password one sooner +- Trac Ticket #335 - transaction retries need to be cache aware +- Ticket #336 - [abrt] 389-ds-base-1.2.10.4-2.fc16: index_range_read_ext: Process /usr/sbin/ns-slapd was killed by signal 11 (SIGSEGV) +- Ticket #325 - logconv.pl : use of getopts to parse command line options +- Ticket #336 - [abrt] 389-ds-base-1.2.10.4-2.fc16: index_range_read_ext: Process /usr/sbin/ns-slapd was killed by signal 11 (SIGSEGV) +- 554e29d Coverity Fixes +- Trac Ticket #46 - (additional 2) setup-ds-admin.pl does not like ipv6 only hostnames +- Ticket #183 - passwordMaxFailure should lockout password one sooner - and should be configurable to avoid regressions +- Ticket #315 - small fix to libglobs +- 
Ticket #315 - ns-slapd exits/crashes if /var fills up +- Ticket #20 - Allow automember to work on entries that have already been added +- Trac Ticket #45 - Fine Grained Password policy: if passwordHistory is on, deleting the password fails. + +* Fri Mar 30 2012 Rich Megginson - 1.2.11-0.1.a1 +- 453eb97 schema def must have DESC '' - close paren must be preceded by space +- Trac Ticket #46 - (additional) setup-ds-admin.pl does not like ipv6 only hostnames +- Ticket #331 - transaction errors with db 4.3 and db 4.2 +- Ticket #261 - Add Solaris i386 +- Ticket #316 and Ticket #70 - add post add/mod and AD add callback hooks +- Ticket #324 - Sync with group attribute containing () fails +- Ticket #319 - ldap-agent crashes on start with signal SIGSEGV +- 77cacd9 coverity 12606 Logically dead code +- Trac Ticket #303 - make DNA range requests work with transactions +- Ticket #320 - allow most plugins to be betxn plugins +- Ticket #24 - Add nsTLS1 to the DS schema +- Ticket #271 - Slow shutdown when you have 100+ replication agreements +- Ticket #285 - compilation fixes for '--format-security' +- Ticket 211 - Avoid preop range requests non-DNA operations +- Ticket #271 - replication code cleanup +- Ticket 317 - RHDS fractional replication with excluded password policy attributes leads to wrong error messages. +- Ticket #308 - Automembership plugin fails if data and config area mixed in the plugin configuration +- Ticket #292 - logconv.pl reporting unindexed search with different search base than shown in access logs +- 6f8680a coverity 12563 Read from pointer after free (fix 2) +- e6a9b22 coverity 12563 Read from pointer after free +- 245d494 Config changes fail because of unknown attribute "internalModifiersname" +- Ticket #191 - Implement SO_KEEPALIVE in network calls +- Ticket #289 - allow betxn plugin config changes +- 93adf5f destroy the entry cache and dn cache in the dse post op delete callback +- e2532d8 init txn thread private data for all database modes +- Ticket #291 - cannot use & in a sasl map search filter +- 6bf6e79 Schema Reload crash fix +- 60b2d12 Fixing compiler warnings +- Trac Ticket #260 - 389 DS does not support multiple paging controls on a single connection +- Ticket #302 - use thread local storage for internalModifiersName & internalCreatorsName +- fdcc256 Minor bug fix introduced by commit 69c9f3bf7dd9fe2cadd5eae0ab72ce218b78820e +- Ticket #306 - void function cannot return value +- ticket 181 - Allow PAM passthru plug-in to have multiple config entries +- ticket 211 - Use of uninitialized variables in ldbm_back_modify() +- Ticket #74 - Add schema for DNA plugin (RFE) +- Ticket #301 - implement transaction support using thread local storage +- Ticket #211 - dnaNextValue gets incremented even if the user addition fails +- 144af59 coverity uninit var and resource leak +- Trac Ticket #34 - remove-ds.pl does not remove everything +- Trac Ticket #169 - allow 389 to use db5 +- bc78101 fix compiler warning in acct policy plugin +- Trac Ticket #84 - 389 Directory Server Unnecessary Checkpoints +- Trac Ticket #27 - SASL/PLAIN binds do not work +- Ticket #129 - Should only update modifyTimestamp/modifiersName on MODIFY ops +- Ticket #17 - new replication optimizations + +* Tue Mar 27 2012 Noriko Hosoi - 1.2.10.4-4 +- Ticket #46 - (revised) setup-ds-admin.pl does not like ipv6 only hostnames +- Ticket #66 - 389-ds-base spec file does not have a BuildRequires on gcc-c++ + +* Fri Mar 23 2012 Noriko Hosoi - 1.2.10.4-3 +- Ticket #46 - setup-ds-admin.pl does not like ipv6 only hostnames +
+* Wed Mar 21 2012 Rich Megginson - 1.2.10.4-2 +- get rid of posttrans - move update code to post + +* Tue Mar 13 2012 Rich Megginson - 1.2.10.4-1 +- Ticket #305 - Certain CMP operations hang or cause ns-slapd to crash + +* Mon Mar 5 2012 Rich Megginson - 1.2.10.3-1 +- b05139b memleak in normalize_mods2bvals +- c0eea24 memleak in mep_parse_config_entry +- 90bc9eb handle null smods +- Ticket #305 - Certain CMP operations hang or cause ns-slapd to crash +- Ticket #306 - void function cannot return value +- ticket 304 - Fix kernel version checking in dsktune + +* Thu Feb 23 2012 Rich Megginson - 1.2.10.2-1 +- Trac Ticket #298 - crash when replicating orphaned tombstone entry +- Ticket #281 - TLS not working with latest openldap +- Trac Ticket #290 - server hangs during shutdown if betxn pre/post op fails +- Trac Ticket #26 - Please support setting defaultNamingContext in the rootdse + +* Tue Feb 14 2012 Noriko Hosoi - 1.2.10.1-2 +- Ticket #124 - add Provides: ldif2ldbm to rpm + +* Tue Feb 14 2012 Rich Megginson - 1.2.10.1-1 +- Ticket #294 - 389 DS Segfaults during replica install in FreeIPA + +* Mon Feb 13 2012 Rich Megginson - 1.2.10.0-1 +- Ticket 284 - Remove unnecessary SNMP MIB files +- Ticket 51 - memory leaks in 389-ds-base-1.2.8.2-1.el5? +- Ticket 175 - logconv.pl improvements + +* Fri Feb 10 2012 Noriko Hosoi - 1.2.10-0.10.rc1.2 +- Introducing use_db4 macro to support db5 (libdb). + +* Fri Feb 10 2012 Petr Pisar - 1.2.10-0.10.rc1.1 +- Rebuild against PCRE 8.30 + +* Thu Feb 2 2012 Rich Megginson - 1.2.10-0.10.rc1 +- ad9dd30 coverity 12488 Resource leak In attr_index_config(): Leak of memory or pointers to system resources +- Ticket #281 - TLS not working with latest openldap +- Ticket #280 - extensible binary filters do not work +- Ticket #279 - filter normalization does not use matching rules +- Trac Ticket #275 - Invalid read reported by valgrind +- Ticket #277 - cannot set repl referrals or state +- Ticket #278 - Schema replication update failed: Invalid syntax +- Ticket #39 - Account Policy Plugin does not work for simple binds when PAM Pass Through Auth plugin is enabled +- Ticket #13 - slapd process exits when put the database on read only mode while updates are coming to the server +- Ticket #87 - Manpages fixes +- c493fb4 fix a couple of minor coverity issues +- Ticket #55 - Limit of 1024 characters for nsMatchingRule +- Trac Ticket #274 - Reindexing entryrdn fails if ancestors are also tombstoned +- Ticket #6 - protocol error from proxied auth operation +- Ticket #38 - nisDomain schema is incorrect +- Ticket #273 - ruv tombstone searches don't work after reindex entryrdn +- Ticket #29 - Samba3-schema is missing sambaTrustedDomainPassword +- Ticket #22 - RFE: Support sendmail LDAP routing schema +- Ticket #161 - Review and address latest Coverity issues +- Ticket #140 - incorrect memset parameters +- Trac Ticket 35 - Log not clear enough on schema errors +- Trac Ticket 139 - eliminate the use of char *dn in favor of Slapi_DN *dn +- Trac Ticket #52 - FQDN set to nsslapd-listenhost makes the server start fail if IPv4-mapped-IPv6 address is given + +* Tue Jan 24 2012 Rich Megginson - 1.2.10-0.9.a8 +- Ticket #272 - add tombstonenumsubordinates to schema + +* Mon Jan 23 2012 Rich Megginson - 1.2.10-0.8.a7 +- fixes for systemd - remove .pid files after shutting down servers +- Ticket #263 - add systemd include directive +- Ticket #264 - upgrade needs better check for "server is running" + +* Fri Jan 20 2012 Rich Megginson - 1.2.10-0.7.a7 +- Ticket #262 - pid file not removed with 
systemd +- Ticket #50 - server should not call a plugin after the plugin close function is called +- Ticket #18 - Data inconsistency during replication +- Ticket #49 - better handling for server shutdown while long running tasks are active +- Ticket #15 - Get rid of rwlock.h/rwlock.c and just use slapi_rwlock instead +- Ticket #257 - repl-monitor doesn't work if leftmost hostnames are the same +- Ticket #12 - 389 DS DNA Plugin / Replication failing on GSSAPI +- 6aaeb77 add a hack to disable sasl hostname canonicalization +- Ticket 168 - minssf should not apply to rootdse +- Ticket #177 - logconv.pl doesn't detect restarts +- Ticket #159 - Managed Entry Plugin runs against managed entries upon any update without validating +- Ticket 75 - Unconfigure plugin operations are being called. +- Ticket 26 - Please support setting defaultNamingContext in the rootdse. +- Ticket #71 - unable to delete managed entry config +- Ticket #167 - Mixing transaction and non-transaction plugins can cause deadlock +- Ticket #256 - debug build assertion in ACL_EvalDestroy() +- Ticket #4 - bak2db gets stuck in infinite loop +- Ticket #162 - Infinite loop / spin inside strcmpi_fast, acl_read_access_allowed_on_attr, server DoS +- Ticket #3: acl cache overflown problem +- Ticket 1 - pre-normalize filter and pre-compile substring regex - and other optimizations +- Ticket 2 - If node entries are tombstone'd, subordinate entries fail to get the full DN. + +* Thu Jan 12 2012 Fedora Release Engineering - 1.2.10-0.6.a6.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild + +* Thu Dec 15 2011 Rich Megginson - 1.2.10-0.6.a6 +- Bug 755725 - 389 programs linked against openldap crash during shutdown +- Bug 755754 - Unable to start dirsrv service using systemd +- Bug 745259 - Incorrect entryUSN index under high load in replicated environment +- d439e3a use slapi_hexchar2int and slapi_str_to_u8 everywhere +- 5910551 csn_init_as_string should not use sscanf +- b53ba00 reduce calls to csn_as_string and slapi_log_error +- c897267 fix member variable name error in slapi_uniqueIDFormat +- 66808e5 uniqueid formatting - use slapi_u8_to_hex instead of sprintf +- 580a875 csn_as_string - use slapi_uN_to_hex instead of sprintf +- Bug 751645 - crash when simple paged fails to send entry to client +- Bug 752155 - Use restorecon after creating init script lock file + +* Fri Nov 4 2011 Rich Megginson - 1.2.10-0.5.a5 +- Bug 751495 - 'setup-ds.pl -u' fails with undefined routine 'updateSystemD' +- Bug 750625 750624 750622 744946 Coverity issues +- Bug 748575 - part 2 - rhds81 modrdn operation and 100% cpu use in replication +- Bug 748575 - rhds81 modrdn operation and 100% cpu use in replication +- Bug 745259 - Incorrect entryUSN index under high load in replicated environment +- f639711 Reduce the number of DN normalization +- c06a8fa Keep unhashed password pseudo-attribute in the adding entry +- Bug 744945 - nsslapd-counters attribute value cannot be set to "off" +- 8d3b921 Use new PLUGIN_CONFIG_ENTRY feature to allow switching between txn and regular +- d316a67 Change referential integrity to be a betxnpostoperation plugin + +* Fri Oct 7 2011 Rich Megginson - 1.2.10-0.4.a4 +- Bug 741744 - part3 - MOD operations with chained delete/add get back error 53 +- 1d2f5a0 make memberof transaction aware and able to be a betxnpostoperation plug in +- b6d3ba7 pass the plugin config entry to the plugin init function +- 28f7bfb set the ENTRY_POST_OP for modrdn betxnpostoperation plugins +- Bug 743966 - Compiler warnings in account
usability plugin + +* Wed Oct 5 2011 Rich Megginson - 1.2.10.a3-0.3 +- 498c42b fix transaction support in ldbm_delete + +* Wed Oct 5 2011 Rich Megginson - 1.2.10.a2-0.2 +- Bug 740942 - allow resource limits to be set for paged searches independently of limits for other searches/operations +- Bug 741744 - MOD operations with chained delete/add get back error 53 on backend config +- Bug 742324 - allow nsslapd-idlistscanlimit to be set dynamically and per-user + +* Wed Sep 21 2011 Rich Megginson - 1.2.10.a1-0.1 +- Bug 695736 - Providing native systemd file + +* Wed Sep 7 2011 Rich Megginson - 1.2.9.10-2 +- corrected source + +* Wed Sep 7 2011 Rich Megginson - 1.2.9.10-1 +- Bug 735114 - renaming a managed entry does not update mepmanagedby + +* Thu Sep 1 2011 Rich Megginson - 1.2.9.9-1 +- Bug 735121 - simple paged search + ip/dns based ACI hangs server +- Bug 722292 - (cov#11030) Leak of mapped_sdn in winsync rename code +- Bug 703990 - cross-platform - Support upgrade from Red Hat Directory Server +- Introducing an environment variable USE_VALGRIND to clean up the entry cache and dn cache on exit. + +* Wed Aug 31 2011 Rich Megginson - 1.2.9.8-1 +- Bug 732153 - subtree and user account lockout policies implemented? +- Bug 722292 - Entries in DS are not updated properly when using WinSync API + +* Wed Aug 24 2011 Rich Megginson - 1.2.9.7-1 +- Bug 733103 - large targetattr list with syntax errors cause server to crash or hang +- Bug 633803 - passwordisglobalpolicy attribute breaks TLS chaining +- Bug 732541 - Ignore error 32 when adding automember config +- Bug 728592 - Allow ns-slapd to start with an invalid server cert + +* Wed Aug 10 2011 Rich Megginson - 1.2.9.6-1 +- Bug 728510 - Run dirsync after sending updates to AD +- Bug 729717 - Fatal error messages when syncing deletes from AD +- Bug 729369 - upgrade DB to upgrade from entrydn to entryrdn format is not working.
+- Bug 729378 - delete user subtree container in AD + modify password in DS == DS crash +- Bug 723937 - Slapi_Counter API broken on 32-bit F15 +- fixed again - separate tests for atomic ops and atomic bool cas + +* Mon Aug 8 2011 Rich Megginson - 1.2.9.5-1 +- Bug 727511 - ldclt SSL search requests are failing with "illegal error number -1" error +- Fix another coverity NULL deref in previous patch + +* Thu Aug 4 2011 Rich Megginson - 1.2.9.4-1 +- Bug 727511 - ldclt SSL search requests are failing with "illegal error number -1" error +- Fix coverity NULL deref in previous patch + +* Wed Aug 3 2011 Rich Megginson - 1.2.9.3-1 +- Bug 727511 - ldclt SSL search requests are failing with "illegal error number -1" error +- previous patch broke build on el5 + +* Wed Aug 3 2011 Rich Megginson - 1.2.9.2-1 +- Bug 727511 - ldclt SSL search requests are failing with "illegal error number -1" error + +* Tue Aug 2 2011 Rich Megginson - 1.2.9.1-2 +- Bug 723937 - Slapi_Counter API broken on 32-bit F15 +- fixed to use configure test for GCC provided 64-bit atomic functions + +* Wed Jul 27 2011 Rich Megginson - 1.2.9.1-1 +- Bug 663752 - Cert renewal for attrcrypt and encchangelog +- this was "re-fixed" due to a deadlock condition with cl2ldif task cancel +- Bug 725953 - Winsync: DS entries fail to sync to AD, if the User's CN entry contains a comma +- Bug 725743 - Make memberOf use PRMonitor for its operation lock +- Bug 725542 - Instance upgrade fails when upgrading 389-ds-base package +- Bug 723937 - Slapi_Counter API broken on 32-bit F15 + +* Thu Jul 21 2011 Petr Sabata - 1.2.9.0-1.2 +- Perl mass rebuild + +* Wed Jul 20 2011 Petr Sabata - 1.2.9.0-1.1 +- Perl mass rebuild + +* Fri Jul 15 2011 Rich Megginson - 1.2.9.0-1 +- Bug 720059 - RDN with % can cause crashes or missing entries +- Bug 709468 - RSA Authentication Server timeouts when using simple paged results on RHDS 8.2.
+- Bug 691313 - Need TLS/SSL error messages in repl status and errors log +- Bug 712855 - Directory Server 8.2 logs "Netscape Portable Runtime error -5961 (TCP connection reset by peer.)" to error log whereas Directory Server 8.1 did not +- Bug 713209 - Update sudo schema +- Bug 719069 - clean up compiler warnings in 389-ds-base 1.2.9 +- Bug 718303 - Intensive updates on masters could break the consumer's cache +- Bug 711679 - unresponsive LDAP service when deleting vlv on replica + +* Mon Jun 27 2011 Rich Megginson - 1.2.9-0.2.a2 +- 389-ds-base-1.2.9.a2 +- look for separate openldap ldif library +- Split automember regex rules into separate entries +- writing Inf file shows SchemaFile = ARRAY(0xhexnum) +- add support for ldif files with changetype: add +- Bug 716980 - winsync uses old AD entry if new one not found +- Bug 697694 - rhds82 - incr update state stop_fatal_error "requires administrator action", with extop_result: 9 +- bump console version to 1.2.6 +- Bug 711679 - unresponsive LDAP service when deleting vlv on replica +- Bug 703703 - setup-ds-admin.pl asks for legal agreement to a non-existent file +- Bug 706209 - LEGAL: RHEL6.1 License issue for 389-ds-base package +- Bug 663752 - Cert renewal for attrcrypt and encchangelog +- Bug 706179 - DS can not restart after create a new objectClass has entryusn attribute +- Bug 711906 - ns-slapd segfaults using suffix referrals +- Bug 707384 - only allow FIPS approved cipher suites in FIPS mode +- Bug 710377 - Import with chain-on-update crashes ns-slapd +- Bug 709826 - Memory leak: when extra referrals configured + +* Fri Jun 17 2011 Marcela Mašláňová - 1.2.9-0.1.a1.2 +- Perl mass rebuild + +* Fri Jun 10 2011 Marcela Mašláňová - 1.2.9-0.1.a1.1 +- Perl 5.14 mass rebuild + +* Thu May 26 2011 Rich Megginson - 1.2.9-0.1.a1 +- 389-ds-base-1.2.9.a1 +- Auto Membership +- More Coverity fixes + +* Mon May 2 2011 Rich Megginson - 1.2.8.3-1 +- 389-ds-base-1.2.8.3 +- Bug 700145 - userpasswd not replicating +- Bug 700557 - Linked attrs callbacks access free'd pointers after close +- Bug 694336 - Group sync hangs Windows initial Sync +- Bug 700215 - ldclt core dumps +- Bug 695779 - windows sync can lose old values when a new value is added +- Bug 697027 - 12 - minor memory leaks found by Valgrind + TET + +* Thu Apr 14 2011 Rich Megginson - 1.2.8.2-1 +- 389-ds-base-1.2.8.2 +- Bug 696407 - If an entry with a mixed case RDN is turned to be +- a tombstone, it fails to assemble DN from entryrdn + +* Fri Apr 8 2011 Rich Megginson - 1.2.8.1-1 +- 389-ds-base-1.2.8.1 +- Bug 693962 - Full replica push loses some entries with multi-valued RDNs + +* Tue Apr 5 2011 Rich Megginson - 1.2.8.0-1 +- 389-ds-base-1.2.8.0 +- Bug 693473 - rhds82 rfe - windows_tot_run to log Sizelimit exceeded instead of LDAP error - -1 +- Bug 692991 - rhds82 - windows_tot_run: failed to obtain data to send to the consumer; LDAP error - -1 +- Bug 693466 - Unable to change schema online +- Bug 693503 - matching rules do not inherit from superior attribute type +- Bug 693455 - nsMatchingRule does not work with multiple values +- Bug 693451 - cannot use localized matching rules +- Bug 692331 - Segfault on index update during full replication push on 1.2.7.5 + +* Mon Apr 4 2011 Rich Megginson - 1.2.8-0.10.rc5 +- 389-ds-base-1.2.8.rc5 +- Bug 692469 - Replica install fails after step for "enable GSSAPI for replication" + +* Tue Mar 29 2011 Rich Megginson - 1.2.8-0.9.rc4 +- 389-ds-base-1.2.8.rc4 +- Bug 668385 - DS pipe log script is executed as many times as the dirsrv serv +ice is restarted +-
389-ds-base-1.2.8.rc3 +- Bug 690955 - Mrclone fails due to the replica generation id mismatch + +* Tue Mar 22 2011 Rich Megginson - 1.2.8-0.8.rc2 +- 389-ds-base-1.2.8 release candidate 2 - git tag 389-ds-base-1.2.8.rc2 +- Bug 689537 - (cov#10610) Fix Coverity NULL pointer dereferences +- Bug 689866 - ns-newpwpolicy.pl needs to use the new DN format +- Bug 681015 - RFE: allow fine grained password policy duration attributes +- in days, hours, minutes, as well +- Bug 684996 - Exported tombstone cannot be imported correctly +- Bug 683250 - slapd crashing when traffic replayed +- Bug 668909 - Can't modify replication agreement in some cases +- Bug 504803 - Allow maxlogsize to be set if logmaxdiskspace is -1 +- Bug 644784 - Memory leak in "testbind.c" plugin +- Bug 680558 - Winsync plugin fails to restrain itself to the configured subtree + +* Mon Mar 7 2011 Caolán McNamara - 1.2.8-0.7.rc1 +- rebuild for icu 4.6 + +* Wed Mar 2 2011 Rich Megginson - 1.2.8-0.6.rc1 +- 389-ds-base-1.2.8 release candidate 1 - git tag 389-ds-base-1.2.8.rc1 +- Bug 518890 - setup-ds-admin.pl - improve hostname validation +- Bug 681015 - RFE: allow fine grained password policy duration attributes in +- days, hours, minutes, as well +- Bug 514190 - setup-ds-admin.pl --debug does not log to file +- Bug 680555 - ns-slapd segfaults if I have more than 100 DBs +- Bug 681345 - setup-ds.pl should set SuiteSpotGroup automatically +- Bug 674852 - crash in ldap-agent when using OpenLDAP +- Bug 679978 - modifying attr value crashes the server, which is supposed to +- be indexed as substring type, but has octetstring syntax +- Bug 676655 - winsync stops working after server restart +- Bug 677705 - ds-logpipe.py script is failing to validate "-s" and +- "--serverpid" options with "-t". +- Bug 625424 - repl-monitor.pl doesn't work in hub node + +* Mon Feb 28 2011 Rich Megginson - 1.2.8-0.5.a3 +- Bug 676598 - 389-ds-base multilib: file conflicts +- split off libs into a separate -libs package + +* Thu Feb 24 2011 Rich Megginson - 1.2.8-0.4.a3 +- do not create /var/run/dirsrv - setup will create it instead +- remove the fedora-ds initscript upgrade stuff - we do not support that anymore +- convert the remaining lua stuff to plain old shell script + +* Wed Feb 9 2011 Rich Megginson - 1.2.8-0.3.a3 +- 1.2.8.a3 release - git tag 389-ds-base-1.2.8.a3 +- Bug 675320 - empty modify operation with repl on or lastmod off will crash server +- Bug 675265 - preventryusn gets added to entries on a failed delete +- Bug 677774 - added support for tmpfiles.d +- Bug 666076 - dirsrv crash (1.2.7.5) with multiple simple paged result search +es +- Bug 672468 - Don't use empty path elements in LD_LIBRARY_PATH +- Bug 671199 - Don't allow other to write to rundir +- Bug 678646 - Ignore tombstone operations in managed entry plug-in +- Bug 676053 - export task followed by import task causes cache assertion +- Bug 677440 - clean up compiler warnings in 389-ds-base 1.2.8 +- Bug 675113 - ns-slapd core dump in windows_tot_run if oneway sync is used +- Bug 676689 - crash while adding a new user to be synced to windows +- Bug 604881 - admin server log files have incorrect permissions/ownerships +- Bug 668385 - DS pipe log script is executed as many times as the dirsrv serv +ice is restarted +- Bug 675853 - dirsrv crash segfault in need_new_pw() + +* Mon Feb 07 2011 Fedora Release Engineering - 1.2.8-0.2.a2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild + +* Thu Feb 3 2011 Rich Megginson - 1.2.8-0.2.a2 +- 1.2.8.a2 release - git tag 
389-ds-base-1.2.8.a2 +- Bug 674430 - Improve error messages for attribute uniqueness +- Bug 616213 - insufficient stack size for HP-UX on PA-RISC +- Bug 615052 - intrinsics and 64-bit atomics code fails to compile +- on PA-RISC +- Bug 151705 - Need to update Console Cipher Preferences with new ciphers +- Bug 668862 - init scripts return wrong error code +- Bug 670616 - Allow SSF to be set for local (ldapi) connections +- Bug 667935 - DS pipe log script's logregex.py plugin is not redirecting the +- log output to the text file +- Bug 668619 - slapd stops responding +- Bug 624547 - attrcrypt should query the given slot/token for +- supported ciphers +- Bug 646381 - Faulty password for nsmultiplexorcredentials does not give any +- error message in logs + +* Fri Jan 21 2011 Nathan Kinder - 1.2.8-0.1.a1 +- 1.2.8-0.1.a1 release - git tag 389-ds-base-1.2.8.a1 +- many bug fixes + +* Thu Dec 16 2010 Rich Megginson - 1.2.7.5-1 +- 1.2.7.5 release - git tag 389-ds-base-1.2.7.5 +- Bug 663597 - Memory leaks in normalization code + +* Tue Dec 14 2010 Rich Megginson - 1.2.7.4-2 +- Resolves: bug 656541 - use %ghost on files in /var/lock + +* Fri Dec 10 2010 Rich Megginson - 1.2.7.4-1 +- 1.2.7.4 release - git tag 389-ds-base-1.2.7.4 +- Bug 661792 - Valid managed entry config rejected + +* Wed Dec 8 2010 Rich Megginson - 1.2.7.3-1 +- 1.2.7.3 release - git tag 389-ds-base-1.2.7.3 +- Bug 658312 - Invalid free in Managed Entry plug-in +- Bug 641944 - Don't normalize non-DN RDN values + +* Fri Dec 3 2010 Rich Megginson - 1.2.7.2-1 +- 1.2.7.2 release - git tag 389-ds-base-1.2.7.2 +- Bug 659456 - Incorrect usage of ber_printf() in winsync code +- Bug 658309 - Process escaped characters in managed entry mappings +- Bug 197886 - Initialize return value for UUID generation code +- Bug 658312 - Allow mapped attribute types to be quoted +- Bug 197886 - Avoid overflow of UUID generator + +* Tue Nov 23 2010 Rich Megginson - 1.2.7.1-2 +- last commit had bogus commit log + +* Tue Nov 23 2010 Rich Megginson - 1.2.7.1-1 +- 1.2.7.1 release - git tag 389-ds-base-1.2.7.1 +- Bug 656515 - Allow Name and Optional UID syntax for grouping attributes +- Bug 656392 - Remove calls to ber_err_print() +- Bug 625950 - hash nsslapd-rootpw changes in audit log + +* Tue Nov 16 2010 Nathan Kinder - 1.2.7-2 +- 1.2.7 release - git tag 389-ds-base-1.2.7 + +* Fri Nov 12 2010 Nathan Kinder - 1.2.7-1 +- Bug 648949 - Merge dirsrv and dirsrv-admin policy modules into base policy + +* Tue Nov 9 2010 Rich Megginson - 1.2.7-0.6.a5 +- 1.2.7.a5 release - git tag 389-ds-base-1.2.7.a5 +- Bug 643979 - Strange byte sequence for attribute with no values (nsslapd-ref +erral) +- Bug 635009 - Add one-way AD sync capability +- Bug 572018 - Upgrading from 1.2.5 to 1.2.6.a2 deletes userRoot +- put replication config entries in separate file +- Bug 567282 - server can not abandon searchRequest of "simple paged results" +- Bug 329751 - "nested" filtered roles searches candidates more than needed +- Bug 521088 - DNA should check ACLs before getting a value from the range + +* Mon Nov 1 2010 Rich Megginson - 1.2.7-0.5.a4 +- 1.2.7.a4 release - git tag 389-ds-base-1.2.7.a4 +- Bug 647932 - multiple memberOf configuration adding memberOf where there is +no member +- Bug 491733 - dbtest crashes +- Bug 606545 - core schema should include numSubordinates +- Bug 638773 - permissions too loose on pid and lock files +- Bug 189985 - Improve attribute uniqueness error message +- Bug 619623 - attr-unique-plugin ignores requiredObjectClass on modrdn operat +ions +- Bug 619633 - Make 
attribute uniqueness obey requiredObjectClass + +* Wed Oct 27 2010 Rich Megginson - 1.2.7-0.4.a3 +- 1.2.7.a3 release - a2 was never released - this is a rebuild to pick up +- Bug 644608 - RHDS 8.1->8.2 upgrade fails to properly migrate ACIs +- Adding the ancestorid fix code to ##upgradednformat.pl. + +* Fri Oct 22 2010 Rich Megginson - 1.2.7-0.3.a3 +- 1.2.7.a3 release - a2 was never released +- Bug 644608 - RHDS 8.1->8.2 upgrade fails to properly migrate ACIs +- Bug 629681 - Retro Changelog trimming does not behave as expected +- Bug 645061 - Upgrade: 06inetorgperson.ldif and 05rfc4524.ldif +- are not upgraded in the server instance schema dir + +* Tue Oct 19 2010 Rich Megginson - 1.2.7-0.2.a2 +- 1.2.7.a2 release - a1 was the OpenLDAP testday release +- git tag 389-ds-base-1.2.7.a2 +- added openldap support on platforms that use openldap with moznss +- for crypto (F-14 and later) +- many bug fixes +- Account Policy Plugin (keep track of last login, disable old accounts) + +* Fri Oct 8 2010 Rich Megginson - 1.2.7-0.1.a1 +- added openldap support + +* Wed Sep 29 2010 Rich Megginson - 1.2.6.1-3 +- bump rel to rebuild again + +* Mon Sep 27 2010 Rich Megginson - 1.2.6.1-2 +- bump rel to rebuild + +* Thu Sep 23 2010 Rich Megginson - 1.2.6.1-1 +- This is the 1.2.6.1 release - git tag 389-ds-base-1.2.6.1 +- Bug 634561 - Server crashes when using Windows Sync Agreement +- Bug 635987 - Incorrect sub scope search result with ACL containing ldap:///self +- Bug 612264 - ACI issue with (targetattr='userPassword') +- Bug 606920 - anonymous resource limit- nstimelimit - also applied to "cn=directory manager" +- Bug 631862 - crash - delete entries not in cache + referint + +* Thu Aug 26 2010 Rich Megginson - 1.2.6-1 +- This is the final 1.2.6 release + +* Tue Aug 10 2010 Rich Megginson - 1.2.6-0.11.rc7 +- 1.2.6 release candidate 7 +- git tag 389-ds-base-1.2.6.rc7 +- Bug 621928 - Unable to enable replica (rdn problem?)
on 1.2.6 rc6 + +* Mon Aug 2 2010 Rich Megginson - 1.2.6-0.10.rc6 +- 1.2.6 release candidate 6 +- git tag 389-ds-base-1.2.6.rc6 +- Bug 617013 - repl-monitor.pl use cpu upto 90% +- Bug 616618 - 389 v1.2.5 accepts 2 identical entries with different DN formats +- Bug 547503 - replication broken again, with 389 MMR replication and TCP errors +- Bug 613833 - Allow dirsrv_t to bind to rpc ports +- Bug 612242 - membership change on DS does not show on AD +- Bug 617629 - Missing aliases in new schema files +- Bug 619595 - Upgrading sub suffix under non-normalized suffix disappears +- Bug 616608 - SIGBUS in RDN index reads on platforms with strict alignments +- Bug 617862 - Replication: Unable to delete tombstone errors +- Bug 594745 - Get rid of dirsrv_lib_t label + +* Wed Jul 14 2010 Rich Megginson - 1.2.6-0.9.rc3 +- make selinux-devel explicit Require the base package in order +- to comply with Fedora Licensing Guidelines + +* Thu Jul 1 2010 Rich Megginson - 1.2.6-0.8.rc3 +- 1.2.6 release candidate 3 +- git tag 389-ds-base-1.2.6.rc3 +- Bug 603942 - null deref in _ger_parse_control() for subjectdn +- 609256 - Selinux: pwdhash fails if called via Admin Server CGI +- 578296 - Attribute type entrydn needs to be added when subtree rename switch is on +- 605827 - In-place upgrade: upgrade dn format should not run in setup-ds-admin.pl +- Bug 604453 - SASL Stress and Server crash: Program quits with the assertion failure in PR_Poll +- Bug 604453 - SASL Stress and Server crash: Program quits with the assertion failure in PR_Poll +- 606920 - anonymous resource limit - nstimelimit - also applied to "cn=directory manager" + +* Wed Jun 16 2010 Rich Megginson - 1.2.6-0.7.rc2 +- 1.2.6 release candidate 2 + +* Mon Jun 14 2010 Nathan Kinder - 1.2.6-0.6.rc1 +- install replication session plugin header with devel package + +* Wed Jun 9 2010 Rich Megginson - 1.2.6-0.5.rc1 +- 1.2.6 release candidate 1 + +* Tue Jun 01 2010 Marcela Maslanova - 1.2.6-0.4.a4.1 +- Mass rebuild with perl-5.12.0 + +* Wed May 26 2010 Rich Megginson - 1.2.6-0.4.a4 +- 1.2.6.a4 release + +* Wed Apr 7 2010 Nathan Kinder - 1.2.6-0.4.a3 +- 1.2.6.a3 release +- add managed entries plug-in +- many bug fixes +- moved selinux subpackage into base package + +* Fri Apr 2 2010 Caolán McNamara - 1.2.6-0.3.a2 +- rebuild for icu 4.4 + +* Tue Mar 2 2010 Rich Megginson - 1.2.6-0.2.a2 +- 1.2.6.a2 release +- add support for matching rules +- many bug fixes + +* Thu Jan 14 2010 Nathan Kinder - 1.2.6-0.1.a1 +- 1.2.6.a1 release +- Added SELinux policy and subpackages + +* Tue Jan 12 2010 Rich Megginson - 1.2.5-1 +- 1.2.5 final release + +* Mon Jan 4 2010 Rich Megginson - 1.2.5-0.5.rc4 +- 1.2.5.rc4 release + +* Thu Dec 17 2009 Rich Megginson - 1.2.5-0.4.rc3 +- 1.2.5.rc3 release + +* Mon Dec 7 2009 Rich Megginson - 1.2.5-0.3.rc2 +- 1.2.5.rc2 release + +* Wed Dec 2 2009 Rich Megginson - 1.2.5-0.2.rc1 +- 1.2.5.rc1 release + +* Thu Nov 12 2009 Rich Megginson - 1.2.5-0.1.a1 +- 1.2.5.a1 release + +* Thu Oct 29 2009 Rich Megginson - 1.2.4-1 +- 1.2.4 release +- resolves bug 221905 - added support for Salted MD5 (SMD5) passwords - primarily for migration +- resolves bug 529258 - Make upgrade remove obsolete schema from 99user.ldif + +* Mon Sep 14 2009 Rich Megginson - 1.2.3-1 +- 1.2.3 release +- added template-initconfig to %files +- %posttrans now runs update to update the server instances +- servers are shutdown, then restarted if running before install +- scriptlets mostly use lua now to pass data among scriptlet phases + +* Tue Sep 01 2009 Caolán McNamara - 1.2.2-2 +- 
rebuild with new openssl to fix dependencies + +* Tue Aug 25 2009 Rich Megginson - 1.2.2-1 +- backed out - added template-initconfig to %files - this change is for the next major release +- bump version to 1.2.2 +- fix reopened 509472 db2index all does not reindex all the db backends correctly +- fix 518520 - pre hashed salted passwords do not work +- see https://bugzilla.redhat.com/show_bug.cgi?id=518519 for the list of +- bugs fixed in 1.2.2 + +* Fri Aug 21 2009 Tomas Mraz - 1.2.1-5 +- rebuilt with new openssl + +* Wed Aug 19 2009 Noriko Hosoi - 1.2.1-4 +- added template-initconfig to %files + +* Wed Aug 12 2009 Rich Megginson - 1.2.1-3 +- added BuildRequires pcre + +* Fri Jul 24 2009 Fedora Release Engineering - 1.2.1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild + +* Mon May 18 2009 Rich Megginson - 1.2.1-1 +- change name to 389 +- change version to 1.2.1 +- added initial support for numeric string syntax +- added initial support for syntax validation +- added initial support for paged results including sorting + +* Tue Apr 28 2009 Rich Megginson - 1.2.0-4 +- final release 1.2.0 +- Resolves: bug 475338 - LOG: the internal type of maxlogsize, maxdiskspace and minfreespace should be 64-bit integer +- Resolves: bug 496836 - SNMP ldap-agent on Solaris: Unable to open semaphore for server: 389 +- CVS tag: FedoraDirSvr_1_2_0 FedoraDirSvr_1_2_0_20090428 + +* Mon Apr 6 2009 Rich Megginson - 1.2.0-3 +- re-enable ppc builds + +* Thu Apr 2 2009 Rich Megginson - 1.2.0-2 +- exclude ppc builds - needs extensive porting work + +* Mon Mar 30 2009 Rich Megginson - 1.2.0-1 +- new release 1.2.0 +- Made devel package depend on mozldap-devel +- only create run dir if it does not exist +- CVS tag: FedoraDirSvr_1_2_0_RC1 FedoraDirSvr_1_2_0_RC1_20090330 + +* Thu Oct 30 2008 Noriko Hosoi - 1.1.3-7 +- added db4-utils to Requires for verify-db.pl + +* Mon Oct 13 2008 Noriko Hosoi - 1.1.3-6 +- Enabled LDAPI autobind + +* Thu Oct 9 2008 Rich Megginson - 1.1.3-5 +- updated update to patch bug463991-bdb47.patch + +* Thu Oct 9 2008 Rich Megginson - 1.1.3-4 +- updated patch bug463991-bdb47.patch + +* Mon Sep 29 2008 Rich Megginson - 1.1.3-3 +- added patch bug463991-bdb47.patch +- make ds work with bdb 4.7 + +* Wed Sep 24 2008 Rich Megginson - 1.1.3-2 +- rolled back bogus winsync memory leak fix + +* Tue Sep 23 2008 Rich Megginson - 1.1.3-1 +- winsync api improvements for modify operations + +* Fri Jun 13 2008 Rich Megginson - 1.1.2-1 +- This is the 1.1.2 release.
The bugs fixed can be found here +- https://bugzilla.redhat.com/showdependencytree.cgi?id=452721 +- Added winsync-plugin.h to the devel subpackage + +* Fri Jun 6 2008 Rich Megginson - 1.1.1-2 +- bump rev to rebuild and pick up new version of ICU + +* Fri May 23 2008 Rich Megginson - 1.1.1-1 +- 1.1.1 release candidate - several bug fixes + +* Wed Apr 16 2008 Rich Megginson - 1.1.0.1-4 +- fix bugzilla 439829 - patch to allow working with NSS 3.11.99 and later + +* Tue Mar 18 2008 Tom "spot" Callaway - 1.1.0.1-3 +- add patch to allow server to work with NSS 3.11.99 and later +- do NSS_Init after fork but before detaching from console + +* Tue Mar 18 2008 Tom "spot" Callaway - 1.1.0.1-3 +- add Requires for versioned perl (libperl.so) + +* Wed Feb 27 2008 Rich Megginson - 1.1.0.1-2 +- previous fix for 434403 used the wrong patch +- this is the right one + +* Wed Feb 27 2008 Rich Megginson - 1.1.0.1-1 +- Resolves bug 434403 - GCC 4.3 build fails +- Rolled new source tarball which includes Nathan's fix for the struct ucred +- NOTE: Change version back to 1.1.1 for next release +- this release was pulled from CVS tag FedoraDirSvr110_gcc43 + +* Tue Feb 19 2008 Fedora Release Engineering - 1.1.0-5 +- Autorebuild for GCC 4.3 + +* Thu Dec 20 2007 Rich Megginson - 1.1.0-4 +- This is the GA release of Fedora DS 1.1 +- Removed version numbers for BuildRequires and Requires +- Added full URL to source tarball + +* Fri Dec 07 2007 Release Engineering - 1.1.0-3 +- Rebuild for deps + +* Wed Nov 7 2007 Rich Megginson - 1.1.0-2.0 +- This is the beta2 release +- new file added to package - /etc/sysconfig/dirsrv - for setting +- daemon environment as is usual in other linux daemons + +* Thu Aug 16 2007 Rich Megginson - 1.1.0-1.2 +- fix build breakage due to open() +- mock could not find BuildRequires: db4-devel >= 4.2.52 +- mock works if >= version is removed - it correctly finds db4.6 + +* Fri Aug 10 2007 Rich Megginson - 1.1.0-1.1 +- Change pathnames to use the pkgname macro which is dirsrv +- get rid of cvsdate in source name + +* Fri Jul 20 2007 Rich Megginson - 1.1.0-0.3.20070720 +- Added Requires for perldap, cyrus sasl plugins +- Removed template-migrate* files +- Added perl module directory +- Removed install.inf - setup-ds.pl can now easily generate one + +* Mon Jun 18 2007 Nathan Kinder - 1.1.0-0.2.20070320 +- added requires for mozldap-tools + +* Tue Mar 20 2007 Rich Megginson - 1.1.0-0.1.20070320 +- update to latest sources +- added migrateTo11 to allow migrating instances from 1.0.x to 1.1 +- ldapi support +- fixed pam passthru plugin ENTRY method + +* Fri Feb 23 2007 Rich Megginson - 1.1.0-0.1.20070223 +- Renamed package to fedora-ds-base, but keep names of paths/files/services the same +- use the shortname macro (fedora-ds) for names of paths, files, and services instead +- of name, so that way we can continue to use e.g. 
/etc/fedora-ds instead of /etc/fedora-ds-base +- updated to latest sources + +* Tue Feb 13 2007 Rich Megginson - 1.1.0-0.1.20070213 +- More cleanup suggested by Dennis Gilmore +- This is the fedora extras candidate based on cvs tag FedoraDirSvr110a1 + +* Fri Feb 9 2007 Rich Megginson - 1.1.0-1.el4.20070209 +- latest sources +- added init scripts +- use /etc as instconfigdir + +* Wed Feb 7 2007 Rich Megginson - 1.1.0-1.el4.20070207 +- latest sources +- moved all executables to _bindir + +* Mon Jan 29 2007 Rich Megginson - 1.1.0-1.el4.20070129 +- latest sources +- added /var/tmp/fedora-ds to dirs + +* Fri Jan 26 2007 Rich Megginson - 1.1.0-8.el4.20070125 +- added logconv.pl +- added slapi-plugin.h to devel package +- added explicit dirs for /var/log/fedora-ds et. al. + +* Thu Jan 25 2007 Rich Megginson - 1.1.0-7.el4.20070125 +- just move all .so files into the base package from the devel package + +* Thu Jan 25 2007 Rich Megginson - 1.1.0-6.el4.20070125 +- Move the plugin *.so files into the main package instead of the devel +- package because they are loaded directly by name via dlopen + +* Fri Jan 19 2007 Rich Megginson - 1.1.0-5.el4.20070125 +- Move the script-templates directory to datadir/fedora-ds + +* Fri Jan 19 2007 Rich Megginson - 1.1.0-4.el4.20070119 +- change mozldap to mozldap6 + +* Fri Jan 19 2007 Rich Megginson - 1.1.0-3.el4.20070119 +- remove . from cvsdate define + +* Fri Jan 19 2007 Rich Megginson - 1.1.0-2.el4.20070119 +- Having a problem building in Brew - may be Release format + +* Fri Jan 19 2007 Rich Megginson - 1.1.0-1.el4.cvs20070119 +- Changed version to 1.1.0 and added Release 1.el4.cvs20070119 +- merged in changes from Fedora Extras candidate spec file + +* Mon Jan 15 2007 Rich Megginson - 1.1-0.1.cvs20070115 +- Bump component versions (nspr, nss, svrcore, mozldap) to their latest +- remove unneeded patches + +* Tue Jan 09 2007 Dennis Gilmore - 1.1-0.1.cvs20070108 +- update to a cvs snapshot +- fedorafy the spec +- create -devel subpackage +- apply a patch to use mozldap not mozldap6 +- apply a patch to allow --prefix to work correctly + +* Mon Dec 4 2006 Rich Megginson - 1.0.99-16 +- Fixed the problem where the server would crash upon shutdown in dblayer +- due to a race condition among the database housekeeping threads +- Fix a problem with normalized absolute paths for db directories + +* Tue Nov 28 2006 Rich Megginson - 1.0.99-15 +- Touch all of the ldap/admin/src/scripts/*.in files so that they +- will be newer than their corresponding script template files, so +- that make will rebuild them. 
+ +* Mon Nov 27 2006 Rich Megginson - 1.0.99-14 +- Chown new schema files when copying during instance creation + +* Tue Nov 21 2006 Rich Megginson - 1.0.99-13 +- Configure will get ldapsdk_bindir from pkg-config, or $libdir/mozldap6 + +* Tue Nov 21 2006 Rich Megginson - 1.0.99-12 +- use eval to sed ./configure into ../configure + +* Tue Nov 21 2006 Rich Megginson - 1.0.99-11 +- jump through hoops to be able to run ../configure + +* Tue Nov 21 2006 Rich Megginson - 1.0.99-10 +- Need to make built dir in setup section + +* Tue Nov 21 2006 Rich Megginson - 1.0.99-9 +- The template scripts needed to use @libdir@ instead of hardcoding +- /usr/lib +- Use make DESTDIR=$RPM_BUILD_ROOT install instead of % makeinstall +- do the actual build in a "built" subdirectory, until we remove +- the old script templates + +* Thu Nov 16 2006 Rich Megginson - 1.0.99-8 +- Make replication plugin link with libdb + +* Wed Nov 15 2006 Rich Megginson - 1.0.99-7 +- Have make define LIBDIR, BINDIR, etc. for C code to use +- especially for create_instance.h + +* Tue Nov 14 2006 Rich Megginson - 1.0.99-6 +- Forgot to checkin new config.h.in for AC_CONFIG_HEADERS + +* Tue Nov 14 2006 Rich Megginson - 1.0.99-5 +- Add perldap as a Requires; update sources + +* Thu Nov 9 2006 Rich Megginson - 1.0.99-4 +- Fix ds_newinst.pl +- Remove obsolete #defines + +* Thu Nov 9 2006 Rich Megginson - 1.0.99-3 +- Update sources; rebuild to populate brew yum repo with dirsec-nss + +* Tue Nov 7 2006 Rich Megginson - 1.0.99-2 +- Update sources + +* Thu Nov 2 2006 Rich Megginson - 1.0.99-1 +- initial revision