diff --git a/SOURCES/0078-Ticket-49649.patch b/SOURCES/0078-Ticket-49649.patch
new file mode 100644
index 0000000..887eefd
--- /dev/null
+++ b/SOURCES/0078-Ticket-49649.patch
@@ -0,0 +1,40 @@
+From e4d51884e3ca36b8256c33936dc31e77e0ad4736 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Tue, 8 May 2018 12:35:43 -0400
+Subject: [PATCH] Ticket 49649
+
+Description:  Fix crpyt.h include
+
+https://pagure.io/389-ds-base/issue/49649
+
+Reviewed by: mreynolds(one line commit rule)
+
+(cherry picked from commit 2817f0c49401056835a79aafd8f8d4edc9113d1d)
+---
+ ldap/servers/plugins/pwdstorage/crypt_pwd.c | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
+index 0dccd1b51..19894bd80 100644
+--- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c
++++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
+@@ -20,15 +20,7 @@
+ #include <string.h>
+ #include <sys/types.h>
+ #include <sys/socket.h>
+-#if defined(hpux) || defined(LINUX) || defined(__FreeBSD__)
+-#ifndef __USE_XOPEN
+-#define __USE_XOPEN /* linux */
+-#endif              /* __USE_XOPEN */
+-#include <unistd.h>
+-#else /* hpux */
+-#include <crypt.h>
+-#endif /* hpux */
+-
++#include <crypt.h>  /* for crypt_r */
+ #include "pwdstorage.h"
+ 
+ static PRLock *cryptlock = NULL; /* Some implementations of crypt are not thread safe.  ie. ours & Irix */
+-- 
+2.13.6
+
diff --git a/SOURCES/0079-Ticket-49665-Upgrade-script-doesn-t-enable-PBKDF2-pa.patch b/SOURCES/0079-Ticket-49665-Upgrade-script-doesn-t-enable-PBKDF2-pa.patch
new file mode 100644
index 0000000..a34d3b1
--- /dev/null
+++ b/SOURCES/0079-Ticket-49665-Upgrade-script-doesn-t-enable-PBKDF2-pa.patch
@@ -0,0 +1,53 @@
+From a13a83465c685d6ec8d47b6f10646986ded16a68 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Wed, 9 May 2018 16:36:48 -0400
+Subject: [PATCH] Ticket 49665 - Upgrade script doesn't enable PBKDF2 password
+ storage plug-in
+
+Description:  There is no upgrade script to add the PBKDF2 plugin, this
+              fix adds the script.
+
+https://pagure.io/389-ds-base/issue/49665
+
+Reviewed by: ?
+
+(cherry picked from commit dc690dd231a626b3b6a2019fee51e3cb15db7962)
+---
+ Makefile.am                                          |  1 +
+ ldap/admin/src/scripts/50pbkdf2pwdstorageplugin.ldif | 12 ++++++++++++
+ 2 files changed, 13 insertions(+)
+ create mode 100644 ldap/admin/src/scripts/50pbkdf2pwdstorageplugin.ldif
+
+diff --git a/Makefile.am b/Makefile.am
+index 8834a7819..055d480aa 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -949,6 +949,7 @@ update_DATA = ldap/admin/src/scripts/exampleupdate.pl \
+ 	ldap/admin/src/scripts/50refintprecedence.ldif \
+ 	ldap/admin/src/scripts/50retroclprecedence.ldif \
+ 	ldap/admin/src/scripts/50rootdnaccesscontrolplugin.ldif \
++	ldap/admin/src/scripts/50pbkdf2pwdstorageplugin.ldif \
+ 	ldap/admin/src/scripts/50contentsync.ldif \
+ 	ldap/admin/src/scripts/60upgradeschemafiles.pl \
+ 	ldap/admin/src/scripts/60upgradeconfigfiles.pl \
+diff --git a/ldap/admin/src/scripts/50pbkdf2pwdstorageplugin.ldif b/ldap/admin/src/scripts/50pbkdf2pwdstorageplugin.ldif
+new file mode 100644
+index 000000000..462d5534a
+--- /dev/null
++++ b/ldap/admin/src/scripts/50pbkdf2pwdstorageplugin.ldif
+@@ -0,0 +1,12 @@
++dn: cn=PBKDF2_SHA256,cn=Password Storage Schemes,cn=plugins,cn=config
++objectclass: top
++objectclass: nsSlapdPlugin
++cn: PBKDF2_SHA256
++nsslapd-pluginpath: libpwdstorage-plugin
++nsslapd-plugininitfunc: pbkdf2_sha256_pwd_storage_scheme_init
++nsslapd-plugintype: pwdstoragescheme
++nsslapd-pluginenabled: on
++nsslapd-pluginDescription: DESC
++nsslapd-pluginVersion: PACKAGE_VERSION
++nsslapd-pluginVendor: VENDOR
++nsslapd-pluginid: ID
+-- 
+2.13.6
+
diff --git a/SOURCES/0080-Ticket-49665-Upgrade-script-doesn-t-enable-CRYPT-pas.patch b/SOURCES/0080-Ticket-49665-Upgrade-script-doesn-t-enable-CRYPT-pas.patch
new file mode 100644
index 0000000..0bf3735
--- /dev/null
+++ b/SOURCES/0080-Ticket-49665-Upgrade-script-doesn-t-enable-CRYPT-pas.patch
@@ -0,0 +1,79 @@
+From 1c077cff1ce49f5380192325a6947c623019c365 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Wed, 9 May 2018 16:39:23 -0400
+Subject: [PATCH] Ticket 49665 - Upgrade script doesn't enable CRYPT password
+ storage plug-in
+
+Description:  There is no upgrade script to add the new CRYPT plugins, this
+              fix adds the script.
+
+https://pagure.io/389-ds-base/issue/49665
+
+Reviewed by: vashirov(Thanks!)
+
+(cherry picked from commit 91dc832411a1bb6e8bf62bb72c36777ddc63770f)
+---
+ Makefile.am                                        |  1 +
+ .../admin/src/scripts/50cryptpwdstorageplugin.ldif | 38 ++++++++++++++++++++++
+ 2 files changed, 39 insertions(+)
+ create mode 100644 ldap/admin/src/scripts/50cryptpwdstorageplugin.ldif
+
+diff --git a/Makefile.am b/Makefile.am
+index 055d480aa..4f62a899b 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -950,6 +950,7 @@ update_DATA = ldap/admin/src/scripts/exampleupdate.pl \
+ 	ldap/admin/src/scripts/50retroclprecedence.ldif \
+ 	ldap/admin/src/scripts/50rootdnaccesscontrolplugin.ldif \
+ 	ldap/admin/src/scripts/50pbkdf2pwdstorageplugin.ldif \
++	ldap/admin/src/scripts/50cryptpwdstorageplugin.ldif \
+ 	ldap/admin/src/scripts/50contentsync.ldif \
+ 	ldap/admin/src/scripts/60upgradeschemafiles.pl \
+ 	ldap/admin/src/scripts/60upgradeconfigfiles.pl \
+diff --git a/ldap/admin/src/scripts/50cryptpwdstorageplugin.ldif b/ldap/admin/src/scripts/50cryptpwdstorageplugin.ldif
+new file mode 100644
+index 000000000..0a4a50776
+--- /dev/null
++++ b/ldap/admin/src/scripts/50cryptpwdstorageplugin.ldif
+@@ -0,0 +1,38 @@
++dn: cn=CRYPT-MD5,cn=Password Storage Schemes,cn=plugins,cn=config
++objectClass: top
++objectClass: nsSlapdPlugin
++cn: CRYPT-MD5
++nsslapd-pluginPath: libpwdstorage-plugin
++nsslapd-pluginInitfunc: crypt_md5_pwd_storage_scheme_init
++nsslapd-pluginType: pwdstoragescheme
++nsslapd-pluginEnabled: on
++nsslapd-pluginId: ID
++nsslapd-pluginVersion: PACKAGE_VERSION
++nsslapd-pluginVendor: VENDOR
++nsslapd-pluginDescription: DESC
++
++dn: cn=CRYPT-SHA256,cn=Password Storage Schemes,cn=plugins,cn=config
++objectClass: top
++objectClass: nsSlapdPlugin
++cn: CRYPT-SHA256
++nsslapd-pluginPath: libpwdstorage-plugin
++nsslapd-pluginInitfunc: crypt_sha256_pwd_storage_scheme_init
++nsslapd-pluginType: pwdstoragescheme
++nsslapd-pluginEnabled: on
++nsslapd-pluginId: ID
++nsslapd-pluginVersion: PACKAGE_VERSION
++nsslapd-pluginVendor: VENDOR
++nsslapd-pluginDescription: DESC
++
++dn: cn=CRYPT-SHA512,cn=Password Storage Schemes,cn=plugins,cn=config
++objectClass: top
++objectClass: nsSlapdPlugin
++cn: CRYPT-SHA512
++nsslapd-pluginPath: libpwdstorage-plugin
++nsslapd-pluginInitfunc: crypt_sha512_pwd_storage_scheme_init
++nsslapd-pluginType: pwdstoragescheme
++nsslapd-pluginEnabled: on
++nsslapd-pluginId: ID
++nsslapd-pluginVersion: PACKAGE_VERSION
++nsslapd-pluginVendor: VENDOR
++nsslapd-pluginDescription: DESC
+-- 
+2.13.6
+
diff --git a/SOURCES/0081-Ticket-49671-Readonly-replicas-should-not-write-inte.patch b/SOURCES/0081-Ticket-49671-Readonly-replicas-should-not-write-inte.patch
new file mode 100644
index 0000000..534b4c5
--- /dev/null
+++ b/SOURCES/0081-Ticket-49671-Readonly-replicas-should-not-write-inte.patch
@@ -0,0 +1,205 @@
+From 279489884f56cfc97d1ad9afdf92da3ad3b05b70 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 11 May 2018 10:53:06 -0400
+Subject: [PATCH] Ticket 49671 - Readonly replicas should not write internal
+ ops to changelog
+
+Bug Description:  When a hub receives an update that triggers the memberOf
+                  plugin, but that interal operation has no csn and that
+                  causes the update to the changelog to fail and break
+                  replication.
+
+Fix Description:  Do not write internal updates with no csns to the changelog
+                  on read-only replicas.
+
+https://pagure.io/389-ds-base/issue/49671
+
+Reviewed by: simon, tbordaz, and lkrispen (Thanks!!!)
+
+(cherry picked from commit afb755bd95f1643665ea34c5a5fa2bb26bfa21b9)
+---
+ .../tests/suites/replication/cascading_test.py     | 150 +++++++++++++++++++++
+ ldap/servers/plugins/replication/repl5_plugins.c   |  10 ++
+ 2 files changed, 160 insertions(+)
+ create mode 100644 dirsrvtests/tests/suites/replication/cascading_test.py
+
+diff --git a/dirsrvtests/tests/suites/replication/cascading_test.py b/dirsrvtests/tests/suites/replication/cascading_test.py
+new file mode 100644
+index 000000000..7331f20e9
+--- /dev/null
++++ b/dirsrvtests/tests/suites/replication/cascading_test.py
+@@ -0,0 +1,150 @@
++# --- BEGIN COPYRIGHT BLOCK ---
++# Copyright (C) 2018 Red Hat, Inc.
++# All rights reserved.
++#
++# License: GPL (version 3 or any later version).
++# See LICENSE for details.
++# --- END COPYRIGHT BLOCK ---
++#
++import logging
++import pytest
++import os
++import ldap
++from lib389._constants import *
++from lib389.replica import ReplicationManager
++from lib389.plugins import MemberOfPlugin
++from lib389.agreement import Agreements
++from lib389.idm.user import UserAccount, TEST_USER_PROPERTIES
++from lib389.idm.group import Groups
++from lib389.topologies import topology_m1h1c1 as topo
++
++DEBUGGING = os.getenv("DEBUGGING", default=False)
++if DEBUGGING:
++    logging.getLogger(__name__).setLevel(logging.DEBUG)
++else:
++    logging.getLogger(__name__).setLevel(logging.INFO)
++log = logging.getLogger(__name__)
++
++BIND_DN = 'uid=tuser1,ou=People,dc=example,dc=com'
++BIND_RDN = 'tuser1'
++
++
++def config_memberof(server):
++    """Configure memberOf plugin and configure fractional
++    to prevent total init to send memberof
++    """
++
++    memberof = MemberOfPlugin(server)
++    memberof.enable()
++    memberof.set_autoaddoc('nsMemberOf')
++    server.restart()
++    agmts = Agreements(server)
++    for agmt in agmts.list():
++        log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % agmt.dn)
++        agmt.replace_many(('nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE '),
++                          ('nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE memberOf'))
++
++
++def test_basic_with_hub(topo):
++    """Check that basic operations work in cascading replication, this includes
++    testing plugins that perform internal operatons, and replicated password
++    policy state attributes.
++
++    :id: 4ac85552-45bc-477b-89a4-226dfff8c6cc
++    :setup: 1 master, 1 hub, 1 consumer
++    :steps:
++        1. Enable memberOf plugin and set password account lockout settings
++        2. Restart the instance
++        3. Add a user
++        4. Add a group
++        5. Test that the replication works
++        6. Add the user as a member to the group
++        7. Test that the replication works
++        8. Issue bad binds to update passwordRetryCount
++        9. Test that replicaton works
++        10. Check that passwordRetyCount was replicated
++    :expectedresults:
++        1. Should be a success
++        2. Should be a success
++        3. Should be a success
++        4. Should be a success
++        5. Should be a success
++        6. Should be a success
++        7. Should be a success
++        8. Should be a success
++        9. Should be a success
++        10. Should be a success
++    """
++
++    repl_manager = ReplicationManager(DEFAULT_SUFFIX)
++    master = topo.ms["master1"]
++    consumer = topo.cs["consumer1"]
++    hub = topo.hs["hub1"]
++
++    for inst in topo:
++        config_memberof(inst)
++        inst.config.set('passwordlockout', 'on')
++        inst.config.set('passwordlockoutduration', '60')
++        inst.config.set('passwordmaxfailure', '3')
++        inst.config.set('passwordIsGlobalPolicy', 'on')
++
++    # Create user
++    user1 = UserAccount(master, BIND_DN)
++    user_props = TEST_USER_PROPERTIES.copy()
++    user_props.update({'sn': BIND_RDN,
++                       'cn': BIND_RDN,
++                       'uid': BIND_RDN,
++                       'inetUserStatus': '1',
++                       'objectclass': 'extensibleObject',
++                       'userpassword': PASSWORD})
++    user1.create(properties=user_props, basedn=SUFFIX)
++
++    # Create group
++    groups = Groups(master, DEFAULT_SUFFIX)
++    group = groups.create(properties={'cn': 'group'})
++
++    # Test replication
++    repl_manager.test_replication(master, consumer)
++
++    # Trigger memberOf plugin by adding user to group
++    group.replace('member', user1.dn)
++
++    # Test replication once more
++    repl_manager.test_replication(master, consumer)
++
++    # Issue bad password to update passwordRetryCount
++    try:
++        master.simple_bind_s(user1.dn, "badpassword")
++    except:
++        pass
++
++    # Test replication one last time
++    master.simple_bind_s(DN_DM, PASSWORD)
++    repl_manager.test_replication(master, consumer)
++
++    # Finally check if passwordRetyCount was replicated to the hub and consumer
++    user1 = UserAccount(hub, BIND_DN)
++    count = user1.get_attr_val_int('passwordRetryCount')
++    if count is None:
++        log.fatal('PasswordRetyCount was not replicated to hub')
++        assert False
++    if int(count) != 1:
++        log.fatal('PasswordRetyCount has unexpected value: {}'.format(count))
++        assert False
++
++    user1 = UserAccount(consumer, BIND_DN)
++    count = user1.get_attr_val_int('passwordRetryCount')
++    if count is None:
++        log.fatal('PasswordRetyCount was not replicated to consumer')
++        assert False
++    if int(count) != 1:
++        log.fatal('PasswordRetyCount has unexpected value: {}'.format(count))
++        assert False
++
++
++if __name__ == '__main__':
++    # Run isolated
++    # -s for DEBUG mode
++    CURRENT_FILE = os.path.realpath(__file__)
++    pytest.main(["-s", CURRENT_FILE])
++
+diff --git a/ldap/servers/plugins/replication/repl5_plugins.c b/ldap/servers/plugins/replication/repl5_plugins.c
+index 0aee8829a..324e38263 100644
+--- a/ldap/servers/plugins/replication/repl5_plugins.c
++++ b/ldap/servers/plugins/replication/repl5_plugins.c
+@@ -1059,6 +1059,16 @@ write_changelog_and_ruv(Slapi_PBlock *pb)
+             goto common_return;
+         }
+ 
++        /* Skip internal operations with no op csn if this is a read-only replica */
++        if (op_params->csn == NULL &&
++            operation_is_flag_set(op, OP_FLAG_INTERNAL) &&
++            replica_get_type(r) == REPLICA_TYPE_READONLY)
++        {
++            slapi_log_err(SLAPI_LOG_REPL, "write_changelog_and_ruv",
++                          "Skipping internal operation on read-only replica\n");
++            goto common_return;
++        }
++
+         /* we might have stripped all the mods - in that case we do not
+            log the operation */
+         if (op_params->operation_type != SLAPI_OPERATION_MODIFY ||
+-- 
+2.13.6
+
diff --git a/SOURCES/0082-Ticket-49696-replicated-operations-should-be-seriali.patch b/SOURCES/0082-Ticket-49696-replicated-operations-should-be-seriali.patch
new file mode 100644
index 0000000..b84eaef
--- /dev/null
+++ b/SOURCES/0082-Ticket-49696-replicated-operations-should-be-seriali.patch
@@ -0,0 +1,48 @@
+From f0b41ec12f957612c69ae5be3bbbb6e2d6db2530 Mon Sep 17 00:00:00 2001
+From: Ludwig Krispenz <lkrispen@redhat.com>
+Date: Thu, 17 May 2018 10:31:58 +0200
+Subject: [PATCH]     Ticket 49696: replicated operations should be serialized
+
+    Bug: there was a scenario where two threads could process replication operations in parallel.
+         The reason was that for a new repl start request the repl conn flag is not set and the
+         connection is made readable.
+         When the start repl op is finished, the flagi set, but in a small window the supplier could
+         already have sent updates and more_data would trigger this thread also to continue to process
+         repl operations.
+
+    Fix: In the situation where a thread successfully processed a start repl request and just set the repl_conn
+         flag  do not use more_data.
+
+    Reviewed by: Thierry, thanks
+---
+ ldap/servers/slapd/connection.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
+index 5ca32a333..b5030f0cb 100644
+--- a/ldap/servers/slapd/connection.c
++++ b/ldap/servers/slapd/connection.c
+@@ -1822,9 +1822,17 @@ connection_threadmain()
+ 
+             /* If we're in turbo mode, we keep our reference to the connection alive */
+             /* can't use the more_data var because connection could have changed in another thread */
+-            more_data = conn_buffered_data_avail_nolock(conn, &conn_closed) ? 1 : 0;
+-            slapi_log_err(SLAPI_LOG_CONNS, "connection_threadmain", "conn %" PRIu64 " check more_data %d thread_turbo_flag %d\n",
+-                          conn->c_connid, more_data, thread_turbo_flag);
++            slapi_log_err(SLAPI_LOG_CONNS, "connection_threadmain", "conn %" PRIu64 " check more_data %d thread_turbo_flag %d"
++                          "repl_conn_bef %d, repl_conn_now %d\n",
++                          conn->c_connid, more_data, thread_turbo_flag,
++                          replication_connection, conn->c_isreplication_session);
++            if (!replication_connection &&  conn->c_isreplication_session) {
++                /* it a connection that was just flagged as replication connection */
++                more_data = 0;
++            } else {
++                /* normal connection or already established replication connection */
++                more_data = conn_buffered_data_avail_nolock(conn, &conn_closed) ? 1 : 0;
++            }
+             if (!more_data) {
+                 if (!thread_turbo_flag) {
+                     /*
+-- 
+2.13.6
+
diff --git a/SOURCES/0083-Ticket-48184-clean-up-and-delete-connections-at-shut.patch b/SOURCES/0083-Ticket-48184-clean-up-and-delete-connections-at-shut.patch
new file mode 100644
index 0000000..29170f4
--- /dev/null
+++ b/SOURCES/0083-Ticket-48184-clean-up-and-delete-connections-at-shut.patch
@@ -0,0 +1,335 @@
+From 5a5d3dffd0b36edb543fd31fa53d7128dd5161c2 Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Fri, 18 May 2018 10:13:46 +0200
+Subject: [PATCH] Ticket 48184 - clean up and delete connections at shutdown
+ (2nd try)
+
+Bug description:
+    During shutdown we would not close connections.
+    In the past this may have just been an annoyance, but now with the way
+    nunc-stans works, io events can still trigger on open xeisting connectinos
+    during shutdown.
+
+    Because of NS dynamic it can happen that several jobs wants to work on the
+    same connection. In such case (a job is already set in c_job) we delay the
+    new job that will retry.
+    In addition:
+	- some call needed c_mutex
+	- test uninitialized nunc-stans in case of shutdown while startup is not completed
+
+Fix Description:  Close connections during shutdown rather than
+    leaving them alive.
+
+https://pagure.io/389-ds-base/issue/48184
+
+Reviewed by:
+	Original was Ludwig and Viktor
+	Second fix reviewed by Mark
+
+Platforms tested: F26
+
+Flag Day: no
+
+Doc impact: no
+
+(cherry picked from commit e562157ca3e97867d902996cc18fb04f90dc10a8)
+---
+ ldap/servers/slapd/connection.c |   2 +
+ ldap/servers/slapd/conntable.c  |  13 ++++
+ ldap/servers/slapd/daemon.c     | 131 ++++++++++++++++++++++++++++------------
+ ldap/servers/slapd/fe.h         |   1 +
+ ldap/servers/slapd/slap.h       |   1 +
+ 5 files changed, 108 insertions(+), 40 deletions(-)
+
+diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
+index b5030f0cb..76e83112b 100644
+--- a/ldap/servers/slapd/connection.c
++++ b/ldap/servers/slapd/connection.c
+@@ -1716,7 +1716,9 @@ connection_threadmain()
+         if ((tag != LDAP_REQ_UNBIND) && !thread_turbo_flag && !replication_connection) {
+             if (!more_data) {
+                 conn->c_flags &= ~CONN_FLAG_MAX_THREADS;
++                PR_EnterMonitor(conn->c_mutex);
+                 connection_make_readable_nolock(conn);
++                PR_ExitMonitor(conn->c_mutex);
+                 /* once the connection is readable, another thread may access conn,
+                  * so need locking from here on */
+                 signal_listner();
+diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
+index 7c57b47cd..f2f763dfa 100644
+--- a/ldap/servers/slapd/conntable.c
++++ b/ldap/servers/slapd/conntable.c
+@@ -91,6 +91,19 @@ connection_table_abandon_all_operations(Connection_Table *ct)
+     }
+ }
+ 
++void
++connection_table_disconnect_all(Connection_Table *ct)
++{
++    for (size_t i = 0; i < ct->size; i++) {
++        if (ct->c[i].c_mutex) {
++            Connection *c = &(ct->c[i]);
++            PR_EnterMonitor(c->c_mutex);
++            disconnect_server_nomutex(c, c->c_connid, -1, SLAPD_DISCONNECT_ABORT, ECANCELED);
++            PR_ExitMonitor(c->c_mutex);
++        }
++    }
++}
++
+ /* Given a file descriptor for a socket, this function will return
+  * a slot in the connection table to use.
+  *
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index fcc461a90..50e67474e 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -1087,12 +1087,18 @@ slapd_daemon(daemon_ports_t *ports, ns_thrpool_t *tp)
+         /* we have exited from ns_thrpool_wait. This means we are shutting down! */
+         /* Please see https://firstyear.fedorapeople.org/nunc-stans/md_docs_job-safety.html */
+         /* tldr is shutdown needs to run first to allow job_done on an ARMED job */
+-        for (size_t i = 0; i < listeners; i++) {
+-            PRStatus shutdown_status = ns_job_done(listener_idxs[i].ns_job);
+-            if (shutdown_status != PR_SUCCESS) {
+-                slapi_log_err(SLAPI_LOG_CRIT, "ns_set_shutdown", "Failed to shutdown listener idx %" PRIu64 " !\n", i);
++        for (uint64_t i = 0; i < listeners; i++) {
++            PRStatus shutdown_status;
++
++            if (listener_idxs[i].ns_job) {
++                shutdown_status = ns_job_done(listener_idxs[i].ns_job);
++                if (shutdown_status != PR_SUCCESS) {
++                    slapi_log_err(SLAPI_LOG_CRIT, "ns_set_shutdown", "Failed to shutdown listener idx %" PRIu64 " !\n", i);
++                }
++                PR_ASSERT(shutdown_status == PR_SUCCESS);
++            } else {
++                slapi_log_err(SLAPI_LOG_CRIT, "slapd_daemon", "Listeners uninitialized. Possibly the server was shutdown while starting\n");
+             }
+-            PR_ASSERT(shutdown_status == PR_SUCCESS);
+             listener_idxs[i].ns_job = NULL;
+         }
+     } else {
+@@ -1176,6 +1182,32 @@ slapd_daemon(daemon_ports_t *ports, ns_thrpool_t *tp)
+     housekeeping_stop(); /* Run this after op_thread_cleanup() logged sth */
+     disk_monitoring_stop();
+ 
++    /*
++     * Now that they are abandonded, we need to mark them as done.
++     * In NS while it's safe to allow excess jobs to be cleaned by
++     * by the walk and ns_job_done of remaining queued events, the
++     * issue is that if we allow something to live past this point
++     * the CT is freed from underneath, and bad things happen (tm).
++     *
++     * NOTE: We do this after we stop psearch, because there could
++     * be a race between flagging the psearch done, and users still
++     * try to send on the connection. Similar with op_threads.
++     */
++    connection_table_disconnect_all(the_connection_table);
++
++    /*
++     * WARNING: Normally we should close the tp in main
++     * but because of issues in the current connection design
++     * we need to close it here to guarantee events won't fire!
++     *
++     * All the connection close jobs "should" complete before
++     * shutdown at least.
++     */
++    if (enable_nunc_stans) {
++        ns_thrpool_shutdown(tp);
++        ns_thrpool_wait(tp);
++    }
++
+     threads = g_get_active_threadcnt();
+     if (threads > 0) {
+         slapi_log_err(SLAPI_LOG_INFO, "slapd_daemon",
+@@ -1628,25 +1660,18 @@ ns_handle_closure(struct ns_job_t *job)
+     Connection *c = (Connection *)ns_job_get_data(job);
+     int do_yield = 0;
+ 
+-/* this function must be called from the event loop thread */
+-#ifdef DEBUG
+-    PR_ASSERT(0 == NS_JOB_IS_THREAD(ns_job_get_type(job)));
+-#else
+-    /* This doesn't actually confirm it's in the event loop thread, but it's a start */
+-    if (NS_JOB_IS_THREAD(ns_job_get_type(job)) != 0) {
+-        slapi_log_err(SLAPI_LOG_ERR, "ns_handle_closure", "Attempt to close outside of event loop thread %" PRIu64 " for fd=%d\n",
+-                      c->c_connid, c->c_sd);
+-        return;
+-    }
+-#endif
+-
+     PR_EnterMonitor(c->c_mutex);
++    /* Assert we really have the right job state. */
++    PR_ASSERT(job == c->c_job);
+ 
+     connection_release_nolock_ext(c, 1); /* release ref acquired for event framework */
+     PR_ASSERT(c->c_ns_close_jobs == 1);  /* should be exactly 1 active close job - this one */
+     c->c_ns_close_jobs--;                /* this job is processing closure */
++    /* Because handle closure will add a new job, we need to detach our current one. */
++    c->c_job = NULL;
+     do_yield = ns_handle_closure_nomutex(c);
+     PR_ExitMonitor(c->c_mutex);
++    /* Remove this task now. */
+     ns_job_done(job);
+     if (do_yield) {
+         /* closure not done - another reference still outstanding */
+@@ -1659,14 +1684,25 @@ ns_handle_closure(struct ns_job_t *job)
+ /**
+  * Schedule more I/O for this connection, or make sure that it
+  * is closed in the event loop.
++ * caller must hold c_mutex
++ * It returns
++ *  0 on success
++ *  1 on need to retry
+  */
+-void
+-ns_connection_post_io_or_closing(Connection *conn)
++static int
++ns_connection_post_io_or_closing_try(Connection *conn)
+ {
+     struct timeval tv;
+ 
+     if (!enable_nunc_stans) {
+-        return;
++        return 0;
++    }
++
++    /*
++     * Cancel any existing ns jobs we have registered.
++     */
++    if (conn->c_job != NULL) {
++        return 1;
+     }
+ 
+     if (CONN_NEEDS_CLOSING(conn)) {
+@@ -1676,15 +1712,12 @@ ns_connection_post_io_or_closing(Connection *conn)
+             slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_post_io_or_closing", "Already a close "
+                                                                                "job in progress on conn %" PRIu64 " for fd=%d\n",
+                           conn->c_connid, conn->c_sd);
+-            return;
++            return 0;
+         } else {
+-            /* just make sure we schedule the event to be closed in a timely manner */
+-            tv.tv_sec = 0;
+-            tv.tv_usec = slapd_wakeup_timer * 1000;
+             conn->c_ns_close_jobs++;                                                      /* now 1 active closure job */
+             connection_acquire_nolock_ext(conn, 1 /* allow acquire even when closing */); /* event framework now has a reference */
+-            ns_result_t job_result = ns_add_timeout_job(conn->c_tp, &tv, NS_JOB_TIMER,
+-                                                        ns_handle_closure, conn, NULL);
++            /* Close the job asynchronously. Why? */
++            ns_result_t job_result = ns_add_job(conn->c_tp, NS_JOB_TIMER, ns_handle_closure, conn, &(conn->c_job));
+             if (job_result != NS_SUCCESS) {
+                 if (job_result == NS_SHUTDOWN) {
+                     slapi_log_err(SLAPI_LOG_INFO, "ns_connection_post_io_or_closing", "post closure job "
+@@ -1723,12 +1756,12 @@ ns_connection_post_io_or_closing(Connection *conn)
+              * The error occurs when we get a connection in a closing state.
+              * For now we return, but there is probably a better way to handle the error case.
+              */
+-            return;
++            return 0;
+         }
+ #endif
+         ns_result_t job_result = ns_add_io_timeout_job(conn->c_tp, conn->c_prfd, &tv,
+                                                        NS_JOB_READ | NS_JOB_PRESERVE_FD,
+-                                                       ns_handle_pr_read_ready, conn, NULL);
++                                                       ns_handle_pr_read_ready, conn, &(conn->c_job));
+         if (job_result != NS_SUCCESS) {
+             if (job_result == NS_SHUTDOWN) {
+                 slapi_log_err(SLAPI_LOG_INFO, "ns_connection_post_io_or_closing", "post I/O job for "
+@@ -1745,6 +1778,28 @@ ns_connection_post_io_or_closing(Connection *conn)
+                           conn->c_connid, conn->c_sd);
+         }
+     }
++    return 0;
++}
++void
++ns_connection_post_io_or_closing(Connection *conn)
++{
++    while (ns_connection_post_io_or_closing_try(conn)) {
++	/* we should retry later */
++	
++	/* We are not suppose to work immediately on the connection that is taken by
++	 * another job
++	 * release the lock and give some time
++	 */
++	
++	if (CONN_NEEDS_CLOSING(conn) && conn->c_ns_close_jobs) {
++	    return;
++	} else {
++	    PR_ExitMonitor(conn->c_mutex);
++	    DS_Sleep(PR_MillisecondsToInterval(100));
++
++	    PR_EnterMonitor(conn->c_mutex);
++	}
++    }
+ }
+ 
+ /* This function must be called without the thread flag, in the
+@@ -1757,19 +1812,12 @@ ns_handle_pr_read_ready(struct ns_job_t *job)
+     int maxthreads = config_get_maxthreadsperconn();
+     Connection *c = (Connection *)ns_job_get_data(job);
+ 
+-/* this function must be called from the event loop thread */
+-#ifdef DEBUG
+-    PR_ASSERT(0 == NS_JOB_IS_THREAD(ns_job_get_type(job)));
+-#else
+-    /* This doesn't actually confirm it's in the event loop thread, but it's a start */
+-    if (NS_JOB_IS_THREAD(ns_job_get_type(job)) != 0) {
+-        slapi_log_err(SLAPI_LOG_ERR, "ns_handle_pr_read_ready", "Attempt to handle read ready outside of event loop thread %" PRIu64 " for fd=%d\n",
+-                      c->c_connid, c->c_sd);
+-        return;
+-    }
+-#endif
+-
+     PR_EnterMonitor(c->c_mutex);
++    /* Assert we really have the right job state. */
++    PR_ASSERT(job == c->c_job);
++
++    /* On all code paths we remove the job, so set it null now */
++    c->c_job = NULL;
+ 
+     slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "activity on conn %" PRIu64 " for fd=%d\n",
+                   c->c_connid, c->c_sd);
+@@ -1829,6 +1877,7 @@ ns_handle_pr_read_ready(struct ns_job_t *job)
+         slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "queued conn %" PRIu64 " for fd=%d\n",
+                       c->c_connid, c->c_sd);
+     }
++    /* Since we call done on the job, we need to remove it here. */
+     PR_ExitMonitor(c->c_mutex);
+     ns_job_done(job);
+     return;
+@@ -2451,7 +2500,9 @@ ns_handle_new_connection(struct ns_job_t *job)
+      * that poll() was avoided, even at the expense of putting this new fd back
+      * in nunc-stans to poll for read ready.
+      */
++    PR_EnterMonitor(c->c_mutex);
+     ns_connection_post_io_or_closing(c);
++    PR_ExitMonitor(c->c_mutex);
+     return;
+ }
+ 
+diff --git a/ldap/servers/slapd/fe.h b/ldap/servers/slapd/fe.h
+index 4d25a9fb8..f47bb6145 100644
+--- a/ldap/servers/slapd/fe.h
++++ b/ldap/servers/slapd/fe.h
+@@ -100,6 +100,7 @@ extern Connection_Table *the_connection_table; /* JCM - Exported from globals.c
+ Connection_Table *connection_table_new(int table_size);
+ void connection_table_free(Connection_Table *ct);
+ void connection_table_abandon_all_operations(Connection_Table *ct);
++void connection_table_disconnect_all(Connection_Table *ct);
+ Connection *connection_table_get_connection(Connection_Table *ct, int sd);
+ int connection_table_move_connection_out_of_active_list(Connection_Table *ct, Connection *c);
+ void connection_table_move_connection_on_to_active_list(Connection_Table *ct, Connection *c);
+diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
+index 03355f5fe..de4ac35c0 100644
+--- a/ldap/servers/slapd/slap.h
++++ b/ldap/servers/slapd/slap.h
+@@ -1650,6 +1650,7 @@ typedef struct conn
+     void *c_io_layer_cb_data;            /* callback data */
+     struct connection_table *c_ct;       /* connection table that this connection belongs to */
+     ns_thrpool_t *c_tp;                  /* thread pool for this connection */
++    struct ns_job_t *c_job;              /* If it exists, the current ns_job_t */
+     int c_ns_close_jobs;                 /* number of current close jobs */
+     char *c_ipaddr;                      /* ip address str - used by monitor */
+ } Connection;
+-- 
+2.13.6
+
diff --git a/SOURCES/0084-Ticket-49576-Update-ds-replcheck-for-new-conflict-en.patch b/SOURCES/0084-Ticket-49576-Update-ds-replcheck-for-new-conflict-en.patch
new file mode 100644
index 0000000..8d21a62
--- /dev/null
+++ b/SOURCES/0084-Ticket-49576-Update-ds-replcheck-for-new-conflict-en.patch
@@ -0,0 +1,938 @@
+From 19945c4807f6b3269fb65100ddaea5f596f68e72 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 18 May 2018 07:29:11 -0400
+Subject: [PATCH 1/6] Ticket 49576 - Update ds-replcheck for new conflict
+ entries
+
+Description:  This patch addresses the recvent changes to conflict
+              entries and tombstones.
+
+https://pagure.io/389-ds-base/issue/49576
+
+Reviewed by: tbordaz(Thanks!)
+
+(cherry picked from commit 53e58cdbfb2a2672ac21cd9b6d59f8b345478324)
+---
+ ldap/admin/src/scripts/ds-replcheck | 456 +++++++++++++++++++---------
+ 1 file changed, 312 insertions(+), 144 deletions(-)
+
+diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
+index 45c4670a3..b801ccaa8 100755
+--- a/ldap/admin/src/scripts/ds-replcheck
++++ b/ldap/admin/src/scripts/ds-replcheck
+@@ -1,7 +1,7 @@
+ #!/usr/bin/python
+ 
+ # --- BEGIN COPYRIGHT BLOCK ---
+-# Copyright (C) 2017 Red Hat, Inc.
++# Copyright (C) 2018 Red Hat, Inc.
+ # All rights reserved.
+ #
+ # License: GPL (version 3 or any later version).
+@@ -9,6 +9,7 @@
+ # --- END COPYRIGHT BLOCK ---
+ #
+ 
++import os
+ import re
+ import time
+ import ldap
+@@ -20,7 +21,7 @@ from ldap.ldapobject import SimpleLDAPObject
+ from ldap.cidict import cidict
+ from ldap.controls import SimplePagedResultsControl
+ 
+-VERSION = "1.2"
++VERSION = "1.3"
+ RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
+ LDAP = 'ldap'
+ LDAPS = 'ldaps'
+@@ -36,6 +37,7 @@ class Entry(object):
+     ''' This is a stripped down version of Entry from python-lib389.
+     Once python-lib389 is released on RHEL this class will go away.
+     '''
++
+     def __init__(self, entrydata):
+         if entrydata:
+             self.dn = entrydata[0]
+@@ -51,7 +53,7 @@ class Entry(object):
+ 
+ 
+ def get_entry(entries, dn):
+-    ''' Loop over enties looking for a matching dn
++    ''' Loop over a list of enties looking for a matching dn
+     '''
+     for entry in entries:
+         if entry.dn == dn:
+@@ -60,7 +62,7 @@ def get_entry(entries, dn):
+ 
+ 
+ def remove_entry(rentries, dn):
+-    ''' Remove an entry from the array of entries
++    ''' Remove an entry from the list of entries
+     '''
+     for entry in rentries:
+         if entry.dn == dn:
+@@ -69,7 +71,7 @@ def remove_entry(rentries, dn):
+ 
+ 
+ def extract_time(stateinfo):
+-    ''' Take the nscpEntryWSI attribute and get the most recent timestamp from
++    ''' Take the nscpEntryWSI(state info) attribute and get the most recent timestamp from
+     one of the csns (vucsn, vdcsn, mdcsn, adcsn)
+ 
+     Return the timestamp in decimal
+@@ -87,7 +89,7 @@ def extract_time(stateinfo):
+ 
+ 
+ def convert_timestamp(timestamp):
+-    ''' Convert createtimestamp to ctime: 20170405184656Z -> Wed Apr  5 19:46:56 2017
++    ''' Convert createtimestamp to ctime: 20170405184656Z ----> Wed Apr  5 19:46:56 2017
+     '''
+     time_tuple = (int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]),
+                   int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14]),
+@@ -97,27 +99,43 @@ def convert_timestamp(timestamp):
+ 
+ 
+ def convert_entries(entries):
+-    '''Convert and normalize the ldap entries.  Take note of conflicts and tombstones
+-    '''
++    '''For online report.  Convert and normalize the ldap entries.  Take note of
++    conflicts and tombstones '''
+     new_entries = []
+     conflict_entries = []
++    glue_entries = []
+     result = {}
+     tombstones = 0
++
+     for entry in entries:
+         new_entry = Entry(entry)
+         new_entry.data = {k.lower(): v for k, v in list(new_entry.data.items())}
+-        if 'nsds5replconflict' in new_entry.data:
++        if new_entry.dn.endswith("cn=mapping tree,cn=config"):
++            '''Skip replica entry (ldapsearch brings this in because the filter
++            we use triggers an internal operation to return the config entry - so
++            it must be skipped
++            '''
++            continue
++        if ('nsds5replconflict' in new_entry.data and 'nsTombstone' not in new_entry.data['objectclass'] and
++            'nstombstone' not in new_entry.data['objectclass']):
++            # This is a conflict entry that is NOT a tombstone entry (should this be reconsidered?)
+             conflict_entries.append(new_entry)
++            if 'glue' in new_entry.data['objectclass']:
++                # A glue entry here is not necessarily a glue entry there.  Keep track of
++                # them for when we check missing entries
++                glue_entries.append(new_entry)
+         else:
+             new_entries.append(new_entry)
+ 
+         if 'nstombstonecsn' in new_entry.data:
++            # Maintain tombstone count
+             tombstones += 1
+     del entries
+ 
+     result['entries'] = new_entries
+     result['conflicts'] = conflict_entries
+     result['tombstones'] = tombstones
++    result['glue'] = glue_entries
+ 
+     return result
+ 
+@@ -174,20 +192,60 @@ def get_ruv_report(opts):
+     return report
+ 
+ 
++def remove_attr_state_info(attr):
++    state_attr = None
++    idx = attr.find(';')
++    if idx > 0:
++        state_attr = attr  # preserve state info for diff report
++        if ";deleted" in attr:
++            # Ignore this attribute it was deleted
++            return None, state_attr
++        attr = attr[:idx]
++
++    return attr.lower(), state_attr
++
++def add_attr_entry(entry, val, attr, state_attr):
++    ''' Offline mode (ldif comparision) Add the attr to the entry, and if there
++    is state info add nscpentrywsi attr - we need consistency with online mode
++    to make code simpler '''
++    if attr is not None:
++        if attr in entry:
++            entry[attr].append(val)
++        else:
++            entry[attr] = [val]
++
++    # Handle state info for diff report
++    if state_attr is not None:
++        state_attr = state_attr + ": " + val
++        if 'nscpentrywsi' in entry:
++            entry['nscpentrywsi'].append(state_attr)
++        else:
++            entry['nscpentrywsi'] = [state_attr]
++    val = ""
++
++
+ #
+ # Offline mode helper functions
+ #
+-def ldif_search(LDIF, dn, conflicts=False):
+-    ''' Search ldif by DN
++def ldif_search(LDIF, dn):
++    ''' Offline mode -  Search ldif for a single DN.  We need to factor in that
++    DN's and attribute values can wrap lines and are identified by a leading
++    white space.  So we can't fully process an attribute until we get to the
++    next attribute.
+     '''
+     result = {}
+     data = {}
+     found_conflict = False
++    found_subentry = False
+     found_part_dn = False
++    found_part_val = False
++    found_attr = False
++    found_tombstone = False
++    found_glue = False
+     found = False
+-    reset_line = False
+     count = 0
+-
++    ignore_list = ['conflictcsn', 'modifytimestamp', 'modifiersname']
++    val = ""
+     result['entry'] = None
+     result['conflict'] = None
+     result['tombstone'] = False
+@@ -195,54 +253,132 @@ def ldif_search(LDIF, dn, conflicts=False):
+     for line in LDIF:
+         count += 1
+         line = line.rstrip()
+-        if reset_line:
+-            reset_line = False
+-            line = prev_line
++
+         if found:
++            # We found our entry, now build up the entry (account from line wrap)
+             if line == "":
+-                # End of entry
++                # End of entry - update entry's last attribute value and break out
++                add_attr_entry(data, val, attr, state_attr)
++                val = ""
++                # Done!
+                 break
+ 
+             if line[0] == ' ':
+-                # continuation line
+-                prev = data[attr][len(data[attr]) - 1]
+-                data[attr][len(data[attr]) - 1] = prev + line.strip()
++                # continuation line (wrapped value)
++                val += line[1:]
++                found_part_val = True
+                 continue
++            elif found_part_val:
++                # We have the complete value now (it was wrapped)
++                found_part_val = False
++                found_attr = False
++                add_attr_entry(data, val, attr, state_attr)
++
++                # Now that the value is added to the entry lets process the new attribute...
++                value_set = line.split(":", 1)
++                attr, state_attr = remove_attr_state_info(value_set[0])
++
++                if attr in ignore_list or (attr is None and state_attr is None):
++                    # Skip it
++                    found_attr = False
++                    attr = None
++                    continue
+ 
+-            value_set = line.split(":", 1)
+-            attr = value_set[0].lower()
+-            if attr.startswith('nsds5replconflict'):
+-                found_conflict = True
+-            if attr.startswith('nstombstonecsn'):
+-                result['tombstone'] = True
+-
+-            if attr in data:
+-                data[attr].append(value_set[1].strip())
++                val = value_set[1].strip()
++                found_attr = True
++
++                if attr is not None:
++                    # Set the entry type flags
++                    if attr.startswith('nsds5replconflict'):
++                        found_conflict = True
++                    if attr.startswith("objectclass") and val == "ldapsubentry":
++                        found_subentry = True
++                    if attr.startswith('nstombstonecsn'):
++                        result['tombstone'] = True
++                        found_tombstone = True
++                continue
+             else:
+-                data[attr] = [value_set[1].strip()]
++                # New attribute...
++                if found_attr:
++                    # But first we have to add the previous complete attr value to the entry data
++                    add_attr_entry(data, val, attr, state_attr)
++
++                # Process new attribute
++                value_set = line.split(":", 1)
++                attr, state_attr = remove_attr_state_info(value_set[0])
++                if attr is None or attr in ignore_list:
++                    # Skip it (its deleted)
++                    found_attr = False
++                    attr = None
++                    continue
++
++                val = value_set[1].strip()
++                found_attr = True
++
++                # Set the entry type flags
++                if attr.startswith('nsds5replconflict'):
++                    found_conflict = True
++                if attr.startswith("objectclass") and (val == "ldapsubentry" or val == "glue"):
++                    if val == "glue":
++                        found_glue = True
++                    found_subentry = True
++                if attr.startswith('nstombstonecsn'):
++                    result['tombstone'] = True
++                    found_tombstone = True
++                continue
++
+         elif found_part_dn:
+             if line[0] == ' ':
++                # DN is still wrapping, keep building up the dn value
+                 part_dn += line[1:].lower()
+             else:
+-                # We have the full dn
++                # We now have the full dn
+                 found_part_dn = False
+-                reset_line = True
+-                prev_line = line
+                 if part_dn == dn:
++                    # We found our entry
+                     found = True
++
++                    # But now we have a new attribute to process
++                    value_set = line.split(":", 1)
++                    attr, state_attr = remove_attr_state_info(value_set[0])
++                    if attr is None or attr in ignore_list:
++                        # Skip it (its deleted)
++                        found_attr = False
++                        attr = None
++                        continue
++
++                    val = value_set[1].strip()
++                    found_attr = True
++
++                    if attr.startswith('nsds5replconflict'):
++                        found_conflict = True
++                    if attr.startswith("objectclass") and val == "ldapsubentry":
++                        found_subentry = True
++
++                    if attr.startswith('nstombstonecsn'):
++                        result['tombstone'] = True
++                        found_tombstone = True
+                     continue
++
+         if line.startswith('dn: '):
+             if line[4:].lower() == dn:
++                # We got our full DN, now process the entry
+                 found = True
+                 continue
+             else:
++                # DN wraps the line, keep looping until we get the whole value
+                 part_dn = line[4:].lower()
+                 found_part_dn = True
+ 
++    # Keep track of entry index - we use this later when searching the LDIF again
+     result['idx'] = count
+-    if found_conflict:
++
++    result['glue'] = None
++    if found_conflict and found_subentry and found_tombstone is False:
+         result['entry'] = None
+         result['conflict'] = Entry([dn, data])
++        if found_glue:
++            result['glue'] = result['conflict']
+     elif found:
+         result['conflict'] = None
+         result['entry'] = Entry([dn, data])
+@@ -251,7 +387,7 @@ def ldif_search(LDIF, dn, conflicts=False):
+ 
+ 
+ def get_dns(LDIF, opts):
+-    ''' Get all the DN's
++    ''' Get all the DN's from an LDIF file
+     '''
+     dns = []
+     found = False
+@@ -275,7 +411,7 @@ def get_dns(LDIF, opts):
+ 
+ 
+ def get_ldif_ruv(LDIF, opts):
+-    ''' Search the ldif and get the ruv entry
++    ''' Search the LDIF and get the ruv entry
+     '''
+     LDIF.seek(0)
+     result = ldif_search(LDIF, opts['ruv_dn'])
+@@ -283,7 +419,7 @@ def get_ldif_ruv(LDIF, opts):
+ 
+ 
+ def cmp_entry(mentry, rentry, opts):
+-    ''' Compare the two entries, and return a diff map
++    ''' Compare the two entries, and return a "diff map"
+     '''
+     diff = {}
+     diff['dn'] = mentry['dn']
+@@ -307,6 +443,7 @@ def cmp_entry(mentry, rentry, opts):
+                 diff['missing'].append(" - Replica missing attribute: \"%s\"" % (mattr))
+                 diff_count += 1
+                 if 'nscpentrywsi' in mentry.data:
++                    # Great we have state info so we can provide details about the missing attribute
+                     found = False
+                     for val in mentry.data['nscpentrywsi']:
+                         if val.lower().startswith(mattr + ';'):
+@@ -316,6 +453,7 @@ def cmp_entry(mentry, rentry, opts):
+                             diff['missing'].append(" - Master's State Info: %s" % (val))
+                             diff['missing'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
+                 else:
++                    # No state info, just move on
+                     diff['missing'].append("")
+ 
+         elif mentry.data[mattr] != rentry.data[mattr]:
+@@ -335,6 +473,9 @@ def cmp_entry(mentry, rentry, opts):
+                     if not found:
+                         diff['diff'].append("      Master: ")
+                         for val in mentry.data[mattr]:
++                            # This is an "origin" value which means it's never been
++                            # updated since replication was set up.  So its the
++                            # original value
+                             diff['diff'].append("        - Origin value: %s" % (val))
+                         diff['diff'].append("")
+ 
+@@ -350,10 +491,13 @@ def cmp_entry(mentry, rentry, opts):
+                     if not found:
+                         diff['diff'].append("      Replica: ")
+                         for val in rentry.data[mattr]:
++                            # This is an "origin" value which means it's never been
++                            # updated since replication was set up.  So its the
++                            # original value
+                             diff['diff'].append("        - Origin value: %s" % (val))
+                         diff['diff'].append("")
+                 else:
+-                    # no state info
++                    # no state info, report what we got
+                     diff['diff'].append("      Master: ")
+                     for val in mentry.data[mattr]:
+                         diff['diff'].append("        - %s: %s" % (mattr, val))
+@@ -436,40 +580,62 @@ def do_offline_report(opts, output_file=None):
+     MLDIF.seek(idx)
+     RLDIF.seek(idx)
+ 
+-    # Compare the master entries with the replica's
++    """ Compare the master entries with the replica's.  Take our list of dn's from
++    the master ldif and get that entry( dn) from the master and replica ldif.  In
++    this phase we keep keep track of conflict/tombstone counts, and we check for
++    missing entries and entry differences.   We only need to do the entry diff
++    checking in this phase - we do not need to do it when process the replica dn's
++    because if the entry exists in both LDIF's then we already checked or diffs
++    while processing the master dn's.
++    """
+     print ("Comparing Master to Replica...")
+     missing = False
+     for dn in master_dns:
+-        mresult = ldif_search(MLDIF, dn, True)
+-        rresult = ldif_search(RLDIF, dn, True)
++        mresult = ldif_search(MLDIF, dn)
++        rresult = ldif_search(RLDIF, dn)
++
++        if dn in replica_dns:
++            if (rresult['entry'] is not None or rresult['glue'] is not None or
++                rresult['conflict'] is not None or rresult['tombstone']):
++                """ We can safely remove this DN from the replica dn list as it
++                does not need to be checked again.  This also speeds things up
++                when doing the replica vs master phase.
++                """
++                replica_dns.remove(dn)
+ 
+         if mresult['tombstone']:
+             mtombstones += 1
++            # continue
++        if rresult['tombstone']:
++            rtombstones += 1
+ 
+         if mresult['conflict'] is not None or rresult['conflict'] is not None:
++            # If either entry is a conflict we still process it here
+             if mresult['conflict'] is not None:
+                 mconflicts.append(mresult['conflict'])
++            if rresult['conflict'] is not None:
++                rconflicts.append(rresult['conflict'])
+         elif rresult['entry'] is None:
+-            # missing entry - restart the search from beginning
++            # missing entry - restart the search from beginning in case it got skipped
+             RLDIF.seek(0)
+             rresult = ldif_search(RLDIF, dn)
+-            if rresult['entry'] is None:
+-                # missing entry in rentries
+-                RLDIF.seek(mresult['idx'])  # Set the cursor to the last good line
++            if rresult['entry'] is None and rresult['glue'] is None:
++                # missing entry in Replica(rentries)
++                RLDIF.seek(mresult['idx'])  # Set the LDIF cursor/index to the last good line
+                 if not missing:
+-                    missing_report += ('Replica is missing entries:\n')
++                    missing_report += ('  Entries missing on Replica:\n')
+                     missing = True
+                 if mresult['entry'] and 'createtimestamp' in mresult['entry'].data:
+-                    missing_report += ('  - %s  (Master\'s creation date:  %s)\n' %
++                    missing_report += ('   - %s  (Created on Master at: %s)\n' %
+                                        (dn, convert_timestamp(mresult['entry'].data['createtimestamp'][0])))
+                 else:
+                     missing_report += ('  - %s\n' % dn)
+-            else:
++            elif mresult['tombstone'] is False:
+                 # Compare the entries
+                 diff = cmp_entry(mresult['entry'], rresult['entry'], opts)
+                 if diff:
+                     diff_report.append(format_diff(diff))
+-        else:
++        elif mresult['tombstone'] is False:
+             # Compare the entries
+             diff = cmp_entry(mresult['entry'], rresult['entry'], opts)
+             if diff:
+@@ -478,7 +644,10 @@ def do_offline_report(opts, output_file=None):
+     if missing:
+         missing_report += ('\n')
+ 
+-    # Search Replica, and look for missing entries only.  Count entries as well
++    """ Search Replica, and look for missing entries only.  We already did the
++    diff checking, so its only missing entries we are worried about. Count the
++    remaining conflict & tombstone entries as well.
++    """
+     print ("Comparing Replica to Master...")
+     MLDIF.seek(0)
+     RLDIF.seek(0)
+@@ -486,26 +655,26 @@ def do_offline_report(opts, output_file=None):
+     for dn in replica_dns:
+         rresult = ldif_search(RLDIF, dn)
+         mresult = ldif_search(MLDIF, dn)
+-
+         if rresult['tombstone']:
+             rtombstones += 1
+-        if mresult['entry'] is not None or rresult['conflict'] is not None:
+-            if rresult['conflict'] is not None:
+-                rconflicts.append(rresult['conflict'])
++            # continue
++
++        if rresult['conflict'] is not None:
++            rconflicts.append(rresult['conflict'])
+         elif mresult['entry'] is None:
+             # missing entry
+             MLDIF.seek(0)
+             mresult = ldif_search(MLDIF, dn)
+-            if mresult['entry'] is None and mresult['conflict'] is not None:
+-                MLDIF.seek(rresult['idx'])  # Set the cursor to the last good line
++            if mresult['entry'] is None and mresult['glue'] is None:
++                MLDIF.seek(rresult['idx'])  # Set the LDIF cursor/index to the last good line
+                 if not missing:
+-                    missing_report += ('Master is missing entries:\n')
++                    missing_report += ('  Entries missing on Master:\n')
+                     missing = True
+-                if 'createtimestamp' in rresult['entry'].data:
+-                    missing_report += ('  - %s  (Replica\'s creation date:  %s)\n' %
++                if rresult['entry'] and 'createtimestamp' in rresult['entry'].data:
++                    missing_report += ('   - %s  (Created on Replica at: %s)\n' %
+                                        (dn, convert_timestamp(rresult['entry'].data['createtimestamp'][0])))
+                 else:
+-                    missing_report += ('  - %s\n')
++                    missing_report += ('  - %s\n' % dn)
+     if missing:
+         missing_report += ('\n')
+ 
+@@ -553,8 +722,8 @@ def do_offline_report(opts, output_file=None):
+         print(final_report)
+ 
+ 
+-def check_for_diffs(mentries, rentries, report, opts):
+-    ''' Check for diffs, return the updated report
++def check_for_diffs(mentries, mglue, rentries, rglue, report, opts):
++    ''' Online mode only - Check for diffs, return the updated report
+     '''
+     diff_report = []
+     m_missing = []
+@@ -569,18 +738,26 @@ def check_for_diffs(mentries, rentries, report, opts):
+     for mentry in mentries:
+         rentry = get_entry(rentries, mentry.dn)
+         if rentry:
+-            diff = cmp_entry(mentry, rentry, opts)
+-            if diff:
+-                diff_report.append(format_diff(diff))
++            if 'nsTombstone' not in rentry.data['objectclass'] and 'nstombstone' not in rentry.data['objectclass']:
++                diff = cmp_entry(mentry, rentry, opts)
++                if diff:
++                    diff_report.append(format_diff(diff))
+             # Now remove the rentry from the rentries so we can find stragglers
+             remove_entry(rentries, rentry.dn)
+         else:
+-            # Add missing entry in Replica
+-            r_missing.append(mentry)
++            rentry = get_entry(rglue, mentry.dn)
++            if rentry:
++                # Glue entry nothing to compare
++                remove_entry(rentries, rentry.dn)
++            else:
++                # Add missing entry in Replica
++                r_missing.append(mentry)
+ 
+     for rentry in rentries:
+         # We should not have any entries if we are sync
+-        m_missing.append(rentry)
++        mentry = get_entry(mglue, rentry.dn)
++        if mentry is None:
++            m_missing.append(rentry)
+ 
+     if len(diff_report) > 0:
+         report['diff'] += diff_report
+@@ -609,6 +786,12 @@ def connect_to_replicas(opts):
+         ruri = "%s://%s:%s/" % (opts['rprotocol'], opts['rhost'], opts['rport'])
+     replica = SimpleLDAPObject(ruri)
+ 
++    # Set timeouts
++    master.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
++    master.set_option(ldap.OPT_TIMEOUT,5.0)
++    replica.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
++    replica.set_option(ldap.OPT_TIMEOUT,5.0)
++
+     # Setup Secure Conenction
+     if opts['certdir'] is not None:
+         # Setup Master
+@@ -620,7 +803,7 @@ def connect_to_replicas(opts):
+                 try:
+                     master.start_tls_s()
+                 except ldap.LDAPError as e:
+-                    print('TLS negotiation failed on Master: %s' % str(e))
++                    print('TLS negotiation failed on Master: {}'.format(str(e)))
+                     exit(1)
+ 
+         # Setup Replica
+@@ -632,7 +815,7 @@ def connect_to_replicas(opts):
+                 try:
+                     replica.start_tls_s()
+                 except ldap.LDAPError as e:
+-                    print('TLS negotiation failed on Master: %s' % str(e))
++                    print('TLS negotiation failed on Master: {}'.format(str(e)))
+                     exit(1)
+ 
+     # Open connection to master
+@@ -642,7 +825,8 @@ def connect_to_replicas(opts):
+         print("Cannot connect to %r" % muri)
+         exit(1)
+     except ldap.LDAPError as e:
+-        print("Error: Failed to authenticate to Master: %s", str(e))
++        print("Error: Failed to authenticate to Master: ({}).  "
++              "Please check your credentials and LDAP urls are correct.".format(str(e)))
+         exit(1)
+ 
+     # Open connection to replica
+@@ -652,7 +836,8 @@ def connect_to_replicas(opts):
+         print("Cannot connect to %r" % ruri)
+         exit(1)
+     except ldap.LDAPError as e:
+-        print("Error: Failed to authenticate to Replica: %s", str(e))
++        print("Error: Failed to authenticate to Replica: ({}).  "
++              "Please check your credentials and LDAP urls are correct.".format(str(e)))
+         exit(1)
+ 
+     # Get the RUVs
+@@ -665,7 +850,7 @@ def connect_to_replicas(opts):
+             print("Error: Master does not have an RUV entry")
+             exit(1)
+     except ldap.LDAPError as e:
+-        print("Error: Failed to get Master RUV entry: %s", str(e))
++        print("Error: Failed to get Master RUV entry: {}".format(str(e)))
+         exit(1)
+ 
+     print ("Gathering Replica's RUV...")
+@@ -678,7 +863,7 @@ def connect_to_replicas(opts):
+             exit(1)
+ 
+     except ldap.LDAPError as e:
+-        print("Error: Failed to get Replica RUV entry: %s", str(e))
++        print("Error: Failed to get Replica RUV entry: {}".format(str(e)))
+         exit(1)
+ 
+     return (master, replica, opts)
+@@ -687,6 +872,7 @@ def connect_to_replicas(opts):
+ def print_online_report(report, opts, output_file):
+     ''' Print the online report
+     '''
++
+     print ('Preparing final report...')
+     m_missing = len(report['m_missing'])
+     r_missing = len(report['r_missing'])
+@@ -711,22 +897,23 @@ def print_online_report(report, opts, output_file):
+         missing = True
+         final_report += ('\nMissing Entries\n')
+         final_report += ('=====================================================\n\n')
+-        if m_missing > 0:
+-            final_report += ('  Entries missing on Master:\n')
+-            for entry in report['m_missing']:
++
++        if r_missing > 0:
++            final_report += ('  Entries missing on Replica:\n')
++            for entry in report['r_missing']:
+                 if 'createtimestamp' in entry.data:
+-                    final_report += ('   - %s  (Created on Replica at: %s)\n' %
++                    final_report += ('   - %s  (Created on Master at: %s)\n' %
+                                      (entry.dn, convert_timestamp(entry.data['createtimestamp'][0])))
+                 else:
+                     final_report += ('   - %s\n' % (entry.dn))
+ 
+-        if r_missing > 0:
+-            if m_missing > 0:
++        if m_missing > 0:
++            if r_missing > 0:
+                 final_report += ('\n')
+-            final_report += ('  Entries missing on Replica:\n')
+-            for entry in report['r_missing']:
++            final_report += ('  Entries missing on Master:\n')
++            for entry in report['m_missing']:
+                 if 'createtimestamp' in entry.data:
+-                    final_report += ('   - %s  (Created on Master at: %s)\n' %
++                    final_report += ('   - %s  (Created on Replica at: %s)\n' %
+                                      (entry.dn, convert_timestamp(entry.data['createtimestamp'][0])))
+                 else:
+                     final_report += ('   - %s\n' % (entry.dn))
+@@ -751,7 +938,8 @@ def print_online_report(report, opts, output_file):
+ def remove_state_info(entry):
+     ''' Remove the state info for the attributes used in the conflict report
+     '''
+-    attrs = ['objectclass', 'nsds5replconflict', 'createtimestamp']
++    attrs = ['objectclass', 'nsds5replconflict', 'createtimestamp' , 'modifytimestamp']
++    # attrs = ['createtimestamp']
+     for key, val in list(entry.data.items()):
+         for attr in attrs:
+             if key.lower().startswith(attr):
+@@ -766,9 +954,6 @@ def get_conflict_report(mentries, rentries, verbose, format_conflicts=False):
+     r_conflicts = []
+ 
+     for entry in mentries:
+-        if format_conflicts:
+-            remove_state_info(entry)
+-
+         if 'glue' in entry.data['objectclass']:
+             m_conflicts.append({'dn': entry.dn, 'conflict': entry.data['nsds5replconflict'][0],
+                                 'date': entry.data['createtimestamp'][0], 'glue': 'yes'})
+@@ -776,9 +961,6 @@ def get_conflict_report(mentries, rentries, verbose, format_conflicts=False):
+             m_conflicts.append({'dn': entry.dn, 'conflict': entry.data['nsds5replconflict'][0],
+                                 'date': entry.data['createtimestamp'][0], 'glue': 'no'})
+     for entry in rentries:
+-        if format_conflicts:
+-            remove_state_info(entry)
+-
+         if 'glue' in entry.data['objectclass']:
+             r_conflicts.append({'dn': entry.dn, 'conflict': entry.data['nsds5replconflict'][0],
+                                 'date': entry.data['createtimestamp'][0], 'glue': 'yes'})
+@@ -790,7 +972,7 @@ def get_conflict_report(mentries, rentries, verbose, format_conflicts=False):
+         report = "\n\nConflict Entries\n"
+         report += "=====================================================\n\n"
+         if len(m_conflicts) > 0:
+-            report += ('Master Conflict Entries: %d\n' % (len(m_conflicts)))
++            report += ('Master Conflict Entries:  %d\n' % (len(m_conflicts)))
+             if verbose:
+                 for entry in m_conflicts:
+                     report += ('\n - %s\n' % (entry['dn']))
+@@ -799,7 +981,7 @@ def get_conflict_report(mentries, rentries, verbose, format_conflicts=False):
+                     report += ('    - Created:    %s\n' % (convert_timestamp(entry['date'])))
+ 
+         if len(r_conflicts) > 0:
+-            if len(m_conflicts) > 0:
++            if len(m_conflicts) > 0 and verbose:
+                 report += "\n"  # add spacer
+             report += ('Replica Conflict Entries: %d\n' % (len(r_conflicts)))
+             if verbose:
+@@ -814,46 +996,6 @@ def get_conflict_report(mentries, rentries, verbose, format_conflicts=False):
+         return ""
+ 
+ 
+-def get_tombstones(replica, opts):
+-    ''' Return the number of tombstones
+-    '''
+-    paged_ctrl = SimplePagedResultsControl(True, size=opts['pagesize'], cookie='')
+-    controls = [paged_ctrl]
+-    req_pr_ctrl = controls[0]
+-    count = 0
+-
+-    try:
+-        msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+-                                   '(&(objectclass=nstombstone)(nstombstonecsn=*))',
+-                                   ['dn'], serverctrls=controls)
+-    except ldap.LDAPError as e:
+-        print("Error: Failed to get tombstone entries: %s", str(e))
+-        exit(1)
+-
+-    done = False
+-    while not done:
+-        rtype, rdata, rmsgid, rctrls = replica.result3(msgid)
+-        count += len(rdata)
+-
+-        pctrls = [
+-                c
+-                for c in rctrls
+-                if c.controlType == SimplePagedResultsControl.controlType
+-                ]
+-        if pctrls:
+-            if pctrls[0].cookie:
+-                # Copy cookie from response control to request control
+-                req_pr_ctrl.cookie = pctrls[0].cookie
+-                msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+-                                           '(&(objectclass=nstombstone)(nstombstonecsn=*))',
+-                                           ['dn'], serverctrls=controls)
+-            else:
+-                done = True  # No more pages available
+-        else:
+-            done = True
+-    return count
+-
+-
+ def do_online_report(opts, output_file=None):
+     ''' Check for differences between two replicas
+     '''
+@@ -880,7 +1022,7 @@ def do_online_report(opts, output_file=None):
+     req_pr_ctrl = controls[0]
+     try:
+         master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+-                                         "(|(objectclass=*)(objectclass=ldapsubentry))",
++                                         "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
+                                          ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
+                                          serverctrls=controls)
+     except ldap.LDAPError as e:
+@@ -888,7 +1030,7 @@ def do_online_report(opts, output_file=None):
+         exit(1)
+     try:
+         replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+-                                           "(|(objectclass=*)(objectclass=ldapsubentry))",
++                                           "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
+                                            ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
+                                            serverctrls=controls)
+     except ldap.LDAPError as e:
+@@ -918,7 +1060,9 @@ def do_online_report(opts, output_file=None):
+         rconflicts += rresult['conflicts']
+ 
+         # Check for diffs
+-        report = check_for_diffs(mresult['entries'], rresult['entries'], report, opts)
++        report = check_for_diffs(mresult['entries'], mresult['glue'],
++                                 rresult['entries'], rresult['glue'],
++                                 report, opts)
+ 
+         if not m_done:
+             # Master
+@@ -933,7 +1077,7 @@ def do_online_report(opts, output_file=None):
+                     req_pr_ctrl.cookie = m_pctrls[0].cookie
+                     master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+                         "(|(objectclass=*)(objectclass=ldapsubentry))",
+-                        ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'], serverctrls=controls)
++                        ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+                 else:
+                     m_done = True  # No more pages available
+             else:
+@@ -953,7 +1097,7 @@ def do_online_report(opts, output_file=None):
+                     req_pr_ctrl.cookie = r_pctrls[0].cookie
+                     replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+                         "(|(objectclass=*)(objectclass=ldapsubentry))",
+-                        ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'], serverctrls=controls)
++                        ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+                 else:
+                     r_done = True  # No more pages available
+             else:
+@@ -961,10 +1105,8 @@ def do_online_report(opts, output_file=None):
+ 
+     # Get conflicts & tombstones
+     report['conflict'] = get_conflict_report(mconflicts, rconflicts, opts['conflicts'])
+-    report['mtombstones'] = get_tombstones(master, opts)
+-    report['rtombstones'] = get_tombstones(replica, opts)
+-    report['m_count'] += report['mtombstones']
+-    report['r_count'] += report['rtombstones']
++    report['mtombstones'] = mresult['tombstones']
++    report['rtombstones'] = rresult['tombstones']
+ 
+     # Do the final report
+     print_online_report(report, opts, output_file)
+@@ -1027,11 +1169,16 @@ def main():
+ 
+     # Parse the ldap URLs
+     if args.murl is not None and args.rurl is not None:
++        # Make sure the URLs are different
++        if args.murl == args.rurl:
++            print("Master and Replica LDAP URLs are the same, they must be different")
++            exit(1)
++
+         # Parse Master url
+-        murl = ldapurl.LDAPUrl(args.murl)
+         if not ldapurl.isLDAPUrl(args.murl):
+             print("Master LDAP URL is invalid")
+             exit(1)
++        murl = ldapurl.LDAPUrl(args.murl)
+         if murl.urlscheme in VALID_PROTOCOLS:
+             opts['mprotocol'] = murl.urlscheme
+         else:
+@@ -1052,10 +1199,10 @@ def main():
+             opts['mport'] = parts[1]
+ 
+         # Parse Replica url
+-        rurl = ldapurl.LDAPUrl(args.rurl)
+         if not ldapurl.isLDAPUrl(args.rurl):
+             print("Replica LDAP URL is invalid")
+             exit(1)
++        rurl = ldapurl.LDAPUrl(args.rurl)
+         if rurl.urlscheme in VALID_PROTOCOLS:
+             opts['rprotocol'] = rurl.urlscheme
+         else:
+@@ -1075,11 +1222,19 @@ def main():
+             opts['rhost'] = parts[0]
+             opts['rport'] = parts[1]
+ 
++    # Validate certdir
++    opts['certdir'] = None
++    if args.certdir:
++        if os.path.exists() and os.path.isdir(certdir):
++            opts['certdir'] = args.certdir
++        else:
++            print("certificate directory ({}) does not exist or is not a directory".format(args.certdir))
++            exit(1)
++
+     # Initialize the options
+     opts['binddn'] = args.binddn
+     opts['bindpw'] = args.bindpw
+     opts['suffix'] = args.suffix
+-    opts['certdir'] = args.certdir
+     opts['starttime'] = int(time.time())
+     opts['verbose'] = args.verbose
+     opts['mldif'] = args.mldif
+@@ -1109,6 +1264,18 @@ def main():
+ 
+     if opts['mldif'] is not None and opts['rldif'] is not None:
+         print ("Performing offline report...")
++
++        # Validate LDIF files, must exist and not be empty
++        for ldif_dir in [opts['mldif'], opts['rldif']]:
++            if not os.path.exists(ldif_dir):
++                print ("LDIF file ({}) does not exist".format(ldif_dir))
++                exit(1)
++            if os.path.getsize(ldif_dir) == 0:
++                print ("LDIF file ({}) is empty".format(ldif_dir))
++                exit(1)
++        if opts['mldif'] == opts['rldif']:
++            print("The Master and Replica LDIF files must be different")
++            exit(1)
+         do_offline_report(opts, OUTPUT_FILE)
+     else:
+         print ("Performing online report...")
+@@ -1118,5 +1285,6 @@ def main():
+         print('Finished writing report to "%s"' % (args.file))
+         OUTPUT_FILE.close()
+ 
++
+ if __name__ == '__main__':
+     main()
+-- 
+2.17.0
+
diff --git a/SOURCES/0085-Ticket-49576-Add-support-of-deletedattribute-in-ds-r.patch b/SOURCES/0085-Ticket-49576-Add-support-of-deletedattribute-in-ds-r.patch
new file mode 100644
index 0000000..8b4e655
--- /dev/null
+++ b/SOURCES/0085-Ticket-49576-Add-support-of-deletedattribute-in-ds-r.patch
@@ -0,0 +1,33 @@
+From 00ebe4e4298fb19d9b8fc78b16053fb0b92eea9f Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 25 May 2018 09:47:31 -0400
+Subject: [PATCH] Ticket 49576 - Add support of ";deletedattribute" in
+ ds-replcheck
+
+Description: Also need to check for ";deletedattribute" when processing LDIF file
+
+https://pagure.io/389-ds-base/issue/49576
+
+Reviewed by: tbordaz(Thanks!)
+
+(cherry picked from commit 9e046a35a0f771e77c788cddee2cbddee6ae0571)
+---
+ ldap/admin/src/scripts/ds-replcheck | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
+index b801ccaa8..661c9e0ce 100755
+--- a/ldap/admin/src/scripts/ds-replcheck
++++ b/ldap/admin/src/scripts/ds-replcheck
+@@ -197,7 +197,7 @@ def remove_attr_state_info(attr):
+     idx = attr.find(';')
+     if idx > 0:
+         state_attr = attr  # preserve state info for diff report
+-        if ";deleted" in attr:
++        if ";deleted" in attr or ";deletedattribute" in attr:
+             # Ignore this attribute it was deleted
+             return None, state_attr
+         attr = attr[:idx]
+-- 
+2.17.0
+
diff --git a/SOURCES/0086-Ticket-49726-DS-only-accepts-RSA-and-Fortezza-cipher.patch b/SOURCES/0086-Ticket-49726-DS-only-accepts-RSA-and-Fortezza-cipher.patch
new file mode 100644
index 0000000..9d14858
--- /dev/null
+++ b/SOURCES/0086-Ticket-49726-DS-only-accepts-RSA-and-Fortezza-cipher.patch
@@ -0,0 +1,529 @@
+From b6894f921a0635dba97a0745ce75917284e5e5ff Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Sun, 27 May 2018 10:48:55 -0400
+Subject: [PATCH] Ticket 49726 - DS only accepts RSA and Fortezza cipher
+ families
+
+Bug Description:  Currently DS only accepts fortezza and RSA cipher families.
+                  This prevents things like ECC certificates from being used.
+
+Fix Description:  Instead of hardcoding the cipher families, just grab the
+                  current type and use it.
+
+                  Also cleaned up code: removed unncessary "ifdefs", and switched
+                  for loops to use size_t.
+
+https://pagure.io/389-ds-base/issue/49726
+
+Reviewed by: ?
+
+(cherry picked from commit 27a16a068887e5b9fcab3b4507d58a18e6f1d1ec)
+---
+ ldap/servers/slapd/ssl.c | 136 ++++++---------------------------------
+ 1 file changed, 20 insertions(+), 116 deletions(-)
+
+diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
+index 36b09fd16..b8eba2da4 100644
+--- a/ldap/servers/slapd/ssl.c
++++ b/ldap/servers/slapd/ssl.c
+@@ -31,28 +31,11 @@
+ #include "fe.h"
+ #include "certdb.h"
+ 
+-#if !defined(USE_OPENLDAP)
+-#include "ldap_ssl.h"
+-#endif
+-
+ /* For IRIX... */
+ #ifndef MAXPATHLEN
+ #define MAXPATHLEN 1024
+ #endif
+ 
+-#if NSS_VMAJOR * 100 + NSS_VMINOR >= 315
+-/* TLS1.2 is defined in RFC5246. */
+-#define NSS_TLS12 1
+-#elif NSS_VMAJOR * 100 + NSS_VMINOR >= 314
+-/* TLS1.1 is defined in RFC4346. */
+-#define NSS_TLS11 1
+-#else
+-#define NSS_TLS10 1
+-#endif
+-
+-#if NSS_VMAJOR * 100 + NSS_VMINOR >= 320
+-#define HAVE_NSS_DHE 1
+-#endif
+ 
+ /******************************************************************************
+  * Default SSL Version Rule
+@@ -70,10 +53,9 @@
+ 
+ extern char *slapd_SSL3ciphers;
+ extern symbol_t supported_ciphers[];
+-#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
+ static SSLVersionRange enabledNSSVersions;
+ static SSLVersionRange slapdNSSVersions;
+-#endif
++
+ 
+ /* dongle_file_name is set in slapd_nss_init when we set the path for the
+    key, cert, and secmod files - the dongle file must be in the same directory
+@@ -109,12 +91,10 @@ static char *configDN = "cn=encryption,cn=config";
+ #define CIPHER_SET_DEFAULTWEAKCIPHER 0x10  /* allowWeakCipher is not set in cn=encryption */
+ #define CIPHER_SET_ALLOWWEAKCIPHER 0x20    /* allowWeakCipher is on */
+ #define CIPHER_SET_DISALLOWWEAKCIPHER 0x40 /* allowWeakCipher is off */
+-
+-#ifdef HAVE_NSS_DHE
+ #define CIPHER_SET_DEFAULTWEAKDHPARAM 0x100  /* allowWeakDhParam is not set in cn=encryption */
+ #define CIPHER_SET_ALLOWWEAKDHPARAM 0x200    /* allowWeakDhParam is on */
+ #define CIPHER_SET_DISALLOWWEAKDHPARAM 0x400 /* allowWeakDhParam is off */
+-#endif
++
+ 
+ #define CIPHER_SET_ISDEFAULT(flag) \
+     (((flag)&CIPHER_SET_DEFAULT) ? PR_TRUE : PR_FALSE)
+@@ -145,10 +125,7 @@ static char *configDN = "cn=encryption,cn=config";
+ #define CIPHER_IS_WEAK 0x4
+ #define CIPHER_IS_DEPRECATED 0x8
+ 
+-#ifdef HAVE_NSS_DHE
+ static int allowweakdhparam = CIPHER_SET_DEFAULTWEAKDHPARAM;
+-#endif
+-
+ 
+ static char **cipher_names = NULL;
+ static char **enabled_cipher_names = NULL;
+@@ -225,12 +202,10 @@ static lookup_cipher _lookup_cipher[] = {
+     /*{"tls_dhe_dss_1024_des_sha",          ""}, */
+     {"tls_dhe_dss_1024_rc4_sha", "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA"},
+     {"tls_dhe_dss_rc4_128_sha", "TLS_DHE_DSS_WITH_RC4_128_SHA"},
+-#if defined(NSS_TLS12)
+     /* New in NSS 3.15 */
+     {"tls_rsa_aes_128_gcm_sha", "TLS_RSA_WITH_AES_128_GCM_SHA256"},
+     {"tls_dhe_rsa_aes_128_gcm_sha", "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"},
+     {"tls_dhe_dss_aes_128_gcm_sha", NULL}, /* not available */
+-#endif
+     {NULL, NULL}};
+ 
+ /* E.g., "SSL3", "TLS1.2", "Unknown SSL version: 0x0" */
+@@ -317,7 +292,6 @@ getSupportedCiphers(void)
+     SSLCipherSuiteInfo info;
+     char *sep = "::";
+     int number_of_ciphers = SSL_NumImplementedCiphers;
+-    int i;
+     int idx = 0;
+     PRBool isFIPS = slapd_pk11_isFIPS();
+ 
+@@ -325,7 +299,7 @@ getSupportedCiphers(void)
+ 
+     if ((cipher_names == NULL) && (_conf_ciphers)) {
+         cipher_names = (char **)slapi_ch_calloc((number_of_ciphers + 1), sizeof(char *));
+-        for (i = 0; _conf_ciphers[i].name != NULL; i++) {
++        for (size_t i = 0; _conf_ciphers[i].name != NULL; i++) {
+             SSL_GetCipherSuiteInfo((PRUint16)_conf_ciphers[i].num, &info, sizeof(info));
+             /* only support FIPS approved ciphers in FIPS mode */
+             if (!isFIPS || info.isFIPS) {
+@@ -341,7 +315,6 @@ getSupportedCiphers(void)
+     return cipher_names;
+ }
+ 
+-#ifdef HAVE_NSS_DHE
+ int
+ get_allow_weak_dh_param(Slapi_Entry *e)
+ {
+@@ -365,7 +338,6 @@ get_allow_weak_dh_param(Slapi_Entry *e)
+     slapi_ch_free((void **)&val);
+     return allow;
+ }
+-#endif
+ 
+ 
+ char **
+@@ -374,7 +346,6 @@ getEnabledCiphers(void)
+     SSLCipherSuiteInfo info;
+     char *sep = "::";
+     int number_of_ciphers = 0;
+-    int x;
+     int idx = 0;
+     PRBool enabled;
+ 
+@@ -383,14 +354,14 @@ getEnabledCiphers(void)
+         return NULL;
+     }
+     if ((enabled_cipher_names == NULL) && _conf_ciphers) {
+-        for (x = 0; _conf_ciphers[x].name; x++) {
++        for (size_t x = 0; _conf_ciphers[x].name; x++) {
+             SSL_CipherPrefGetDefault(_conf_ciphers[x].num, &enabled);
+             if (enabled) {
+                 number_of_ciphers++;
+             }
+         }
+         enabled_cipher_names = (char **)slapi_ch_calloc((number_of_ciphers + 1), sizeof(char *));
+-        for (x = 0; _conf_ciphers[x].name; x++) {
++        for (size_t x = 0; _conf_ciphers[x].name; x++) {
+             SSL_CipherPrefGetDefault(_conf_ciphers[x].num, &enabled);
+             if (enabled) {
+                 SSL_GetCipherSuiteInfo((PRUint16)_conf_ciphers[x].num, &info, sizeof(info));
+@@ -472,9 +443,6 @@ getSSLVersionRange(char **min, char **max)
+         }
+         return -1;
+     }
+-#if defined(NSS_TLS10)
+-    return -1; /* not supported */
+-#else          /* NSS_TLS11 or newer */
+     if (min) {
+         *min = slapi_getSSLVersion_str(slapdNSSVersions.min, NULL, 0);
+     }
+@@ -482,10 +450,8 @@ getSSLVersionRange(char **min, char **max)
+         *max = slapi_getSSLVersion_str(slapdNSSVersions.max, NULL, 0);
+     }
+     return 0;
+-#endif
+ }
+ 
+-#if defined(USE_OPENLDAP)
+ void
+ getSSLVersionRangeOL(int *min, int *max)
+ {
+@@ -499,10 +465,7 @@ getSSLVersionRangeOL(int *min, int *max)
+     if (!slapd_ssl_listener_is_initialized()) {
+         return;
+     }
+-#if defined(NSS_TLS10)
+-    *max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_0;
+-    return;
+-#else /* NSS_TLS11 or newer */
++
+     if (min) {
+         switch (slapdNSSVersions.min) {
+         case SSL_LIBRARY_VERSION_3_0:
+@@ -550,14 +513,11 @@ getSSLVersionRangeOL(int *min, int *max)
+         }
+     }
+     return;
+-#endif
+ }
+-#endif /* USE_OPENLDAP */
+ 
+ static void
+ _conf_init_ciphers(void)
+ {
+-    int x;
+     SECStatus rc;
+     SSLCipherSuiteInfo info;
+     const PRUint16 *implementedCiphers = SSL_GetImplementedCiphers();
+@@ -568,7 +528,7 @@ _conf_init_ciphers(void)
+     }
+     _conf_ciphers = (cipherstruct *)slapi_ch_calloc(SSL_NumImplementedCiphers + 1, sizeof(cipherstruct));
+ 
+-    for (x = 0; implementedCiphers && (x < SSL_NumImplementedCiphers); x++) {
++    for (size_t x = 0; implementedCiphers && (x < SSL_NumImplementedCiphers); x++) {
+         rc = SSL_GetCipherSuiteInfo(implementedCiphers[x], &info, sizeof info);
+         if (SECFailure == rc) {
+             slapi_log_err(SLAPI_LOG_ERR, "Security Initialization",
+@@ -598,7 +558,6 @@ _conf_init_ciphers(void)
+ static void
+ _conf_setallciphers(int flag, char ***suplist, char ***unsuplist)
+ {
+-    int x;
+     SECStatus rc;
+     PRBool setdefault = CIPHER_SET_ISDEFAULT(flag);
+     PRBool enabled = CIPHER_SET_ISALL(flag);
+@@ -608,7 +567,7 @@ _conf_setallciphers(int flag, char ***suplist, char ***unsuplist)
+ 
+     _conf_init_ciphers();
+ 
+-    for (x = 0; implementedCiphers && (x < SSL_NumImplementedCiphers); x++) {
++    for (size_t x = 0; implementedCiphers && (x < SSL_NumImplementedCiphers); x++) {
+         if (_conf_ciphers[x].flags & CIPHER_IS_DEFAULT) {
+             /* certainly, not the first time. */
+             setme = PR_TRUE;
+@@ -663,11 +622,10 @@ charray2str(char **ary, const char *delim)
+ void
+ _conf_dumpciphers(void)
+ {
+-    int x;
+     PRBool enabled;
+     /* {"SSL3","rc4", SSL_EN_RC4_128_WITH_MD5}, */
+     slapd_SSL_info("Configured NSS Ciphers");
+-    for (x = 0; _conf_ciphers[x].name; x++) {
++    for (size_t x = 0; _conf_ciphers[x].name; x++) {
+         SSL_CipherPrefGetDefault(_conf_ciphers[x].num, &enabled);
+         if (enabled) {
+             slapd_SSL_info("\t%s: enabled%s%s%s", _conf_ciphers[x].name,
+@@ -687,7 +645,8 @@ char *
+ _conf_setciphers(char *setciphers, int flags)
+ {
+     char *t, err[MAGNUS_ERROR_LEN];
+-    int x, i, active;
++    int active;
++    size_t x = 0;
+     char *raw = setciphers;
+     char **suplist = NULL;
+     char **unsuplist = NULL;
+@@ -772,7 +731,7 @@ _conf_setciphers(char *setciphers, int flags)
+                 }
+             }
+             if (lookup) { /* lookup with old cipher name and get NSS cipherSuiteName */
+-                for (i = 0; _lookup_cipher[i].alias; i++) {
++                for (size_t i = 0; _lookup_cipher[i].alias; i++) {
+                     if (!PL_strcasecmp(setciphers, _lookup_cipher[i].alias)) {
+                         if (enabled && !_lookup_cipher[i].name[0]) {
+                             slapd_SSL_warn("Cipher suite %s is not available in NSS %d.%d.  Ignoring %s",
+@@ -915,9 +874,8 @@ getChildren(char *dn)
+         slapi_pblock_get(new_pb, SLAPI_PLUGIN_INTOP_RESULT, &search_result);
+         slapi_pblock_get(new_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &e);
+         if (e != NULL) {
+-            int i;
+             list = (char **)slapi_ch_malloc(sizeof(*list) * (nEntries + 1));
+-            for (i = 0; e[i] != NULL; i++) {
++            for (size_t i = 0; e[i] != NULL; i++) {
+                 list[i] = slapi_ch_strdup(slapi_entry_get_dn(e[i]));
+             }
+             list[nEntries] = NULL;
+@@ -935,8 +893,7 @@ static void
+ freeChildren(char **list)
+ {
+     if (list != NULL) {
+-        int i;
+-        for (i = 0; list[i] != NULL; i++) {
++        for (size_t i = 0; list[i] != NULL; i++) {
+             slapi_ch_free((void **)(&list[i]));
+         }
+         slapi_ch_free((void **)(&list));
+@@ -1017,7 +974,6 @@ warn_if_no_key_file(const char *dir, int no_log)
+     return ret;
+ }
+ 
+-#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
+ /*
+  * If non NULL buf and positive bufsize is given,
+  * the memory is used to store the version string.
+@@ -1183,7 +1139,6 @@ restrict_SSLVersionRange(void)
+         }
+     }
+ }
+-#endif
+ 
+ /*
+  * slapd_nss_init() is always called from main(), even if we do not
+@@ -1206,7 +1161,6 @@ slapd_nss_init(int init_ssl __attribute__((unused)), int config_available __attr
+     int create_certdb = 0;
+     PRUint32 nssFlags = 0;
+     char *certdir;
+-#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
+     char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH];
+     /* Get the range of the supported SSL version */
+     SSL_VersionRangeGetSupported(ssl_variant_stream, &enabledNSSVersions);
+@@ -1216,7 +1170,6 @@ slapd_nss_init(int init_ssl __attribute__((unused)), int config_available __attr
+     slapi_log_err(SLAPI_LOG_CONFIG, "Security Initialization",
+                   "slapd_nss_init - Supported range by NSS: min: %s, max: %s\n",
+                   emin, emax);
+-#endif
+ 
+     /* set in slapd_bootstrap_config,
+        thus certdir is available even if config_available is false */
+@@ -1385,9 +1338,7 @@ slapd_ssl_init()
+     char *val = NULL;
+     PK11SlotInfo *slot;
+     Slapi_Entry *entry = NULL;
+-#ifdef HAVE_NSS_DHE
+     SECStatus rv = SECFailure;
+-#endif
+ 
+     /* Get general information */
+ 
+@@ -1396,7 +1347,6 @@ slapd_ssl_init()
+     val = slapi_entry_attr_get_charptr(entry, "nssslSessionTimeout");
+     ciphers = slapi_entry_attr_get_charptr(entry, "nsssl3ciphers");
+ 
+-#ifdef HAVE_NSS_DHE
+     allowweakdhparam = get_allow_weak_dh_param(entry);
+     if (allowweakdhparam & CIPHER_SET_ALLOWWEAKDHPARAM) {
+         slapd_SSL_warn("notice, generating new WEAK DH param");
+@@ -1405,7 +1355,6 @@ slapd_ssl_init()
+             slapd_SSL_error("Warning, unable to generate weak dh parameters");
+         }
+     }
+-#endif
+ 
+     /* We are currently using the value of sslSessionTimeout
+        for ssl3SessionTimeout, see SSL_ConfigServerSessionIDCache() */
+@@ -1527,7 +1476,6 @@ slapd_ssl_init()
+     return 0;
+ }
+ 
+-#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
+ /*
+  * val:   sslVersionMin/Max value set in cn=encription,cn=config (INPUT)
+  * rval:  Corresponding value to set SSLVersionRange (OUTPUT)
+@@ -1541,7 +1489,7 @@ static int
+ set_NSS_version(char *val, PRUint16 *rval, int ismin)
+ {
+     char *vp, *endp;
+-    int vnum;
++    int64_t vnum;
+     char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH];
+ 
+     if (NULL == rval) {
+@@ -1662,7 +1610,6 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin)
+                 }
+             }
+         } else if (tlsv < 1.3) { /* TLS1.2 */
+-#if defined(NSS_TLS12)
+             if (ismin) {
+                 if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_2) {
+                     slapd_SSL_warn("The value of sslVersionMin "
+@@ -1685,7 +1632,6 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin)
+                     (*rval) = SSL_LIBRARY_VERSION_TLS_1_2;
+                 }
+             }
+-#endif
+         } else { /* Specified TLS is newer than supported */
+             if (ismin) {
+                 slapd_SSL_warn("The value of sslVersionMin "
+@@ -1720,7 +1666,6 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin)
+ #undef SSLLEN
+ #undef TLSSTR
+ #undef TLSLEN
+-#endif
+ 
+ int
+ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+@@ -1740,12 +1685,10 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+     char *tmpDir;
+     Slapi_Entry *e = NULL;
+     PRBool fipsMode = PR_FALSE;
+-#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
+     PRUint16 NSSVersionMin = enabledNSSVersions.min;
+     PRUint16 NSSVersionMax = enabledNSSVersions.max;
+     char mymin[VERSION_STR_LENGTH], mymax[VERSION_STR_LENGTH];
+     char newmax[VERSION_STR_LENGTH];
+-#endif
+     char cipher_string[1024];
+     int allowweakcipher = CIPHER_SET_DEFAULTWEAKCIPHER;
+     int_fast16_t renegotiation = (int_fast16_t)SSL_RENEGOTIATE_REQUIRES_XTN;
+@@ -1964,15 +1907,13 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+                 }
+ 
+                 if (SECSuccess == rv) {
++                    SSLKEAType certKEA;
+ 
+-#ifdef HAVE_NSS_DHE
+-                    /* Step If we want weak dh params, flag it on the socket now! */
+-
++                    /* If we want weak dh params, flag it on the socket now! */
+                     rv = SSL_OptionSet(*fd, SSL_ENABLE_SERVER_DHE, PR_TRUE);
+                     if (rv != SECSuccess) {
+                         slapd_SSL_warn("Warning, unable to start DHE");
+                     }
+-
+                     if (allowweakdhparam & CIPHER_SET_ALLOWWEAKDHPARAM) {
+                         slapd_SSL_warn("notice, allowing weak parameters on socket.");
+                         rv = SSL_EnableWeakDHEPrimeGroup(*fd, PR_TRUE);
+@@ -1980,13 +1921,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+                             slapd_SSL_warn("Warning, unable to allow weak DH params on socket.");
+                         }
+                     }
+-#endif
+ 
+-                    if (slapd_pk11_fortezzaHasKEA(cert) == PR_TRUE) {
+-                        rv = SSL_ConfigSecureServer(*fd, cert, key, kt_fortezza);
+-                    } else {
+-                        rv = SSL_ConfigSecureServer(*fd, cert, key, kt_rsa);
+-                    }
++                    certKEA = NSS_FindCertKEAType(cert);
++                    rv = SSL_ConfigSecureServer(*fd, cert, key, certKEA);
+                     if (SECSuccess != rv) {
+                         errorCode = PR_GetError();
+                         slapd_SSL_warn("ConfigSecureServer: "
+@@ -2140,7 +2077,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+             enableTLS1 = PR_TRUE; /* If available, enable TLS1 */
+         }
+         slapi_ch_free_string(&val);
+-#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
+         val = slapi_entry_attr_get_charptr(e, "sslVersionMin");
+         if (val) {
+             (void)set_NSS_version(val, &NSSVersionMin, 1);
+@@ -2161,9 +2097,8 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+                            mymax, newmax);
+             NSSVersionMax = enabledNSSVersions.max;
+         }
+-#endif
+     }
+-#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
++
+     if (NSSVersionMin > 0) {
+         /* Use new NSS API SSL_VersionRangeSet (NSS3.14 or newer) */
+         slapdNSSVersions.min = NSSVersionMin;
+@@ -2183,7 +2118,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+                             mymin, mymax);
+         }
+     } else {
+-#endif
+         /* deprecated code */
+         sslStatus = SSL_OptionSet(pr_sock, SSL_ENABLE_SSL3, enableSSL3);
+         if (sslStatus != SECSuccess) {
+@@ -2202,9 +2136,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
+                            enableTLS1 ? "enable" : "disable",
+                            errorCode, slapd_pr_strerror(errorCode));
+         }
+-#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
+     }
+-#endif
+ 
+     val = NULL;
+     if (e != NULL) {
+@@ -2382,12 +2314,8 @@ slapd_SSL_client_auth(LDAP *ld)
+                              */
+                             token = slapi_ch_strdup(internalTokenName);
+                         }
+-#if defined(USE_OPENLDAP)
+                         /* openldap needs tokenname:certnick */
+                         PR_snprintf(cert_name, sizeof(cert_name), "%s:%s", token, personality);
+-#else
+-                        PL_strncpyz(cert_name, personality, sizeof(cert_name));
+-#endif
+                         slapi_ch_free_string(&ssltoken);
+                     } else {
+                         /* external PKCS #11 token - attach token name */
+@@ -2461,7 +2389,6 @@ slapd_SSL_client_auth(LDAP *ld)
+                            "(no password). (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)",
+                            errorCode, slapd_pr_strerror(errorCode));
+         } else {
+-#if defined(USE_OPENLDAP)
+             if (slapi_client_uses_non_nss(ld)  && config_get_extract_pem()) {
+                 char *certdir = config_get_certdir();
+                 char *keyfile = NULL;
+@@ -2532,29 +2459,6 @@ slapd_SSL_client_auth(LDAP *ld)
+                                    cert_name);
+                 }
+             }
+-/*
+-             * not sure what else needs to be done for client auth - don't
+-             * currently have a way to pass in the password to use to unlock
+-             * the keydb - nor a way to disable caching
+-             */
+-#else /* !USE_OPENLDAP */
+-            rc = ldapssl_enable_clientauth(ld, SERVER_KEY_NAME, pw, cert_name);
+-            if (rc != 0) {
+-                errorCode = PR_GetError();
+-                slapd_SSL_error("ldapssl_enable_clientauth(%s, %s) %i (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)",
+-                                SERVER_KEY_NAME, cert_name, rc,
+-                                errorCode, slapd_pr_strerror(errorCode));
+-            } else {
+-                /*
+-                 * We cannot allow NSS to cache outgoing client auth connections -
+-                 * each client auth connection must have it's own non-shared SSL
+-                 * connection to the peer so that it will go through the
+-                 * entire handshake protocol every time including the use of its
+-                 * own unique client cert - see bug 605457
+-                 */
+-                ldapssl_set_option(ld, SSL_NO_CACHE, PR_TRUE);
+-            }
+-#endif
+         }
+     }
+ 
+-- 
+2.17.0
+
diff --git a/SOURCES/0087-Ticket-48184-clean-up-and-delete-connections-at-shut.patch b/SOURCES/0087-Ticket-48184-clean-up-and-delete-connections-at-shut.patch
new file mode 100644
index 0000000..b49c92e
--- /dev/null
+++ b/SOURCES/0087-Ticket-48184-clean-up-and-delete-connections-at-shut.patch
@@ -0,0 +1,190 @@
+From 240cfa58c62571b92640a385cfcce6d858cb00dc Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Wed, 30 May 2018 15:48:11 +0200
+Subject: [PATCH] Ticket 48184 - clean up and delete connections at shutdown
+ (3rd)
+
+Bug description:
+        During shutdown we would not close connections.
+        In the past this may have just been an annoyance, but now with the way
+        nunc-stans works, io events can still trigger on open existing connections
+        during shutdown.
+
+Fix Description:
+        Because of NS dynamic it can happen that several jobs wants to work on the
+        same connection. In such case (a job is already set in c_job) we delay the
+        new job that will retry.
+        In addition:
+            - some call needed c_mutex
+            - test uninitialized nunc-stans in case of shutdown while startup is not completed
+
+	If it is not possible to schedule immediately a job it is sometime useless to wait:
+		- if the connection is already freed, just cancel the scheduled job
+		  and do not register a new one
+		- If we are in middle of a shutdown we do not know if the
+		  scheduled job is ns_handle_closure, so cancel the scheduled
+		  job and schedule ns_handle_closure.
+
+https://pagure.io/389-ds-base/issue/48184
+
+Reviewed by:
+            Original fix reviewed by Ludwig and Viktor
+            Second   fix reviewed by Mark
+	    Third    fix reviewed by Mark
+
+Platforms tested: F26
+
+Flag Day: no
+
+Doc impact: no
+---
+ ldap/servers/slapd/connection.c | 10 +++--
+ ldap/servers/slapd/conntable.c  |  2 +-
+ ldap/servers/slapd/daemon.c     | 67 +++++++++++++++++++++++++--------
+ ldap/servers/slapd/proto-slap.h |  2 +-
+ 4 files changed, 60 insertions(+), 21 deletions(-)
+
+diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
+index 76e83112b..c54e7c26c 100644
+--- a/ldap/servers/slapd/connection.c
++++ b/ldap/servers/slapd/connection.c
+@@ -741,14 +741,18 @@ connection_acquire_nolock(Connection *conn)
+ 
+ /* returns non-0 if connection can be reused and 0 otherwise */
+ int
+-connection_is_free(Connection *conn)
++connection_is_free(Connection *conn, int use_lock)
+ {
+     int rc;
+ 
+-    PR_EnterMonitor(conn->c_mutex);
++    if (use_lock) {
++        PR_EnterMonitor(conn->c_mutex);
++    }
+     rc = conn->c_sd == SLAPD_INVALID_SOCKET && conn->c_refcnt == 0 &&
+          !(conn->c_flags & CONN_FLAG_CLOSING);
+-    PR_ExitMonitor(conn->c_mutex);
++    if (use_lock) {
++        PR_ExitMonitor(conn->c_mutex);
++    }
+ 
+     return rc;
+ }
+diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
+index f2f763dfa..114871d17 100644
+--- a/ldap/servers/slapd/conntable.c
++++ b/ldap/servers/slapd/conntable.c
+@@ -129,7 +129,7 @@ connection_table_get_connection(Connection_Table *ct, int sd)
+             break;
+         }
+ 
+-        if (connection_is_free(&(ct->c[index]))) {
++        if (connection_is_free(&(ct->c[index]), 1 /*use lock */)) {
+             break;
+         }
+     }
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index 50e67474e..35cfe7de0 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -1699,7 +1699,8 @@ ns_connection_post_io_or_closing_try(Connection *conn)
+     }
+ 
+     /*
+-     * Cancel any existing ns jobs we have registered.
++     * A job was already scheduled.
++     * Let it be dispatched first
+      */
+     if (conn->c_job != NULL) {
+         return 1;
+@@ -1780,25 +1781,59 @@ ns_connection_post_io_or_closing_try(Connection *conn)
+     }
+     return 0;
+ }
++
++/*
++ * Tries to schedule I/O for this connection
++ * If the connection is already busy with a scheduled I/O
++ * it can wait until scheduled I/O is dispatched
++ *
++ * caller must hold c_mutex
++ */
+ void
+ ns_connection_post_io_or_closing(Connection *conn)
+ {
+     while (ns_connection_post_io_or_closing_try(conn)) {
+-	/* we should retry later */
+-	
+-	/* We are not suppose to work immediately on the connection that is taken by
+-	 * another job
+-	 * release the lock and give some time
+-	 */
+-	
+-	if (CONN_NEEDS_CLOSING(conn) && conn->c_ns_close_jobs) {
+-	    return;
+-	} else {
+-	    PR_ExitMonitor(conn->c_mutex);
+-	    DS_Sleep(PR_MillisecondsToInterval(100));
+-
+-	    PR_EnterMonitor(conn->c_mutex);
+-	}
++        /* Here a job is currently scheduled (c->job is set) and not yet dispatched
++         * Job can be either:
++         *  - ns_handle_closure
++         *  - ns_handle_pr_read_ready
++         */
++
++        if (connection_is_free(conn, 0)) {
++            PRStatus shutdown_status;
++
++            /* The connection being freed,
++             * It means that ns_handle_closure already completed and the connection
++             * is no longer on the active list.
++             * The scheduled job is useless, and so would be scheduling a new one
++             */
++            shutdown_status = ns_job_done(conn->c_job);
++            if (shutdown_status != PR_SUCCESS) {
++                slapi_log_err(SLAPI_LOG_CRIT, "ns_connection_post_io_or_closing", "Failed cancel a job on a freed connection %d !\n", conn->c_sd);
++            }
++            conn->c_job = NULL;
++            return;
++        }
++        if (g_get_shutdown() && CONN_NEEDS_CLOSING(conn)) {
++            PRStatus shutdown_status;
++
++            /* This is shutting down cancel any scheduled job to register ns_handle_closure
++             */
++            shutdown_status = ns_job_done(conn->c_job);
++            if (shutdown_status != PR_SUCCESS) {
++                slapi_log_err(SLAPI_LOG_CRIT, "ns_connection_post_io_or_closing", "Failed to cancel a job during shutdown %d !\n", conn->c_sd);
++            }
++            conn->c_job = NULL;
++            continue;
++        }
++
++        /* We are not suppose to work immediately on the connection that is taken by
++         * another job
++         * release the lock and give some time
++         */
++        PR_ExitMonitor(conn->c_mutex);
++        DS_Sleep(PR_MillisecondsToInterval(100));
++        PR_EnterMonitor(conn->c_mutex);
+     }
+ }
+ 
+diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
+index b13334ad1..f54bc6bc5 100644
+--- a/ldap/servers/slapd/proto-slap.h
++++ b/ldap/servers/slapd/proto-slap.h
+@@ -1431,7 +1431,7 @@ int connection_acquire_nolock(Connection *conn);
+ int connection_acquire_nolock_ext(Connection *conn, int allow_when_closing);
+ int connection_release_nolock(Connection *conn);
+ int connection_release_nolock_ext(Connection *conn, int release_only);
+-int connection_is_free(Connection *conn);
++int connection_is_free(Connection *conn, int use_lock);
+ int connection_is_active_nolock(Connection *conn);
+ #if defined(USE_OPENLDAP)
+ ber_slen_t openldap_read_function(Sockbuf_IO_Desc *sbiod, void *buf, ber_len_t len);
+-- 
+2.17.0
+
diff --git a/SOURCES/0088-Ticket-49736-Hardening-of-active-connection-list.patch b/SOURCES/0088-Ticket-49736-Hardening-of-active-connection-list.patch
new file mode 100644
index 0000000..1a6143d
--- /dev/null
+++ b/SOURCES/0088-Ticket-49736-Hardening-of-active-connection-list.patch
@@ -0,0 +1,76 @@
+From 1f3e1ad55f72a885e27db41be28ce1037ff0ce93 Mon Sep 17 00:00:00 2001
+From: Thierry Bordaz <tbordaz@redhat.com>
+Date: Fri, 1 Jun 2018 16:12:40 +0200
+Subject: [PATCH] Ticket 49736 - Hardening of active connection list
+
+Bug Description:
+	In case of a bug in the management of the connection refcnt
+	it can happen that there are several attempts to move a connection
+	out of the active list.
+
+	It triggers a crash because when derefencing c->c_prev.
+	c_prev is never NULL on the active list
+
+Fix Description:
+	The fix tests if the connection is already out of the active list.
+	If such case, it just returns.
+
+	A potential issue that is not addressed by this fix is:
+	Thread A and Thread B are using 'c' but c->refcnt=1 (it should be 2)
+	Thread A "closes" 'c', 'c' is move out of active list (free) because of refcnt=0
+	A new connection happens selecting the free connection 'c', moving it to the active list.
+	Thread C is using 'c' from the new connection c->refcnt=1
+	Thread B "closes" 'c', 'c' is moved out of the active list.
+	-> new operation coming on 'c' will not be detected
+	-> Thread C will likely crash when sending result
+
+https://pagure.io/389-ds-base/issue/49736
+
+Reviewed by: Mark Reynolds (thanks!)
+
+Platforms tested: F26
+
+Flag Day: no
+
+Doc impact: no
+
+(cherry picked from commit b0e05806232b781eed3ff102485045a358d7659b)
+---
+ ldap/servers/slapd/conntable.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
+index 114871d17..cb68a1119 100644
+--- a/ldap/servers/slapd/conntable.c
++++ b/ldap/servers/slapd/conntable.c
+@@ -243,6 +243,27 @@ connection_table_move_connection_out_of_active_list(Connection_Table *ct, Connec
+     int c_sd; /* for logging */
+     /* we always have previous element because list contains a dummy header */;
+     PR_ASSERT(c->c_prev);
++    if (c->c_prev == NULL) {
++        /* c->c_prev is set when the connection is moved ON the active list
++         * So this connection is already OUT of the active list
++         *
++         * Not sure how to recover from here.
++         * Considering c->c_prev is NULL we can assume refcnt is now 0
++         * and connection_cleanup was already called.
++         * If it is not the case, then consequences are:
++         *  - Leak some memory (connext, unsent page result entries, various buffers)
++         *  - hanging connection (fd not closed)
++         * An option would be to call connection_cleanup here.
++         *
++         * The logged message helps to know how frequently the problem exists
++         */
++        slapi_log_err(SLAPI_LOG_CRIT,
++                      "connection_table_move_connection_out_of_active_list",
++                      "conn %d is already OUT of the active list (refcnt is %d)\n",
++                      c->c_sd, c->c_refcnt);
++
++        return 0;
++    }
+ 
+ #ifdef FOR_DEBUGGING
+     slapi_log_err(SLAPI_LOG_DEBUG, "connection_table_move_connection_out_of_active_list", "Moving connection out of active list\n");
+-- 
+2.17.0
+
diff --git a/SOURCES/0089-Ticket-49652-DENY-aci-s-are-not-handled-properly.patch b/SOURCES/0089-Ticket-49652-DENY-aci-s-are-not-handled-properly.patch
new file mode 100644
index 0000000..e0a8c9b
--- /dev/null
+++ b/SOURCES/0089-Ticket-49652-DENY-aci-s-are-not-handled-properly.patch
@@ -0,0 +1,285 @@
+From 5b7d67bdef7810c661ae4ba1fdfa620c86985661 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 27 Apr 2018 08:34:51 -0400
+Subject: [PATCH] Ticket 49652 - DENY aci's are not handled properly
+
+Bug Description:  There are really two issues here.  One, when a resource
+                  is denied by a DENY aci the cached results for that resource
+                  are not properly set, and on the same connection if the same
+                  operation is repeated it will be allowed instead of denied because
+                  the cache result was not properly updated.
+
+                  Two, if there are no ALLOW aci's on a resource, then we don't
+                  check the deny rules, and resources that are restricted are
+                  returned to the client.
+
+Fix Description:  For issue one, when an entry is denied access reset all the
+                  attributes' cache results to DENIED as it's possible previously
+                  evaluated aci's granted access to some of these attributes which
+                  are still present in the acl result cache.
+
+                  For issue two, if there are no ALLOW aci's on a resource but
+                  there are DENY aci's, then set the aclpb state flags to
+                  process DENY aci's
+
+https://pagure.io/389-ds-base/issue/49652
+
+Reviewed by: tbordaz & lkrispenz(Thanks!!)
+
+(cherry picked from commit d77c7f0754f67022b42784c05be8a493a00f2ec5)
+---
+ dirsrvtests/tests/suites/acl/acl_deny_test.py | 198 ++++++++++++++++++
+ ldap/servers/plugins/acl/acl.c                |  24 ++-
+ 2 files changed, 220 insertions(+), 2 deletions(-)
+ create mode 100644 dirsrvtests/tests/suites/acl/acl_deny_test.py
+
+diff --git a/dirsrvtests/tests/suites/acl/acl_deny_test.py b/dirsrvtests/tests/suites/acl/acl_deny_test.py
+new file mode 100644
+index 000000000..285664150
+--- /dev/null
++++ b/dirsrvtests/tests/suites/acl/acl_deny_test.py
+@@ -0,0 +1,198 @@
++import logging
++import pytest
++import os
++import ldap
++import time
++from lib389._constants import *
++from lib389.topologies import topology_st as topo
++from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES
++from lib389.idm.domain import Domain
++
++DEBUGGING = os.getenv("DEBUGGING", default=False)
++if DEBUGGING:
++    logging.getLogger(__name__).setLevel(logging.DEBUG)
++else:
++    logging.getLogger(__name__).setLevel(logging.INFO)
++log = logging.getLogger(__name__)
++
++BIND_DN2 = 'uid=tuser,ou=People,dc=example,dc=com'
++BIND_RDN2 = 'tuser'
++BIND_DN = 'uid=tuser1,ou=People,dc=example,dc=com'
++BIND_RDN = 'tuser1'
++SRCH_FILTER = "uid=tuser1"
++SRCH_FILTER2 = "uid=tuser"
++
++aci_list_A = ['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)',
++              '(targetattr = "*") (version 3.0;acl "allow tuser";allow (all)(userdn = "ldap:///uid=tuser5,ou=People,dc=example,dc=com");)',
++              '(targetattr != "uid || mail") (version 3.0; acl "deny-attrs"; deny (all) (userdn = "ldap:///anyone");)',
++              '(targetfilter = "(inetUserStatus=1)") ( version 3.0; acl "deny-specific-entry"; deny(all) (userdn = "ldap:///anyone");)']
++
++aci_list_B = ['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)',
++              '(targetattr != "uid || mail") (version 3.0; acl "deny-attrs"; deny (all) (userdn = "ldap:///anyone");)',
++              '(targetfilter = "(inetUserStatus=1)") ( version 3.0; acl "deny-specific-entry"; deny(all) (userdn = "ldap:///anyone");)']
++
++
++@pytest.fixture(scope="module")
++def aci_setup(topo):
++    topo.standalone.log.info("Add {}".format(BIND_DN))
++    user = UserAccount(topo.standalone, BIND_DN)
++    user_props = TEST_USER_PROPERTIES.copy()
++    user_props.update({'sn': BIND_RDN,
++                       'cn': BIND_RDN,
++                       'uid': BIND_RDN,
++                       'inetUserStatus': '1',
++                       'objectclass': 'extensibleObject',
++                       'userpassword': PASSWORD})
++    user.create(properties=user_props, basedn=SUFFIX)
++
++    topo.standalone.log.info("Add {}".format(BIND_DN2))
++    user2 = UserAccount(topo.standalone, BIND_DN2)
++    user_props = TEST_USER_PROPERTIES.copy()
++    user_props.update({'sn': BIND_RDN2,
++                       'cn': BIND_RDN2,
++                       'uid': BIND_RDN2,
++                       'userpassword': PASSWORD})
++    user2.create(properties=user_props, basedn=SUFFIX)
++
++
++def test_multi_deny_aci(topo, aci_setup):
++    """Test that multiple deny rules work, and that the cache properly
++    stores the result
++
++    :id: 294c366d-850e-459e-b5a0-3cc828ec3aca
++    :setup: Standalone Instance
++    :steps:
++        1. Add aci_list_A aci's and verify two searches on the same connection
++           behave the same
++        2. Add aci_list_B aci's and verify search fails as expected
++    :expectedresults:
++        1. Both searches do not return any entries
++        2. Searches do not return any entries
++    """
++
++    if DEBUGGING:
++        # Maybe add aci logging?
++        pass
++
++    suffix = Domain(topo.standalone, DEFAULT_SUFFIX)
++
++    for run in range(2):
++        topo.standalone.log.info("Pass " + str(run + 1))
++
++        # Test ACI List A
++        topo.standalone.log.info("Testing two searches behave the same...")
++        topo.standalone.simple_bind_s(DN_DM, PASSWORD)
++        suffix.set('aci', aci_list_A, ldap.MOD_REPLACE)
++        time.sleep(1)
++
++        topo.standalone.simple_bind_s(BIND_DN, PASSWORD)
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER)
++        if entries and entries[0]:
++            topo.standalone.log.fatal("Incorrectly got an entry returned from search 1")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER)
++        if entries and entries[0]:
++            topo.standalone.log.fatal("Incorrectly got an entry returned from search 2")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2)
++        if entries is None or len(entries) == 0:
++            topo.standalone.log.fatal("Failed to get entry as good user")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2)
++        if entries is None or len(entries) == 0:
++            topo.standalone.log.fatal("Failed to get entry as good user")
++            assert False
++
++        # Bind a different user who has rights
++        topo.standalone.simple_bind_s(BIND_DN2, PASSWORD)
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2)
++        if entries is None or len(entries) == 0:
++            topo.standalone.log.fatal("Failed to get entry as good user")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2)
++        if entries is None or len(entries) == 0:
++            topo.standalone.log.fatal("Failed to get entry as good user (2)")
++            assert False
++
++        if run > 0:
++            # Second pass
++            topo.standalone.restart()
++
++        # Reset ACI's and do the second test
++        topo.standalone.log.info("Testing search does not return any entries...")
++        topo.standalone.simple_bind_s(DN_DM, PASSWORD)
++        suffix.set('aci', aci_list_B, ldap.MOD_REPLACE)
++        time.sleep(1)
++
++        topo.standalone.simple_bind_s(BIND_DN, PASSWORD)
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER)
++        if entries and entries[0]:
++            topo.standalone.log.fatal("Incorrectly got an entry returned from search 1")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER)
++        if entries and entries[0]:
++            topo.standalone.log.fatal("Incorrectly got an entry returned from search 2")
++            assert False
++
++        if run > 0:
++            # Second pass
++            topo.standalone.restart()
++
++        # Bind as different user who has rights
++        topo.standalone.simple_bind_s(BIND_DN2, PASSWORD)
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2)
++        if entries is None or len(entries) == 0:
++            topo.standalone.log.fatal("Failed to get entry as good user")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2)
++        if entries is None or len(entries) == 0:
++            topo.standalone.log.fatal("Failed to get entry as good user (2)")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER)
++        if entries and entries[0]:
++            topo.standalone.log.fatal("Incorrectly got an entry returned from search 1")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER)
++        if entries and entries[0]:
++            topo.standalone.log.fatal("Incorrectly got an entry returned from search 2")
++            assert False
++
++        # back to user 1
++        topo.standalone.simple_bind_s(BIND_DN, PASSWORD)
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2)
++        if entries is None or len(entries) == 0:
++            topo.standalone.log.fatal("Failed to get entry as user1")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2)
++        if entries is None or len(entries) == 0:
++            topo.standalone.log.fatal("Failed to get entry as user1 (2)")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER)
++        if entries and entries[0]:
++            topo.standalone.log.fatal("Incorrectly got an entry returned from search 1")
++            assert False
++
++        entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER)
++        if entries and entries[0]:
++            topo.standalone.log.fatal("Incorrectly got an entry returned from search 2")
++            assert False
++
++    topo.standalone.log.info("Test PASSED")
++
++
++if __name__ == '__main__':
++    # Run isolated
++    # -s for DEBUG mode
++    CURRENT_FILE = os.path.realpath(__file__)
++    pytest.main(["-s", CURRENT_FILE])
++
+diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
+index bc154c78f..6d105f4fa 100644
+--- a/ldap/servers/plugins/acl/acl.c
++++ b/ldap/servers/plugins/acl/acl.c
+@@ -1088,9 +1088,23 @@ acl_read_access_allowed_on_entry(
+                     ** a DENY rule, then we don't have access to
+                     ** the entry ( nice trick to get in )
+                     */
+-                    if (aclpb->aclpb_state &
+-                        ACLPB_EXECUTING_DENY_HANDLES)
++                    if (aclpb->aclpb_state & ACLPB_EXECUTING_DENY_HANDLES) {
++                        aclEvalContext *c_ContextEval = &aclpb->aclpb_curr_entryEval_context;
++                        AclAttrEval *c_attrEval = NULL;
++                        /*
++                         * The entire entry is blocked, but previously evaluated allow aci's might
++                         * show some of the attributes as readable in the acl cache, so reset all
++                         * the cached attributes' status to FAIL.
++                         */
++                        for (size_t j = 0; j < c_ContextEval->acle_numof_attrs; j++) {
++                            c_attrEval = &c_ContextEval->acle_attrEval[j];
++                            c_attrEval->attrEval_r_status &= ~ACL_ATTREVAL_SUCCESS;
++                            c_attrEval->attrEval_r_status |= ACL_ATTREVAL_FAIL;
++                            c_attrEval->attrEval_s_status &= ~ACL_ATTREVAL_SUCCESS;
++                            c_attrEval->attrEval_s_status |= ACL_ATTREVAL_FAIL;
++                        }
+                         return LDAP_INSUFFICIENT_ACCESS;
++                    }
+ 
+                     /* The other case is I don't have an
+                     ** explicit allow rule -- which is fine.
+@@ -2908,6 +2922,12 @@ acl__TestRights(Acl_PBlock *aclpb, int access, const char **right, const char **
+         result_reason->deciding_aci = NULL;
+         result_reason->reason = ACL_REASON_NO_MATCHED_RESOURCE_ALLOWS;
+ 
++        /* If we have deny handles we should process them */
++        if (aclpb->aclpb_num_deny_handles > 0) {
++            aclpb->aclpb_state &= ~ACLPB_EXECUTING_ALLOW_HANDLES;
++            aclpb->aclpb_state |= ACLPB_EXECUTING_DENY_HANDLES;
++        }
++
+         TNF_PROBE_1_DEBUG(acl__TestRights_end, "ACL", "",
+                           tnf_string, no_allows, "");
+ 
+-- 
+2.17.0
+
diff --git a/SOURCES/0090-Ticket-49576-ds-replcheck-fix-certificate-directory-.patch b/SOURCES/0090-Ticket-49576-ds-replcheck-fix-certificate-directory-.patch
new file mode 100644
index 0000000..fa70099
--- /dev/null
+++ b/SOURCES/0090-Ticket-49576-ds-replcheck-fix-certificate-directory-.patch
@@ -0,0 +1,32 @@
+From d385d452001c91d01893b5ddc9e47f8200223ce9 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Mon, 11 Jun 2018 11:52:57 -0400
+Subject: [PATCH] Ticket 49576 - ds-replcheck: fix certificate directory
+ verification
+
+Description:  The tool would crash if you attempted to use a certificate
+              directory for contacting replicas.
+
+https://pagure.io/389-ds-base/issue/49576
+
+Reviewed by: spichugi(Thanks!)
+---
+ ldap/admin/src/scripts/ds-replcheck | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
+index 661c9e0ce..62f911034 100755
+--- a/ldap/admin/src/scripts/ds-replcheck
++++ b/ldap/admin/src/scripts/ds-replcheck
+@@ -1225,7 +1225,7 @@ def main():
+     # Validate certdir
+     opts['certdir'] = None
+     if args.certdir:
+-        if os.path.exists() and os.path.isdir(certdir):
++        if os.path.exists(args.certdir) and os.path.isdir(args.certdir):
+             opts['certdir'] = args.certdir
+         else:
+             print("certificate directory ({}) does not exist or is not a directory".format(args.certdir))
+-- 
+2.17.0
+
diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec
index 34031e9..56f8b3c 100644
--- a/SPECS/389-ds-base.spec
+++ b/SPECS/389-ds-base.spec
@@ -39,7 +39,7 @@
 Summary:          389 Directory Server (%{variant})
 Name:             389-ds-base
 Version:          1.3.7.5
-Release:          %{?relprefix}21%{?prerel}%{?dist}
+Release:          %{?relprefix}24%{?prerel}%{?dist}
 License:          GPLv3+
 URL:              https://www.port389.org/
 Group:            System Environment/Daemons
@@ -224,6 +224,19 @@ Patch74:          0074-Ticket-49566-ds-replcheck-needs-to-work-with-hidden-.patc
 Patch75:          0075-Ticket-49460-replica_write_ruv-log-a-failure-even-wh.patch  
 Patch76:          0076-Ticket-49631-same-csn-generated-twice.patch
 Patch77:          0077-CVE-2018-1089-Crash-from-long-search-filter.patch
+Patch78:          0078-Ticket-49649.patch
+Patch79:          0079-Ticket-49665-Upgrade-script-doesn-t-enable-PBKDF2-pa.patch
+Patch80:          0080-Ticket-49665-Upgrade-script-doesn-t-enable-CRYPT-pas.patch
+Patch81:          0081-Ticket-49671-Readonly-replicas-should-not-write-inte.patch
+Patch82:          0082-Ticket-49696-replicated-operations-should-be-seriali.patch
+Patch83:          0083-Ticket-48184-clean-up-and-delete-connections-at-shut.patch
+Patch84:          0084-Ticket-49576-Update-ds-replcheck-for-new-conflict-en.patch
+Patch85:          0085-Ticket-49576-Add-support-of-deletedattribute-in-ds-r.patch
+Patch86:          0086-Ticket-49726-DS-only-accepts-RSA-and-Fortezza-cipher.patch
+Patch87:          0087-Ticket-48184-clean-up-and-delete-connections-at-shut.patch
+Patch88:          0088-Ticket-49736-Hardening-of-active-connection-list.patch
+Patch89:          0089-Ticket-49652-DENY-aci-s-are-not-handled-properly.patch
+Patch90:          0090-Ticket-49576-ds-replcheck-fix-certificate-directory-.patch
 
 %description
 389 Directory Server is an LDAPv3 compliant server.  The base package includes
@@ -246,7 +259,6 @@ BuildRequires:    libtevent-devel
 BuildRequires:    systemd-devel
 %if %{use_asan}
 Requires:    libasan
-Requires:    llvm
 %endif
 
 
@@ -276,7 +288,6 @@ Development Libraries and headers for the 389 Directory Server base package.
 Summary:          SNMP Agent for 389 Directory Server
 Group:            System Environment/Daemons
 Requires:         %{name} = %{version}-%{release}
-# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
 Obsoletes:        %{name} <= 1.3.6.0
 
 %description      snmp
@@ -580,6 +591,24 @@ fi
 %{_sysconfdir}/%{pkgname}/dirsrvtests
 
 %changelog
+* Wed Jun 13 2018 Mark Reynolds <mreynolds@redhat.com> - 1.3.7.5-24
+- Bump version to 1.3.7.5-24
+- Resolves: Bug 1580257 - Fix certificate directory verification
+
+* Fri Jun 1 2018 Mark Reynolds <mreynolds@redhat.com> - 1.3.7.5-23
+- Bump version to 1.3.7.5-23
+- Resolves: Bug 1581588 - ACI deny rules do not work correctly
+- Resolves: Bug 1582747 - DS only accepts RSA and Fortezza cipher families
+
+* Mon May 21 2018 Mark Reynolds <mreynolds@redhat.com> - 1.3.7.5-22
+- Bump version to 1.3.7.5-22
+- Resolves: Bug 1563079 - adjustment of csn_generator can fail so next generated csn can be equal to the most recent one received
+- Resolves: Bug 1579702 - Replication stops working when MemberOf plugin is enabled on hub and consumer
+- Resolves: Bug 1579698 - replicated operations should be serialized
+- Resolves: Bug 1579700 - Upgrade script doesn't enable PBKDF password storage plug-in
+- Resolves: Bug 1580257 - ds-replcheck LDIF comparision fails when checking for conflicts
+- Resolves: Bug 1580523 - ns-slapd segfaults with ERR - connection_release_nolock_ext - conn=0 fd=0 Attempt to release connection that is not acquired
+
 * Thu Apr 5 2018 Mark Reynolds <mreynolds@redhat.com> - 1.3.7.5-21
 - Bump version to 1.3.7.5-21
 - Resolves: Bug 1559818 - EMBARGOED CVE-2018-1089 389-ds-base: ns-slapd crash via large filter value in ldapsearch