diff --git a/SOURCES/0077-Ticket-48370-The-eq-index-does-not-get-updated-prope.patch b/SOURCES/0077-Ticket-48370-The-eq-index-does-not-get-updated-prope.patch
new file mode 100644
index 0000000..d4c29f9
--- /dev/null
+++ b/SOURCES/0077-Ticket-48370-The-eq-index-does-not-get-updated-prope.patch
@@ -0,0 +1,929 @@
+From 4a52c95b2f7815c15efd84daf57ced08e7855cc2 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Mon, 7 Dec 2015 16:45:06 -0500
+Subject: [PATCH 77/78] Ticket 48370 - The 'eq' index does not get updated
+ properly                when deleting and re-adding attributes in            
+    the same modify operation
+
+Bug Description:  If you delete several values of the same attribute, and
+                  add at least one of them back in the same operation, the
+                  equality index does not get updated.
+
+Fix Description:  Modify the logic of the index code to update the index if
+                  at least one of the values in the entry changes.
+
+                  Also did pep8 cleanup of create_test.py
+
+https://fedorahosted.org/389/ticket/48370
+
+Reviewed by: wibrown(Thanks!)
+
+(cherry picked from commit 63b80b5c31ebda51445c662903a28e2a79ebe60a)
+(cherry picked from commit 4a53592ec89d288f182c509dc7fcc104d8cbc4a8)
+---
+ dirsrvtests/create_test.py              | 393 +++++++++++++++++++-------------
+ dirsrvtests/tickets/ticket48370_test.py | 236 +++++++++++++++++++
+ ldap/servers/slapd/back-ldbm/index.c    |  29 +--
+ 3 files changed, 480 insertions(+), 178 deletions(-)
+ create mode 100644 dirsrvtests/tickets/ticket48370_test.py
+
+diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py
+index 941e922..5293991 100755
+--- a/dirsrvtests/create_test.py
++++ b/dirsrvtests/create_test.py
+@@ -22,14 +22,16 @@ import optparse
+ 
+ 
+ def displayUsage():
+-    print ('\nUsage:\ncreate_ticket.py -t|--ticket <ticket number> -s|--suite <suite name> ' +
+-           '[ i|--instances <number of standalone instances> [ -m|--masters <number of masters> ' +
+-           '-h|--hubs <number of hubs> -c|--consumers <number of consumers> ] ' +
+-           '-o|--outputfile ]\n')
+-    print ('If only "-t" is provided then a single standalone instance is created.  ' +
+-           'Or you can create a test suite script using "-s|--suite" instead of using "-t|--ticket".' +
+-           'The "-i" option can add mulitple standalone instances(maximum 10).  ' +
+-           'However, you can not mix "-i" with the replication options(-m, -h , -c).  ' +
++    print ('\nUsage:\ncreate_ticket.py -t|--ticket <ticket number> ' +
++           '-s|--suite <suite name> ' +
++           '[ i|--instances <number of standalone instances> ' +
++           '[ -m|--masters <number of masters> -h|--hubs <number of hubs> ' +
++           '-c|--consumers <number of consumers> ] -o|--outputfile ]\n')
++    print ('If only "-t" is provided then a single standalone instance is ' +
++           'created.  Or you can create a test suite script using ' +
++           '"-s|--suite" instead of using "-t|--ticket".  The "-i" option ' +
++           'can add mulitple standalone instances(maximum 10).  However, you' +
++           ' can not mix "-i" with the replication options(-m, -h , -c).  ' +
+            'There is a maximum of 10 masters, 10 hubs, and 10 consumers.')
+     exit(1)
+ 
+@@ -59,34 +61,47 @@ if len(sys.argv) > 0:
+         displayUsage()
+ 
+     if args.ticket and args.suite:
+-        print 'You must choose either "-t|--ticket" or "-s|--suite", but not both.'
++        print('You must choose either "-t|--ticket" or "-s|--suite", ' +
++              'but not both.')
+         displayUsage()
+ 
+     if int(args.masters) == 0:
+         if int(args.hubs) > 0 or int(args.consumers) > 0:
+-            print 'You must use "-m|--masters" if you want to have hubs and/or consumers'
++            print('You must use "-m|--masters" if you want to have hubs ' +
++                  'and/or consumers')
+             displayUsage()
+ 
+-    if not args.masters.isdigit() or int(args.masters) > 10 or int(args.masters) < 0:
+-        print 'Invalid value for "--masters", it must be a number and it can not be greater than 10'
++    if not args.masters.isdigit() or \
++           int(args.masters) > 10 or \
++           int(args.masters) < 0:
++        print('Invalid value for "--masters", it must be a number and it can' +
++              ' not be greater than 10')
+         displayUsage()
+ 
+     if not args.hubs.isdigit() or int(args.hubs) > 10 or int(args.hubs) < 0:
+-        print 'Invalid value for "--hubs", it must be a number and it can not be greater than 10'
++        print('Invalid value for "--hubs", it must be a number and it can ' +
++              'not be greater than 10')
+         displayUsage()
+ 
+-    if not args.consumers.isdigit() or int(args.consumers) > 10 or int(args.consumers) < 0:
+-        print 'Invalid value for "--consumers", it must be a number and it can not be greater than 10'
++    if not args.consumers.isdigit() or \
++           int(args.consumers) > 10 or \
++           int(args.consumers) < 0:
++        print('Invalid value for "--consumers", it must be a number and it ' +
++              'can not be greater than 10')
+         displayUsage()
+ 
+     if args.inst:
+-        if not args.inst.isdigit() or int(args.inst) > 10 or int(args.inst) < 1:
+-            print ('Invalid value for "--instances", it must be a number greater than 0 ' +
+-                   'and not greater than 10')
++        if not args.inst.isdigit() or \
++               int(args.inst) > 10 or \
++               int(args.inst) < 1:
++            print('Invalid value for "--instances", it must be a number ' +
++                  'greater than 0 and not greater than 10')
+             displayUsage()
+         if int(args.inst) > 0:
+-            if int(args.masters) > 0 or int(args.hubs) > 0 or int(args.consumers) > 0:
+-                print 'You can not mix "--instances" with replication.'
++            if int(args.masters) > 0 or \
++               int(args.hubs) > 0 or \
++               int(args.consumers) > 0:
++                print('You can not mix "--instances" with replication.')
+                 displayUsage()
+ 
+     # Extract usable values
+@@ -120,9 +135,11 @@ if len(sys.argv) > 0:
+     #
+     # Write the imports
+     #
+-    TEST.write('import os\nimport sys\nimport time\nimport ldap\nimport logging\nimport pytest\n')
+-    TEST.write('from lib389 import DirSrv, Entry, tools, tasks\nfrom lib389.tools import DirSrvTools\n' +
+-               'from lib389._constants import *\nfrom lib389.properties import *\n' +
++    TEST.write('import os\nimport sys\nimport time\nimport ldap\n' +
++               'import logging\nimport pytest\n')
++    TEST.write('from lib389 import DirSrv, Entry, tools, tasks\nfrom ' +
++               'lib389.tools import DirSrvTools\nfrom lib389._constants ' +
++               'import *\nfrom lib389.properties import *\n' +
+                'from lib389.tasks import *\nfrom lib389.utils import *\n\n')
+ 
+     #
+@@ -154,13 +171,16 @@ if len(sys.argv) > 0:
+ 
+         for idx in range(masters):
+             TEST.write('        master' + str(idx + 1) + '.open()\n')
+-            TEST.write('        self.master' + str(idx + 1) + ' = master' + str(idx + 1) + '\n')
++            TEST.write('        self.master' + str(idx + 1) + ' = master' +
++                       str(idx + 1) + '\n')
+         for idx in range(hubs):
+             TEST.write('        hub' + str(idx + 1) + '.open()\n')
+-            TEST.write('        self.hub' + str(idx + 1) + ' = hub' + str(idx + 1) + '\n')
++            TEST.write('        self.hub' + str(idx + 1) + ' = hub' +
++                       str(idx + 1) + '\n')
+         for idx in range(consumers):
+             TEST.write('        consumer' + str(idx + 1) + '.open()\n')
+-            TEST.write('        self.consumer' + str(idx + 1) + ' = consumer' + str(idx + 1) + '\n')
++            TEST.write('        self.consumer' + str(idx + 1) + ' = consumer' +
++                       str(idx + 1) + '\n')
+         TEST.write('\n\n')
+     else:
+         #
+@@ -184,7 +204,8 @@ if len(sys.argv) > 0:
+             else:
+                 idx = str(idx)
+             TEST.write('        standalone' + idx + '.open()\n')
+-            TEST.write('        self.standalone' + idx + ' = standalone' + idx + '\n')
++            TEST.write('        self.standalone' + idx + ' = standalone' +
++                       idx + '\n')
+         TEST.write('\n\n')
+ 
+     #
+@@ -194,7 +215,8 @@ if len(sys.argv) > 0:
+     TEST.write('def topology(request):\n')
+     TEST.write('    global installation1_prefix\n')
+     TEST.write('    if installation1_prefix:\n')
+-    TEST.write('        args_instance[SER_DEPLOYED_DIR] = installation1_prefix\n\n')
++    TEST.write('        args_instance[SER_DEPLOYED_DIR] = ' +
++               'installation1_prefix\n\n')
+ 
+     if repl_deployment:
+         #
+@@ -204,20 +226,25 @@ if len(sys.argv) > 0:
+             idx = str(idx + 1)
+             TEST.write('    # Creating master ' + idx + '...\n')
+             TEST.write('    master' + idx + ' = DirSrv(verbose=False)\n')
+-            TEST.write('    args_instance[SER_HOST] = HOST_MASTER_' + idx + '\n')
+-            TEST.write('    args_instance[SER_PORT] = PORT_MASTER_' + idx + '\n')
+-            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_' + idx + '\n')
+-            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
++            TEST.write('    args_instance[SER_HOST] = HOST_MASTER_' + idx +
++                       '\n')
++            TEST.write('    args_instance[SER_PORT] = PORT_MASTER_' + idx +
++                       '\n')
++            TEST.write('    args_instance[SER_SERVERID_PROP] = ' +
++                       'SERVERID_MASTER_' + idx + '\n')
++            TEST.write('    args_instance[SER_CREATION_SUFFIX] = ' +
++                       'DEFAULT_SUFFIX\n')
+             TEST.write('    args_master = args_instance.copy()\n')
+             TEST.write('    master' + idx + '.allocate(args_master)\n')
+-            TEST.write('    instance_master' + idx + ' = master' + idx + '.exists()\n')
++            TEST.write('    instance_master' + idx + ' = master' + idx +
++                       '.exists()\n')
+             TEST.write('    if instance_master' + idx + ':\n')
+             TEST.write('        master' + idx + '.delete()\n')
+             TEST.write('    master' + idx + '.create()\n')
+             TEST.write('    master' + idx + '.open()\n')
+-            TEST.write('    master' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
+-                                            'role=REPLICAROLE_MASTER, ' +
+-                                            'replicaId=REPLICAID_MASTER_' + idx + ')\n\n')
++            TEST.write('    master' + idx + '.replica.enableReplication' +
++                       '(suffix=SUFFIX, role=REPLICAROLE_MASTER, ' +
++                       'replicaId=REPLICAID_MASTER_' + idx + ')\n\n')
+ 
+         for idx in range(hubs):
+             idx = str(idx + 1)
+@@ -225,37 +252,45 @@ if len(sys.argv) > 0:
+             TEST.write('    hub' + idx + ' = DirSrv(verbose=False)\n')
+             TEST.write('    args_instance[SER_HOST] = HOST_HUB_' + idx + '\n')
+             TEST.write('    args_instance[SER_PORT] = PORT_HUB_' + idx + '\n')
+-            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_HUB_' + idx + '\n')
+-            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
++            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_HUB_' +
++                       idx + '\n')
++            TEST.write('    args_instance[SER_CREATION_SUFFIX] = ' +
++                       'DEFAULT_SUFFIX\n')
+             TEST.write('    args_hub = args_instance.copy()\n')
+             TEST.write('    hub' + idx + '.allocate(args_hub)\n')
+-            TEST.write('    instance_hub' + idx + ' = hub' + idx + '.exists()\n')
++            TEST.write('    instance_hub' + idx + ' = hub' + idx +
++                       '.exists()\n')
+             TEST.write('    if instance_hub' + idx + ':\n')
+             TEST.write('        hub' + idx + '.delete()\n')
+             TEST.write('    hub' + idx + '.create()\n')
+             TEST.write('    hub' + idx + '.open()\n')
+-            TEST.write('    hub' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
+-                                            'role=REPLICAROLE_HUB, ' +
+-                                            'replicaId=REPLICAID_HUB_' + idx + ')\n\n')
++            TEST.write('    hub' + idx + '.replica.enableReplication' +
++                       '(suffix=SUFFIX, role=REPLICAROLE_HUB, ' +
++                       'replicaId=REPLICAID_HUB_' + idx + ')\n\n')
+ 
+         for idx in range(consumers):
+             idx = str(idx + 1)
+             TEST.write('    # Creating consumer ' + idx + '...\n')
+             TEST.write('    consumer' + idx + ' = DirSrv(verbose=False)\n')
+-            TEST.write('    args_instance[SER_HOST] = HOST_CONSUMER_' + idx + '\n')
+-            TEST.write('    args_instance[SER_PORT] = PORT_CONSUMER_' + idx + '\n')
+-            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_' + idx + '\n')
+-            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
++            TEST.write('    args_instance[SER_HOST] = HOST_CONSUMER_' + idx +
++                       '\n')
++            TEST.write('    args_instance[SER_PORT] = PORT_CONSUMER_' + idx +
++                       '\n')
++            TEST.write('    args_instance[SER_SERVERID_PROP] = ' +
++                       'SERVERID_CONSUMER_' + idx + '\n')
++            TEST.write('    args_instance[SER_CREATION_SUFFIX] = ' +
++                       'DEFAULT_SUFFIX\n')
+             TEST.write('    args_consumer = args_instance.copy()\n')
+             TEST.write('    consumer' + idx + '.allocate(args_consumer)\n')
+-            TEST.write('    instance_consumer' + idx + ' = consumer' + idx + '.exists()\n')
++            TEST.write('    instance_consumer' + idx + ' = consumer' + idx +
++                       '.exists()\n')
+             TEST.write('    if instance_consumer' + idx + ':\n')
+             TEST.write('        consumer' + idx + '.delete()\n')
+             TEST.write('    consumer' + idx + '.create()\n')
+             TEST.write('    consumer' + idx + '.open()\n')
+-            TEST.write('    consumer' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
+-                                            'role=REPLICAROLE_CONSUMER, ' +
+-                                            'replicaId=CONSUMER_REPLICAID)\n\n')
++            TEST.write('    consumer' + idx + '.replica.enableReplication' +
++                       '(suffix=SUFFIX, role=REPLICAROLE_CONSUMER, ' +
++                       'replicaId=CONSUMER_REPLICAID)\n\n')
+ 
+         #
+         # Create the master agreements
+@@ -274,39 +309,61 @@ if len(sys.argv) > 0:
+                 if master_idx == idx:
+                     # skip ourselves
+                     continue
+-                TEST.write('    # Creating agreement from master ' + str(master_idx) + ' to master ' + str(idx) + '\n')
+-                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
+-                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
+-                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
+-                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
+-                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
+-                TEST.write('    m' + str(master_idx) + '_m' + str(idx) + '_agmt = master' + str(master_idx) +
++                TEST.write('    # Creating agreement from master ' +
++                           str(master_idx) + ' to master ' + str(idx) + '\n')
++                TEST.write("    properties = {RA_NAME:      " +
++                           "r'meTo_$host:$port',\n")
++                TEST.write("                  RA_BINDDN:    " +
++                           "defaultProperties[REPLICATION_BIND_DN],\n")
++                TEST.write("                  RA_BINDPW:    " +
++                           "defaultProperties[REPLICATION_BIND_PW],\n")
++                TEST.write("                  RA_METHOD:    " +
++                           "defaultProperties[REPLICATION_BIND_METHOD],\n")
++                TEST.write("                  RA_TRANSPORT_PROT: " +
++                           "defaultProperties[REPLICATION_TRANSPORT]}\n")
++                TEST.write('    m' + str(master_idx) + '_m' + str(idx) +
++                           '_agmt = master' + str(master_idx) +
+                             '.agreement.create(suffix=SUFFIX, host=master' +
+-                            str(idx) + '.host, port=master' + str(idx) + '.port, properties=properties)\n')
+-                TEST.write('    if not m' + str(master_idx) + '_m' + str(idx) + '_agmt:\n')
+-                TEST.write('        log.fatal("Fail to create a master -> master replica agreement")\n')
++                            str(idx) + '.host, port=master' + str(idx) +
++                            '.port, properties=properties)\n')
++                TEST.write('    if not m' + str(master_idx) + '_m' + str(idx) +
++                           '_agmt:\n')
++                TEST.write('        log.fatal("Fail to create a master -> ' +
++                           'master replica agreement")\n')
+                 TEST.write('        sys.exit(1)\n')
+-                TEST.write('    log.debug("%s created" % m' + str(master_idx) + '_m' + str(idx) + '_agmt)\n\n')
++                TEST.write('    log.debug("%s created" % m' + str(master_idx) +
++                           '_m' + str(idx) + '_agmt)\n\n')
+                 agmt_count += 1
+ 
+             for idx in range(hubs):
+                 idx += 1
+                 #
+-                # Create agreements from each master to each hub (master -> hub)
++                # Create agmts from each master to each hub (master -> hub)
+                 #
+-                TEST.write('    # Creating agreement from master ' + str(master_idx) + ' to hub ' + str(idx) + '\n')
+-                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
+-                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
+-                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
+-                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
+-                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
+-                TEST.write('    m' + str(master_idx) + '_h' + str(idx) + '_agmt = master' + str(master_idx) +
+-                            '.agreement.create(suffix=SUFFIX, host=hub' +
+-                            str(idx) + '.host, port=hub' + str(idx) + '.port, properties=properties)\n')
+-                TEST.write('    if not m' + str(master_idx) + '_h' + str(idx) + '_agmt:\n')
+-                TEST.write('        log.fatal("Fail to create a master -> hub replica agreement")\n')
++                TEST.write('    # Creating agreement from master ' +
++                           str(master_idx) + ' to hub ' + str(idx) + '\n')
++                TEST.write("    properties = {RA_NAME:      " +
++                           "r'meTo_$host:$port',\n")
++                TEST.write("                  RA_BINDDN:    " +
++                           "defaultProperties[REPLICATION_BIND_DN],\n")
++                TEST.write("                  RA_BINDPW:    " +
++                           "defaultProperties[REPLICATION_BIND_PW],\n")
++                TEST.write("                  RA_METHOD:    " +
++                           "defaultProperties[REPLICATION_BIND_METHOD],\n")
++                TEST.write("                  RA_TRANSPORT_PROT: " +
++                           "defaultProperties[REPLICATION_TRANSPORT]}\n")
++                TEST.write('    m' + str(master_idx) + '_h' + str(idx) +
++                           '_agmt = master' + str(master_idx) +
++                           '.agreement.create(suffix=SUFFIX, host=hub' +
++                           str(idx) + '.host, port=hub' + str(idx) +
++                           '.port, properties=properties)\n')
++                TEST.write('    if not m' + str(master_idx) + '_h' + str(idx) +
++                           '_agmt:\n')
++                TEST.write('        log.fatal("Fail to create a master -> ' +
++                           'hub replica agreement")\n')
+                 TEST.write('        sys.exit(1)\n')
+-                TEST.write('    log.debug("%s created" % m' + str(master_idx) + '_h' + str(idx) + '_agmt)\n\n')
++                TEST.write('    log.debug("%s created" % m' + str(master_idx) +
++                           '_h' + str(idx) + '_agmt)\n\n')
+                 agmt_count += 1
+ 
+         #
+@@ -322,24 +379,35 @@ if len(sys.argv) > 0:
+                 #
+                 # Create agreements from each hub to each consumer
+                 #
+-                TEST.write('    # Creating agreement from hub ' + str(hub_idx) + ' to consumer ' + str(idx) + '\n')
+-                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
+-                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
+-                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
+-                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
+-                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
+-                TEST.write('    h' + str(hub_idx) + '_c' + str(idx) + '_agmt = hub' +
+-                            str(hub_idx) + '.agreement.create(suffix=SUFFIX, host=consumer' +
+-                            str(idx) + '.host, port=consumer' + str(idx) + '.port, properties=properties)\n')
+-                TEST.write('    if not h' + str(hub_idx) + '_c' + str(idx) + '_agmt:\n')
+-                TEST.write('        log.fatal("Fail to create a hub -> consumer replica agreement")\n')
++                TEST.write('    # Creating agreement from hub ' + str(hub_idx)
++                           + ' to consumer ' + str(idx) + '\n')
++                TEST.write("    properties = {RA_NAME:      " +
++                           "r'meTo_$host:$port',\n")
++                TEST.write("                  RA_BINDDN:    " +
++                           "defaultProperties[REPLICATION_BIND_DN],\n")
++                TEST.write("                  RA_BINDPW:    " +
++                           "defaultProperties[REPLICATION_BIND_PW],\n")
++                TEST.write("                  RA_METHOD:    " +
++                           "defaultProperties[REPLICATION_BIND_METHOD],\n")
++                TEST.write("                  RA_TRANSPORT_PROT: " +
++                           "defaultProperties[REPLICATION_TRANSPORT]}\n")
++                TEST.write('    h' + str(hub_idx) + '_c' + str(idx) +
++                           '_agmt = hub' + str(hub_idx) +
++                           '.agreement.create(suffix=SUFFIX, host=consumer' +
++                           str(idx) + '.host, port=consumer' + str(idx) +
++                           '.port, properties=properties)\n')
++                TEST.write('    if not h' + str(hub_idx) + '_c' + str(idx) +
++                           '_agmt:\n')
++                TEST.write('        log.fatal("Fail to create a hub -> ' +
++                           'consumer replica agreement")\n')
+                 TEST.write('        sys.exit(1)\n')
+-                TEST.write('    log.debug("%s created" % h' + str(hub_idx) + '_c' + str(idx) + '_agmt)\n\n')
++                TEST.write('    log.debug("%s created" % h' + str(hub_idx) +
++                           '_c' + str(idx) + '_agmt)\n\n')
+                 agmt_count += 1
+ 
+         if hubs == 0:
+             #
+-            # No Hubs, see if there are any consumers to create agreements to...
++            # No Hubs, see if there are any consumers to create agreements to
+             #
+             for idx in range(masters):
+                 master_idx = idx + 1
+@@ -351,27 +419,40 @@ if len(sys.argv) > 0:
+                     #
+                     # Create agreements from each master to each consumer
+                     #
+-                    TEST.write('    # Creating agreement from master ' + str(master_idx) +
+-                               ' to consumer ' + str(idx) + '\n')
+-                    TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
+-                    TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
+-                    TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
+-                    TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
+-                    TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
+-                    TEST.write('    m' + str(master_idx) + '_c' + str(idx) + '_agmt = master' + str(master_idx) +
+-                                '.agreement.create(suffix=SUFFIX, host=consumer' +
+-                                str(idx) + '.host, port=consumer' + str(idx) +
+-                                '.port, properties=properties)\n')
+-                    TEST.write('    if not m' + str(master_idx) + '_c' + str(idx) + '_agmt:\n')
+-                    TEST.write('        log.fatal("Fail to create a hub -> consumer replica agreement")\n')
++                    TEST.write('    # Creating agreement from master ' +
++                               str(master_idx) + ' to consumer ' + str(idx) +
++                               '\n')
++                    TEST.write("    properties = {RA_NAME:      " +
++                               "r'meTo_$host:$port',\n")
++                    TEST.write("                  RA_BINDDN:    " +
++                               "defaultProperties[REPLICATION_BIND_DN],\n")
++                    TEST.write("                  RA_BINDPW:    " +
++                               "defaultProperties[REPLICATION_BIND_PW],\n")
++                    TEST.write("                  RA_METHOD:    " +
++                               "defaultProperties[REPLICATION_BIND_METHOD],\n")
++                    TEST.write("                  RA_TRANSPORT_PROT: " +
++                               "defaultProperties[REPLICATION_TRANSPORT]}\n")
++                    TEST.write('    m' + str(master_idx) + '_c' + str(idx) +
++                               '_agmt = master' + str(master_idx) +
++                               '.agreement.create(suffix=SUFFIX, ' +
++                               'host=consumer' + str(idx) +
++                               '.host, port=consumer' + str(idx) +
++                               '.port, properties=properties)\n')
++                    TEST.write('    if not m' + str(master_idx) + '_c' +
++                               str(idx) + '_agmt:\n')
++                    TEST.write('        log.fatal("Fail to create a hub -> ' +
++                               'consumer replica agreement")\n')
+                     TEST.write('        sys.exit(1)\n')
+-                    TEST.write('    log.debug("%s created" % m' + str(master_idx) + '_c' + str(idx) + '_agmt)\n\n')
++                    TEST.write('    log.debug("%s created" % m' +
++                               str(master_idx) + '_c' + str(idx) +
++                               '_agmt)\n\n')
+                     agmt_count += 1
+ 
+         #
+         # Add sleep that allows all the agreemnts to get situated
+         #
+-        TEST.write('    # Allow the replicas to get situated with the new agreements...\n')
++        TEST.write('    # Allow the replicas to get situated with the new ' +
++                   'agreements...\n')
+         TEST.write('    time.sleep(5)\n\n')
+ 
+         #
+@@ -388,7 +469,8 @@ if len(sys.argv) > 0:
+                 continue
+             TEST.write('    master1.agreement.init(SUFFIX, HOST_MASTER_' +
+                        str(idx) + ', PORT_MASTER_' + str(idx) + ')\n')
+-            TEST.write('    master1.waitForReplInit(m1_m' + str(idx) + '_agmt)\n')
++            TEST.write('    master1.waitForReplInit(m1_m' + str(idx) +
++                       '_agmt)\n')
+ 
+         # Hubs
+         consumers_inited = False
+@@ -396,23 +478,27 @@ if len(sys.argv) > 0:
+             idx += 1
+             TEST.write('    master1.agreement.init(SUFFIX, HOST_HUB_' +
+                    str(idx) + ', PORT_HUB_' + str(idx) + ')\n')
+-            TEST.write('    master1.waitForReplInit(m1_h' + str(idx) + '_agmt)\n')
++            TEST.write('    master1.waitForReplInit(m1_h' + str(idx) +
++                       '_agmt)\n')
+             for idx in range(consumers):
+                 if consumers_inited:
+                     continue
+                 idx += 1
+                 TEST.write('    hub1.agreement.init(SUFFIX, HOST_CONSUMER_' +
+                            str(idx) + ', PORT_CONSUMER_' + str(idx) + ')\n')
+-                TEST.write('    hub1.waitForReplInit(h1_c' + str(idx) + '_agmt)\n')
++                TEST.write('    hub1.waitForReplInit(h1_c' + str(idx) +
++                           '_agmt)\n')
+             consumers_inited = True
+ 
+         # Consumers (master -> consumer)
+         if hubs == 0:
+             for idx in range(consumers):
+                 idx += 1
+-                TEST.write('    master1.agreement.init(SUFFIX, HOST_CONSUMER_' +
+-                           str(idx) + ', PORT_CONSUMER_' + str(idx) + ')\n')
+-                TEST.write('    master1.waitForReplInit(m1_c' + str(idx) + '_agmt)\n')
++                TEST.write('    master1.agreement.init(SUFFIX, ' +
++                           'HOST_CONSUMER_' + str(idx) + ', PORT_CONSUMER_' +
++                           str(idx) + ')\n')
++                TEST.write('    master1.waitForReplInit(m1_c' + str(idx) +
++                           '_agmt)\n')
+ 
+         TEST.write('\n')
+ 
+@@ -420,7 +506,7 @@ if len(sys.argv) > 0:
+         # Write replicaton check
+         #
+         if agmt_count > 0:
+-            # Find the lowest replica type in the deployment(consumer -> master)
++            # Find the lowest replica type (consumer -> master)
+             if consumers > 0:
+                 replica = 'consumer1'
+             elif hubs > 0:
+@@ -428,7 +514,8 @@ if len(sys.argv) > 0:
+             else:
+                 replica = 'master2'
+             TEST.write('    # Check replication is working...\n')
+-            TEST.write('    if master1.testReplication(DEFAULT_SUFFIX, ' + replica + '):\n')
++            TEST.write('    if master1.testReplication(DEFAULT_SUFFIX, ' +
++                       replica + '):\n')
+             TEST.write("        log.info('Replication is working.')\n")
+             TEST.write('    else:\n')
+             TEST.write("        log.fatal('Replication is not working.')\n")
+@@ -465,15 +552,22 @@ if len(sys.argv) > 0:
+                 idx = str(idx)
+             TEST.write('    # Creating standalone instance ' + idx + '...\n')
+             TEST.write('    standalone' + idx + ' = DirSrv(verbose=False)\n')
+-            TEST.write('    args_instance[SER_HOST] = HOST_STANDALONE' + idx + '\n')
+-            TEST.write('    args_instance[SER_PORT] = PORT_STANDALONE' + idx + '\n')
+-            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE' + idx + '\n')
+-            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
+-            TEST.write('    args_standalone' + idx + ' = args_instance.copy()\n')
+-            TEST.write('    standalone' + idx + '.allocate(args_standalone' + idx + ')\n')
++            TEST.write('    args_instance[SER_HOST] = HOST_STANDALONE' +
++                       idx + '\n')
++            TEST.write('    args_instance[SER_PORT] = PORT_STANDALONE' +
++                       idx + '\n')
++            TEST.write('    args_instance[SER_SERVERID_PROP] = ' +
++                       'SERVERID_STANDALONE' + idx + '\n')
++            TEST.write('    args_instance[SER_CREATION_SUFFIX] = ' +
++                       'DEFAULT_SUFFIX\n')
++            TEST.write('    args_standalone' + idx + ' = args_instance.copy' +
++                       '()\n')
++            TEST.write('    standalone' + idx + '.allocate(args_standalone' +
++                       idx + ')\n')
+ 
+             # Get the status of the instance and restart it if it exists
+-            TEST.write('    instance_standalone' + idx + ' = standalone' + idx + '.exists()\n')
++            TEST.write('    instance_standalone' + idx + ' = standalone' +
++                       idx + '.exists()\n')
+ 
+             # Remove the instance
+             TEST.write('    if instance_standalone' + idx + ':\n')
+@@ -503,12 +597,20 @@ if len(sys.argv) > 0:
+         TEST.write('def test_ticket' + ticket + '(topology):\n')
+         TEST.write("    '''\n")
+         if repl_deployment:
+-            TEST.write('    Write your replication testcase here.\n\n')
+-            TEST.write('    To access each DirSrv instance use:  topology.master1, topology.master2,\n' +
+-                       '        ..., topology.hub1, ..., topology.consumer1, ...\n')
++            TEST.write('    """Write your replication testcase here.\n\n')
++            TEST.write('    To access each DirSrv instance use:  ' +
++                       'topology.master1, topology.master2,\n' +
++                       '        ..., topology.hub1, ..., topology.consumer1' +
++                       ',...\n\n')
++            TEST.write('    Also, if you need any testcase initialization,\n')
++            TEST.write('    please, write additional fixture for that' +
++                       '(include ' + 'finalizer).\n')
+         else:
+-            TEST.write('    Write your testcase here...\n')
+-        TEST.write("    '''\n\n")
++            TEST.write('    """Write your testcase here...\n\n')
++            TEST.write('    Also, if you need any testcase initialization,\n')
++            TEST.write('    please, write additional fixture for that' +
++                       '(include finalizer).\n')
++        TEST.write('    """\n\n')
+         TEST.write("    log.info('Test complete')\n")
+         TEST.write("\n\n")
+     else:
+@@ -520,43 +622,11 @@ if len(sys.argv) > 0:
+ 
+         # Write the first initial empty test function
+         TEST.write('def test_' + suite + '_#####(topology):\n')
+-        TEST.write("    '''\n")
+-        TEST.write('    Write a single test here...\n')
+-        TEST.write("    '''\n\n    return\n\n\n")
+-
+-    #
+-    # Write the final function here - delete each instance
+-    #
+-    if ticket:
+-        TEST.write('def test_ticket' + ticket + '_final(topology):\n')
+-    else:
+-        # suite
+-        TEST.write('def test_' + suite + '_final(topology):\n')
+-    if repl_deployment:
+-        for idx in range(masters):
+-            idx += 1
+-            TEST.write('    topology.master' + str(idx) + '.delete()\n')
+-        for idx in range(hubs):
+-            idx += 1
+-            TEST.write('    topology.hub' + str(idx) + '.delete()\n')
+-        for idx in range(consumers):
+-            idx += 1
+-            TEST.write('    topology.consumer' + str(idx) + '.delete()\n')
+-    else:
+-        for idx in range(instances):
+-            idx += 1
+-            if idx == 1:
+-                idx = ''
+-            else:
+-                idx = str(idx)
+-            TEST.write('    topology.standalone' + idx + '.delete()\n')
+-
+-    if ticket:
+-        TEST.write("    log.info('Testcase PASSED')\n")
+-    else:
+-        # suite
+-        TEST.write("    log.info('" + suite + " test suite PASSED')\n")
+-    TEST.write('\n\n')
++        TEST.write('    """Write a single test here...\n\n')
++        TEST.write('    Also, if you need any test suite initialization,\n')
++        TEST.write('    please, write additional fixture for that(include ' +
++                   'finalizer).\n')
++        TEST.write('    """\n\n    return\n\n\n')
+ 
+     #
+     # Write the main function
+@@ -576,7 +646,10 @@ if len(sys.argv) > 0:
+     TEST.write('\n\n')
+ 
+     TEST.write("if __name__ == '__main__':\n")
+-    TEST.write('    run_isolated()\n\n')
++    TEST.write('    # Run isolated\n')
++    TEST.write('    # -s for DEBUG mode\n')
++    TEST.write('    CURRENT_FILE = os.path.realpath(__file__)\n')
++    TEST.write('    pytest.main("-s %s" % CURRENT_FILE)\n')
+ 
+     #
+     # Done, close things up
+diff --git a/dirsrvtests/tickets/ticket48370_test.py b/dirsrvtests/tickets/ticket48370_test.py
+new file mode 100644
+index 0000000..f5b1f47
+--- /dev/null
++++ b/dirsrvtests/tickets/ticket48370_test.py
+@@ -0,0 +1,236 @@
++import os
++import ldap
++import logging
++import pytest
++from lib389 import DirSrv, Entry
++from lib389._constants import *
++from lib389.properties import *
++from lib389.tasks import *
++from lib389.utils import *
++
++logging.getLogger(__name__).setLevel(logging.DEBUG)
++log = logging.getLogger(__name__)
++
++installation1_prefix = None
++
++
++class TopologyStandalone(object):
++    def __init__(self, standalone):
++        standalone.open()
++        self.standalone = standalone
++
++
++@pytest.fixture(scope="module")
++def topology(request):
++    global installation1_prefix
++    if installation1_prefix:
++        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
++
++    # Creating standalone instance ...
++    standalone = DirSrv(verbose=False)
++    args_instance[SER_HOST] = HOST_STANDALONE
++    args_instance[SER_PORT] = PORT_STANDALONE
++    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
++    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
++    args_standalone = args_instance.copy()
++    standalone.allocate(args_standalone)
++    instance_standalone = standalone.exists()
++    if instance_standalone:
++        standalone.delete()
++    standalone.create()
++    standalone.open()
++
++    # Delete each instance in the end
++    def fin():
++        standalone.delete()
++    request.addfinalizer(fin)
++
++    # Clear out the tmp dir
++    standalone.clearTmpDir(__file__)
++
++    return TopologyStandalone(standalone)
++
++
++def test_ticket48370(topology):
++    """
++    Deleting attirbute values and readding a value does not properly update
++    the pres index.  The values are not actually deleted from the index
++    """
++
++    DN = 'uid=user0099,' + DEFAULT_SUFFIX
++
++    #
++    # Add an entry
++    #
++    topology.standalone.add_s(Entry((DN, {
++                              'objectclass': ['top', 'person',
++                                              'organizationalPerson',
++                                              'inetorgperson',
++                                              'posixAccount'],
++                              'givenname': 'test',
++                              'sn': 'user',
++                              'loginshell': '/bin/bash',
++                              'uidNumber': '10099',
++                              'gidNumber': '10099',
++                              'gecos': 'Test User',
++                              'mail': ['user0099@dev.null',
++                                       'alias@dev.null',
++                                       'user0099@redhat.com'],
++                              'cn': 'Test User',
++                              'homeDirectory': '/home/user0099',
++                              'uid': 'admin2',
++                              'userpassword': 'password'})))
++
++    #
++    # Perform modify (delete & add mail attributes)
++    #
++    try:
++        topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
++                                           'mail',
++                                           'user0099@dev.null'),
++                                          (ldap.MOD_DELETE,
++                                           'mail',
++                                           'alias@dev.null'),
++                                          (ldap.MOD_ADD,
++                                           'mail', 'user0099@dev.null')])
++    except ldap.LDAPError as e:
++        log.fatal('Failedto modify user: ' + str(e))
++        assert False
++
++    #
++    # Search using deleted attribute value- no entries should be returned
++    #
++    try:
++        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
++                                             ldap.SCOPE_SUBTREE,
++                                             'mail=alias@dev.null')
++        if entry:
++            log.fatal('Entry incorrectly returned')
++            assert False
++    except ldap.LDAPError as e:
++        log.fatal('Failed to search for user: ' + str(e))
++        assert False
++
++    #
++    # Search using existing attribute value - the entry should be returned
++    #
++    try:
++        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
++                                             ldap.SCOPE_SUBTREE,
++                                             'mail=user0099@dev.null')
++        if entry is None:
++            log.fatal('Entry not found, but it should have been')
++            assert False
++    except ldap.LDAPError as e:
++        log.fatal('Failed to search for user: ' + str(e))
++        assert False
++
++    #
++    # Delete the last values
++    #
++    try:
++        topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
++                                           'mail',
++                                           'user0099@dev.null'),
++                                          (ldap.MOD_DELETE,
++                                           'mail',
++                                           'user0099@redhat.com')
++                                          ])
++    except ldap.LDAPError as e:
++        log.fatal('Failed to modify user: ' + str(e))
++        assert False
++
++    #
++    # Search using deleted attribute value - no entries should be returned
++    #
++    try:
++        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
++                                             ldap.SCOPE_SUBTREE,
++                                             'mail=user0099@redhat.com')
++        if entry:
++            log.fatal('Entry incorrectly returned')
++            assert False
++    except ldap.LDAPError as e:
++        log.fatal('Failed to search for user: ' + str(e))
++        assert False
++
++    #
++    # Make sure presence index is correctly updated - no entries should be
++    # returned
++    #
++    try:
++        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
++                                             ldap.SCOPE_SUBTREE,
++                                             'mail=*')
++        if entry:
++            log.fatal('Entry incorrectly returned')
++            assert False
++    except ldap.LDAPError as e:
++        log.fatal('Failed to search for user: ' + str(e))
++        assert False
++
++    #
++    # Now add the attributes back, and lets run a different set of tests with
++    # a different number of attributes
++    #
++    try:
++        topology.standalone.modify_s(DN, [(ldap.MOD_ADD,
++                                           'mail',
++                                           ['user0099@dev.null',
++                                            'alias@dev.null'])])
++    except ldap.LDAPError as e:
++        log.fatal('Failedto modify user: ' + str(e))
++        assert False
++
++    #
++    # Remove and readd some attibutes
++    #
++    try:
++        topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
++                                           'mail',
++                                           'alias@dev.null'),
++                                          (ldap.MOD_DELETE,
++                                           'mail',
++                                           'user0099@dev.null'),
++                                          (ldap.MOD_ADD,
++                                           'mail', 'user0099@dev.null')])
++    except ldap.LDAPError as e:
++        log.fatal('Failedto modify user: ' + str(e))
++        assert False
++
++    #
++    # Search using deleted attribute value - no entries should be returned
++    #
++    try:
++        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
++                                             ldap.SCOPE_SUBTREE,
++                                             'mail=alias@dev.null')
++        if entry:
++            log.fatal('Entry incorrectly returned')
++            assert False
++    except ldap.LDAPError as e:
++        log.fatal('Failed to search for user: ' + str(e))
++        assert False
++
++    #
++    # Search using existing attribute value - the entry should be returned
++    #
++    try:
++        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
++                                             ldap.SCOPE_SUBTREE,
++                                             'mail=user0099@dev.null')
++        if entry is None:
++            log.fatal('Entry not found, but it should have been')
++            assert False
++    except ldap.LDAPError as e:
++        log.fatal('Failed to search for user: ' + str(e))
++        assert False
++
++    log.info('Test PASSED')
++
++
++if __name__ == '__main__':
++    # Run isolated
++    # -s for DEBUG mode
++    CURRENT_FILE = os.path.realpath(__file__)
++    pytest.main("-s %s" % CURRENT_FILE)
+diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
+index 2adf2f8..00e78a7 100644
+--- a/ldap/servers/slapd/back-ldbm/index.c
++++ b/ldap/servers/slapd/back-ldbm/index.c
+@@ -727,31 +727,24 @@ index_add_mods(
+                     flags = BE_INDEX_DEL|BE_INDEX_PRESENCE|BE_INDEX_EQUALITY;
+                 } else {
+                     flags = BE_INDEX_DEL;
+-
+-                    /* If the same value doesn't exist in a subtype, set
+-                     * BE_INDEX_EQUALITY flag so the equality index is
+-                     * removed.
+-                     */
+                     curr_attr = NULL;
+                     slapi_entry_attr_find(olde->ep_entry,
+-                                          mods[i]->mod_type, &curr_attr);
++                                          mods[i]->mod_type,
++                                          &curr_attr);
+                     if (curr_attr) {
+-                        int found = 0;
+                         for (j = 0; mods_valueArray[j] != NULL; j++ ) {
+-                    	    if ( slapi_valueset_find(curr_attr, all_vals, mods_valueArray[j])) {
+-                                /* The same value found in evals. 
+-                                 * We don't touch the equality index. */
+-                                found = 1;
++                            if ( !slapi_valueset_find(curr_attr, all_vals, mods_valueArray[j]) ) {
++                                /*
++                                 * If the mod del value is not found in all_vals
++                                 * we need to update the equality index as the
++                                 * final value(s) have changed
++                                 */
++                                if (!(flags & BE_INDEX_EQUALITY)) {
++                                    flags |= BE_INDEX_EQUALITY;
++                                }
+                                 break;
+                             }
+                         }
+-                        /* 
+-                         * to-be-deleted curr_attr does not exist in the 
+-                         * new value set evals.  So, we can remove it.
+-                         */
+-                        if (!found && !(flags & BE_INDEX_EQUALITY)) {
+-                            flags |= BE_INDEX_EQUALITY;
+-                        }
+                     } 
+                 }
+ 
+-- 
+2.4.3
+
diff --git a/SOURCES/0078-Ticket-48375-SimplePagedResults-in-the-search-error-.patch b/SOURCES/0078-Ticket-48375-SimplePagedResults-in-the-search-error-.patch
new file mode 100644
index 0000000..11ceecf
--- /dev/null
+++ b/SOURCES/0078-Ticket-48375-SimplePagedResults-in-the-search-error-.patch
@@ -0,0 +1,58 @@
+From ba82865fe34c4b6f1a3df283b4848f29ee99ae05 Mon Sep 17 00:00:00 2001
+From: Noriko Hosoi <nhosoi@redhat.com>
+Date: Wed, 9 Dec 2015 12:05:24 -0800
+Subject: [PATCH 78/78] Ticket #48375 - SimplePagedResults -- in the search
+ error case, simple paged results slot was not released.
+
+Description: If a simple paged results search fails in the backend,
+the simple paged results slot was not released.  This patch adds it.
+
+https://fedorahosted.org/389/ticket/48375
+
+Reviewed by tbordaz@redhat.com (Thank you, Thierry!!)
+
+(cherry picked from commit 5a54717bfa40e3ef987bd85c5806125e49b2b278)
+(cherry picked from commit b91aad03b660aea85cb745554f27101c690f8402)
+---
+ ldap/servers/slapd/opshared.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
+index 586ca1f..5cafc3c 100644
+--- a/ldap/servers/slapd/opshared.c
++++ b/ldap/servers/slapd/opshared.c
+@@ -814,15 +814,26 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
+              * wait the end of the loop to send back this error 
+              */
+             flag_no_such_object = 1;
+-            break;
++        } else {
++            /* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
++             * have sent the result -
++             * Set a flag here so we don't return another result. */
++            sent_result = 1;
+         }
+-        /* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
+-         * have sent the result -
+-         * Set a flag here so we don't return another result. */
+-        sent_result = 1;
+         /* fall through */
+   
+       case -1:    /* an error occurred */            
++        /* PAGED RESULTS */
++        if (op_is_pagedresults(operation)) {
++            /* cleanup the slot */
++            PR_Lock(pb->pb_conn->c_mutex);
++            pagedresults_set_search_result(pb->pb_conn, operation, NULL, 1, pr_idx);
++            rc = pagedresults_set_current_be(pb->pb_conn, NULL, pr_idx, 1);
++            PR_Unlock(pb->pb_conn->c_mutex);
++        }
++        if (1 == flag_no_such_object) {
++            break;
++        }
+         slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
+         if (err == LDAP_NO_SUCH_OBJECT)
+         {
+-- 
+2.4.3
+
diff --git a/SOURCES/0079-Ticket-48283-many-attrlist_replace-errors-in-connect.patch b/SOURCES/0079-Ticket-48283-many-attrlist_replace-errors-in-connect.patch
new file mode 100644
index 0000000..9346196
--- /dev/null
+++ b/SOURCES/0079-Ticket-48283-many-attrlist_replace-errors-in-connect.patch
@@ -0,0 +1,38 @@
+From 818f6a27ff92bf7adb5f378f985e9c8f36193812 Mon Sep 17 00:00:00 2001
+From: Ludwig Krispenz <lkrispen@redhat.com>
+Date: Tue, 22 Sep 2015 17:51:35 +0200
+Subject: [PATCH] Ticket 48283 - many attrlist_replace errors in connection
+ with cleanallruv
+
+Bug Description:  attrlist_replace error messages are logged because the
+                  list of values contains duplicate attributes
+
+Fix Description:  the duplicate values can appear because when a replica
+                  is removed from the ruv the array is compacted, but
+                  memcpy is used instead of memmove
+
+https://fedorahosted.org/389/ticket/48283
+
+Reviewed by: Rich, Thanks
+
+(cherry picked from commit 2674f5594a2eb088be34728c12c1169df36b1588)
+---
+ ldap/servers/slapd/dl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/ldap/servers/slapd/dl.c b/ldap/servers/slapd/dl.c
+index 8233519..c6858f3 100644
+--- a/ldap/servers/slapd/dl.c
++++ b/ldap/servers/slapd/dl.c
+@@ -219,7 +219,7 @@ void *dl_delete (DataList *dl, const void *element, CMPFN cmpfn, FREEFN freefn)
+ 
+ 			if (i != dl->element_count - 1)
+ 			{
+-				memcpy (&dl->elements[i], &dl->elements[i+1], (dl->element_count - i - 1) * sizeof (void*));
++				memmove (&dl->elements[i], &dl->elements[i+1], (dl->element_count - i - 1) * sizeof (void*));
+ 			}
+ 		
+ 			dl->element_count --;
+-- 
+2.4.3
+
diff --git a/SOURCES/0080-Revert-Ticket-48338-SimplePagedResults-abandon-could.patch b/SOURCES/0080-Revert-Ticket-48338-SimplePagedResults-abandon-could.patch
new file mode 100644
index 0000000..661eb52
--- /dev/null
+++ b/SOURCES/0080-Revert-Ticket-48338-SimplePagedResults-abandon-could.patch
@@ -0,0 +1,41 @@
+From 45ea72050bfafa3dab744cec4338dd8ddca41a0c Mon Sep 17 00:00:00 2001
+From: Noriko Hosoi <nhosoi@redhat.com>
+Date: Thu, 7 Jan 2016 18:04:19 -0800
+Subject: [PATCH 80/81] Revert "Ticket #48338 - SimplePagedResults -- abandon
+ could happen between the abandon check and sending results"
+
+This reverts commit 79ca67d1fc5d50d8a9ae6b686b9564f3960f8592.
+
+The commit caused the bug 1296694 - ns-slapd crash in ipa context -
+c_mutex lock memory corruption and self locks
+
+(cherry picked from commit 181847863bda74c2e3d77b6a7d9278350d50d4cc)
+(cherry picked from commit c8b1817896af7db6e4fab42734b827e002a7a25b)
+---
+ ldap/servers/slapd/pblock.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
+index f2017be..bf57a33 100644
+--- a/ldap/servers/slapd/pblock.c
++++ b/ldap/servers/slapd/pblock.c
+@@ -223,12 +223,14 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			memset( value, 0, sizeof( PRNetAddr ));
+ 			break;
+ 		}
+-		/* For fields with atomic access, remove the PR_Lock(c_mutex) */
++		PR_Lock( pblock->pb_conn->c_mutex );
+ 		if ( pblock->pb_conn->cin_addr == NULL ) {
+ 			memset( value, 0, sizeof( PRNetAddr ));
+ 		} else {
+-			(*(PRNetAddr *)value) = *(pblock->pb_conn->cin_addr);
++			(*(PRNetAddr *)value) =
++			    *(pblock->pb_conn->cin_addr);
+ 		}
++		PR_Unlock( pblock->pb_conn->c_mutex );
+ 		break;
+ 	case SLAPI_CONN_SERVERNETADDR:
+ 		if (pblock->pb_conn == NULL)
+-- 
+2.4.3
+
diff --git a/SOURCES/0081-Ticket-48406-Avoid-self-deadlock-by-PR_Lock-conn-c_m.patch b/SOURCES/0081-Ticket-48406-Avoid-self-deadlock-by-PR_Lock-conn-c_m.patch
new file mode 100644
index 0000000..cb99e65
--- /dev/null
+++ b/SOURCES/0081-Ticket-48406-Avoid-self-deadlock-by-PR_Lock-conn-c_m.patch
@@ -0,0 +1,1510 @@
+From ae9df61b523152e01051afa8c115c97fe59310b5 Mon Sep 17 00:00:00 2001
+From: Noriko Hosoi <nhosoi@redhat.com>
+Date: Mon, 11 Jan 2016 15:53:28 -0800
+Subject: [PATCH 81/81] Ticket #48406 - Avoid self deadlock by
+ PR_Lock(conn->c_mutex)
+
+Description:  Fixing ticket 48338 introduced a self deadlock.
+To avoid the self deadlock, tried to remove PR_Lock(conn->c_mutex)
+which looked harmless, but it introduced a crash by memory corruption.
+
+This patch replaces PR_Lock/Unlock with PR_EnterMonitor/ExitMonitor,
+respectively.
+
+https://fedorahosted.org/389/ticket/48406
+
+Reviewed by rmeggins@redhat.com, lkrispen@redhat.com, and wibrown@redhat.com.
+Thank you, Rich, Ludwig and William!
+
+(cherry picked from commit f25f804a8bce83b3790e7045dfc03230d7ece1af)
+(cherry picked from commit 84da7d05ddc5a963b0d025df08f38a6ccd7d90d2)
+---
+ ldap/servers/slapd/abandon.c         |   4 +-
+ ldap/servers/slapd/bind.c            |   4 +-
+ ldap/servers/slapd/connection.c      |  64 +++++++++++-----------
+ ldap/servers/slapd/conntable.c       |  19 +++----
+ ldap/servers/slapd/daemon.c          |  22 ++++----
+ ldap/servers/slapd/extendop.c        |  10 ++--
+ ldap/servers/slapd/operation.c       |  18 +++----
+ ldap/servers/slapd/opshared.c        |  18 +++----
+ ldap/servers/slapd/pagedresults.c    | 100 +++++++++++++++++------------------
+ ldap/servers/slapd/pblock.c          |  72 ++++++++++++-------------
+ ldap/servers/slapd/psearch.c         |  10 ++--
+ ldap/servers/slapd/saslbind.c        |  26 ++++-----
+ ldap/servers/slapd/slap.h            |   2 +-
+ ldap/servers/slapd/start_tls_extop.c |  10 ++--
+ ldap/servers/slapd/unbind.c          |   4 +-
+ 15 files changed, 192 insertions(+), 191 deletions(-)
+
+diff --git a/ldap/servers/slapd/abandon.c b/ldap/servers/slapd/abandon.c
+index 761b895..9a39f6a 100644
+--- a/ldap/servers/slapd/abandon.c
++++ b/ldap/servers/slapd/abandon.c
+@@ -77,7 +77,7 @@ do_abandon( Slapi_PBlock *pb )
+ 	 * flag and abort the operation at a convenient time.
+ 	 */
+ 
+-	PR_Lock( pb->pb_conn->c_mutex );
++	PR_EnterMonitor(pb->pb_conn->c_mutex);
+ 	for ( o = pb->pb_conn->c_ops; o != NULL; o = o->o_next ) {
+ 		if ( o->o_msgid == id && o != pb->pb_op)
+ 			break;
+@@ -138,7 +138,7 @@ do_abandon( Slapi_PBlock *pb )
+ 			o->o_results.r.r_search.nentries, current_time() - o->o_time );
+ 	}
+ 
+-	PR_Unlock( pb->pb_conn->c_mutex );
++	PR_ExitMonitor(pb->pb_conn->c_mutex);
+ 	/*
+ 	 * Wake up the persistent searches, so they
+ 	 * can notice if they've been abandoned.
+diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
+index 474b508..f81edfb 100644
+--- a/ldap/servers/slapd/bind.c
++++ b/ldap/servers/slapd/bind.c
+@@ -258,7 +258,7 @@ do_bind( Slapi_PBlock *pb )
+         slapi_pblock_get (pb, SLAPI_PWPOLICY, &pw_response_requested);
+     }
+ 
+-    PR_Lock( pb->pb_conn->c_mutex );
++    PR_EnterMonitor(pb->pb_conn->c_mutex);
+ 
+     bind_credentials_clear( pb->pb_conn, PR_FALSE, /* do not lock conn */
+                             PR_FALSE /* do not clear external creds. */ );
+@@ -291,7 +291,7 @@ do_bind( Slapi_PBlock *pb )
+      * bound user can work properly
+      */
+     pb->pb_conn->c_needpw = 0;
+-    PR_Unlock( pb->pb_conn->c_mutex );
++    PR_ExitMonitor(pb->pb_conn->c_mutex);
+ 
+     log_bind_access(pb, dn?dn:"empty", method, version, saslmech, NULL);
+ 
+diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
+index fc3b741..a3d123e 100644
+--- a/ldap/servers/slapd/connection.c
++++ b/ldap/servers/slapd/connection.c
+@@ -147,7 +147,7 @@ connection_done(Connection *conn)
+ 	}
+ 	if (NULL != conn->c_mutex)
+ 	{
+-		PR_DestroyLock(conn->c_mutex);
++		PR_DestroyMonitor(conn->c_mutex);
+ 	}
+ 	if (NULL != conn->c_pdumutex)
+ 	{
+@@ -738,10 +738,10 @@ int connection_is_free (Connection *conn)
+ {
+     int rc;
+ 
+-    PR_Lock(conn->c_mutex);
++    PR_EnterMonitor(conn->c_mutex);
+     rc = conn->c_sd == SLAPD_INVALID_SOCKET && conn->c_refcnt == 0 &&
+          !(conn->c_flags & CONN_FLAG_CLOSING);
+-    PR_Unlock(conn->c_mutex);
++    PR_ExitMonitor(conn->c_mutex);
+ 
+     return rc;
+ }
+@@ -1128,7 +1128,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i
+ 	PRInt32 syserr = 0;
+ 	size_t buffer_data_avail;
+ 
+-	PR_Lock(conn->c_mutex);
++	PR_EnterMonitor(conn->c_mutex);
+ 	/*
+ 	 * if the socket is still valid, get the ber element
+ 	 * waiting for us on this connection. timeout is handled
+@@ -1317,15 +1317,15 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i
+ 	}
+ 	op->o_tag = *tag;
+ done:
+-	PR_Unlock(conn->c_mutex);
++	PR_ExitMonitor(conn->c_mutex);
+ 	return ret;
+ }
+ 
+ void connection_make_readable(Connection *conn)
+ {
+-	PR_Lock( conn->c_mutex );
++	PR_EnterMonitor(conn->c_mutex);
+ 	conn->c_gettingber = 0;
+-	PR_Unlock( conn->c_mutex );
++	PR_ExitMonitor(conn->c_mutex);
+ 	signal_listner();
+ }
+ 
+@@ -1347,7 +1347,7 @@ void connection_check_activity_level(Connection *conn)
+ {
+ 	int current_count = 0;
+ 	int delta_count = 0;
+-	PR_Lock( conn->c_mutex );
++	PR_EnterMonitor(conn->c_mutex);
+ 	/* get the current op count */
+ 	current_count = conn->c_opscompleted;
+ 	/* compare to the previous op count */
+@@ -1358,7 +1358,7 @@ void connection_check_activity_level(Connection *conn)
+ 	conn->c_private->previous_op_count = current_count;
+ 	/* update the last checked time */
+ 	conn->c_private->previous_count_check_time = current_time();
+-	PR_Unlock( conn->c_mutex );
++	PR_ExitMonitor(conn->c_mutex);
+ 	LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " activity level = %d\n",conn->c_connid,delta_count,0); 
+ }
+ 
+@@ -1402,7 +1402,7 @@ void connection_enter_leave_turbo(Connection *conn, int current_turbo_flag, int
+ 	int connection_count = 0;
+ 	int our_rank = 0;
+ 	int threshold_rank = 0;
+-	PR_Lock(conn->c_mutex);
++	PR_EnterMonitor(conn->c_mutex);
+ 	/* We can already be in turbo mode, or not */
+ 	current_mode = current_turbo_flag;
+ 	if (pagedresults_in_use_nolock(conn)) {
+@@ -1458,7 +1458,7 @@ void connection_enter_leave_turbo(Connection *conn, int current_turbo_flag, int
+ 		}
+ 	  }
+ 	}
+-	PR_Unlock(conn->c_mutex);
++	PR_ExitMonitor(conn->c_mutex);
+ 	if (current_mode != new_mode) {
+ 		if (current_mode) {
+ 			LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " leaving turbo mode\n",conn->c_connid,0,0); 
+@@ -1564,13 +1564,13 @@ connection_threadmain()
+ 			*/
+ 			PR_Sleep(PR_INTERVAL_NO_WAIT);
+ 
+-			PR_Lock(conn->c_mutex);
++			PR_EnterMonitor(conn->c_mutex);
+ 			/* Make our own pb in turbo mode */
+ 			connection_make_new_pb(pb,conn);
+ 			if (connection_call_io_layer_callbacks(conn)) {
+ 				LDAPDebug0Args( LDAP_DEBUG_ANY, "Error: could not add/remove IO layers from connection\n" );
+ 			}
+-			PR_Unlock(conn->c_mutex);
++			PR_ExitMonitor(conn->c_mutex);
+ 			if (! config_check_referral_mode()) {
+ 			  slapi_counter_increment(ops_initiated);
+ 			  slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsInOps); 
+@@ -1685,7 +1685,7 @@ connection_threadmain()
+  */
+ 			} else if (!enable_nunc_stans) { /* more data in conn - just put back on work_q - bypass poll */
+ 				bypasspollcnt++;
+-				PR_Lock(conn->c_mutex);
++				PR_EnterMonitor(conn->c_mutex);
+ 				/* don't do this if it would put us over the max threads per conn */
+ 				if (conn->c_threadnumber < maxthreads) {
+ 					/* for turbo, c_idlesince is set above - for !turbo and
+@@ -1700,7 +1700,7 @@ connection_threadmain()
+ 					/* keep count of how many times maxthreads has blocked an operation */
+ 					conn->c_maxthreadsblocked++;
+ 				}
+-				PR_Unlock(conn->c_mutex);
++				PR_ExitMonitor(conn->c_mutex);
+ 			}
+ 		}
+ 
+@@ -1736,14 +1736,14 @@ connection_threadmain()
+ 
+ done:	
+ 		if (doshutdown) {
+-			PR_Lock(conn->c_mutex);
++			PR_EnterMonitor(conn->c_mutex);
+ 			connection_remove_operation_ext(pb, conn, op);
+ 			connection_make_readable_nolock(conn);
+ 			conn->c_threadnumber--;
+ 			slapi_counter_decrement(conns_in_maxthreads);
+ 			slapi_counter_decrement(g_get_global_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads);
+ 			connection_release_nolock(conn);
+-			PR_Unlock(conn->c_mutex);
++			PR_ExitMonitor(conn->c_mutex);
+ 			signal_listner();
+ 			return;
+ 		}
+@@ -1760,9 +1760,9 @@ done:
+ 		slapi_counter_increment(ops_completed);
+ 		/* If this op isn't a persistent search, remove it */
+ 		if ( pb->pb_op->o_flags & OP_FLAG_PS ) {
+-			    PR_Lock( conn->c_mutex );
++			    PR_EnterMonitor(conn->c_mutex);
+ 			    connection_release_nolock (conn); /* psearch acquires ref to conn - release this one now */
+-			    PR_Unlock( conn->c_mutex );
++			    PR_ExitMonitor(conn->c_mutex);
+ 			    /* ps_add makes a shallow copy of the pb - so we
+ 			     * can't free it or init it here - just memset it to 0
+ 			     * ps_send_results will call connection_remove_operation_ext to free it
+@@ -1770,7 +1770,7 @@ done:
+ 			    memset(pb, 0, sizeof(*pb));
+ 		} else {
+ 			/* delete from connection operation queue & decr refcnt */
+-			PR_Lock( conn->c_mutex );
++			PR_EnterMonitor(conn->c_mutex);
+ 			connection_remove_operation_ext( pb, conn, op );
+ 
+ 			/* If we're in turbo mode, we keep our reference to the connection alive */
+@@ -1811,7 +1811,7 @@ done:
+ 					signal_listner();
+ 				}
+ 			}
+-			PR_Unlock( conn->c_mutex );
++			PR_ExitMonitor(conn->c_mutex);
+ 		}
+ 	} /* while (1) */
+ }
+@@ -2071,16 +2071,16 @@ op_copy_identity(Connection *conn, Operation *op)
+     size_t dnlen;
+     size_t typelen;
+ 
+-	PR_Lock( conn->c_mutex );
+-	dnlen= conn->c_dn ? strlen (conn->c_dn) : 0;
+-	typelen= conn->c_authtype ? strlen (conn->c_authtype) : 0;
++    PR_EnterMonitor(conn->c_mutex);
++    dnlen= conn->c_dn ? strlen (conn->c_dn) : 0;
++    typelen= conn->c_authtype ? strlen (conn->c_authtype) : 0;
+ 
+-	slapi_sdn_done(&op->o_sdn);
+-	slapi_ch_free_string(&(op->o_authtype));
++    slapi_sdn_done(&op->o_sdn);
++    slapi_ch_free_string(&(op->o_authtype));
+     if (dnlen <= 0 && typelen <= 0) {
+         op->o_authtype = NULL;
+     } else {
+-	    slapi_sdn_set_dn_byval(&op->o_sdn,conn->c_dn);
++        slapi_sdn_set_dn_byval(&op->o_sdn,conn->c_dn);
+         op->o_authtype = slapi_ch_strdup(conn->c_authtype);
+         /* set the thread data bind dn index */
+         slapi_td_set_dn(slapi_ch_strdup(conn->c_dn));
+@@ -2103,14 +2103,14 @@ op_copy_identity(Connection *conn, Operation *op)
+         op->o_ssf = conn->c_local_ssf;
+     }
+ 
+-    PR_Unlock( conn->c_mutex );
++    PR_ExitMonitor(conn->c_mutex);
+ }
+ 
+ /* Sets the SSL SSF in the connection struct. */
+ static void
+ connection_set_ssl_ssf(Connection *conn)
+ {
+-	PR_Lock( conn->c_mutex );
++	PR_EnterMonitor(conn->c_mutex);
+ 
+ 	if (conn->c_flags & CONN_FLAG_SSL) {
+ 		SSL_SecurityStatus(conn->c_prfd, NULL, NULL, NULL, &(conn->c_ssl_ssf), NULL, NULL);
+@@ -2118,7 +2118,7 @@ connection_set_ssl_ssf(Connection *conn)
+ 		conn->c_ssl_ssf = 0;
+ 	}
+ 
+-	PR_Unlock( conn->c_mutex );
++	PR_ExitMonitor(conn->c_mutex);
+ }
+ 
+ static int
+@@ -2165,9 +2165,9 @@ log_ber_too_big_error(const Connection *conn, ber_len_t ber_len,
+ void
+ disconnect_server( Connection *conn, PRUint64 opconnid, int opid, PRErrorCode reason, PRInt32 error )
+ {
+-	PR_Lock( conn->c_mutex );
++	PR_EnterMonitor(conn->c_mutex);
+ 	disconnect_server_nomutex( conn, opconnid, opid, reason, error );
+-	PR_Unlock( conn->c_mutex );
++	PR_ExitMonitor(conn->c_mutex);
+ }
+ 
+ static ps_wakeup_all_fn_ptr ps_wakeup_all_fn = NULL;
+diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
+index d5b9058..37da9a5 100644
+--- a/ldap/servers/slapd/conntable.c
++++ b/ldap/servers/slapd/conntable.c
+@@ -85,11 +85,11 @@ connection_table_abandon_all_operations(Connection_Table *ct)
+ 	int	i;
+ 	for ( i = 0; i < ct->size; i++ )
+ 	{
+-		if ( ct->c[i].c_mutex != NULL )
++		if ( ct->c[i].c_mutex )
+ 		{
+-			PR_Lock( ct->c[i].c_mutex );
++			PR_EnterMonitor(ct->c[i].c_mutex);
+ 			connection_abandon_operations( &ct->c[i] );
+-			PR_Unlock( ct->c[i].c_mutex );
++			PR_ExitMonitor(ct->c[i].c_mutex);
+ 		}
+ 	}
+ }
+@@ -139,7 +139,7 @@ connection_table_get_connection(Connection_Table *ct, int sd)
+ 		if ( c->c_mutex == NULL )
+ 		{
+ 			PR_Lock( ct->table_mutex );
+-			c->c_mutex = PR_NewLock();
++			c->c_mutex = PR_NewMonitor();
+ 			c->c_pdumutex = PR_NewLock();
+ 			PR_Unlock( ct->table_mutex );
+ 			if ( c->c_mutex == NULL || c->c_pdumutex == NULL )
+@@ -360,7 +360,7 @@ connection_table_as_entry(Connection_Table *ct, Slapi_Entry *e)
+ 		/* Can't take c_mutex if holding table_mutex; temporarily unlock */ 
+ 		PR_Unlock( ct->table_mutex );
+ 
+-		PR_Lock( ct->c[i].c_mutex );
++		PR_EnterMonitor(ct->c[i].c_mutex);
+ 		if ( ct->c[i].c_sd != SLAPD_INVALID_SOCKET )
+ 		{
+ 			char buf2[20];
+@@ -420,7 +420,7 @@ connection_table_as_entry(Connection_Table *ct, Slapi_Entry *e)
+ 			attrlist_merge( &e->e_attrs, "connection", vals );
+ 			slapi_ch_free_string(&newbuf);
+ 		}
+-		PR_Unlock( ct->c[i].c_mutex );
++		PR_ExitMonitor(ct->c[i].c_mutex);
+ 	}
+ 
+ 	PR_snprintf( buf, sizeof(buf), "%d", nconns );
+@@ -458,14 +458,15 @@ void
+ connection_table_dump_activity_to_errors_log(Connection_Table *ct)
+ {
+ 	int i;
++
+ 	for ( i = 0; i < ct->size; i++ )
+ 	{
+ 		Connection *c= &(ct->c[i]);
+-		if ( c->c_mutex != NULL )
++		if ( c->c_mutex )
+ 		{
+ 			/* Find the connection we are referring to */
+ 			int j= c->c_fdi;
+-			PR_Lock( c->c_mutex );
++			PR_EnterMonitor(c->c_mutex);
+ 			if ( (c->c_sd != SLAPD_INVALID_SOCKET) && 
+ 			     (j >= 0) && (c->c_prfd == ct->fd[j].fd) )
+ 			{
+@@ -475,7 +476,7 @@ connection_table_dump_activity_to_errors_log(Connection_Table *ct)
+ 					LDAPDebug( LDAP_DEBUG_CONNS,"activity on %d%s\n", i, r ? "r" : "",0 );
+ 				}
+ 			}
+-			PR_Unlock( c->c_mutex );
++			PR_ExitMonitor(c->c_mutex);
+ 		}
+ 	}
+ }
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index 5d70647..355f0fc 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -1612,7 +1612,7 @@ setup_pr_read_pds(Connection_Table *ct, PRFileDesc **n_tcps, PRFileDesc **s_tcps
+ 		}
+ 		else
+ 		{
+-			PR_Lock( c->c_mutex );
++			PR_EnterMonitor(c->c_mutex);
+ 			if (c->c_flags & CONN_FLAG_CLOSING)
+ 			{
+ 				/* A worker thread has marked that this connection
+@@ -1661,7 +1661,7 @@ setup_pr_read_pds(Connection_Table *ct, PRFileDesc **n_tcps, PRFileDesc **s_tcps
+ 					c->c_fdi = SLAPD_INVALID_SOCKET_INDEX;
+ 				}
+ 			}
+-			PR_Unlock( c->c_mutex );
++			PR_ExitMonitor(c->c_mutex);
+ 		}
+ 		c = next;
+ 	}
+@@ -1680,7 +1680,7 @@ handle_timeout( void )
+ 	time_t curtime = current_time();
+ 
+ 	if (0 == prevtime) {
+-		prevtime = time (&housekeeping_fire_time);		
++		prevtime = time (&housekeeping_fire_time);
+ 	}
+ 
+ 	if ( difftime(curtime, prevtime) >= 
+@@ -1740,7 +1740,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll)
+ 	{
+ 		if ( c->c_mutex != NULL )
+ 		{
+-			PR_Lock( c->c_mutex );
++			PR_EnterMonitor(c->c_mutex);
+ 			if ( connection_is_active_nolock (c) && c->c_gettingber == 0 )
+ 			{
+ 			    PRInt16 out_flags;
+@@ -1797,7 +1797,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll)
+ 								   SLAPD_DISCONNECT_IDLE_TIMEOUT, EAGAIN );
+ 				}
+ 			}
+-			PR_Unlock( c->c_mutex );
++			PR_ExitMonitor(c->c_mutex);
+ 		}
+ 	}
+ }
+@@ -1843,12 +1843,12 @@ ns_handle_closure(struct ns_job_t *job)
+ 		return;
+ 	}
+ #endif
+-	PR_Lock(c->c_mutex);
++	PR_EnterMonitor(c->c_mutex);
+ 	connection_release_nolock_ext(c, 1); /* release ref acquired for event framework */
+ 	PR_ASSERT(c->c_ns_close_jobs == 1); /* should be exactly 1 active close job - this one */
+ 	c->c_ns_close_jobs--; /* this job is processing closure */
+ 	do_yield = ns_handle_closure_nomutex(c);
+-	PR_Unlock(c->c_mutex);
++	PR_ExitMonitor(c->c_mutex);
+ 	ns_job_done(job);
+ 	if (do_yield) {
+ 		/* closure not done - another reference still outstanding */
+@@ -1939,7 +1939,7 @@ ns_handle_pr_read_ready(struct ns_job_t *job)
+ 	}
+ #endif
+ 
+-	PR_Lock(c->c_mutex);
++	PR_EnterMonitor(c->c_mutex);
+ 	LDAPDebug2Args(LDAP_DEBUG_CONNS, "activity on conn %" NSPRIu64 " for fd=%d\n",
+ 		       c->c_connid, c->c_sd);
+ 	/* if we were called due to some i/o event, see what the state of the socket is */
+@@ -1986,7 +1986,7 @@ ns_handle_pr_read_ready(struct ns_job_t *job)
+ 		LDAPDebug2Args(LDAP_DEBUG_CONNS, "queued conn %" NSPRIu64 " for fd=%d\n",
+ 			       c->c_connid, c->c_sd);
+ 	}
+-	PR_Unlock(c->c_mutex);
++	PR_ExitMonitor(c->c_mutex);
+ 	ns_job_done(job);
+ 	return;
+ }
+@@ -2493,7 +2493,7 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i
+ 		PR_Close(pr_acceptfd);
+ 		return -1;
+ 	}
+-	PR_Lock( conn->c_mutex );
++	PR_EnterMonitor(conn->c_mutex);
+ 
+ 	/*
+ 	 * Set the default idletimeout and the handle.  We'll update c_idletimeout
+@@ -2592,7 +2592,7 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i
+ 		connection_table_move_connection_on_to_active_list(the_connection_table,conn);
+ 	}
+ 
+-	PR_Unlock( conn->c_mutex );
++	PR_ExitMonitor(conn->c_mutex);
+ 
+ 	g_increment_current_conn_count();
+ 
+diff --git a/ldap/servers/slapd/extendop.c b/ldap/servers/slapd/extendop.c
+index 94036c6..8d0b8fb 100644
+--- a/ldap/servers/slapd/extendop.c
++++ b/ldap/servers/slapd/extendop.c
+@@ -61,7 +61,7 @@ static void extop_handle_import_start(Slapi_PBlock *pb, char *extoid,
+         send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, NULL, 0, NULL);
+         return;
+     }
+-	suffix = slapi_sdn_get_dn(sdn);
++    suffix = slapi_sdn_get_dn(sdn);
+     /*    be = slapi_be_select(sdn); */
+     be = slapi_mapping_tree_find_backend_for_sdn(sdn);
+     if (be == NULL || be == defbackend_get_backend()) {
+@@ -135,10 +135,10 @@ static void extop_handle_import_start(Slapi_PBlock *pb, char *extoid,
+     /* okay, the import is starting now -- save the backend in the
+      * connection block & mark this connection as belonging to a bulk import
+      */
+-    PR_Lock(pb->pb_conn->c_mutex);
++    PR_EnterMonitor(pb->pb_conn->c_mutex);
+     pb->pb_conn->c_flags |= CONN_FLAG_IMPORT;
+     pb->pb_conn->c_bi_backend = be;
+-    PR_Unlock(pb->pb_conn->c_mutex);
++    PR_ExitMonitor(pb->pb_conn->c_mutex);
+ 
+     slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, EXTOP_BULK_IMPORT_START_OID);
+     bv.bv_val = NULL;
+@@ -160,11 +160,11 @@ static void extop_handle_import_done(Slapi_PBlock *pb, char *extoid,
+     struct berval bv;
+     int ret;
+ 
+-    PR_Lock(pb->pb_conn->c_mutex);
++    PR_EnterMonitor(pb->pb_conn->c_mutex);
+     pb->pb_conn->c_flags &= ~CONN_FLAG_IMPORT;
+     be = pb->pb_conn->c_bi_backend;
+     pb->pb_conn->c_bi_backend = NULL;
+-    PR_Unlock(pb->pb_conn->c_mutex);
++    PR_ExitMonitor(pb->pb_conn->c_mutex);
+ 
+     if ((be == NULL) || (be->be_wire_import == NULL)) {
+         /* can this even happen? */
+diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
+index 869298b..6c95552 100644
+--- a/ldap/servers/slapd/operation.c
++++ b/ldap/servers/slapd/operation.c
+@@ -570,7 +570,7 @@ int slapi_connection_acquire(Slapi_Connection *conn)
+ {
+     int rc;
+ 
+-    PR_Lock(conn->c_mutex);
++    PR_EnterMonitor(conn->c_mutex);
+     /* rc = connection_acquire_nolock(conn); */
+     /* connection in the closing state can't be acquired */
+     if (conn->c_flags & CONN_FLAG_CLOSING)
+@@ -586,7 +586,7 @@ int slapi_connection_acquire(Slapi_Connection *conn)
+         conn->c_refcnt++;
+         rc = 0;
+     }
+-    PR_Unlock(conn->c_mutex);
++    PR_ExitMonitor(conn->c_mutex);
+     return(rc);
+ }
+ 
+@@ -596,7 +596,7 @@ slapi_connection_remove_operation( Slapi_PBlock *pb, Slapi_Connection *conn, Sla
+ 	int rc = 0;
+ 	Slapi_Operation **olist= &conn->c_ops;
+ 	Slapi_Operation **tmp;
+-	PR_Lock( conn->c_mutex );
++	PR_EnterMonitor(conn->c_mutex);
+ 	/* connection_remove_operation_ext(pb, conn,op); */
+ 	for ( tmp = olist; *tmp != NULL && *tmp != op; tmp = &(*tmp)->o_next )
+ 		;	/* NULL */
+@@ -614,15 +614,15 @@ slapi_connection_remove_operation( Slapi_PBlock *pb, Slapi_Connection *conn, Sla
+ 	if (release) {
+ 		/* connection_release_nolock(conn); */
+ 		if (conn->c_refcnt <= 0) {
+-        		slapi_log_error(SLAPI_LOG_FATAL, "connection",
+-		                "conn=%" NSPRIu64 " fd=%d Attempt to release connection that is not acquired\n",
+-		                conn->c_connid, conn->c_sd);
+-        		rc = -1;
++			slapi_log_error(SLAPI_LOG_FATAL, "connection",
++			                "conn=%" NSPRIu64 " fd=%d Attempt to release connection that is not acquired\n",
++			                conn->c_connid, conn->c_sd);
++			rc = -1;
+ 		} else {
+-        		conn->c_refcnt--;
++			conn->c_refcnt--;
+ 			rc = 0;
+ 		}
+ 	}
+-	PR_Unlock( conn->c_mutex );
++	PR_ExitMonitor(conn->c_mutex);
+ 	return (rc);
+ }
+diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
+index 5cafc3c..e76ca0f 100644
+--- a/ldap/servers/slapd/opshared.c
++++ b/ldap/servers/slapd/opshared.c
+@@ -675,7 +675,7 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
+        * In async paged result case, the search result might be released
+        * by other theads.  We need to double check it in the locked region.
+        */
+-      PR_Lock(pb->pb_conn->c_mutex);
++      PR_EnterMonitor(pb->pb_conn->c_mutex);
+       pr_search_result = pagedresults_get_search_result(pb->pb_conn, operation, 1/*locked*/, pr_idx);
+       if (pr_search_result) {
+         if (pagedresults_is_abandoned_or_notavailable(pb->pb_conn, 1/*locked*/, pr_idx)) {
+@@ -683,7 +683,7 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
+           /* Previous operation was abandoned and the simplepaged object is not in use. */
+           send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
+           rc = LDAP_SUCCESS;
+-          PR_Unlock(pb->pb_conn->c_mutex);
++          PR_ExitMonitor(pb->pb_conn->c_mutex);
+           goto free_and_return;
+         } else {
+           slapi_pblock_set( pb, SLAPI_SEARCH_RESULT_SET, pr_search_result );
+@@ -692,7 +692,7 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
+           /* search result could be reset in the backend/dse */
+           slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
+           pagedresults_set_search_result(pb->pb_conn, operation, sr, 1/*locked*/, pr_idx);
+-          PR_Unlock(pb->pb_conn->c_mutex);
++          PR_ExitMonitor(pb->pb_conn->c_mutex);
+         }
+       } else {
+         pr_stat = PAGEDRESULTS_SEARCH_END;
+@@ -826,10 +826,10 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
+         /* PAGED RESULTS */
+         if (op_is_pagedresults(operation)) {
+             /* cleanup the slot */
+-            PR_Lock(pb->pb_conn->c_mutex);
++            PR_EnterMonitor(pb->pb_conn->c_mutex);
+             pagedresults_set_search_result(pb->pb_conn, operation, NULL, 1, pr_idx);
+             rc = pagedresults_set_current_be(pb->pb_conn, NULL, pr_idx, 1);
+-            PR_Unlock(pb->pb_conn->c_mutex);
++            PR_ExitMonitor(pb->pb_conn->c_mutex);
+         }
+         if (1 == flag_no_such_object) {
+             break;
+@@ -871,11 +871,11 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
+             slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
+             if (PAGEDRESULTS_SEARCH_END == pr_stat) {
+               /* no more entries, but at least another backend */
+-              PR_Lock(pb->pb_conn->c_mutex);
++              PR_EnterMonitor(pb->pb_conn->c_mutex);
+               pagedresults_set_search_result(pb->pb_conn, operation, NULL, 1, pr_idx);
+               be->be_search_results_release(&sr);
+               rc = pagedresults_set_current_be(pb->pb_conn, next_be, pr_idx, 1);
+-              PR_Unlock(pb->pb_conn->c_mutex);
++              PR_ExitMonitor(pb->pb_conn->c_mutex);
+               if (NULL == next_be) {
+                   /* no more entries && no more backends */
+                   curr_search_count = -1;
+@@ -900,9 +900,9 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
+             next_be = NULL; /* to break the loop */
+             if (operation->o_status & SLAPI_OP_STATUS_ABANDONED) {
+                 /* It turned out this search was abandoned. */
+-                PR_Lock(pb->pb_conn->c_mutex);
++                PR_EnterMonitor(pb->pb_conn->c_mutex);
+                 pagedresults_free_one_msgid_nolock( pb->pb_conn, operation->o_msgid);
+-                PR_Unlock(pb->pb_conn->c_mutex);
++                PR_ExitMonitor(pb->pb_conn->c_mutex);
+                 /* paged-results-request was abandoned; making an empty cookie. */
+                 pagedresults_set_response_control(pb, 0, estimate, -1, pr_idx);
+                 send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
+diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
+index 4458cfb..d394dab 100644
+--- a/ldap/servers/slapd/pagedresults.c
++++ b/ldap/servers/slapd/pagedresults.c
+@@ -98,7 +98,7 @@ pagedresults_parse_control_value( Slapi_PBlock *pb,
+         return LDAP_UNWILLING_TO_PERFORM;
+     }
+ 
+-    PR_Lock(conn->c_mutex);
++    PR_EnterMonitor(conn->c_mutex);
+     /* the ber encoding is no longer needed */
+     ber_free(ber, 1);
+     if ( cookie.bv_len <= 0 ) {
+@@ -204,7 +204,7 @@ bail:
+             }
+         }
+     }
+-    PR_Unlock(conn->c_mutex);
++    PR_ExitMonitor(conn->c_mutex);
+ 
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_parse_control_value: idx %d\n", *index);
+@@ -301,7 +301,7 @@ pagedresults_free_one( Connection *conn, Operation *op, int index )
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_free_one: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (conn->c_pagedresults.prl_count <= 0) {
+             LDAPDebug2Args(LDAP_DEBUG_TRACE, "pagedresults_free_one: "
+                            "conn=%d paged requests list count is %d\n",
+@@ -312,7 +312,7 @@ pagedresults_free_one( Connection *conn, Operation *op, int index )
+             conn->c_pagedresults.prl_count--;
+             rc = 0;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+ 
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE, "<-- pagedresults_free_one: %d\n", rc);
+@@ -364,11 +364,11 @@ pagedresults_get_current_be(Connection *conn, int index)
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_get_current_be: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             be = conn->c_pagedresults.prl_list[index].pr_current_be;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_get_current_be: %p\n", be);
+@@ -382,12 +382,12 @@ pagedresults_set_current_be(Connection *conn, Slapi_Backend *be, int index, int
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_set_current_be: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        if (!nolock) PR_Lock(conn->c_mutex);
++        if (!nolock) PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             conn->c_pagedresults.prl_list[index].pr_current_be = be;
+         }
+         rc = 0;
+-        if (!nolock) PR_Unlock(conn->c_mutex);
++        if (!nolock) PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_set_current_be: %d\n", rc);
+@@ -406,13 +406,13 @@ pagedresults_get_search_result(Connection *conn, Operation *op, int locked, int
+                    locked?"locked":"not locked", index);
+     if (conn && (index > -1)) {
+         if (!locked) {
+-            PR_Lock(conn->c_mutex);
++            PR_EnterMonitor(conn->c_mutex);
+         }
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             sr = conn->c_pagedresults.prl_list[index].pr_search_result_set;
+         }
+         if (!locked) {
+-            PR_Unlock(conn->c_mutex);
++            PR_ExitMonitor(conn->c_mutex);
+         }
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+@@ -431,7 +431,7 @@ pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int lo
+                    "--> pagedresults_set_search_result: idx=%d, sr=%p\n",
+                    index, sr);
+     if (conn && (index > -1)) {
+-        if (!locked) PR_Lock(conn->c_mutex);
++        if (!locked) PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             PagedResults *prp = conn->c_pagedresults.prl_list + index;
+             if (!(prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED) || !sr) {
+@@ -440,7 +440,7 @@ pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int lo
+             }
+             rc = 0;
+         }
+-        if (!locked) PR_Unlock(conn->c_mutex);
++        if (!locked) PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_set_search_result: %d\n", rc);
+@@ -457,11 +457,11 @@ pagedresults_get_search_result_count(Connection *conn, Operation *op, int index)
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_get_search_result_count: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             count = conn->c_pagedresults.prl_list[index].pr_search_result_count;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_get_search_result_count: %d\n", count);
+@@ -479,11 +479,11 @@ pagedresults_set_search_result_count(Connection *conn, Operation *op,
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_set_search_result_count: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             conn->c_pagedresults.prl_list[index].pr_search_result_count = count;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+         rc = 0;
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+@@ -504,11 +504,11 @@ pagedresults_get_search_result_set_size_estimate(Connection *conn,
+                   "--> pagedresults_get_search_result_set_size_estimate: "
+                   "idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             count = conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_get_search_result_set_size_estimate: %d\n",
+@@ -529,11 +529,11 @@ pagedresults_set_search_result_set_size_estimate(Connection *conn,
+                   "--> pagedresults_set_search_result_set_size_estimate: "
+                   "idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate = count;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+         rc = 0;
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+@@ -552,11 +552,11 @@ pagedresults_get_with_sort(Connection *conn, Operation *op, int index)
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_get_with_sort: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             flags = conn->c_pagedresults.prl_list[index].pr_flags&CONN_FLAG_PAGEDRESULTS_WITH_SORT;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_get_with_sort: %p\n", flags);
+@@ -574,14 +574,14 @@ pagedresults_set_with_sort(Connection *conn, Operation *op,
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_set_with_sort: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             if (flags & OP_FLAG_SERVER_SIDE_SORTING) {
+                 conn->c_pagedresults.prl_list[index].pr_flags |=
+                                                CONN_FLAG_PAGEDRESULTS_WITH_SORT;
+             }
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+         rc = 0;
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE, "<-- pagedresults_set_with_sort: %d\n", rc);
+@@ -598,11 +598,11 @@ pagedresults_get_unindexed(Connection *conn, Operation *op, int index)
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_get_unindexed: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             flags = conn->c_pagedresults.prl_list[index].pr_flags&CONN_FLAG_PAGEDRESULTS_UNINDEXED;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_get_unindexed: %p\n", flags);
+@@ -619,12 +619,12 @@ pagedresults_set_unindexed(Connection *conn, Operation *op, int index)
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_set_unindexed: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             conn->c_pagedresults.prl_list[index].pr_flags |=
+                                                CONN_FLAG_PAGEDRESULTS_UNINDEXED;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+         rc = 0;
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+@@ -642,11 +642,11 @@ pagedresults_get_sort_result_code(Connection *conn, Operation *op, int index)
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_get_sort_result_code: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             code = conn->c_pagedresults.prl_list[index].pr_sort_result_code;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_get_sort_result_code: %d\n", code);
+@@ -664,11 +664,11 @@ pagedresults_set_sort_result_code(Connection *conn, Operation *op,
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_set_sort_result_code: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             conn->c_pagedresults.prl_list[index].pr_sort_result_code = code;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+         rc = 0;
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+@@ -687,11 +687,11 @@ pagedresults_set_timelimit(Connection *conn, Operation *op,
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_set_timelimit: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             conn->c_pagedresults.prl_list[index].pr_timelimit = timelimit;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+         rc = 0;
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE, "<-- pagedresults_set_timelimit: %d\n", rc);
+@@ -749,7 +749,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
+     }
+ 
+     if (needlock) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+     }
+     for (i = 0; conn->c_pagedresults.prl_list &&
+                 i < conn->c_pagedresults.prl_maxlen; i++) {
+@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
+     }
+     conn->c_pagedresults.prl_count = 0;
+     if (needlock) {
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE, "<-- pagedresults_cleanup: %d\n", rc);
+     return rc;
+@@ -794,7 +794,7 @@ pagedresults_cleanup_all(Connection *conn, int needlock)
+     }
+ 
+     if (needlock) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+     }
+     for (i = 0; conn->c_pagedresults.prl_list &&
+                 i < conn->c_pagedresults.prl_maxlen; i++) {
+@@ -813,7 +813,7 @@ pagedresults_cleanup_all(Connection *conn, int needlock)
+     conn->c_pagedresults.prl_maxlen = 0;
+     conn->c_pagedresults.prl_count = 0;
+     if (needlock) {
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE, "<-- pagedresults_cleanup_all: %d\n", rc);
+     return rc;
+@@ -832,7 +832,7 @@ pagedresults_check_or_set_processing(Connection *conn, int index)
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_check_or_set_processing\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             ret = (conn->c_pagedresults.prl_list[index].pr_flags &
+                    CONN_FLAG_PAGEDRESULTS_PROCESSING);
+@@ -840,7 +840,7 @@ pagedresults_check_or_set_processing(Connection *conn, int index)
+             conn->c_pagedresults.prl_list[index].pr_flags |=
+                                               CONN_FLAG_PAGEDRESULTS_PROCESSING;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_check_or_set_processing: %d\n", ret);
+@@ -859,7 +859,7 @@ pagedresults_reset_processing(Connection *conn, int index)
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_reset_processing: idx=%d\n", index);
+     if (conn && (index > -1)) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             ret = (conn->c_pagedresults.prl_list[index].pr_flags &
+                    CONN_FLAG_PAGEDRESULTS_PROCESSING);
+@@ -867,7 +867,7 @@ pagedresults_reset_processing(Connection *conn, int index)
+             conn->c_pagedresults.prl_list[index].pr_flags &=
+                                              ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
+         }
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_reset_processing: %d\n", ret);
+@@ -981,9 +981,9 @@ pagedresults_lock( Connection *conn, int index )
+     if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
+         return;
+     }
+-    PR_Lock(conn->c_mutex);
++    PR_EnterMonitor(conn->c_mutex);
+     prp = conn->c_pagedresults.prl_list + index;
+-    PR_Unlock(conn->c_mutex);
++    PR_ExitMonitor(conn->c_mutex);
+     if (prp->pr_mutex) {
+         PR_Lock(prp->pr_mutex);
+     }
+@@ -997,9 +997,9 @@ pagedresults_unlock( Connection *conn, int index )
+     if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
+         return;
+     }
+-    PR_Lock(conn->c_mutex);
++    PR_EnterMonitor(conn->c_mutex);
+     prp = conn->c_pagedresults.prl_list + index;
+-    PR_Unlock(conn->c_mutex);
++    PR_ExitMonitor(conn->c_mutex);
+     if (prp->pr_mutex) {
+         PR_Unlock(prp->pr_mutex);
+     }
+@@ -1014,11 +1014,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde
+         return 1; /* not abandoned, but do not want to proceed paged results op. */
+     }
+     if (!locked) {
+-        PR_Lock(conn->c_mutex);
++        PR_EnterMonitor(conn->c_mutex);
+     }
+     prp = conn->c_pagedresults.prl_list + index;
+     if (!locked) {
+-        PR_Unlock(conn->c_mutex);
++        PR_ExitMonitor(conn->c_mutex);
+     }
+     return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
+ }
+@@ -1042,12 +1042,12 @@ pagedresults_set_search_result_pb(Slapi_PBlock *pb, void *sr, int locked)
+     LDAPDebug2Args(LDAP_DEBUG_TRACE,
+                    "--> pagedresults_set_search_result_pb: idx=%d, sr=%p\n", index, sr);
+     if (conn && (index > -1)) {
+-        if (!locked) PR_Lock(conn->c_mutex);
++        if (!locked) PR_EnterMonitor(conn->c_mutex);
+         if (index < conn->c_pagedresults.prl_maxlen) {
+             conn->c_pagedresults.prl_list[index].pr_search_result_set = sr;
+             rc = 0;
+         }
+-        if (!locked) PR_Unlock(conn->c_mutex);
++        if (!locked) PR_ExitMonitor(conn->c_mutex);
+     }
+     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
+                   "<-- pagedresults_set_search_result_pb: %d\n", rc);
+diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
+index bf57a33..d373d99 100644
+--- a/ldap/servers/slapd/pblock.c
++++ b/ldap/servers/slapd/pblock.c
+@@ -117,7 +117,7 @@ if ( PBLOCK ->pb_plugin->plg_type != TYPE) return( -1 )
+ int
+ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ {
+-    char *authtype;
++	char *authtype;
+ 	Slapi_Backend		*be;
+ 
+ 	PR_ASSERT( NULL != pblock );
+@@ -174,10 +174,10 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 		          "Connection is NULL and hence cannot access SLAPI_CONN_DN \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		(*(char **)value) = (NULL == pblock->pb_conn->c_dn ? NULL :
+ 		    slapi_ch_strdup( pblock->pb_conn->c_dn ));
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_AUTHTYPE:/* deprecated */
+ 		if (pblock->pb_conn == NULL) {
+@@ -185,9 +185,9 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 		          "Connection is NULL and hence cannot access SLAPI_CONN_AUTHTYPE \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
+-                authtype = pblock->pb_conn->c_authtype;
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
++		authtype = pblock->pb_conn->c_authtype;
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+                 if (authtype == NULL) {
+                     (*(char **)value) = NULL;
+                 } else if (strcasecmp(authtype, SLAPD_AUTH_NONE) == 0) {
+@@ -212,10 +212,10 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 		          "Connection is NULL and hence cannot access SLAPI_CONN_AUTHMETHOD \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		(*(char **)value) = pblock->pb_conn->c_authtype ?
+                     slapi_ch_strdup(pblock->pb_conn->c_authtype) : NULL;
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_CLIENTNETADDR:
+ 		if (pblock->pb_conn == NULL)
+@@ -223,14 +223,14 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			memset( value, 0, sizeof( PRNetAddr ));
+ 			break;
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		if ( pblock->pb_conn->cin_addr == NULL ) {
+ 			memset( value, 0, sizeof( PRNetAddr ));
+ 		} else {
+ 			(*(PRNetAddr *)value) =
+ 			    *(pblock->pb_conn->cin_addr);
+ 		}
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_SERVERNETADDR:
+ 		if (pblock->pb_conn == NULL)
+@@ -238,14 +238,14 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			memset( value, 0, sizeof( PRNetAddr ));
+ 			break;
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		if ( pblock->pb_conn->cin_destaddr == NULL ) {
+ 			memset( value, 0, sizeof( PRNetAddr ));
+ 		} else {
+ 			(*(PRNetAddr *)value) =
+ 				*(pblock->pb_conn->cin_destaddr);
+ 		}
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_CLIENTIP:
+ 		if (pblock->pb_conn == NULL)
+@@ -253,7 +253,7 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			memset( value, 0, sizeof( struct in_addr ));
+ 			break;
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		if ( pblock->pb_conn->cin_addr == NULL ) {
+ 			memset( value, 0, sizeof( struct in_addr ));
+ 		} else {
+@@ -268,7 +268,7 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 				memset( value, 0, sizeof( struct in_addr ));
+ 			}
+ 		}
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_SERVERIP:
+ 		if (pblock->pb_conn == NULL)
+@@ -276,7 +276,7 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			memset( value, 0, sizeof( struct in_addr ));
+ 			break;
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		if ( pblock->pb_conn->cin_destaddr == NULL ) {
+ 			memset( value, 0, sizeof( PRNetAddr ));
+ 		} else {
+@@ -292,7 +292,7 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			}
+ 
+ 		}
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_IS_REPLICATION_SESSION:
+ 		if (pblock->pb_conn == NULL) {
+@@ -300,9 +300,9 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 		          "Connection is NULL and hence cannot access SLAPI_CONN_IS_REPLICATION_SESSION \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		(*(int *)value) = pblock->pb_conn->c_isreplication_session;
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_IS_SSL_SESSION:
+ 		if (pblock->pb_conn == NULL) {
+@@ -310,9 +310,9 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 		          "Connection is NULL and hence cannot access SLAPI_CONN_IS_SSL_SESSION \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		(*(int *)value) = pblock->pb_conn->c_flags & CONN_FLAG_SSL;
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_SASL_SSF:
+ 		if (pblock->pb_conn == NULL) {
+@@ -320,9 +320,9 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			  "Connection is NULL and hence cannot access SLAPI_CONN_SASL_SSF \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		(*(int *)value) = pblock->pb_conn->c_sasl_ssf;
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_SSL_SSF:
+ 		if (pblock->pb_conn == NULL) {
+@@ -330,9 +330,9 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			  "Connection is NULL and hence cannot access SLAPI_CONN_SSL_SSF \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		(*(int *)value) = pblock->pb_conn->c_ssl_ssf;
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_LOCAL_SSF:
+ 		if (pblock->pb_conn == NULL) {
+@@ -340,9 +340,9 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ 			    "Connection is NULL and hence cannot access SLAPI_CONN_LOCAL_SSF \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		(*(int *)value) = pblock->pb_conn->c_local_ssf;
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_CERT:
+ 		if (pblock->pb_conn == NULL) {
+@@ -1953,7 +1953,7 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+ int
+ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
+ {
+-    char *authtype;
++	char *authtype;
+ 
+ 	PR_ASSERT( NULL != pblock );
+ 
+@@ -2020,10 +2020,10 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
+ 		          "Connection is NULL and hence cannot access SLAPI_CONN_AUTHMETHOD \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
+-                slapi_ch_free((void**)&pblock->pb_conn->c_authtype);
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
++		slapi_ch_free((void**)&pblock->pb_conn->c_authtype);
+ 		pblock->pb_conn->c_authtype = slapi_ch_strdup((char *) value);
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 	case SLAPI_CONN_IS_REPLICATION_SESSION:
+ 		if (pblock->pb_conn == NULL) {
+@@ -2031,9 +2031,9 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
+ 		          "Connection is NULL and hence cannot access SLAPI_CONN_IS_REPLICATION_SESSION \n", 0, 0, 0 );
+ 			return (-1);
+ 		}
+-		PR_Lock( pblock->pb_conn->c_mutex );
++		PR_EnterMonitor(pblock->pb_conn->c_mutex);
+ 		pblock->pb_conn->c_isreplication_session = *((int *) value);
+-		PR_Unlock( pblock->pb_conn->c_mutex );
++		PR_ExitMonitor(pblock->pb_conn->c_mutex);
+ 		break;
+ 
+ 	/* stuff related to config file processing */
+@@ -3571,7 +3571,7 @@ bind_credentials_clear( Connection *conn, PRBool lock_conn,
+ 		PRBool clear_externalcreds )
+ {
+     if ( lock_conn ) {
+-        PR_Lock( conn->c_mutex );
++        PR_EnterMonitor(conn->c_mutex);
+     }
+ 
+     if ( conn->c_dn != NULL ) {		/* a non-anonymous bind has occurred */
+@@ -3597,7 +3597,7 @@ bind_credentials_clear( Connection *conn, PRBool lock_conn,
+     }
+ 
+     if ( lock_conn ) {
+-        PR_Unlock( conn->c_mutex );
++        PR_ExitMonitor(conn->c_mutex);
+     }
+ 
+ }
+@@ -3653,10 +3653,10 @@ void
+ bind_credentials_set( Connection *conn, char *authtype, char *normdn,
+ 		char *extauthtype, char *externaldn, CERTCertificate *clientcert, Slapi_Entry * bind_target_entry )
+ {
+-	PR_Lock( conn->c_mutex );
++	PR_EnterMonitor(conn->c_mutex);
+ 	bind_credentials_set_nolock(conn, authtype, normdn,
+ 		extauthtype, externaldn, clientcert, bind_target_entry);
+-	PR_Unlock( conn->c_mutex );
++	PR_ExitMonitor(conn->c_mutex);
+ }
+ 
+ void
+diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c
+index c9d23cf..00f13be 100644
+--- a/ldap/servers/slapd/psearch.c
++++ b/ldap/servers/slapd/psearch.c
+@@ -277,9 +277,9 @@ ps_send_results( void *arg )
+ 
+     /* need to acquire a reference to this connection so that it will not
+        be released or cleaned up out from under us */
+-    PR_Lock( ps->ps_pblock->pb_conn->c_mutex );
++    PR_EnterMonitor(ps->ps_pblock->pb_conn->c_mutex);
+     conn_acq_flag = connection_acquire_nolock(ps->ps_pblock->pb_conn);    
+-    PR_Unlock( ps->ps_pblock->pb_conn->c_mutex );
++    PR_ExitMonitor(ps->ps_pblock->pb_conn->c_mutex);
+ 
+ 	if (conn_acq_flag) {
+ 		slapi_log_error(SLAPI_LOG_CONNS, "Persistent Search",
+@@ -397,7 +397,7 @@ ps_send_results( void *arg )
+ 
+     conn = ps->ps_pblock->pb_conn; /* save to release later - connection_remove_operation_ext will NULL the pb_conn */
+     /* Clean up the connection structure */
+-    PR_Lock( conn->c_mutex );
++    PR_EnterMonitor(conn->c_mutex);
+ 
+ 	slapi_log_error(SLAPI_LOG_CONNS, "Persistent Search",
+ 					"conn=%" NSPRIu64 " op=%d Releasing the connection and operation\n",
+@@ -407,9 +407,9 @@ ps_send_results( void *arg )
+ 
+     /* Decrement the connection refcnt */
+     if (conn_acq_flag == 0) { /* we acquired it, so release it */
+-	connection_release_nolock (conn);
++        connection_release_nolock (conn);
+     }
+-    PR_Unlock( conn->c_mutex );
++    PR_ExitMonitor(conn->c_mutex);
+     conn = NULL;
+ 
+     PR_DestroyLock ( ps->ps_lock );
+diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c
+index 1e3e94d..7259d78 100644
+--- a/ldap/servers/slapd/saslbind.c
++++ b/ldap/servers/slapd/saslbind.c
+@@ -659,7 +659,7 @@ char **ids_sasl_listmech(Slapi_PBlock *pb)
+     if (sasl_conn == NULL) return ret;
+ 
+     /* sasl library mechanisms are connection dependent */
+-    PR_Lock(pb->pb_conn->c_mutex);
++    PR_EnterMonitor(pb->pb_conn->c_mutex);
+     if (sasl_listmech(sasl_conn, 
+                       NULL,     /* username */
+                       "", ",", "",
+@@ -672,7 +672,7 @@ char **ids_sasl_listmech(Slapi_PBlock *pb)
+         charray_free(others);
+         slapi_ch_free((void**)&dupstr);
+     }
+-    PR_Unlock(pb->pb_conn->c_mutex);
++    PR_ExitMonitor(pb->pb_conn->c_mutex);
+ 
+     LDAPDebug( LDAP_DEBUG_TRACE, "<= ids_sasl_listmech\n", 0, 0, 0 );
+ 
+@@ -755,13 +755,13 @@ void ids_sasl_check_bind(Slapi_PBlock *pb)
+     PR_ASSERT(pb);
+     PR_ASSERT(pb->pb_conn);
+ 
+-    PR_Lock(pb->pb_conn->c_mutex); /* BIG LOCK */
++    PR_EnterMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+     continuing = pb->pb_conn->c_flags & CONN_FLAG_SASL_CONTINUE;
+     pb->pb_conn->c_flags &= ~CONN_FLAG_SASL_CONTINUE; /* reset flag */
+ 
+     sasl_conn = (sasl_conn_t*)pb->pb_conn->c_sasl_conn;
+     if (sasl_conn == NULL) {
+-        PR_Unlock(pb->pb_conn->c_mutex);
++        PR_ExitMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+         send_ldap_result( pb, LDAP_AUTH_METHOD_NOT_SUPPORTED, NULL,
+                           "sasl library unavailable", 0, NULL );
+         return;
+@@ -842,7 +842,7 @@ void ids_sasl_check_bind(Slapi_PBlock *pb)
+         if (sasl_conn == NULL) {
+             send_ldap_result( pb, LDAP_AUTH_METHOD_NOT_SUPPORTED, NULL,
+                           "sasl library unavailable", 0, NULL );
+-            PR_Unlock(pb->pb_conn->c_mutex); /* BIG LOCK */
++            PR_ExitMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+             return;
+         }
+     }
+@@ -858,7 +858,7 @@ sasl_check_result:
+         /* retrieve the authenticated username */
+         if (sasl_getprop(sasl_conn, SASL_USERNAME,
+                          (const void**)&username) != SASL_OK) {
+-            PR_Unlock(pb->pb_conn->c_mutex); /* BIG LOCK */
++            PR_ExitMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+             send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL,
+                              "could not obtain sasl username", 0, NULL);
+             break;
+@@ -879,7 +879,7 @@ sasl_check_result:
+             }
+         }
+         if (dn == NULL) {
+-            PR_Unlock(pb->pb_conn->c_mutex); /* BIG LOCK */
++            PR_ExitMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+             send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL,
+                              "could not get auth dn from sasl", 0, NULL);
+             break;
+@@ -920,7 +920,7 @@ sasl_check_result:
+                                     slapi_ch_strdup(normdn), 
+                                     NULL, NULL, NULL, bind_target_entry);
+ 
+-        PR_Unlock(pb->pb_conn->c_mutex); /* BIG LOCK */
++        PR_ExitMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+ 
+         if (plugin_call_plugins( pb, SLAPI_PLUGIN_PRE_BIND_FN ) != 0){
+             break;
+@@ -995,9 +995,9 @@ sasl_check_result:
+         /* see if we negotiated a security layer */
+         if (*ssfp > 0) {
+             /* Enable SASL I/O on the connection */
+-            PR_Lock(pb->pb_conn->c_mutex);
++            PR_EnterMonitor(pb->pb_conn->c_mutex);
+             connection_set_io_layer_cb(pb->pb_conn, sasl_io_enable, NULL, NULL);
+-            PR_Unlock(pb->pb_conn->c_mutex);
++            PR_ExitMonitor(pb->pb_conn->c_mutex);
+         }
+ 
+         /* send successful result */
+@@ -1010,7 +1010,7 @@ sasl_check_result:
+ 
+     case SASL_CONTINUE:         /* another step needed */
+         pb->pb_conn->c_flags |= CONN_FLAG_SASL_CONTINUE;
+-        PR_Unlock(pb->pb_conn->c_mutex); /* BIG LOCK */
++        PR_ExitMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+ 
+         if (plugin_call_plugins( pb, SLAPI_PLUGIN_PRE_BIND_FN ) != 0){
+             break;
+@@ -1032,7 +1032,7 @@ sasl_check_result:
+ 
+     case SASL_NOMECH:
+ 
+-        PR_Unlock(pb->pb_conn->c_mutex); /* BIG LOCK */
++        PR_ExitMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+         send_ldap_result(pb, LDAP_AUTH_METHOD_NOT_SUPPORTED, NULL,
+                          "sasl mechanism not supported", 0, NULL);
+         break;
+@@ -1040,7 +1040,7 @@ sasl_check_result:
+     default:                    /* other error */
+         errstr = sasl_errdetail(sasl_conn);
+ 
+-        PR_Unlock(pb->pb_conn->c_mutex); /* BIG LOCK */
++        PR_ExitMonitor(pb->pb_conn->c_mutex); /* BIG LOCK */
+         send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL,
+                          (char*)errstr, 0, NULL);
+         break;
+diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
+index 823568d..2641f76 100644
+--- a/ldap/servers/slapd/slap.h
++++ b/ldap/servers/slapd/slap.h
+@@ -1411,7 +1411,7 @@ typedef struct conn {
+ 	PRInt32			c_opscompleted;	/* # ops completed		  */
+ 	PRInt32			c_threadnumber; /* # threads used in this conn    */
+ 	int				c_refcnt;	/* # ops refering to this conn    */
+-	PRLock			*c_mutex;	/* protect each conn structure    */
++	PRMonitor		*c_mutex;	/* protect each conn structure; need to be re-entrant */ 
+ 	PRLock			*c_pdumutex;	/* only write one pdu at a time   */
+ 	time_t			c_idlesince;	/* last time of activity on conn  */
+ 	int			c_idletimeout;	/* local copy of idletimeout */
+diff --git a/ldap/servers/slapd/start_tls_extop.c b/ldap/servers/slapd/start_tls_extop.c
+index 69b8607..af8d8f7 100644
+--- a/ldap/servers/slapd/start_tls_extop.c
++++ b/ldap/servers/slapd/start_tls_extop.c
+@@ -172,7 +172,7 @@ start_tls( Slapi_PBlock *pb )
+ 	/* At least we know that the request was indeed an Start TLS one. */
+ 
+ 	conn = pb->pb_conn;
+-	PR_Lock( conn->c_mutex );
++	PR_EnterMonitor(conn->c_mutex);
+ 	/* cannot call slapi_send_ldap_result with mutex locked - will deadlock if ber_flush returns error */
+ 	if ( conn->c_prfd == (PRFileDesc *) NULL ) {
+ 		slapi_log_error( SLAPI_LOG_PLUGIN, "start_tls",
+@@ -246,10 +246,10 @@ start_tls( Slapi_PBlock *pb )
+ 	 * we send a success response back to the client. */
+ 	ldapmsg = "Start TLS request accepted.Server willing to negotiate SSL.";
+ unlock_and_return:
+-	PR_Unlock( conn->c_mutex );
++	PR_ExitMonitor(conn->c_mutex);
+ 	slapi_send_ldap_result( pb, ldaprc, NULL, ldapmsg, 0, NULL );
+ 
+-	return( SLAPI_PLUGIN_EXTENDED_SENT_RESULT );	
++	return( SLAPI_PLUGIN_EXTENDED_SENT_RESULT );
+ 
+ }/* start_tls */
+ 
+@@ -312,7 +312,7 @@ start_tls_graceful_closure( Connection *c, Slapi_PBlock * pb, int is_initiator )
+ 	   */
+ 	}
+ 
+-	PR_Lock( c->c_mutex );
++	PR_EnterMonitor(c->c_mutex);
+ 
+ 	/* "Unimport" the socket from SSL, i.e. get rid of the upper layer of the 
+ 	 * file descriptor stack, which represents SSL. 
+@@ -342,7 +342,7 @@ start_tls_graceful_closure( Connection *c, Slapi_PBlock * pb, int is_initiator )
+ 
+ 	bind_credentials_clear( c, PR_FALSE, PR_TRUE );
+ 
+-	PR_Unlock( c->c_mutex );
++	PR_ExitMonitor(c->c_mutex);
+ 
+ 	return ( SLAPI_PLUGIN_EXTENDED_SENT_RESULT );
+ }    
+diff --git a/ldap/servers/slapd/unbind.c b/ldap/servers/slapd/unbind.c
+index 9b6a70f..c0dec9d 100644
+--- a/ldap/servers/slapd/unbind.c
++++ b/ldap/servers/slapd/unbind.c
+@@ -73,9 +73,9 @@ do_unbind( Slapi_PBlock *pb )
+ 	}
+ 
+ 	/* target spec is used to decide which plugins are applicable for the operation */
+-	PR_Lock( pb->pb_conn->c_mutex );
++	PR_EnterMonitor(pb->pb_conn->c_mutex);
+ 	operation_set_target_spec_str (operation, pb->pb_conn->c_dn);
+-	PR_Unlock( pb->pb_conn->c_mutex );
++	PR_ExitMonitor(pb->pb_conn->c_mutex);
+ 
+ 	/* ONREPL - plugins should be called and passed bind dn and, possibly, other data */
+ 
+-- 
+2.4.3
+
diff --git a/SOURCES/0082-Ticket-48412-worker-threads-do-not-detect-abnormally.patch b/SOURCES/0082-Ticket-48412-worker-threads-do-not-detect-abnormally.patch
new file mode 100644
index 0000000..212d4ce
--- /dev/null
+++ b/SOURCES/0082-Ticket-48412-worker-threads-do-not-detect-abnormally.patch
@@ -0,0 +1,173 @@
+From da9f4a9942f7a41ce8d07c7a73f67a0799424266 Mon Sep 17 00:00:00 2001
+From: Mark Reynolds <mreynolds@redhat.com>
+Date: Fri, 15 Jan 2016 11:35:16 -0500
+Subject: [PATCH] Ticket 48412 - worker threads do not detect abnormally closed
+ connections
+
+Bug Description:  If a connection is abnormally closed there can still be
+                  data in the connection buffer(bytes vs offset).  This prevents
+                  the connection from being removed from the connection table.
+                  The worker thread then goes into a loop trying to read this data
+                  on an already closed connection.  If there are enough abnormally
+                  closed connections, eventually all the worker threads are stuck,
+                  and new connections are not accepted.
+
+Fix Description:  When looking if there is more data in the buffer check if the
+                  connection was closed, and return 0 (no more data).
+
+                  Also did a little code cleanup.
+
+https://fedorahosted.org/389/ticket/48412
+
+Reviewed by: rmeggins(Thanks!)
+
+(cherry picked from commit 30c4852a3d9ca527b78c0f89df5909bc9a268392)
+(cherry picked from commit cd45d032421b0ecf76d8cbb9b1c3aeef7680d9a2)
+---
+ ldap/servers/slapd/connection.c | 46 ++++++++++++++++++++++++++++-------------
+ 1 file changed, 32 insertions(+), 14 deletions(-)
+
+diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
+index a3d123e..3e435a7 100644
+--- a/ldap/servers/slapd/connection.c
++++ b/ldap/servers/slapd/connection.c
+@@ -1102,9 +1102,16 @@ connection_read_ldap_data(Connection *conn, PRInt32 *err)
+ }
+ 
+ static size_t
+-conn_buffered_data_avail_nolock(Connection *conn)
++conn_buffered_data_avail_nolock(Connection *conn, int *conn_closed)
+ {
+-	return conn->c_private->c_buffer_bytes - conn->c_private->c_buffer_offset;
++	if ( (conn->c_sd == SLAPD_INVALID_SOCKET) || (conn->c_flags & CONN_FLAG_CLOSING) ) {
++		/* connection is closed - ignore the buffer */
++		*conn_closed = 1;
++		return 0;
++	} else {
++		*conn_closed = 0;
++		return conn->c_private->c_buffer_bytes - conn->c_private->c_buffer_offset;
++	}
+ }
+ 
+ /* Upon returning from this function, we have either: 
+@@ -1127,6 +1134,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i
+ 	PRErrorCode err = 0;
+ 	PRInt32 syserr = 0;
+ 	size_t buffer_data_avail;
++	int conn_closed = 0;
+ 
+ 	PR_EnterMonitor(conn->c_mutex);
+ 	/*
+@@ -1142,7 +1150,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i
+ 	
+ 	*tag = LBER_DEFAULT;
+ 	/* First check to see if we have buffered data from "before" */
+-	if ((buffer_data_avail = conn_buffered_data_avail_nolock(conn))) {
++	if ((buffer_data_avail = conn_buffered_data_avail_nolock(conn, &conn_closed))) {
+ 		/* If so, use that data first */
+ 		if ( 0 != get_next_from_buffer( buffer
+ 				+ conn->c_private->c_buffer_offset,
+@@ -1157,7 +1165,7 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i
+ 	while (*tag == LBER_DEFAULT) {
+ 		int ioblocktimeout_waits = config_get_ioblocktimeout() / CONN_TURBO_TIMEOUT_INTERVAL;
+ 		/* We should never get here with data remaining in the buffer */
+-		PR_ASSERT( !new_operation || 0 == conn_buffered_data_avail_nolock(conn) );
++		PR_ASSERT( !new_operation || !conn_buffered_data_avail_nolock(conn, &conn_closed));
+ 		/* We make a non-blocking read call */
+ 		if (CONNECTION_BUFFER_OFF != conn->c_private->use_buffer) {
+ 			ret = connection_read_ldap_data(conn,&err);
+@@ -1269,8 +1277,12 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i
+ 		}
+ 	}
+ 	/* If there is remaining buffered data, set the flag to tell the caller */
+-	if (conn_buffered_data_avail_nolock(conn)) {
++	if (conn_buffered_data_avail_nolock(conn, &conn_closed)) {
+ 		*remaining_data = 1;
++	} else if (conn_closed){
++		/* connection closed */
++		ret = CONN_DONE;
++		goto done;
+ 	}
+ 
+ 	if ( *tag != LDAP_TAG_MESSAGE ) {
+@@ -1521,7 +1533,7 @@ connection_threadmain()
+ 					continue;
+ 				case CONN_SHUTDOWN:
+ 					LDAPDebug( LDAP_DEBUG_TRACE, 
+-					"op_thread received shutdown signal\n", 					0,  0, 0 );
++					"op_thread received shutdown signal\n", 0, 0, 0 );
+ 					g_decr_active_threadcnt();
+ 					return;
+ 				case CONN_FOUND_WORK_TO_DO:
+@@ -1542,8 +1554,9 @@ connection_threadmain()
+ 							Slapi_DN *anon_sdn = slapi_sdn_new_normdn_byref( anon_dn );
+ 							reslimit_update_from_dn( pb->pb_conn, anon_sdn );
+ 							slapi_sdn_free( &anon_sdn );
+-							if (slapi_reslimit_get_integer_limit(pb->pb_conn, pb->pb_conn->c_idletimeout_handle,
+-									&idletimeout)
++							if (slapi_reslimit_get_integer_limit(pb->pb_conn,
++							                                     pb->pb_conn->c_idletimeout_handle,
++							                                     &idletimeout)
+ 								== SLAPI_RESLIMIT_STATUS_SUCCESS)
+ 							{
+ 								pb->pb_conn->c_idletimeout = idletimeout;
+@@ -1581,7 +1594,7 @@ connection_threadmain()
+ 		op = pb->pb_op;
+ 		maxthreads = config_get_maxthreadsperconn();
+ 		more_data = 0;
+-		ret = connection_read_operation(conn,op,&tag,&more_data);
++		ret = connection_read_operation(conn, op, &tag, &more_data);
+ 		if ((ret == CONN_DONE) || (ret == CONN_TIMEDOUT)) {
+ 			slapi_log_error(SLAPI_LOG_CONNS, "connection_threadmain",
+ 					"conn %" NSPRIu64 " read not ready due to %d - thread_turbo_flag %d more_data %d "
+@@ -1614,7 +1627,8 @@ connection_threadmain()
+ 		/* turn off turbo mode immediately if any pb waiting in global queue */
+ 		if (thread_turbo_flag && !WORK_Q_EMPTY) {
+ 			thread_turbo_flag = 0;
+-			LDAPDebug2Args(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " leaving turbo mode - pb_q is not empty %d\n",conn->c_connid,work_q_size);
++			LDAPDebug2Args(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " leaving turbo mode - pb_q is not empty %d\n",
++			               conn->c_connid,work_q_size);
+ 		}
+ #endif
+ 		
+@@ -1639,7 +1653,8 @@ connection_threadmain()
+ 				 * should call connection_make_readable after the op is removed
+ 				 * connection_make_readable(conn);
+ 				 */
+-				LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " leaving turbo mode due to %d\n",conn->c_connid,ret,0);
++				LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " leaving turbo mode due to %d\n",
++				          conn->c_connid,ret,0);
+ 				goto done;
+ 			case CONN_SHUTDOWN:
+ 				LDAPDebug( LDAP_DEBUG_TRACE, 
+@@ -1695,7 +1710,8 @@ connection_threadmain()
+ 					 */
+ 					conn->c_idlesince = curtime;
+ 					connection_activity(conn, maxthreads);
+-					LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " queued because more_data\n",conn->c_connid,0,0);
++					LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " queued because more_data\n",
++					          conn->c_connid,0,0);
+ 				} else {
+ 					/* keep count of how many times maxthreads has blocked an operation */
+ 					conn->c_maxthreadsblocked++;
+@@ -1770,13 +1786,15 @@ done:
+ 			    memset(pb, 0, sizeof(*pb));
+ 		} else {
+ 			/* delete from connection operation queue & decr refcnt */
++			int conn_closed = 0;
+ 			PR_EnterMonitor(conn->c_mutex);
+ 			connection_remove_operation_ext( pb, conn, op );
+ 
+ 			/* If we're in turbo mode, we keep our reference to the connection alive */
+ 			/* can't use the more_data var because connection could have changed in another thread */
+-			more_data = conn_buffered_data_avail_nolock(conn) ? 1 : 0;
+-			LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " check more_data %d thread_turbo_flag %d\n",conn->c_connid,more_data,thread_turbo_flag);
++			more_data = conn_buffered_data_avail_nolock(conn, &conn_closed) ? 1 : 0;
++			LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " check more_data %d thread_turbo_flag %d\n",
++			          conn->c_connid,more_data,thread_turbo_flag);
+ 			if (!more_data) {
+ 				if (!thread_turbo_flag) {
+ 					/*
+-- 
+2.4.3
+
diff --git a/SOURCES/0083-Ticket-48341-deadlock-on-connection-mutex.patch b/SOURCES/0083-Ticket-48341-deadlock-on-connection-mutex.patch
new file mode 100644
index 0000000..eb0047e
--- /dev/null
+++ b/SOURCES/0083-Ticket-48341-deadlock-on-connection-mutex.patch
@@ -0,0 +1,38 @@
+From 666fdac51b94450391e8fec8d16db34db09502ae Mon Sep 17 00:00:00 2001
+From: Ludwig Krispenz <lkrispen@redhat.com>
+Date: Wed, 13 Jan 2016 13:15:53 +0100
+Subject: [PATCH] Ticket: 48341 - deadlock on connection mutex
+
+If a thread is blocked in connection_read_operation() it holds the connection mutex
+and the main thread iterating through the connection table is also blocked.
+
+But if the main thread did get the mutex, it would just detect that the connection still has the
+c_gettingber flag set and immediately release the lock.
+
+The check if c_gettingber == 0 can be done without holding the mutex and so the deadlock
+can be avoided
+
+Reviewed by Rich, Thanks
+
+(cherry picked from commit a1635fc45f681ed9066f6beed9be7e1672490f9f)
+---
+ ldap/servers/slapd/daemon.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index 355f0fc..d25c44d 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -1740,6 +1740,9 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll)
+ 	{
+ 		if ( c->c_mutex != NULL )
+ 		{
++			/* this check can be done without acquiring the mutex */
++			if (c->c_gettingber) continue;
++
+ 			PR_EnterMonitor(c->c_mutex);
+ 			if ( connection_is_active_nolock (c) && c->c_gettingber == 0 )
+ 			{
+-- 
+2.4.3
+
diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec
index afb86e5..fbc9ba2 100644
--- a/SPECS/389-ds-base.spec
+++ b/SPECS/389-ds-base.spec
@@ -34,7 +34,7 @@
 Summary:          389 Directory Server (base)
 Name:             389-ds-base
 Version:          1.3.4.0
-Release:          %{?relprefix}21%{?prerel}%{?dist}
+Release:          %{?relprefix}26%{?prerel}%{?dist}
 License:          GPLv2 with exceptions
 URL:              http://port389.org/
 Group:            System Environment/Daemons
@@ -199,6 +199,13 @@ Patch72:          0073-Ticket-48325-Replica-promotion-leaves-RUV-out-of-ord.patc
 Patch73:          0074-Ticket-48344-acl-regression-trailing-comma-in-macro-.patch
 Patch74:          0075-Ticket-48339-Share-nsslapd-threadnumber-in-the-case-.patch
 Patch75:          0076-Ticket-48338-SimplePagedResults-abandon-could-happen.patch
+Patch76:          0077-Ticket-48370-The-eq-index-does-not-get-updated-prope.patch
+Patch77:          0078-Ticket-48375-SimplePagedResults-in-the-search-error-.patch
+Patch78:          0079-Ticket-48283-many-attrlist_replace-errors-in-connect.patch
+Patch79:          0080-Revert-Ticket-48338-SimplePagedResults-abandon-could.patch
+Patch80:          0081-Ticket-48406-Avoid-self-deadlock-by-PR_Lock-conn-c_m.patch
+Patch81:          0082-Ticket-48412-worker-threads-do-not-detect-abnormally.patch
+Patch82:          0083-Ticket-48341-deadlock-on-connection-mutex.patch
 
 %description
 389 Directory Server is an LDAPv3 compliant server.  The base package includes
@@ -338,6 +345,13 @@ cp %{SOURCE2} README.devel
 %patch73 -p1
 %patch74 -p1
 %patch75 -p1
+%patch76 -p1
+%patch77 -p1
+%patch78 -p1
+%patch79 -p1
+%patch80 -p1
+%patch81 -p1
+%patch82 -p1
 
 %build
 %if %{use_nunc_stans}
@@ -532,6 +546,27 @@ fi
 %endif
 
 %changelog
+* Mon Jan 25 2016 Noriko Hosoi <nhosoi@redhat.com> - 1.3.4.0-26
+- release 1.3.4.0-26
+- Resolves: bug 1299346 - deadlock on connection mutex (DS 48341)
+
+* Thu Jan 21 2016 Noriko Hosoi <nhosoi@redhat.com> - 1.3.4.0-25
+- release 1.3.4.0-25
+- Resolves: bug 1299757 - CVE-2016-0741 389-ds-base: Worker threads do not detect abnormally closed connections causing DoS
+
+* Wed Jan 13 2016 Noriko Hosoi <nhosoi@redhat.com> - 1.3.4.0-24
+- release 1.3.4.0-24
+- Resolves: bug 1298105 - 389-ds hanging after a few minutes of operation (DS 48406)
+
+* Tue Jan  5 2016 Noriko Hosoi <nhosoi@redhat.com> - 1.3.4.0-23
+- release 1.3.4.0-23
+- Resolves: bug 1295684 - many attrlist_replace errors in connection with cleanallruv (DS 48283)
+
+* Fri Dec 11 2015 Noriko Hosoi <nhosoi@redhat.com> - 1.3.4.0-22
+- release 1.3.4.0-22
+- Resolves: bug 1290725 - SimplePagedResults -- in the search error case, simple paged results slot was not released. (DS 48375)
+- Resolves: bug 1290726 - The 'eq' index does not get updated properly when deleting and re-adding attributes in the same modify operation (DS 48370)
+
 * Wed Nov 18 2015 Noriko Hosoi <nhosoi@redhat.com> - 1.3.4.0-21
 - release 1.3.4.0-21
 - Resolves: bug 1278730 - SimplePagedResults -- abandon could happen between the abandon check and sending results -- Fixing a regression introduced in 1.3.4.0-20 (DS 48338)