Blame SOURCES/0022-Ticket-50078-cannot-add-cenotaph-in-read-only-consum.patch

769039
From 7b4cb7aebdf5264e12e4ffad96fd21b3d7d2a14f Mon Sep 17 00:00:00 2001
769039
From: Ludwig Krispenz <lkrispen@redhat.com>
769039
Date: Tue, 11 Dec 2018 11:06:44 +0100
769039
Subject: [PATCH] Ticket 50078 - cannot add cenotaph in read only consumer
769039
769039
Bug: For modrdn operations a cenotaph entry is created to be used in later conflict
769039
     resolution procedures. This is done by an internal add operation and
769039
     fails on hubs and consumers
769039
769039
Fix: Add the "bypass referral" flag to the internal add operation to allow it
769039
769039
Reviewed by: Thierry, thanks
769039
---
769039
 dirsrvtests/tests/tickets/ticket50078_test.py | 68 +++++++++++++++++++
769039
 ldap/servers/plugins/replication/urp.c        |  2 +-
769039
 2 files changed, 69 insertions(+), 1 deletion(-)
769039
 create mode 100644 dirsrvtests/tests/tickets/ticket50078_test.py
769039
769039
diff --git a/dirsrvtests/tests/tickets/ticket50078_test.py b/dirsrvtests/tests/tickets/ticket50078_test.py
769039
new file mode 100644
769039
index 000000000..3f6c5ec2d
769039
--- /dev/null
769039
+++ b/dirsrvtests/tests/tickets/ticket50078_test.py
769039
@@ -0,0 +1,68 @@
769039
+import pytest
769039
+from lib389.utils import *
769039
+from lib389.topologies import topology_m1h1c1
769039
+from lib389.idm.user import UserAccounts
769039
+
769039
+from lib389._constants import (DEFAULT_SUFFIX, REPLICA_RUV_FILTER, defaultProperties,
769039
+                              REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD,
769039
+                              REPLICATION_TRANSPORT, SUFFIX, RA_NAME, RA_BINDDN, RA_BINDPW,
769039
+                              RA_METHOD, RA_TRANSPORT_PROT, SUFFIX)
769039
+
769039
+logging.getLogger(__name__).setLevel(logging.DEBUG)
769039
+log = logging.getLogger(__name__)
769039
+
769039
+TEST_USER = "test_user"
769039
+
769039
+def test_ticket50078(topology_m1h1c1):
769039
+    """
769039
+    Test that for a MODRDN operation the cenotaph entry is created on
769039
+    a hub or consumer.
769039
+    """
769039
+
769039
+    M1 = topology_m1h1c1.ms["master1"]
769039
+    H1 = topology_m1h1c1.hs["hub1"]
769039
+    C1 = topology_m1h1c1.cs["consumer1"]
769039
+    #
769039
+    # Test replication is working
769039
+    #
769039
+    if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]):
769039
+        log.info('Replication is working.')
769039
+    else:
769039
+        log.fatal('Replication is not working.')
769039
+        assert False
769039
+
769039
+    ua = UserAccounts(M1, DEFAULT_SUFFIX)
769039
+    ua.create(properties={
769039
+            'uid': "%s%d" % (TEST_USER, 1),
769039
+            'cn' : "%s%d" % (TEST_USER, 1),
769039
+            'sn' : 'user',
769039
+            'uidNumber' : '1000',
769039
+            'gidNumber' : '2000',
769039
+            'homeDirectory' : '/home/testuser'
769039
+            })
769039
+
769039
+    user = ua.get('%s1' % TEST_USER)
769039
+    log.info("  Rename the test entry %s..." % user)
769039
+    user.rename('uid=test_user_new')
769039
+
769039
+    # wait until replication is in sync
769039
+    if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]):
769039
+        log.info('Replication is working.')
769039
+    else:
769039
+        log.fatal('Replication is not working.')
769039
+        assert False
769039
+
769039
+    # check if cenotaph was created on hub and consumer
769039
+    ents = H1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))")
769039
+    assert len(ents) == 1
769039
+
769039
+    ents = C1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))")
769039
+    assert len(ents) == 1
769039
+
769039
+
769039
+
769039
+if __name__ == '__main__':
769039
+    # Run isolated
769039
+    # -s for DEBUG mode
769039
+    CURRENT_FILE = os.path.realpath(__file__)
769039
+    pytest.main("-s %s" % CURRENT_FILE)
769039
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
769039
index 11c5da7cf..37fe77379 100644
769039
--- a/ldap/servers/plugins/replication/urp.c
769039
+++ b/ldap/servers/plugins/replication/urp.c
769039
@@ -911,7 +911,7 @@ urp_fixup_add_cenotaph (Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
769039
                                     cenotaph,
769039
                                     NULL,
769039
                                     repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION),
769039
-                                    OP_FLAG_REPL_FIXUP|OP_FLAG_NOOP|OP_FLAG_CENOTAPH_ENTRY);
769039
+                                    OP_FLAG_REPL_FIXUP|OP_FLAG_NOOP|OP_FLAG_CENOTAPH_ENTRY|SLAPI_OP_FLAG_BYPASS_REFERRALS);
769039
     slapi_add_internal_pb(add_pb);
769039
     slapi_pblock_get(add_pb, SLAPI_PLUGIN_INTOP_RESULT, &ret);
769039
 
769039
-- 
769039
2.17.2
769039