From 3e7c37a2d605faf7e2f4c5b8ef39309e1b231de1 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 7 Jan 2015 08:59:06 -0500
Subject: [PATCH 291/305] Ticket 47981 - COS cache doesn't properly mark vattr
 cache as invalid when there are multiple suffixes

Bug Description:  When rebuilding the COS cache, we check each suffix for COS entries.
                  If the last suffix checked does not contain any COS entries, then the
                  virtual attribute cache is incorrectly not invalidated.  This lets
                  already-cached entries hold onto the old COS attributes/values.

Fix Description:  Set the vattr_cacheable flag only when a suffix contains COS entries;
                  by default the flag is left unset.
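
To make the fix concrete, here is a minimal sketch of the corrected suffix scan
(illustrative only, not the plugin's actual code: scan_suffixes() and
suffix_has_cos_defs() are hypothetical stand-ins for
cos_cache_build_definition_list() and cos_cache_add_dn_defs()):

    /* Hypothetical stand-in for cos_cache_add_dn_defs(): returns 0 when
     * the suffix yields COS definitions, non-zero otherwise. */
    static int suffix_has_cos_defs(const char *suffix)
    {
        (void)suffix;
        return 1; /* placeholder: pretend no definitions were found */
    }

    /* The flag is set only when a suffix actually yields COS definitions,
     * so a trailing COS-free suffix can no longer reset it. */
    static int scan_suffixes(const char **suffixes, int count, int *vattr_cacheable)
    {
        int cos_def_available = 0;
        int i;

        for (i = 0; i < count; i++) {
            if (suffix_has_cos_defs(suffixes[i]) == 0) {
                *vattr_cacheable = -1; /* set on success, never cleared here */
                cos_def_available = 1;
                break; /* one COS-bearing suffix is enough */
            }
        }
        return cos_def_available;
    }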

https://fedorahosted.org/389/ticket/47981

Reviewed by: nhosoi (Thanks!)

(cherry picked from commit 42e2df3858a4e14706d57b5c907d1d3768f4d970)
(cherry picked from commit c1ba7eb8c41c44bd7689bfc2526a2aa7fff47284)
---
 dirsrvtests/tickets/ticket47981_test.py | 345 ++++++++++++++++++++++++++++++++
 ldap/servers/plugins/cos/cos_cache.c    |  34 ++--
 2 files changed, 359 insertions(+), 20 deletions(-)
 create mode 100644 dirsrvtests/tickets/ticket47981_test.py

diff --git a/dirsrvtests/tickets/ticket47981_test.py b/dirsrvtests/tickets/ticket47981_test.py
new file mode 100644
index 0000000..2a16ce6
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47981_test.py
@@ -0,0 +1,345 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+BRANCH = 'ou=people,' + DEFAULT_SUFFIX
+USER_DN = 'uid=user1,%s' % (BRANCH)
+BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
+BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com'
+BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+             'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+                  'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+SECOND_SUFFIX = 'o=netscaperoot'
+BE_NAME = 'netscaperoot'
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    '''
+        This fixture creates a standalone topology for the 'module'.
+        At the beginning, a standalone instance may already exist,
+        and a backup of the standalone instance may exist as well.
+
+        Principle:
+            If the standalone instance exists:
+                restart it
+            If a backup of the standalone instance exists:
+                create/rebind to the standalone instance
+
+                restore the standalone instance from the backup
+            else:
+                Cleanup everything
+                    remove the instance
+                    remove the backup
+                Create the instance
+                Create the backup
+    '''
+    global installation_prefix
+
+    if installation_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+    standalone = DirSrv(verbose=False)
+
+    # Args for the standalone instance
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+
+    # Get the status of the backups
+    backup_standalone = standalone.checkBackupFS()
+
+    # Get the status of the instance and restart it if it exists
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        # assuming the instance is already stopped, just wait 5 sec max
+        standalone.stop(timeout=5)
+        standalone.start(timeout=10)
+
+    if backup_standalone:
+        # The backup exists; assuming it is correct,
+        # we just re-init the instance with it
+        if not instance_standalone:
+            standalone.create()
+            # Used to retrieve configuration information (dbdir, confdir...)
+            standalone.open()
+
+        # restore standalone instance from backup
+        standalone.stop(timeout=10)
+        standalone.restoreFS(backup_standalone)
+        standalone.start(timeout=10)
+
+    else:
+        # We should only get here under two conditions:
+        #      - This is the first time a test involves the standalone instance
+        #      - Something weird happened (instance/backup destroyed),
+        #        so we discard everything and recreate it all
+
+        # Remove the backup. Even if we have a specific backup file
+        # (e.g. backup_standalone), we clear any backup that an instance may have created
+        if backup_standalone:
+            standalone.clearBackupFS()
+
+        # Remove the instance
+        if instance_standalone:
+            standalone.delete()
+
+        # Create the instance
+        standalone.create()
+
+        # Used to retrieve configuration information (dbdir, confdir...)
+        standalone.open()
+
+        # Time to create the backups
+        standalone.stop(timeout=10)
+        standalone.backupfile = standalone.backupFS()
+        standalone.start(timeout=10)
+
+    # clear the tmp directory
+    standalone.clearTmpDir(__file__)
+
+    #
+    # Here we have the standalone instance up and running,
+    # either coming from a backup recovery
+    # or from a fresh (re)init.
+    # Time to return the topology
+    return TopologyStandalone(standalone)
+
+
+def addSubtreePwPolicy(inst):
+    #
+    # Add subtree policy to the people branch
+    #
+    try:
+        inst.add_s(Entry((BRANCH_CONTAINER, {
+                          'objectclass': 'top nsContainer'.split(),
+                          'cn': 'nsPwPolicyContainer'
+                          })))
+    except ldap.LDAPError, e:
+        log.error('Failed to add subtree container for ou=people: error ' + e.message['desc'])
+        assert False
+
+    # Add the password policy subentry
+    try:
+        inst.add_s(Entry((BRANCH_PWP, {
+                          'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+                          'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+                          'passwordMustChange': 'off',
+                          'passwordExp': 'off',
+                          'passwordHistory': 'off',
+                          'passwordMinAge': '0',
+                          'passwordChange': 'off',
+                          'passwordStorageScheme': 'ssha'
+                          })))
+    except ldap.LDAPError, e:
+        log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
+        assert False
+
+    # Add the COS template
+    try:
+        inst.add_s(Entry((BRANCH_COS_TMPL, {
+                          'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+                          # cn names the template entry
+                          'cosPriority': '1',
+                          'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+                          'pwdpolicysubentry': BRANCH_PWP
+                          })))
+    except ldap.LDAPError, e:
+        log.error('Failed to add COS template: error ' + e.message['desc'])
+        assert False
+
+    # Add the COS definition
+    try:
+        inst.add_s(Entry((BRANCH_COS_DEF, {
+                          'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+                          'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+                          'costemplatedn': BRANCH_COS_TMPL,
+                          'cosAttribute': 'pwdpolicysubentry default operational-default'
+                          })))
+    except ldap.LDAPError, e:
+        log.error('Failed to add COS def: error ' + e.message['desc'])
+        assert False
+    time.sleep(0.5)
+
+
+def delSubtreePwPolicy(inst):
+    try:
+        inst.delete_s(BRANCH_COS_DEF)
+    except ldap.LDAPError, e:
+        log.error('Failed to delete COS def: error ' + e.message['desc'])
+        assert False
+
+    try:
+        inst.delete_s(BRANCH_COS_TMPL)
+    except ldap.LDAPError, e:
+        log.error('Failed to delete COS template: error ' + e.message['desc'])
+        assert False
+
+    try:
+        inst.delete_s(BRANCH_PWP)
+    except ldap.LDAPError, e:
+        log.error('Failed to delete COS password policy: error ' + e.message['desc'])
+        assert False
+
+    try:
+        inst.delete_s(BRANCH_CONTAINER)
+    except ldap.LDAPError, e:
+        log.error('Failed to delete COS container: error ' + e.message['desc'])
+        assert False
+    time.sleep(0.5)
+
+
+def test_ticket47981(topology):
+    """
+        If there are multiple suffixes, and the last suffix checked does not contain any COS entries
+        while other suffixes do, then the vattr cache is not invalidated as it should be.  As a result,
+        cached entries will still contain the old COS attributes/values.
+    """
+
+    log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users')
+
+    #
+    # Create a second backend that does not have any COS entries
+    #
+    log.info('Adding second suffix that will not contain any COS entries...\n')
+
+    topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
+    topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
+    try:
+        topology.standalone.add_s(Entry((SECOND_SUFFIX, {
+                          'objectclass': 'top organization'.split(),
+                          'o': BE_NAME})))
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError, e:
+        log.error('Failed to create suffix entry: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Add the People branch; it might already exist
+    #
+    log.info('Add our test entries to the default suffix, and proceed with the test...')
+
+    try:
+        topology.standalone.add_s(Entry((BRANCH, {
+                          'objectclass': 'top extensibleObject'.split(),
+                          'ou': 'people'
+                          })))
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError, e:
+        log.error('Failed to add ou=people: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Add a user to the branch
+    #
+    try:
+        topology.standalone.add_s(Entry((USER_DN, {
+                          'objectclass': 'top extensibleObject'.split(),
+                          'uid': 'user1'
+                          })))
+    except ldap.LDAPError, e:
+        log.error('Failed to add user1: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Enable password policy and add the subtree policy
+    #
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+    except ldap.LDAPError, e:
+        log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
+        assert False
+
+    addSubtreePwPolicy(topology.standalone)
+
+    #
+    # Now check the user has its expected passwordPolicy subentry
+    #
+    try:
+        entries = topology.standalone.search_s(USER_DN,
+                                              ldap.SCOPE_BASE,
+                                              '(objectclass=top)',
+                                              ['pwdpolicysubentry', 'dn'])
+        if not entries[0].hasAttr('pwdpolicysubentry'):
+            log.fatal('User does not have expected pwdpolicysubentry!')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+        assert False
+
+    #
+    # Delete the password policy and make sure it is removed from the same user
+    #
+    delSubtreePwPolicy(topology.standalone)
+    try:
+        entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        if entries[0].hasAttr('pwdpolicysubentry'):
+            log.fatal('User unexpectedly still has the pwdpolicysubentry!')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+        assert False
+
+    #
+    # Add the subtree policy back and see if the user now has it
+    #
+    addSubtreePwPolicy(topology.standalone)
+    try:
+        entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        if not entries[0].hasAttr('pwdpolicysubentry'):
+            log.fatal('User does not have expected pwdpolicysubentry!')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+        assert False
+
+    # If we got here the test passed
+    log.info('Test PASSED')
+
+
+def test_ticket47981_final(topology):
+    topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+    '''
+        run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test, ...).
+        To run isolated without py.test, you need to:
+            - edit this file and comment out the '@pytest.fixture' line before the 'topology' function.
+            - set the installation prefix
+            - run this program
+    '''
+    global installation_prefix
+    installation_prefix = None
+
+    topo = topology(True)
+    test_ticket47981(topo)
+
+if __name__ == '__main__':
+    run_isolated()
\ No newline at end of file
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 76cc8fd..f41c3de 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -300,7 +300,7 @@ static int cos_cache_add_tmpl(cosTemplates **pTemplates, cosAttrValue *dn, cosAt
 
 /* cosDefinitions manipulation */
 static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_cacheable);
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable);
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs);
 static int cos_cache_add_defn(cosDefinitions **pDefs, cosAttrValue **dn, int cosType, cosAttrValue **tree, cosAttrValue **tmpDn, cosAttrValue **spec, cosAttrValue **pAttrs, cosAttrValue **pOverrides, cosAttrValue **pOperational, cosAttrValue **pCosMerge, cosAttrValue **pCosOpDefault);
 static int cos_cache_entry_is_cos_related( Slapi_Entry *e);
 
@@ -659,9 +659,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
 	LDAPDebug( LDAP_DEBUG_TRACE, "--> cos_cache_build_definition_list\n",0,0,0);
 
 	/*
-		the class of service definitions may be anywhere in the DIT,
-		so our first task is to find them.
-	*/
+	 * The class of service definitions may be anywhere in the DIT,
+	 * so our first task is to find them.
+	 */
 
 	attrs[0] = "namingcontexts";
 	attrs[1] = 0;
@@ -669,9 +669,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
 	LDAPDebug( LDAP_DEBUG_PLUGIN, "cos: Building class of service cache after status change.\n",0,0,0);
 
 	/*
-	 * XXXrbyrne: this looks really ineficient--should be using
+	 * XXXrbyrne: this looks really inefficient--should be using
 	 * slapi_get_next_suffix(), rather than searching for namingcontexts.
-	*/
+	 */
 
 	pSuffixSearch = slapi_search_internal("",LDAP_SCOPE_BASE,"(objectclass=*)",NULL,attrs,0);
 	if(pSuffixSearch)
@@ -711,19 +711,21 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
 							{
 								/* here's a suffix, lets search it... */
 								if(suffixVals[valIndex]->bv_val)
-									if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs, vattr_cacheable))
+								{
+									if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs))
+									{
+										*vattr_cacheable = -1;
 										cos_def_available = 1;
-								
+										break;
+									}
+								}
 								valIndex++;
 							}
-
-
 							ber_bvecfree( suffixVals );
 							suffixVals = NULL;
 						}
 					}
 				}
-
 			} while(!slapi_entry_next_attr(pSuffixList[suffixIndex], suffixAttr, &suffixAttr));
 		}
 		suffixIndex++;
@@ -749,7 +751,6 @@ next:
 		slapi_pblock_destroy(pSuffixSearch);
 	}
 
-
 	LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_build_definition_list\n",0,0,0);
 	return ret;
 }
@@ -790,10 +791,6 @@ cos_dn_defs_cb (Slapi_Entry* e, void *callback_data)
 	char *norm_dn = NULL;
 	info=(struct dn_defs_info *)callback_data;
 	
-			
-	/* assume cacheable */
-	info->vattr_cacheable = -1;
-
 	cos_cache_add_attrval(&pDn, slapi_entry_get_dn(e));
 	if(slapi_entry_first_attr(e, &dnAttr)) {
 		goto bail;
@@ -1119,7 +1116,7 @@ bail:
 
 #define DN_DEF_FILTER "(&(|(objectclass=cosSuperDefinition)(objectclass=cosDefinition))(objectclass=ldapsubentry))"
 
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable)
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs)
 {
 	Slapi_PBlock *pDnSearch = 0;
 	struct dn_defs_info info = {NULL, 0, 0};
@@ -1127,7 +1124,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
 	if (pDnSearch) {
 		info.ret=-1; /* assume no good defs */
 		info.pDefs=pDefs;
-		info.vattr_cacheable = 0; /* assume not cacheable */
 		slapi_search_internal_set_pb(pDnSearch, dn, LDAP_SCOPE_SUBTREE,
 									 DN_DEF_FILTER,NULL,0,
 									 NULL,NULL,cos_get_plugin_identity(),0);
@@ -1139,8 +1135,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
 		slapi_pblock_destroy (pDnSearch);
 	}
 
-	*vattr_cacheable = info.vattr_cacheable;
-
 	return info.ret;
 }
 
-- 
1.9.3
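
As a usage sketch (the command is illustrative, not part of the patch, and assumes a
configured lib389 test environment), the new test is meant to be driven by py.test:

    py.test -v dirsrvtests/tickets/ticket47981_test.py

Alternatively, it can be run on its own via run_isolated(), as described in its docstring.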