From 80e8d8fc8eb44d45af5285308cda37553611f688 Mon Sep 17 00:00:00 2001
From: William Brown
Date: Sat, 9 Jul 2016 19:02:37 +1000
Subject: [PATCH 04/15] Ticket 48916 - DNA Threshold set to 0 causes SIGFPE

Bug Description: If the DNA threshold was set to 0, a divide by zero
would occur when requesting ranges.

Fix Description: Prevent the config from setting a value of 0 for the
DNA threshold. If an existing site has a threshold of 0, we guard the
divide operation and return an operations error instead.

https://fedorahosted.org/389/ticket/48916

Author: wibrown

Review by: nhosoi, mreynolds (Thank you!)

(cherry picked from commit 05ebb6d10cf0ec8e03c59bade7f819ddb1fdcf78)
---
 .gitignore                                    |   1 +
 dirsrvtests/tests/tickets/ticket48916_test.py | 253 ++++++++++++++++++++++++++
 ldap/servers/plugins/dna/dna.c                |  40 +++-
 3 files changed, 289 insertions(+), 5 deletions(-)
 create mode 100644 dirsrvtests/tests/tickets/ticket48916_test.py

diff --git a/.gitignore b/.gitignore
index f6583c2..f92bcd8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ autom4te.cache
 .cproject
 .project
 .settings
+.cache
 *.a
 *.dirstamp
 *.la
diff --git a/dirsrvtests/tests/tickets/ticket48916_test.py b/dirsrvtests/tests/tickets/ticket48916_test.py
new file mode 100644
index 0000000..44c96da
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket48916_test.py
@@ -0,0 +1,253 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+DEBUGGING = False
+
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+
+
+log = logging.getLogger(__name__)
+
+
+class TopologyReplication(object):
+    """The Replication Topology Class"""
+    def __init__(self, master1, master2):
+        """Init"""
+        master1.open()
+        self.master1 = master1
+        master2.open()
+        self.master2 = master2
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    """Create Replication Deployment"""
+
+    # Creating master 1...
+    if DEBUGGING:
+        master1 = DirSrv(verbose=True)
+    else:
+        master1 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_1
+    args_instance[SER_PORT] = PORT_MASTER_1
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master1.allocate(args_master)
+    instance_master1 = master1.exists()
+    if instance_master1:
+        master1.delete()
+    master1.create()
+    master1.open()
+    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+    # Creating master 2...
+    if DEBUGGING:
+        master2 = DirSrv(verbose=True)
+    else:
+        master2 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_2
+    args_instance[SER_PORT] = PORT_MASTER_2
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master2.allocate(args_master)
+    instance_master2 = master2.exists()
+    if instance_master2:
+        master2.delete()
+    master2.create()
+    master2.open()
+    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+    #
+    # Create all the agreements
+    #
+    # Creating agreement from master 1 to master 2
+    properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+    if not m1_m2_agmt:
+        log.fatal("Failed to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m1_m2_agmt)
+
+    # Creating agreement from master 2 to master 1
+    properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+    if not m2_m1_agmt:
+        log.fatal("Failed to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m2_m1_agmt)
+
+    # Allow the replicas to get situated with the new agreements...
+    time.sleep(5)
+
+    #
+    # Initialize all the agreements
+    #
+    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    master1.waitForReplInit(m1_m2_agmt)
+
+    # Check replication is working...
+    if master1.testReplication(DEFAULT_SUFFIX, master2):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    def fin():
+        """If we are debugging just stop the instances, otherwise remove
+        them
+        """
+        if DEBUGGING:
+            master1.stop()
+            master2.stop()
+        else:
+            master1.delete()
+            master2.delete()
+
+    request.addfinalizer(fin)
+
+    # Clear out the tmp dir
+    master1.clearTmpDir(__file__)
+
+    return TopologyReplication(master1, master2)
+
+
+def _create_user(inst, idnum):
+    inst.add_s(Entry(
+        ('uid=user%s,ou=People,%s' % (idnum, DEFAULT_SUFFIX), {
+            'objectClass': 'top account posixAccount'.split(' '),
+            'cn': 'user',
+            'uid': 'user%s' % idnum,
+            'homeDirectory': '/home/user%s' % idnum,
+            'loginShell': '/bin/nologin',
+            'gidNumber': '-1',
+            'uidNumber': '-1',
+        })
+    ))
+
+def test_ticket48916(topology):
+    """
+    https://bugzilla.redhat.com/show_bug.cgi?id=1353629
+
+    This is an issue with ID exhaustion in DNA causing a crash.
+
+    To access each DirSrv instance use: topology.master1, topology.master2,
+        ..., topology.hub1, ..., topology.consumer1,...
+    """
+
+    if DEBUGGING:
+        # Add debugging steps (if any)...
+        pass
+
+    # Enable the plugin on both servers
+
+    dna_m1 = topology.master1.plugins.get('Distributed Numeric Assignment Plugin')
+    dna_m2 = topology.master2.plugins.get('Distributed Numeric Assignment Plugin')
+
+    # Configure it
+    # Create the container for the ranges to go into.
+
+    topology.master1.add_s(Entry(
+        ('ou=Ranges,%s' % DEFAULT_SUFFIX, {
+            'objectClass': 'top organizationalUnit'.split(' '),
+            'ou': 'Ranges',
+        })
+    ))
+
+    # Create the dnaAdmin?
+
+    # For now we just pinch the DN from the dna_m* plugin entries and add the
+    # relevant child config, but in the future this could be a better plugin
+    # template type from lib389.
+
+    config_dn = dna_m1.dn
+
+    topology.master1.add_s(Entry(
+        ('cn=uids,%s' % config_dn, {
+            'objectClass': 'top dnaPluginConfig'.split(' '),
+            'cn': 'uids',
+            'dnatype': 'uidNumber gidNumber'.split(' '),
+            'dnafilter': '(objectclass=posixAccount)',
+            'dnascope': '%s' % DEFAULT_SUFFIX,
+            'dnaNextValue': '1',
+            'dnaMaxValue': '50',
+            'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX,
+            'dnaThreshold': '0',
+            'dnaRangeRequestTimeout': '60',
+            'dnaMagicRegen': '-1',
+            'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX,
+            'dnaRemoteBindCred': 'secret123',
+            'dnaNextRange': '80-90'
+        })
+    ))
+
+    topology.master2.add_s(Entry(
+        ('cn=uids,%s' % config_dn, {
+            'objectClass': 'top dnaPluginConfig'.split(' '),
+            'cn': 'uids',
+            'dnatype': 'uidNumber gidNumber'.split(' '),
+            'dnafilter': '(objectclass=posixAccount)',
+            'dnascope': '%s' % DEFAULT_SUFFIX,
+            'dnaNextValue': '61',
+            'dnaMaxValue': '70',
+            'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX,
+            'dnaThreshold': '2',
+            'dnaRangeRequestTimeout': '60',
+            'dnaMagicRegen': '-1',
+            'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX,
+            'dnaRemoteBindCred': 'secret123',
+        })
+    ))
+
+
+    # Enable the plugins
+    dna_m1.enable()
+    dna_m2.enable()
+
+    # Restart the instances
+    topology.master1.restart(60)
+    topology.master2.restart(60)
+
+    # Wait for replication to settle in...
+    time.sleep(40)
+
+    # Allocate 10 users to exhaust master2's active range (61-70)
+    for i in range(1, 11):
+        _create_user(topology.master2, i)
+
+    # Allocate an 11th: this forces a range request to master1, whose
+    # dnaThreshold of 0 used to trigger the divide by zero (SIGFPE).
+    _create_user(topology.master2, 11)
+
+    log.info('Test PASSED')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 2908443..cf640d8 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1244,6 +1244,12 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply)
             slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
                             "----------> %s [%s]\n", DNA_THRESHOLD, value);
 
+            if (entry->threshold <= 0) {
+                entry->threshold = 1;
+                slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+                                "----------> %s too low [%s], setting to 1\n", DNA_THRESHOLD, value);
+            }
+
             slapi_ch_free_string(&value);
         } else {
             entry->threshold = 1;
@@ -2171,7 +2177,7 @@ static int dna_dn_is_config(char *dn)
     int ret = 0;
 
     slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
-                    "--> dna_is_config\n");
+                    "--> dna_is_config %s\n", dn);
 
     if (slapi_dn_issuffix(dn, getPluginDN())) {
         ret = 1;
@@ -3404,18 +3410,21 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
 
         /* Did we already service all of these configured types?
         */
        if (dna_list_contains_types(generated_types, config_entry->types)) {
+            slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " no types to act upon.\n");
            goto next;
        }

        /* is the entry in scope? */
        if (config_entry->scope && !slapi_dn_issuffix(dn, config_entry->scope)) {
+            slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn not in scope\n");
            goto next;
        }

        /* is this entry in an excluded scope? */
        for (i = 0; config_entry->excludescope && config_entry->excludescope[i]; i++) {
            if (slapi_dn_issuffix(dn, slapi_sdn_get_dn(config_entry->excludescope[i]))) {
+                slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn in excluded scope\n");
                goto next;
            }
        }
@@ -3424,7 +3433,8 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
        if (config_entry->slapi_filter) {
            ret = slapi_vattr_filter_test(pb, e, config_entry->slapi_filter, 0);
            if (LDAP_SUCCESS != ret) {
-                goto next;
+                slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn does not match filter\n");
+                goto next;
            }
        }

@@ -3454,6 +3464,8 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
        }

        if (types_to_generate && types_to_generate[0]) {
+
+            slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " adding %s to %s as -2\n", types_to_generate[0], dn);
            /* add - add to entry */
            for (i = 0; types_to_generate && types_to_generate[i]; i++) {
                slapi_entry_attr_set_charptr(e, types_to_generate[i],
@@ -3492,6 +3504,7 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
                slapi_lock_mutex(config_entry->lock);

                ret = dna_first_free_value(config_entry, &setval);
+                slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " retrieved value %" PRIu64 " ret %d\n", setval, ret);
                if (LDAP_SUCCESS != ret) {
                    /* check if we overflowed the configured range */
                    if (setval > config_entry->maxval) {
@@ -4022,18 +4035,22 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
                    "--> dna_be_txn_pre_op\n");

    if (!slapi_plugin_running(pb)) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, plugin not running\n");
        goto bail;
    }

    if (0 == (dn = dna_get_dn(pb))) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, could not get dn\n");
        goto bail;
    }

    if (dna_dn_is_config(dn)) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, is dna config dn\n");
        goto bail;
    }

    if (dna_isrepl(pb)) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, replicated operation\n");
        /* if repl, the dna values should be already in the entry. */
        goto bail;
    }
@@ -4045,6 +4062,7 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
    }

    if (e == NULL) {
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, entry is NULL\n");
        goto bail;
    } else if (LDAP_CHANGETYPE_MODIFY == modtype) {
        slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
@@ -4056,32 +4074,39 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)

    if (!PR_CLIST_IS_EMPTY(dna_global_config)) {
        list = PR_LIST_HEAD(dna_global_config);
+        slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " using global config...\n");

        while (list != dna_global_config && LDAP_SUCCESS == ret) {
            config_entry = (struct configEntry *) list;

            /* Did we already service all of these configured types? */
            if (dna_list_contains_types(generated_types, config_entry->types)) {
+                slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " All types already serviced\n");
                goto next;
            }

            /* is the entry in scope?
             */
            if (config_entry->scope) {
-                if (!slapi_dn_issuffix(dn, config_entry->scope))
+                if (!slapi_dn_issuffix(dn, config_entry->scope)) {
+                    slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry not in scope of dnaScope!\n");
                    goto next;
+                }
            }

            /* is this entry in an excluded scope? */
            for (i = 0; config_entry->excludescope && config_entry->excludescope[i]; i++) {
                if (slapi_dn_issuffix(dn, slapi_sdn_get_dn(config_entry->excludescope[i]))) {
+                    slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry in excluded scope, next\n");
                    goto next;
                }
            }
-
+
            /* does the entry match the filter? */
            if (config_entry->slapi_filter) {
-                if(LDAP_SUCCESS != slapi_vattr_filter_test(pb,e,config_entry->slapi_filter, 0))
+                if (LDAP_SUCCESS != slapi_vattr_filter_test(pb, e, config_entry->slapi_filter, 0)) {
+                    slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry does not match filter\n");
                    goto next;
+                }
            }

            if (LDAP_CHANGETYPE_ADD == modtype) {
@@ -4526,6 +4551,11 @@ dna_release_range(char *range_dn, PRUint64 *lower, PRUint64 *upper)
         * it instead of from the active range */
        if (config_entry->next_range_lower != 0) {
            /* Release up to half of our values from the next range. */
+            if (config_entry->threshold == 0) {
+                ret = LDAP_UNWILLING_TO_PERFORM;
+                goto bail;
+            }
+
            release = (((config_entry->next_range_upper -
                         config_entry->next_range_lower + 1) / 2) /
                       config_entry->threshold) * config_entry->threshold;
-- 
2.4.11
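
Reviewer note: for readers unfamiliar with the failure mode, here is a minimal standalone C sketch of the arithmetic this patch guards. Everything below is illustrative only -- struct config_sketch, release_from_next_range(), and the locally redefined LDAP result codes are stand-ins, not the plugin's real definitions; only the release calculation and the threshold guard mirror dna_release_range() above. With a threshold of 0 the unguarded integer division raises SIGFPE on most platforms; the guard refuses the operation instead.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins -- not the plugin's actual definitions. */
#define LDAP_SUCCESS               0
#define LDAP_UNWILLING_TO_PERFORM 53

struct config_sketch {
    uint64_t next_range_lower;
    uint64_t next_range_upper;
    uint64_t threshold;
};

/* Mirrors the guarded calculation in dna_release_range(): bail out with
 * an error rather than dividing by a zero threshold. */
static int
release_from_next_range(struct config_sketch *c, uint64_t *release)
{
    if (c->threshold == 0) {
        return LDAP_UNWILLING_TO_PERFORM;
    }
    *release = (((c->next_range_upper - c->next_range_lower + 1) / 2) /
                c->threshold) * c->threshold;
    return LDAP_SUCCESS;
}

int
main(void)
{
    /* dnaNextRange 80-90 with dnaThreshold 0, as in the test above. */
    struct config_sketch c = { 80, 90, 0 };
    uint64_t release = 0;

    if (release_from_next_range(&c, &release) != LDAP_SUCCESS) {
        printf("refused: threshold of 0 would divide by zero\n");
    }

    c.threshold = 2;  /* with a sane threshold the release succeeds */
    if (release_from_next_range(&c, &release) == LDAP_SUCCESS) {
        printf("releasing %llu values\n", (unsigned long long)release);
    }
    return 0;
}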