import 389-ds-base-1.4.3.23-2.module+el8.5.0+11209+cb479c8d

CentOS Sources 2021-06-14 19:48:12 +00:00
parent 1ce915e06c
commit 91ce386728
46 changed files with 11305 additions and 10887 deletions


@@ -1,2 +1,2 @@
-90cda7aea8d8644eea5a2af28c72350dd915db34 SOURCES/389-ds-base-1.4.3.16.tar.bz2
+c69c175a2f27053dffbfefac9c84ff16c7ff4cbf SOURCES/389-ds-base-1.4.3.23.tar.bz2
 9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2

.gitignore

@@ -1,2 +1,2 @@
-SOURCES/389-ds-base-1.4.3.16.tar.bz2
+SOURCES/389-ds-base-1.4.3.23.tar.bz2
 SOURCES/jemalloc-5.2.1.tar.bz2


@@ -1,159 +0,0 @@
From 81dcaf1c37c2de24c46672df8d4f968c2fb40a6e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 08:59:18 -0500
Subject: [PATCH 1/3] Issue 4383 - Do not normalize escaped spaces in a DN
Bug Description: Adding an entry with an escaped leading space leads to many
problems. Mainly, id2entry can get corrupted during an
import of such an entry, and the entryrdn index is not
updated correctly.
Fix Description: In slapi_dn_normalize_ext(), leave an escaped space intact.
Relates: https://github.com/389ds/389-ds-base/issues/4383
Reviewed by: firstyear, progier, and tbordaz (Thanks!!!)
---
.../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++-
ldap/servers/slapd/dn.c | 8 +-
2 files changed, 77 insertions(+), 6 deletions(-)
diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index 543718689..7939a99a7 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,13 +7,12 @@
# --- END COPYRIGHT BLOCK ---
import ldap
-import logging
import pytest
import os
from lib389.schema import Schema
from lib389.config import Config
from lib389.idm.user import UserAccounts
-from lib389.idm.group import Groups
+from lib389.idm.group import Group, Groups
from lib389._constants import DEFAULT_SUFFIX
from lib389.topologies import log, topology_st as topo
@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo):
4. Success
"""
- # Create group
+ # Create group
groups = Groups(topo.standalone, DEFAULT_SUFFIX)
group = groups.create(properties={'cn': ' test'})
@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo):
groups.list()
+@pytest.mark.parametrize("props, rawdn", [
+ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"),
+ ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")])
+def test_dn_syntax_spaces_delete(topo, props, rawdn):
+ """Test that an entry with a space as the first character in the DN can be
+ deleted without error. We also want to make sure the indexes are properly
+ updated by repeatedly adding and deleting the entry, and that the entry cache
+ is properly maintained.
+
+ :id: b993f37c-c2b0-4312-992c-a9048ff98965
+ :parametrized: yes
+ :setup: Standalone Instance
+ :steps:
+ 1. Create a group with a DN that has a space as the first/last
+ character.
+ 2. Delete group
+ 3. Add group
+ 4. Modify group
+ 5. Restart server and modify entry
+ 6. Delete group
+ 7. Add group back
+ 8. Delete group using specific DN
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ """
+
+ # Create group
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Delete group (verifies DN/RDN parsing works and cache is correct)
+ group.delete()
+
+ # Add group again (verifies entryrdn index was properly updated)
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Modify the group (verifies dn/rdn parsing is correct)
+ group.replace('description', 'escaped space group')
+
+ # Restart the server. This will pull the entry from the database and
+ # convert it into a cache entry, which is different than how a client
+ # first adds an entry and is put into the cache before being written to
+ # disk.
+ topo.standalone.restart()
+
+ # Make sure we can modify the entry (verifies cache entry was created
+ # correctly)
+ group.replace('description', 'escaped space group after restart')
+
+ # Make sure it can still be deleted (verifies cache again).
+ group.delete()
+
+ # Add it back so we can delete it using a specific DN (sanity test to verify
+ # another DN/RDN parsing variation).
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+ group = Group(topo.standalone, dn=rawdn)
+ group.delete()
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 2af3f38fc..3980b897f 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
s++;
}
}
- } else if (s + 2 < ends &&
- isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
+ } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
/* esc hexpair ==> real character */
int n = slapi_hexchar2int(*(s + 1));
int n2 = slapi_hexchar2int(*(s + 2));
@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
if (n == 0) { /* don't change \00 */
*d++ = *++s;
*d++ = *++s;
+ } else if (n == 32) { /* leave \20 (space) intact */
+ *d++ = *s;
+ *d++ = *++s;
+ *d++ = *++s;
+ s++;
} else {
*d++ = n;
s += 3;
--
2.26.2
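
The patch above only changes how slapi_dn_normalize_ext() treats hex escape pairs. As a rough illustration of that rule (a simplified Python sketch, not the server code), hex escapes are converted to their literal character except \00 and \20, which stay escaped so leading and trailing spaces survive normalization:

def normalize_hex_escapes(dn):
    """Simplified model of the hex-escape handling added above: convert \\xx
    pairs to their real character, but leave \\00 and \\20 (escaped space)
    intact."""
    out = []
    i = 0
    while i < len(dn):
        c = dn[i]
        if c == '\\' and i + 2 < len(dn) \
                and dn[i + 1] in '0123456789abcdefABCDEF' \
                and dn[i + 2] in '0123456789abcdefABCDEF':
            code = int(dn[i + 1:i + 3], 16)
            if code in (0x00, 0x20):   # don't change \00, leave \20 (space) intact
                out.append(dn[i:i + 3])
            else:                      # esc hexpair ==> real character
                out.append(chr(code))
            i += 3
        else:
            out.append(c)
            i += 1
    return ''.join(out)

assert normalize_hex_escapes("cn=\\20leadingSpace,dc=example,dc=com") == "cn=\\20leadingSpace,dc=example,dc=com"
assert normalize_hex_escapes("cn=\\41lice,dc=example,dc=com") == "cn=Alice,dc=example,dc=com"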

(File diff suppressed because it is too large.)


@@ -0,0 +1,322 @@
From 1e1c2b23c35282481628af7e971ac683da334502 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 27 Apr 2021 17:00:15 +0100
Subject: [PATCH 02/12] Issue 4701 - RFE - Exclude attributes from retro
changelog (#4723)
Description: When the retro changelog plugin is enabled, it writes the
added/modified values to the "cn=changelog" suffix. In
some cases an entry's attribute values can be of a
sensitive nature and should be excluded. This RFE adds
functionality that allows an admin to exclude certain
attributes from the retro changelog DB.
Relates: https://github.com/389ds/389-ds-base/issues/4701
Reviewed by: mreynolds389, droideck (Thanks folks)
---
.../tests/suites/retrocl/basic_test.py | 292 ++++++++++++++++++
1 file changed, 292 insertions(+)
create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py
diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
new file mode 100644
index 000000000..112c73cb9
--- /dev/null
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -0,0 +1,292 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import logging
+import ldap
+import time
+import pytest
+from lib389.topologies import topology_st
+from lib389.plugins import RetroChangelogPlugin
+from lib389._constants import *
+from lib389.utils import *
+from lib389.tasks import *
+from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
+from lib389.cli_base.dsrc import dsrc_arg_concat
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
+from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
+
+pytestmark = pytest.mark.tier1
+
+USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX
+USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX
+USER_PW = 'password'
+ATTR_HOMEPHONE = 'homePhone'
+ATTR_CARLICENSE = 'carLicense'
+
+log = logging.getLogger(__name__)
+
+def test_retrocl_exclude_attr_add(topology_st):
+ """ Test exclude attribute feature of the retrocl plugin for add operation
+
+ :id: 3481650f-2070-45ef-9600-2500cfc51559
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Enable dynamic plugins
+ 2. Configure retro changelog plugin
+ 3. Add an entry
+ 4. Ensure entry attrs are in the changelog
+ 5. Exclude an attr
+ 6. Add another entry
+ 7. Ensure excluded attr is not in the changelog
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ st = topology_st.standalone
+
+ log.info('Enable dynamic plugins')
+ try:
+ st.config.set('nsslapd-dynamic-plugins', 'on')
+ except ldap.LDAPError as e:
+ log.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+ assert False
+
+ log.info('Configure retrocl plugin')
+ rcl = RetroChangelogPlugin(st)
+ rcl.disable()
+ rcl.enable()
+ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+ log.info('Restarting instance')
+ try:
+ st.restart()
+ except ldap.LDAPError as e:
+ log.error('Failed to restart instance ' + e.args[0]['desc'])
+ assert False
+
+ users = UserAccounts(st, DEFAULT_SUFFIX)
+
+ log.info('Adding user1')
+ try:
+ user1 = users.create(properties={
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'givenname': 'user1',
+ 'homePhone': '0861234567',
+ 'carLicense': '131D16674',
+ 'mail': 'user1@whereever.com',
+ 'homeDirectory': '/home/user1',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user1")
+
+ log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE in clstr
+ assert ATTR_CARLICENSE in clstr
+
+ log.info('Excluding attribute ' + ATTR_HOMEPHONE)
+ args = FakeArgs()
+ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+ args.instance = 'standalone1'
+ args.basedn = None
+ args.binddn = None
+ args.starttls = False
+ args.pwdfile = None
+ args.bindpw = None
+ args.prompt = False
+ args.exclude_attrs = ATTR_HOMEPHONE
+ args.func = retrochangelog_add
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, False, args)
+ result = args.func(inst, None, log, args)
+ disconnect_instance(inst)
+ assert result is None
+
+ log.info("5s delay for retrocl plugin to restart")
+ time.sleep(5)
+
+ log.info('Adding user2')
+ try:
+ user2 = users.create(properties={
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'uidNumber': '22',
+ 'gidNumber': '222',
+ 'givenname': 'user2',
+ 'homePhone': '0879088363',
+ 'carLicense': '04WX11038',
+ 'mail': 'user2@whereever.com',
+ 'homeDirectory': '/home/user2',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user2")
+
+ log.info('Verify homePhone attr is not in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE not in clstr
+ assert ATTR_CARLICENSE in clstr
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+
+def test_retrocl_exclude_attr_mod(topology_st):
+ """ Test exclude attribute feature of the retrocl plugin for mod operation
+
+ :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Enable dynamic plugins
+ 2. Configure retro changelog plugin
+ 3. Add user1 entry
+ 4. Ensure entry attrs are in the changelog
+ 5. Exclude an attr
+ 6. Modify user1 entry
+ 7. Ensure excluded attr is not in the changelog
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ st = topology_st.standalone
+
+ log.info('Enable dynamic plugins')
+ try:
+ st.config.set('nsslapd-dynamic-plugins', 'on')
+ except ldap.LDAPError as e:
+ log.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+ assert False
+
+ log.info('Configure retrocl plugin')
+ rcl = RetroChangelogPlugin(st)
+ rcl.disable()
+ rcl.enable()
+ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+ log.info('Restarting instance')
+ try:
+ st.restart()
+ except ldap.LDAPError as e:
+ log.error('Failed to restart instance ' + e.args[0]['desc'])
+ assert False
+
+ users = UserAccounts(st, DEFAULT_SUFFIX)
+
+ log.info('Adding user1')
+ try:
+ user1 = users.create(properties={
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'givenname': 'user1',
+ 'homePhone': '0861234567',
+ 'carLicense': '131D16674',
+ 'mail': 'user1@whereever.com',
+ 'homeDirectory': '/home/user1',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user1")
+
+ log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE in clstr
+ assert ATTR_CARLICENSE in clstr
+
+ log.info('Excluding attribute ' + ATTR_CARLICENSE)
+ args = FakeArgs()
+ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+ args.instance = 'standalone1'
+ args.basedn = None
+ args.binddn = None
+ args.starttls = False
+ args.pwdfile = None
+ args.bindpw = None
+ args.prompt = False
+ args.exclude_attrs = ATTR_CARLICENSE
+ args.func = retrochangelog_add
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, False, args)
+ result = args.func(inst, None, log, args)
+ disconnect_instance(inst)
+ assert result is None
+
+ log.info("5s delay for retrocl plugin to restart")
+ time.sleep(5)
+
+ log.info('Modify user1 carLicense attribute')
+ try:
+ st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
+ except ldap.LDAPError as e:
+ log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + str(e))
+ assert False
+
+ log.info('Verify carLicense attr is not in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ assert len(cllist) > 0
+ # There will be 2 entries in the changelog for this user; we are only
+ # interested in the second one, the modify operation.
+ if cllist[1].hasAttr('changes'):
+ clstr = (cllist[1].getValue('changes')).decode()
+ assert ATTR_CARLICENSE not in clstr
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
--
2.26.3
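
For reference, the configuration step exercised by the tests above can also be driven from a short script. This is only a sketch based on the test code: it reuses the same lib389 CLI helper (retrochangelog_add), and the connection string, instance name, and credentials are placeholders to adapt.

import logging
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
from lib389.cli_base.dsrc import dsrc_arg_concat
from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add

log = logging.getLogger(__name__)

args = FakeArgs()
# host:port:binddn:password, placeholder values
args.connections = ['localhost:389:cn=Directory Manager:password']
args.instance = 'standalone1'          # placeholder instance name
args.basedn = None
args.binddn = None
args.starttls = False
args.pwdfile = None
args.bindpw = None
args.prompt = False
args.exclude_attrs = 'homePhone'       # attribute to keep out of the retro changelog
args.func = retrochangelog_add

dsrc_inst = dsrc_arg_concat(args, None)
inst = connect_instance(dsrc_inst, False, args)
try:
    retrochangelog_add(inst, None, log, args)
finally:
    disconnect_instance(inst)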


@@ -1,232 +0,0 @@
From 29c9e1c3c760f0941b022d45d14c248e9ceb9738 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 3 Nov 2020 12:18:50 +0100
Subject: [PATCH 2/3] ticket 2058: Add keep alive entry after on-line
initialization - second version (#4399)
Bug description:
The keep alive entry is not created on the target master after on-line initialization,
and its RUV element stays empty until a direct update is issued on that master.
Fix description:
The patch allows a consumer (configured as a master) to create (if it did not
exist before) the consumer's keep alive entry. It creates it at the end of a
replication session, at a time when we are sure the changelog exists and will not
be reset. It allows a consumer to have an RUV element with a csn in the RUV at the
first incoming replication session.
That is basically lkrispen's proposal with an associated pytest testcase
Second version changes:
- moved the testcase to suites/replication/regression_test.py
- set up the topology from a 2 master topology then
reinitialized the replicas from an ldif without replication metadata
rather than using the cli.
- search for keepalive entries using search_s instead of getEntry
- add a comment about keep alive entries purpose
Last commit:
- wait until the RUVs are in sync before checking keep alive entries
Reviewed by: droideck, Firstyear
Platforms tested: F32
relates: #2058
---
.../suites/replication/regression_test.py | 130 ++++++++++++++++++
.../plugins/replication/repl5_replica.c | 14 ++
ldap/servers/plugins/replication/repl_extop.c | 4 +
3 files changed, 148 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 844d762b9..14b9d6a44 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
for dn, entry in ldif_list:
ldif_writer.unparse(dn, entry)
+def _remove_replication_data(ldif_file):
+ """ Remove the replication data from ldif file:
+ db2lif without -r includes some of the replica data like
+ - nsUniqueId
+ - keepalive entries
+ This function filters the ldif fil to remove these data
+ """
+
+ with open(ldif_file) as f:
+ parser = ldif.LDIFRecordList(f)
+ parser.parse()
+
+ ldif_list = parser.all_records
+ # Iterate on a copy of the ldif entry list
+ for dn, entry in ldif_list[:]:
+ if dn.startswith('cn=repl keep alive'):
+ ldif_list.remove((dn,entry))
+ else:
+ entry.pop('nsUniqueId')
+ with open(ldif_file, 'w') as f:
+ ldif_writer = ldif.LDIFWriter(f)
+ for dn, entry in ldif_list:
+ ldif_writer.unparse(dn, entry)
+
@pytest.fixture(scope="module")
def topo_with_sigkill(request):
@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
assert len(m1entries) == len(m2entries)
+def get_keepalive_entries(instance, replica):
+ # Returns the keep alive entries that exist within the suffix of the server instance
+ try:
+ entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
+ "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
+ ['cn', 'nsUniqueId', 'modifierTimestamp'])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to retrieve keepalive entries under %s on instance %s: error %s' % (replica.get_suffix(), instance, str(e)))
+ assert False
+ # No error, so lets log the keepalive entries
+ if log.isEnabledFor(logging.DEBUG):
+ for ret in entries:
+ log.debug("Found keepalive entry:\n"+str(ret));
+ return entries
+
+def verify_keepalive_entries(topo, expected):
+ # Check that keep alive entries exist (or do not exist) for every master, on every master.
+ # Note: The testing method is quite basic: counting that there is one keepalive entry per master.
+ # That is ok for simple test cases like test_online_init_should_create_keepalive_entries, but
+ # not for the general case, as keep alive entries associated with masters that no longer exist
+ # may remain (for example after: db2ldif / demote a master / ldif2db / init other masters).
+ # ==> if the function is ever pushed into lib389, a check better than simply counting the
+ # entries should be done.
+ for masterId in topo.ms:
+ master=topo.ms[masterId]
+ for replica in Replicas(master).list():
+ if (replica.get_role() != ReplicaRole.MASTER):
+ continue
+ replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
+ log.debug(f'Checking keepAliveEntries on {replica_info}')
+ keepaliveEntries = get_keepalive_entries(master, replica);
+ expectedCount = len(topo.ms) if expected else 0
+ foundCount = len(keepaliveEntries)
+ if (foundCount == expectedCount):
+ log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
+ else:
+ log.error(f'{foundCount} Keepalive entries are found '
+ f'while {expectedCount} were expected on {replica_info}.')
+ assert False
+
+
+def test_online_init_should_create_keepalive_entries(topo_m2):
+ """Check that keep alive entries are created when initializinf a master from another one
+
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+ :setup: Two masters replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2. Init both masters from that ldif
+ 3. Check that keep alive entries do not exist
+ 4. Perform on-line init of master2 from master1
+ 5. Check that keep alive entries exist
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No keepalive entries should exist on any master
+ 4. No error while initializing master2
+ 5. All keepalive entries should exist on every master
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["master1"]
+ m2 = topo_m2.ms["master2"]
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ """ Replica state is now as if CLI setup has been done using:
+ dsconf master1 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master2 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master1 repl-agmt create --suffix "${SUFFIX}"
+ dsconf master2 repl-agmt create --suffix "${SUFFIX}"
+ """
+
+ # Step 3: No keepalive entries should exist on any master
+ verify_keepalive_entries(topo_m2, False)
+
+ # Step 4: Perform on line init of master2 from master1
+ agmt = Agreements(m1).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 5: All keepalive entries should exist on every master
+ # Verify the keep alive entry once replication is in sync
+ # (that is the step that fails when bug is not fixed)
+ repl.wait_for_ruv(m2,m1)
+ verify_keepalive_entries(topo_m2, True);
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f01782330..f0ea0f8ef 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -373,6 +373,20 @@ replica_destroy(void **arg)
slapi_ch_free((void **)arg);
}
+/******************************************************************************
+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
+ ******************************************************************************
+ * They are subentries of the replicated suffix and there is one per master. *
+ * These entries exist only to trigger a change that get replicated over the *
+ * topology. *
+ * Their main purpose is to generate records in the changelog and they are *
+ * updated from time to time by fractional replication to insure that at *
+ * least a change must be replicated by FR after a great number of not *
+ * replicated changes are found in the changelog. The interest is that the *
+ * fractional RUV get then updated so less changes need to be walked in the *
+ * changelog when searching for the first change to send *
+ ******************************************************************************/
+
#define KEEP_ALIVE_ATTR "keepalivetimestamp"
#define KEEP_ALIVE_ENTRY "repl keep alive"
#define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 14c8e0bcc..af486f730 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
*/
if (cl5GetState() == CL5_STATE_OPEN) {
replica_log_ruv_elements(r);
+ /* now that the changelog is open and started, we can also create the
+ * keep alive entry without risk that the db and cl will not match
+ */
+ replica_subentry_check(replica_get_root(r), replica_get_rid(r));
}
/* ONREPL code that dealt with new RUV, etc was moved into the code
--
2.26.2
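
The keep alive entries created by the patch above are ordinary subentries under the replicated suffix, so their presence can be checked from any LDAP client. A minimal python-ldap sketch, modelled on get_keepalive_entries() in the test; the server URL, suffix, and credentials are placeholders:

import ldap

SUFFIX = 'dc=example,dc=com'                              # replicated suffix (placeholder)

conn = ldap.initialize('ldap://localhost:389')            # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')    # placeholder credentials

# One "cn=repl keep alive <rid>" subentry per master is expected once the fix
# is in place and replication has been initialized.
entries = conn.search_s(SUFFIX, ldap.SCOPE_ONELEVEL,
                        '(&(objectclass=ldapsubentry)(cn=repl keep alive*))',
                        ['cn', 'nsUniqueId', 'keepalivetimestamp'])
for dn, attrs in entries:
    print(dn, attrs)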

(File diff suppressed because it is too large.)


@@ -1,513 +0,0 @@
From e202c62c3b4c92163d2de9f3da9a9f3efc81e4b8 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 12 Nov 2020 18:50:04 +0100
Subject: [PATCH 3/3] do not add referrals for masters with different data
generation #2054 (#4427)
Bug description:
The problem is that some operations that are mandatory in the usual case are
also performed when replication cannot take place because the
database sets are different (i.e. the RUV generation ids are different).
One of the issues is that the csn generator state is updated when
starting a replication session (this is a problem when trying to
reset the time skew, as freshly reinstalled replicas get infected
by the old ones).
A second issue is that the RUV gets updated when ending a replication session,
which may add a replica that does not share the same data set;
update operations on the consumer then return referrals towards the wrong masters.
Fix description:
The fix checks the RUV generation id before updating the csn generator
and before updating the RUV.
Reviewed by: mreynolds
firstyear
vashirov
Platforms tested: F32
---
.../suites/replication/regression_test.py | 290 ++++++++++++++++++
ldap/servers/plugins/replication/repl5.h | 1 +
.../plugins/replication/repl5_inc_protocol.c | 20 +-
.../plugins/replication/repl5_replica.c | 39 ++-
src/lib389/lib389/dseldif.py | 37 +++
5 files changed, 368 insertions(+), 19 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 14b9d6a44..a72af6b30 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
from lib389.pwpolicy import PwPolicyManager
from lib389.utils import *
from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
+from lib389.topologies import topology_m2c2 as topo_m2c2
from lib389._constants import *
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.user import UserAccount
@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager
from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
from lib389.agreement import Agreements
from lib389 import pid_from_file
+from lib389.dseldif import *
pytestmark = pytest.mark.tier1
@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2):
verify_keepalive_entries(topo_m2, True);
+def get_agreement(agmts, consumer):
+ # Get agreement towards consumer among the agremment list
+ for agmt in agmts.list():
+ if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
+ agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
+ return agmt
+ return None;
+
+
+def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
+ """Check that RUV url is not updated if RUV generation uuid are different
+
+ :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 3. Perform on line init from master1 to consumer1
+ and from master2 to consumer2
+ 4. Perform update on both masters
+ 5. Check that c1 RUV does not contains URL towards m2
+ 6. Check that c2 RUV does contains URL towards m2
+ 7. Perform on line init from master1 to master2
+ 8. Perform update on master2
+ 9. Check that c1 RUV does contains URL towards m2
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No error and Initialization done.
+ 4. No error
+ 5. master2 replicaid should not be in the consumer1 RUV
+ 6. master2 replicaid should be in the consumer2 RUV
+ 7. No error and Initialization done.
+ 8. No error
+ 9. master2 replicaid should be in the consumer1 RUV
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 3: Perform on line init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 4: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 5: Check that c1 RUV does not contains URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv}")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+ log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
+ #Note: this assertion fails if issue 2054 is not fixed.
+ assert False
+
+ # Step 6: Check that c2 RUV does contains URL towards m2
+ ruv = replica_c2.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+ # Step 7: Perform on line init from master1 to master2
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 8: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 9: Check that c1 RUV does contains URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
+ """Check that csngen remote offset is not updated if RUV generation uuid are different
+
+ :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Disable the m1<->m2 agreement to avoid propagating timeSkew
+ 2. Generate ldif without replication data
+ 3. Increase time skew on master2
+ 4. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 5. Perform on line init from master1 to consumer1 and master2 to consumer2
+ 6. Perform update on both masters
+ 7: Check that c1 has no time skew
+ 8: Check that c2 has time skew
+ 9. Init master2 from master1
+ 10. Perform update on master2
+ 11. Check that c1 has time skew
+ :expectedresults:
+ 1. No error
+ 2. No error while generating ldif
+ 3. No error
+ 4. No error while importing the ldif file
+ 5. No error and Initialization done.
+ 6. No error
+ 7. c1 time skew should be lesser than threshold
+ 8. c2 time skew should be higher than threshold
+ 9. No error and Initialization done.
+ 10. No error
+ 11. c1 time skew should be higher than threshold
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Disable the m1<->m2 agreement to avoid propagating timeSkew
+ m1_m2.pause()
+ m2_m1.pause()
+
+ # Step 2: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 3: Increase time skew on master2
+ timeSkew=6*3600
+ # We can modify master2's time skew,
+ # but the time skew on the consumer may be smaller
+ # depending on when the csngen generation time is updated
+ # and when the first csn gets replicated.
+ # Since we use timeSkew as the threshold value to detect
+ # whether there is time skew or not,
+ # let's add a significant margin (longer than the test duration)
+ # to avoid any risk of an erroneous failure.
+ timeSkewMargin = 300
+ DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin)
+
+ # Step 4: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 5: Perform on line init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 6: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 7: Check that c1 has no time skew
+ # Stop the server to ensure that dse.ldif is up to date
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew >= timeSkew):
+ log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+ c1.start()
+
+ # Step 8: Check that c2 has time skew
+ # Stop the server to ensure that dse.ldif is up to date
+ c2.stop()
+ c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
+ c2_timeSkew = int(c2_nsState['time_skew'])
+ log.debug(f"c2 time skew: {c2_timeSkew}")
+ if (c2_timeSkew < timeSkew):
+ log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}")
+ assert False
+ c2.start()
+
+ # Step 9: Perform on line init from master1 to master2
+ m1_c1.pause()
+ m1_m2.resume()
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 10: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 11: Check that c1 has time skew
+ # Stop the server to ensure that dse.ldif is up to date
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew < timeSkew):
+ log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index b35f724c2..f1c596a3f 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -708,6 +708,7 @@ void replica_dump(Replica *r);
void replica_set_enabled(Replica *r, PRBool enable);
Replica *replica_get_replica_from_dn(const Slapi_DN *dn);
Replica *replica_get_replica_from_root(const char *repl_root);
+int replica_check_generation(Replica *r, const RUV *remote_ruv);
int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl);
Replica *replica_get_replica_for_op(Slapi_PBlock *pb);
/* the functions below manipulate replica hash */
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 29b1fb073..af5e5897c 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv)
} else if (NULL == remote_ruv) {
return_value = EXAMINE_RUV_PRISTINE_REPLICA;
} else {
- char *local_gen = NULL;
- char *remote_gen = ruv_get_replica_generation(remote_ruv);
- Object *local_ruv_obj;
- RUV *local_ruv;
-
PR_ASSERT(NULL != prp->replica);
- local_ruv_obj = replica_get_ruv(prp->replica);
- if (NULL != local_ruv_obj) {
- local_ruv = (RUV *)object_get_data(local_ruv_obj);
- PR_ASSERT(local_ruv);
- local_gen = ruv_get_replica_generation(local_ruv);
- object_release(local_ruv_obj);
- }
- if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
- return_value = EXAMINE_RUV_GENERATION_MISMATCH;
- } else {
+ if (replica_check_generation(prp->replica, remote_ruv)) {
return_value = EXAMINE_RUV_OK;
+ } else {
+ return_value = EXAMINE_RUV_GENERATION_MISMATCH;
}
- slapi_ch_free((void **)&remote_gen);
- slapi_ch_free((void **)&local_gen);
}
return return_value;
}
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f0ea0f8ef..7e56d6557 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv)
replica_unlock(r->repl_lock);
}
+/*
+ * Check if the replica generation is the same as the remote ruv one
+ */
+int
+replica_check_generation(Replica *r, const RUV *remote_ruv)
+{
+ int return_value;
+ char *local_gen = NULL;
+ char *remote_gen = ruv_get_replica_generation(remote_ruv);
+ Object *local_ruv_obj;
+ RUV *local_ruv;
+
+ PR_ASSERT(NULL != r);
+ local_ruv_obj = replica_get_ruv(r);
+ if (NULL != local_ruv_obj) {
+ local_ruv = (RUV *)object_get_data(local_ruv_obj);
+ PR_ASSERT(local_ruv);
+ local_gen = ruv_get_replica_generation(local_ruv);
+ object_release(local_ruv_obj);
+ }
+ if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
+ return_value = PR_FALSE;
+ } else {
+ return_value = PR_TRUE;
+ }
+ slapi_ch_free_string(&remote_gen);
+ slapi_ch_free_string(&local_gen);
+ return return_value;
+}
+
/*
* Update one particular CSN in an RUV. This is meant to be called
* whenever (a) the server has processed a client operation and
@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn)
PR_ASSERT(r && ruv);
+ if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */
+ {
+ return 0;
+ }
+
rc = ruv_get_max_csn(ruv, &csn);
if (rc != RUV_SUCCESS) {
return -1;
@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv)
replica_lock(r->repl_lock);
local_ruv = (RUV *)object_get_data(r->repl_ruv);
-
- if (is_cleaned_rid(supplier_id) || local_ruv == NULL) {
+ if (is_cleaned_rid(supplier_id) || local_ruv == NULL ||
+ !replica_check_generation(r, supplier_ruv)) {
replica_unlock(r->repl_lock);
return;
}
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index 10baba4d7..6850c9a8a 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -317,6 +317,43 @@ class DSEldif(DSLint):
return states
+ def _increaseTimeSkew(self, suffix, timeSkew):
+ # Increase csngen state local_offset by timeSkew
+ # Warning: instance must be stopped before calling this function
+ assert (timeSkew >= 0)
+ nsState = self.readNsState(suffix)[0]
+ self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}')
+ oldNsState = self.get(nsState['dn'], 'nsState', True)
+ self._instance.log.debug(f'oldNsState is {oldNsState}')
+
+ # Lets reencode the new nsState
+ from lib389.utils import print_nice_time
+ if pack('<h', 1) == pack('=h',1):
+ end = '<'
+ elif pack('>h', 1) == pack('=h',1):
+ end = '>'
+ else:
+ raise ValueError("Unknown endian, unable to proceed")
+
+ thelen = len(oldNsState)
+ if thelen <= 20:
+ pad = 2 # padding for short H values
+ timefmt = 'I' # timevals are unsigned 32-bit int
+ else:
+ pad = 6 # padding for short H values
+ timefmt = 'Q' # timevals are unsigned 64-bit int
+ fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
+ newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']),
+ int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew,
+ int(nsState['remote_offset']), int(nsState['seq_num'])))
+ newNsState = newNsState.decode('utf-8')
+ self._instance.log.debug(f'newNsState is {newNsState}')
+ # Lets replace the value.
+ (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState')
+ attr_i = next(iter(attr_data))
+ self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}"
+ self._update()
+
class FSChecks(DSLint):
"""This is for the healthcheck feature, check commonly used system config files the
--
2.26.2
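
The time-skew checks in the test above all follow the same pattern: stop the instance so dse.ldif is flushed, read the csn generator state with lib389's DSEldif, and compare the recorded skew against the threshold. A condensed sketch of that pattern; inst stands for an already allocated lib389 DirSrv instance (for example a consumer from the topology fixture):

from lib389._constants import DEFAULT_SUFFIX
from lib389.dseldif import DSEldif

def read_time_skew(inst):
    """Return the csngen time skew recorded in dse.ldif for DEFAULT_SUFFIX."""
    inst.stop()                     # stop the server so dse.ldif is up to date
    ns_state = DSEldif(inst).readNsState(DEFAULT_SUFFIX)[0]
    inst.start()
    return int(ns_state['time_skew'])

# e.g. assert read_time_skew(consumer1) < 6 * 3600 on a consumer that should
# not have inherited the injected skew.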


@@ -0,0 +1,373 @@
From c167d6127db45d8426437c273060c8c8f7fbcb9b Mon Sep 17 00:00:00 2001
From: Firstyear <william.brown@suse.com>
Date: Wed, 23 Sep 2020 09:19:34 +1000
Subject: [PATCH 04/12] Ticket 4326 - entryuuid fixup did not work correctly
(#4328)
Bug Description: Due to an oversight in how fixup tasks
worked, the entryuuid fixup task did not work correctly, and
its results would not persist over restarts.
Fix Description: Correctly implement entryuuid fixup.
fixes: #4326
Author: William Brown <william@blackhats.net.au>
Review by: mreynolds (thanks!)
---
.../tests/suites/entryuuid/basic_test.py | 24 +++-
src/plugins/entryuuid/src/lib.rs | 43 ++++++-
src/slapi_r_plugin/src/constants.rs | 5 +
src/slapi_r_plugin/src/entry.rs | 8 ++
src/slapi_r_plugin/src/lib.rs | 2 +
src/slapi_r_plugin/src/macros.rs | 2 +-
src/slapi_r_plugin/src/modify.rs | 118 ++++++++++++++++++
src/slapi_r_plugin/src/pblock.rs | 7 ++
src/slapi_r_plugin/src/value.rs | 4 +
9 files changed, 206 insertions(+), 7 deletions(-)
create mode 100644 src/slapi_r_plugin/src/modify.rs
diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py
index beb73701d..4d8a40909 100644
--- a/dirsrvtests/tests/suites/entryuuid/basic_test.py
+++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py
@@ -12,6 +12,7 @@ import time
import shutil
from lib389.idm.user import nsUserAccounts, UserAccounts
from lib389.idm.account import Accounts
+from lib389.idm.domain import Domain
from lib389.topologies import topology_st as topology
from lib389.backend import Backends
from lib389.paths import Paths
@@ -190,6 +191,7 @@ def test_entryuuid_fixup_task(topology):
3. Enable the entryuuid plugin
4. Run the fixup
5. Assert the entryuuid now exists
+ 6. Restart and check they persist
:expectedresults:
1. Success
@@ -197,6 +199,7 @@ def test_entryuuid_fixup_task(topology):
3. Success
4. Success
5. Suddenly EntryUUID!
+ 6. Still has EntryUUID!
"""
# 1. Disable the plugin
plug = EntryUUIDPlugin(topology.standalone)
@@ -220,7 +223,22 @@ def test_entryuuid_fixup_task(topology):
assert(task.is_complete() and task.get_exit_code() == 0)
topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,))
- # 5. Assert the uuid.
- euuid = account.get_attr_val_utf8('entryUUID')
- assert(euuid is not None)
+ # 5.1 Assert the uuid on the user.
+ euuid_user = account.get_attr_val_utf8('entryUUID')
+ assert(euuid_user is not None)
+
+ # 5.2 Assert it on the domain entry.
+ domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX)
+ euuid_domain = domain.get_attr_val_utf8('entryUUID')
+ assert(euuid_domain is not None)
+
+ # Assert it persists after a restart.
+ topology.standalone.restart()
+ # 6.1 Assert the uuid on the user.
+ euuid_user_2 = account.get_attr_val_utf8('entryUUID')
+ assert(euuid_user_2 == euuid_user)
+
+ # 6.2 Assert it on the domain entry.
+ euuid_domain_2 = domain.get_attr_val_utf8('entryUUID')
+ assert(euuid_domain_2 == euuid_domain)
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index 6b5e8d1bb..92977db05 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -187,9 +187,46 @@ impl SlapiPlugin3 for EntryUuid {
}
}
-pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> {
- assign_uuid(&mut e);
- Ok(())
+pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError> {
+ /* Supply a modification to the entry. */
+ let sdn = e.get_sdnref();
+
+ /* Sanity check that entryuuid doesn't already exist */
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Trace,
+ "skipping fixup for -> {}",
+ sdn.to_dn_string()
+ );
+ return Ok(());
+ }
+
+ // Setup the modifications
+ let mut mods = SlapiMods::new();
+
+ let u: Uuid = Uuid::new_v4();
+ let uuid_value = Value::from(&u);
+ let values: ValueArray = std::iter::once(uuid_value).collect();
+ mods.append(ModType::Replace, "entryUUID", values);
+
+ /* */
+ let lmod = Modify::new(&sdn, mods, plugin_id())?;
+
+ match lmod.execute() {
+ Ok(_) => {
+ log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
+ Ok(())
+ }
+ Err(e) => {
+ log_error!(
+ ErrorLevel::Error,
+ "entryuuid_fixup_mapfn -> fixup failed -> {} {:?}",
+ sdn.to_dn_string(),
+ e
+ );
+ Err(PluginError::GenericFailure)
+ }
+ }
}
#[cfg(test)]
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
index cf76ccbdb..34845c2f4 100644
--- a/src/slapi_r_plugin/src/constants.rs
+++ b/src/slapi_r_plugin/src/constants.rs
@@ -5,6 +5,11 @@ use std::os::raw::c_char;
pub const LDAP_SUCCESS: i32 = 0;
pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50;
+#[repr(i32)]
+pub enum OpFlags {
+ ByassReferrals = 0x0040_0000,
+}
+
#[repr(i32)]
/// The set of possible function handles we can register via the pblock. These
/// values correspond to slapi-plugin.h.
diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs
index 034efe692..22ae45189 100644
--- a/src/slapi_r_plugin/src/entry.rs
+++ b/src/slapi_r_plugin/src/entry.rs
@@ -70,6 +70,14 @@ impl EntryRef {
}
}
+ pub fn contains_attr(&self, name: &str) -> bool {
+ let cname = CString::new(name).expect("invalid attr name");
+ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) };
+
+ // If it's null, it's not present, so flip the logic.
+ !va.is_null()
+ }
+
pub fn add_value(&mut self, a: &str, v: &ValueRef) {
// turn the attr to a c string.
// TODO FIX
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
index d7fc22e52..076907bae 100644
--- a/src/slapi_r_plugin/src/lib.rs
+++ b/src/slapi_r_plugin/src/lib.rs
@@ -9,6 +9,7 @@ pub mod dn;
pub mod entry;
pub mod error;
pub mod log;
+pub mod modify;
pub mod pblock;
pub mod plugin;
pub mod search;
@@ -24,6 +25,7 @@ pub mod prelude {
pub use crate::entry::EntryRef;
pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError};
pub use crate::log::{log_error, ErrorLevel};
+ pub use crate::modify::{ModType, Modify, SlapiMods};
pub use crate::pblock::{Pblock, PblockRef};
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
pub use crate::search::{Search, SearchScope};
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
index 030449632..bc8dfa60f 100644
--- a/src/slapi_r_plugin/src/macros.rs
+++ b/src/slapi_r_plugin/src/macros.rs
@@ -825,7 +825,7 @@ macro_rules! slapi_r_search_callback_mapfn {
let e = EntryRef::new(raw_e);
let data_ptr = raw_data as *const _;
let data = unsafe { &(*data_ptr) };
- match $cb_mod_ident(e, data) {
+ match $cb_mod_ident(&e, data) {
Ok(_) => LDAPError::Success as i32,
Err(e) => e as i32,
}
diff --git a/src/slapi_r_plugin/src/modify.rs b/src/slapi_r_plugin/src/modify.rs
new file mode 100644
index 000000000..30864377a
--- /dev/null
+++ b/src/slapi_r_plugin/src/modify.rs
@@ -0,0 +1,118 @@
+use crate::constants::OpFlags;
+use crate::dn::SdnRef;
+use crate::error::{LDAPError, PluginError};
+use crate::pblock::Pblock;
+use crate::plugin::PluginIdRef;
+use crate::value::{slapi_value, ValueArray};
+
+use std::ffi::CString;
+use std::ops::{Deref, DerefMut};
+use std::os::raw::c_char;
+
+extern "C" {
+ fn slapi_modify_internal_set_pb_ext(
+ pb: *const libc::c_void,
+ dn: *const libc::c_void,
+ mods: *const *const libc::c_void,
+ controls: *const *const libc::c_void,
+ uniqueid: *const c_char,
+ plugin_ident: *const libc::c_void,
+ op_flags: i32,
+ );
+ fn slapi_modify_internal_pb(pb: *const libc::c_void);
+ fn slapi_mods_free(smods: *const *const libc::c_void);
+ fn slapi_mods_get_ldapmods_byref(smods: *const libc::c_void) -> *const *const libc::c_void;
+ fn slapi_mods_new() -> *const libc::c_void;
+ fn slapi_mods_add_mod_values(
+ smods: *const libc::c_void,
+ mtype: i32,
+ attrtype: *const c_char,
+ value: *const *const slapi_value,
+ );
+}
+
+#[derive(Debug)]
+#[repr(i32)]
+pub enum ModType {
+ Add = 0,
+ Delete = 1,
+ Replace = 2,
+}
+
+pub struct SlapiMods {
+ inner: *const libc::c_void,
+ vas: Vec<ValueArray>,
+}
+
+impl Drop for SlapiMods {
+ fn drop(&mut self) {
+ unsafe { slapi_mods_free(&self.inner as *const *const libc::c_void) }
+ }
+}
+
+impl SlapiMods {
+ pub fn new() -> Self {
+ SlapiMods {
+ inner: unsafe { slapi_mods_new() },
+ vas: Vec::new(),
+ }
+ }
+
+ pub fn append(&mut self, modtype: ModType, attrtype: &str, values: ValueArray) {
+ // We can get the value array pointer here to push to the inner
+ // because the internal pointers won't change even when we push them
+ // to the list to preserve their lifetime.
+ let vas = values.as_ptr();
+ // We take ownership of this to ensure it lives at least as long as our
+ // slapimods structure.
+ self.vas.push(values);
+ // now we can insert these to the modes.
+ let c_attrtype = CString::new(attrtype).expect("failed to allocate attrtype");
+ unsafe { slapi_mods_add_mod_values(self.inner, modtype as i32, c_attrtype.as_ptr(), vas) };
+ }
+}
+
+pub struct Modify {
+ pb: Pblock,
+ mods: SlapiMods,
+}
+
+pub struct ModifyResult {
+ pb: Pblock,
+}
+
+impl Modify {
+ pub fn new(dn: &SdnRef, mods: SlapiMods, plugin_id: PluginIdRef) -> Result<Self, PluginError> {
+ let pb = Pblock::new();
+ let lmods = unsafe { slapi_mods_get_ldapmods_byref(mods.inner) };
+ // OP_FLAG_ACTION_LOG_ACCESS
+
+ unsafe {
+ slapi_modify_internal_set_pb_ext(
+ pb.deref().as_ptr(),
+ dn.as_ptr(),
+ lmods,
+ std::ptr::null(),
+ std::ptr::null(),
+ plugin_id.raw_pid,
+ OpFlags::ByassReferrals as i32,
+ )
+ };
+
+ Ok(Modify { pb, mods })
+ }
+
+ pub fn execute(self) -> Result<ModifyResult, LDAPError> {
+ let Modify {
+ mut pb,
+ mods: _mods,
+ } = self;
+ unsafe { slapi_modify_internal_pb(pb.deref().as_ptr()) };
+ let result = pb.get_op_result();
+
+ match result {
+ 0 => Ok(ModifyResult { pb }),
+ _e => Err(LDAPError::from(result)),
+ }
+ }
+}
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
index b69ce1680..0f83914f3 100644
--- a/src/slapi_r_plugin/src/pblock.rs
+++ b/src/slapi_r_plugin/src/pblock.rs
@@ -11,6 +11,7 @@ pub use crate::log::{log_error, ErrorLevel};
extern "C" {
fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
+ fn slapi_pblock_destroy(pb: *const libc::c_void);
fn slapi_pblock_new() -> *const libc::c_void;
}
@@ -41,6 +42,12 @@ impl DerefMut for Pblock {
}
}
+impl Drop for Pblock {
+ fn drop(&mut self) {
+ unsafe { slapi_pblock_destroy(self.value.raw_pb) }
+ }
+}
+
pub struct PblockRef {
raw_pb: *const libc::c_void,
}
diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
index 5a40dd279..46246837a 100644
--- a/src/slapi_r_plugin/src/value.rs
+++ b/src/slapi_r_plugin/src/value.rs
@@ -96,6 +96,10 @@ impl ValueArray {
let bs = vs.into_boxed_slice();
Box::leak(bs) as *const _ as *const *const slapi_value
}
+
+ pub fn as_ptr(&self) -> *const *const slapi_value {
+ self.data.as_ptr() as *const *const slapi_value
+ }
}
impl FromIterator<Value> for ValueArray {
--
2.26.3
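
The persistence check added to the test can be reduced to a small helper: read entryUUID, restart, and read it again. A sketch along those lines; inst stands for a lib389 DirSrv handle to the standalone instance, and only constructs already used by the test above are assumed:

from lib389._constants import DEFAULT_SUFFIX
from lib389.idm.domain import Domain

def assert_entryuuid_persists(inst):
    """Verify that the entryUUID written by the fixup task survives a restart."""
    domain = Domain(inst, dn=DEFAULT_SUFFIX)
    before = domain.get_attr_val_utf8('entryUUID')
    assert before is not None       # the fixup task populated the attribute

    inst.restart()

    after = domain.get_attr_val_utf8('entryUUID')
    assert after == before          # the value was persisted, not regenerated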


@@ -1,179 +0,0 @@
From 826a1bb4ea88915ac492828d1cc4a901623f7866 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 14 May 2020 14:31:47 +1000
Subject: [PATCH 1/2] Ticket 50933 - Update 2307compat.ldif
Bug Description: This resolves a potential conflict between 60nis.ldif
in freeipa and others with 2307compat, by removing the conflicting
definitions from 2307bis that were included.
Fix Description: By not including these in 2307compat,
sites that rely on the values provided by 2307bis may ALSO need
60nis.ldif to be present. However, these NIS values are
likely very rare in reality, and this also avoids potential
issues with freeipa. It is also the least disruptive option, as we don't need
to change an already defined file, and we don't have values where the
name-to-OID relationship changes.
Fixes: #50933
https://pagure.io/389-ds-base/issue/50933
Author: William Brown <william@blackhats.net.au>
Review by: tbordaz (Thanks!)
---
ldap/schema/10rfc2307compat.ldif | 66 --------------------------------
ldap/schema/60autofs.ldif | 39 ++++++++++++-------
2 files changed, 26 insertions(+), 79 deletions(-)
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 8810231ac..78c588d08 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -176,50 +176,6 @@ attributeTypes: (
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
SINGLE-VALUE
)
-attributeTypes: (
- 1.3.6.1.1.1.1.28 NAME 'nisPublicKey'
- DESC 'NIS public key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.29 NAME 'nisSecretKey'
- DESC 'NIS secret key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.30 NAME 'nisDomain'
- DESC 'NIS domain'
- EQUALITY caseIgnoreIA5Match
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.31 NAME 'automountMapName'
- DESC 'automount Map Name'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.32 NAME 'automountKey'
- DESC 'Automount Key value'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.33 NAME 'automountInformation'
- DESC 'Automount information'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
# end of attribute types - beginning of objectclasses
objectClasses: (
1.3.6.1.1.1.2.0 NAME 'posixAccount' SUP top AUXILIARY
@@ -324,28 +280,6 @@ objectClasses: (
seeAlso $ serialNumber'
MAY ( bootFile $ bootParameter $ cn $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber )
)
-objectClasses: (
- 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' SUP top AUXILIARY
- DESC 'An object with a public and secret key'
- MUST ( cn $ nisPublicKey $ nisSecretKey )
- MAY ( uidNumber $ description )
- )
-objectClasses: (
- 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' SUP top AUXILIARY
- DESC 'Associates a NIS domain with a naming context'
- MUST nisDomain
- )
-objectClasses: (
- 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURAL
- MUST ( automountMapName )
- MAY description
- )
-objectClasses: (
- 1.3.6.1.1.1.2.17 NAME 'automount' SUP top STRUCTURAL
- DESC 'Automount information'
- MUST ( automountKey $ automountInformation )
- MAY description
- )
## namedObject is needed for groups without members
objectClasses: (
1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STRUCTURAL
diff --git a/ldap/schema/60autofs.ldif b/ldap/schema/60autofs.ldif
index 084e9ec30..de3922aa2 100644
--- a/ldap/schema/60autofs.ldif
+++ b/ldap/schema/60autofs.ldif
@@ -6,7 +6,23 @@ dn: cn=schema
################################################################################
#
attributeTypes: (
- 1.3.6.1.1.1.1.33
+ 1.3.6.1.1.1.1.31 NAME 'automountMapName'
+ DESC 'automount Map Name'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.32 NAME 'automountKey'
+ DESC 'Automount Key value'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.33
NAME 'automountInformation'
DESC 'Information used by the autofs automounter'
EQUALITY caseExactIA5Match
@@ -18,25 +34,22 @@ attributeTypes: (
################################################################################
#
objectClasses: (
- 1.3.6.1.1.1.2.17
- NAME 'automount'
- DESC 'An entry in an automounter map'
+ 1.3.6.1.1.1.2.16
+ NAME 'automountMap'
+ DESC 'An group of related automount objects'
SUP top
STRUCTURAL
- MUST ( cn $ automountInformation )
- MAY ( description )
+ MAY ( ou $ automountMapName $ description )
X-ORIGIN 'draft-howard-rfc2307bis'
)
-#
-################################################################################
-#
objectClasses: (
- 1.3.6.1.1.1.2.16
- NAME 'automountMap'
- DESC 'An group of related automount objects'
+ 1.3.6.1.1.1.2.17
+ NAME 'automount'
+ DESC 'An entry in an automounter map'
SUP top
STRUCTURAL
- MUST ( ou )
+ MUST ( automountInformation )
+ MAY ( cn $ description $ automountKey )
X-ORIGIN 'draft-howard-rfc2307bis'
)
#
--
2.26.2


@@ -0,0 +1,192 @@
From b2e0a1d405d15383064e547fd15008bc136d3efe Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 17 Dec 2020 08:22:23 +1000
Subject: [PATCH 05/12] Issue 4498 - BUG - entryuuid replication may not work
(#4503)
Bug Description: EntryUUID can be duplicated in replication,
due to a missing check in assign_uuid
Fix Description: Add a test case that reproduces the problem,
and add the correct check for an existing entryUUID.
fixes: https://github.com/389ds/389-ds-base/issues/4498
Author: William Brown <william@blackhats.net.au>
Review by: @mreynolds389
---
.../tests/suites/entryuuid/replicated_test.py | 77 +++++++++++++++++++
rpm.mk | 2 +-
src/plugins/entryuuid/src/lib.rs | 20 ++++-
src/slapi_r_plugin/src/constants.rs | 2 +
src/slapi_r_plugin/src/pblock.rs | 7 ++
5 files changed, 106 insertions(+), 2 deletions(-)
create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py
diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
new file mode 100644
index 000000000..a2ebc8ff7
--- /dev/null
+++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
@@ -0,0 +1,77 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 William Brown <william@blackhats.net.au>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import ldap
+import pytest
+import logging
+from lib389.topologies import topology_m2 as topo_m2
+from lib389.idm.user import nsUserAccounts
+from lib389.paths import Paths
+from lib389.utils import ds_is_older
+from lib389._constants import *
+from lib389.replica import ReplicationManager
+
+default_paths = Paths()
+
+pytestmark = pytest.mark.tier1
+
+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
+
+def test_entryuuid_with_replication(topo_m2):
+ """ Check that entryuuid works with replication
+
+ :id: a5f15bf9-7f63-473a-840c-b9037b787024
+
+ :setup: two node mmr
+
+ :steps:
+ 1. Create an entry on one server
+ 2. Wait for replication
+ 3. Assert it is on the second
+
+ :expectedresults:
+ 1. Success
+ 1. Success
+ 1. Success
+ """
+
+ server_a = topo_m2.ms["supplier1"]
+ server_b = topo_m2.ms["supplier2"]
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000)
+ euuid_a = account_a.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_a)
+ assert(euuid_a is not None)
+ assert(len(euuid_a) == 1)
+
+ repl.wait_for_replication(server_a, server_b)
+
+ account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000")
+ euuid_b = account_b.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_b)
+
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,))
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,))
+
+ assert(euuid_b is not None)
+ assert(len(euuid_b) == 1)
+ assert(euuid_b == euuid_a)
+
+ account_b.set("description", "update")
+ repl.wait_for_replication(server_b, server_a)
+
+ euuid_c = account_a.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_c)
+ assert(euuid_c is not None)
+ assert(len(euuid_c) == 1)
+ assert(euuid_c == euuid_a)
+
diff --git a/rpm.mk b/rpm.mk
index 02f5bba37..d1cdff7df 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -25,7 +25,7 @@ TSAN_ON = 0
# Undefined Behaviour Sanitizer
UBSAN_ON = 0
-RUST_ON = 0
+RUST_ON = 1
# PERL_ON is deprecated and turns on the LEGACY_ON, this for not breaking people's workflows.
PERL_ON = 1
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index 92977db05..0197c5e83 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma
fn assign_uuid(e: &mut EntryRef) {
let sdn = e.get_sdnref();
+ // 🚧 safety barrier 🚧
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Trace,
+ "assign_uuid -> entryUUID exists, skipping dn {}",
+ sdn.to_dn_string()
+ );
+ return;
+ }
+
// We could consider making these lazy static.
let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn");
let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn");
@@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid {
}
fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
- log_error!(ErrorLevel::Trace, "betxn_pre_add");
+ if pb.get_is_replicated_operation() {
+ log_error!(
+ ErrorLevel::Trace,
+ "betxn_pre_add -> replicated operation, will not change"
+ );
+ return Ok(());
+ }
+
+ log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
assign_uuid(&mut e);
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
index 34845c2f4..aa0691acc 100644
--- a/src/slapi_r_plugin/src/constants.rs
+++ b/src/slapi_r_plugin/src/constants.rs
@@ -164,6 +164,8 @@ pub(crate) enum PblockType {
AddEntry = 60,
/// SLAPI_BACKEND
Backend = 130,
+ /// SLAPI_IS_REPLICATED_OPERATION
+ IsReplicationOperation = 142,
/// SLAPI_PLUGIN_MR_NAMES
MRNames = 624,
/// SLAPI_PLUGIN_SYNTAX_NAMES
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
index 0f83914f3..718ff2ca7 100644
--- a/src/slapi_r_plugin/src/pblock.rs
+++ b/src/slapi_r_plugin/src/pblock.rs
@@ -279,4 +279,11 @@ impl PblockRef {
pub fn get_op_result(&mut self) -> i32 {
self.get_value_i32(PblockType::OpResult).unwrap_or(-1)
}
+
+ pub fn get_is_replicated_operation(&mut self) -> bool {
+ let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0);
+ // Because rust returns the result of the last evaluation, we can
+ // just return if not equal 0.
+ i != 0
+ }
}
--
2.26.3


@@ -1,36 +0,0 @@
From 3d9ced9e340678cc02b1a36c2139492c95ef15a6 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 12 Aug 2020 12:46:42 -0400
Subject: [PATCH 2/2] Issue 50933 - Fix OID change between 10rfc2307 and
10rfc2307compat
Bug Description: 10rfc2307compat changed the OID of the nisMap objectclass to
match the standard OID, but this breaks replication with
older versions of DS.
Fix Description: Continue to use the old (invalid?) OID for nisMap so that
replication does not break in a mixed-version environment.
Fixes: https://pagure.io/389-ds-base/issue/50933
Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/schema/10rfc2307compat.ldif | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 78c588d08..8ba72e1e3 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -253,7 +253,7 @@ objectClasses: (
MAY ( nisNetgroupTriple $ memberNisNetgroup $ description )
)
objectClasses: (
- 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL
+ 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL
DESC 'A generic abstraction of a NIS map'
MUST nisMapName
MAY description
--
2.26.2


@@ -0,0 +1,626 @@
From 04c44e74503a842561b6c6e58001faf86d924b20 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 7 Dec 2020 11:00:45 -0500
Subject: [PATCH 06/12] Issue 4421 - Unable to build with Rust enabled in
closed environment
Description: Add Makefile flags and update rpm.mk to allow updating
and downloading all of the cargo/rust dependencies. This is
needed for nightly tests and upstream/downstream releases.
Fixes: https://github.com/389ds/389-ds-base/issues/4421
Reviewed by: firstyear(Thanks!)
---
rpm.mk | 3 +-
rpm/389-ds-base.spec.in | 2 +-
src/Cargo.lock | 563 ----------------------------------------
3 files changed, 3 insertions(+), 565 deletions(-)
delete mode 100644 src/Cargo.lock
diff --git a/rpm.mk b/rpm.mk
index d1cdff7df..ef810c63c 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -44,6 +44,7 @@ update-cargo-dependencies:
cargo update --manifest-path=./src/Cargo.toml
download-cargo-dependencies:
+ cargo update --manifest-path=./src/Cargo.toml
cargo vendor --manifest-path=./src/Cargo.toml
cargo fetch --manifest-path=./src/Cargo.toml
tar -czf vendor.tar.gz vendor
@@ -114,7 +115,7 @@ rpmbuildprep:
cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \
fi
-srpms: rpmroot srpmdistdir tarballs rpmbuildprep
+srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index b9f85489b..d80de8422 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -357,7 +357,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug"
%endif
%if %{use_rust}
-RUST_FLAGS="--enable-rust"
+RUST_FLAGS="--enable-rust --enable-rust-offline"
%endif
%if %{use_legacy}
diff --git a/src/Cargo.lock b/src/Cargo.lock
deleted file mode 100644
index 33d7b8f23..000000000
--- a/src/Cargo.lock
+++ /dev/null
@@ -1,563 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-[[package]]
-name = "ansi_term"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi",
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "autocfg"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
-
-[[package]]
-name = "base64"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
-
-[[package]]
-name = "bitflags"
-version = "1.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-
-[[package]]
-name = "byteorder"
-version = "1.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
-
-[[package]]
-name = "cbindgen"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
-dependencies = [
- "clap",
- "log",
- "proc-macro2",
- "quote",
- "serde",
- "serde_json",
- "syn",
- "tempfile",
- "toml",
-]
-
-[[package]]
-name = "cc"
-version = "1.0.67"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
-dependencies = [
- "jobserver",
-]
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "clap"
-version = "2.33.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
-dependencies = [
- "ansi_term",
- "atty",
- "bitflags",
- "strsim",
- "textwrap",
- "unicode-width",
- "vec_map",
-]
-
-[[package]]
-name = "entryuuid"
-version = "0.1.0"
-dependencies = [
- "cc",
- "libc",
- "paste",
- "slapi_r_plugin",
- "uuid",
-]
-
-[[package]]
-name = "entryuuid_syntax"
-version = "0.1.0"
-dependencies = [
- "cc",
- "libc",
- "paste",
- "slapi_r_plugin",
- "uuid",
-]
-
-[[package]]
-name = "fernet"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
-dependencies = [
- "base64",
- "byteorder",
- "getrandom",
- "openssl",
- "zeroize",
-]
-
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
-[[package]]
-name = "getrandom"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
-dependencies = [
- "cfg-if",
- "libc",
- "wasi",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "itoa"
-version = "0.4.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
-
-[[package]]
-name = "jobserver"
-version = "0.1.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-
-[[package]]
-name = "libc"
-version = "0.2.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
-
-[[package]]
-name = "librnsslapd"
-version = "0.1.0"
-dependencies = [
- "cbindgen",
- "libc",
- "slapd",
-]
-
-[[package]]
-name = "librslapd"
-version = "0.1.0"
-dependencies = [
- "cbindgen",
- "libc",
- "slapd",
-]
-
-[[package]]
-name = "log"
-version = "0.4.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "once_cell"
-version = "1.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
-
-[[package]]
-name = "openssl"
-version = "0.10.34"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
-dependencies = [
- "bitflags",
- "cfg-if",
- "foreign-types",
- "libc",
- "once_cell",
- "openssl-sys",
-]
-
-[[package]]
-name = "openssl-sys"
-version = "0.9.63"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
-dependencies = [
- "autocfg",
- "cc",
- "libc",
- "pkg-config",
- "vcpkg",
-]
-
-[[package]]
-name = "paste"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
-dependencies = [
- "paste-impl",
- "proc-macro-hack",
-]
-
-[[package]]
-name = "paste-impl"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
-dependencies = [
- "proc-macro-hack",
-]
-
-[[package]]
-name = "pkg-config"
-version = "0.3.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-
-[[package]]
-name = "ppv-lite86"
-version = "0.2.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
-
-[[package]]
-name = "proc-macro-hack"
-version = "0.5.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
-dependencies = [
- "unicode-xid",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "rand"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
-dependencies = [
- "libc",
- "rand_chacha",
- "rand_core",
- "rand_hc",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
-dependencies = [
- "ppv-lite86",
- "rand_core",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "rand_hc"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
-dependencies = [
- "rand_core",
-]
-
-[[package]]
-name = "redox_syscall"
-version = "0.2.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
-dependencies = [
- "bitflags",
-]
-
-[[package]]
-name = "remove_dir_all"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "rsds"
-version = "0.1.0"
-
-[[package]]
-name = "ryu"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
-
-[[package]]
-name = "serde"
-version = "1.0.126"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.126"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.64"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
-dependencies = [
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "slapd"
-version = "0.1.0"
-dependencies = [
- "fernet",
-]
-
-[[package]]
-name = "slapi_r_plugin"
-version = "0.1.0"
-dependencies = [
- "lazy_static",
- "libc",
- "paste",
- "uuid",
-]
-
-[[package]]
-name = "strsim"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-
-[[package]]
-name = "syn"
-version = "1.0.72"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-xid",
-]
-
-[[package]]
-name = "synstructure"
-version = "0.12.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "unicode-xid",
-]
-
-[[package]]
-name = "tempfile"
-version = "3.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
-dependencies = [
- "cfg-if",
- "libc",
- "rand",
- "redox_syscall",
- "remove_dir_all",
- "winapi",
-]
-
-[[package]]
-name = "textwrap"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-dependencies = [
- "unicode-width",
-]
-
-[[package]]
-name = "toml"
-version = "0.5.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "unicode-width"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
-
-[[package]]
-name = "unicode-xid"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
-
-[[package]]
-name = "uuid"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "vcpkg"
-version = "0.2.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d"
-
-[[package]]
-name = "vec_map"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-
-[[package]]
-name = "wasi"
-version = "0.10.2+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
-[[package]]
-name = "zeroize"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
-dependencies = [
- "zeroize_derive",
-]
-
-[[package]]
-name = "zeroize_derive"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "synstructure",
-]
--
2.26.3


@@ -1,147 +0,0 @@
From 1085823bf5586d55103cfba249fdf212e9afcb7c Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 4 Jun 2020 11:51:53 +1000
Subject: [PATCH] Ticket 51131 - improve mutex alloc in conntable
Bug Description: We previously did delayed allocation
of mutexes, which @tbordaz noted can lead to heavy use
of the pthread mutex init routines. This was done under
the conntable lock, as was cleaning the connection.
Fix Description: Rather than delayed allocation, we
initialise everything at startup instead. While startup
may take slightly longer, at run time we have a smaller
and lighter connection allocation routine that is able
to release the CT lock sooner.
https://pagure.io/389-ds-base/issue/51131
Author: William Brown <william@blackhats.net.au>
Review by: ???
---
ldap/servers/slapd/conntable.c | 86 +++++++++++++++++++---------------
1 file changed, 47 insertions(+), 39 deletions(-)
diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
index b23dc3435..feb9c0d75 100644
--- a/ldap/servers/slapd/conntable.c
+++ b/ldap/servers/slapd/conntable.c
@@ -138,10 +138,21 @@ connection_table_new(int table_size)
ct->conn_next_offset = 1;
ct->conn_free_offset = 1;
+ pthread_mutexattr_t monitor_attr = {0};
+ pthread_mutexattr_init(&monitor_attr);
+ pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
+
/* We rely on the fact that we called calloc, which zeros the block, so we don't
* init any structure element unless a zero value is troublesome later
*/
for (i = 0; i < table_size; i++) {
+ /*
+ * Technically this is a no-op due to calloc, but we should always be
+ * careful with things like this ....
+ */
+ ct->c[i].c_state = CONN_STATE_FREE;
+ /* Start the conn setup. */
+
LBER_SOCKET invalid_socket;
/* DBDB---move this out of here once everything works */
ct->c[i].c_sb = ber_sockbuf_alloc();
@@ -161,11 +172,20 @@ connection_table_new(int table_size)
ct->c[i].c_prev = NULL;
ct->c[i].c_ci = i;
ct->c[i].c_fdi = SLAPD_INVALID_SOCKET_INDEX;
- /*
- * Technically this is a no-op due to calloc, but we should always be
- * careful with things like this ....
- */
- ct->c[i].c_state = CONN_STATE_FREE;
+
+ if (pthread_mutex_init(&(ct->c[i].c_mutex), &monitor_attr) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n");
+ exit(1);
+ }
+
+ ct->c[i].c_pdumutex = PR_NewLock();
+ if (ct->c[i].c_pdumutex == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n");
+ exit(1);
+ }
+
+ /* Ready to rock, mark as such. */
+ ct->c[i].c_state = CONN_STATE_INIT;
/* Prepare the connection into the freelist. */
ct->c_freelist[i] = &(ct->c[i]);
}
@@ -241,44 +261,32 @@ connection_table_get_connection(Connection_Table *ct, int sd)
/* Never use slot 0 */
ct->conn_next_offset += 1;
}
- /* Now prep the slot for usage. */
- PR_ASSERT(c->c_next == NULL);
- PR_ASSERT(c->c_prev == NULL);
- PR_ASSERT(c->c_extension == NULL);
-
- if (c->c_state == CONN_STATE_FREE) {
-
- c->c_state = CONN_STATE_INIT;
-
- pthread_mutexattr_t monitor_attr = {0};
- pthread_mutexattr_init(&monitor_attr);
- pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
- if (pthread_mutex_init(&(c->c_mutex), &monitor_attr) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n");
- exit(1);
- }
-
- c->c_pdumutex = PR_NewLock();
- if (c->c_pdumutex == NULL) {
- c->c_pdumutex = NULL;
- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n");
- exit(1);
- }
- }
- /* Let's make sure there's no cruft left on there from the last time this connection was used. */
- /* Note: no need to lock c->c_mutex because this function is only
- * called by one thread (the slapd_daemon thread), and if we got this
- * far then `c' is not being used by any operation threads, etc.
- */
- connection_cleanup(c);
- c->c_ct = ct; /* pointer to connection table that owns this connection */
+ PR_Unlock(ct->table_mutex);
} else {
- /* couldn't find a Connection */
+ /* couldn't find a Connection, table must be full */
slapi_log_err(SLAPI_LOG_CONNS, "connection_table_get_connection", "Max open connections reached\n");
+ PR_Unlock(ct->table_mutex);
+ return NULL;
}
- /* We could move this to before the c alloc as there is no point to remain here. */
- PR_Unlock(ct->table_mutex);
+ /* Now prep the slot for usage. */
+ PR_ASSERT(c != NULL);
+ PR_ASSERT(c->c_next == NULL);
+ PR_ASSERT(c->c_prev == NULL);
+ PR_ASSERT(c->c_extension == NULL);
+ PR_ASSERT(c->c_state == CONN_STATE_INIT);
+ /* Let's make sure there's no cruft left on there from the last time this connection was used. */
+
+ /*
+ * Note: no need to lock c->c_mutex because this function is only
+ * called by one thread (the slapd_daemon thread), and if we got this
+ * far then `c' is not being used by any operation threads, etc. The
+ * memory ordering will be provided by the work queue sending c to a
+ * thread.
+ */
+ connection_cleanup(c);
+ /* pointer to connection table that owns this connection */
+ c->c_ct = ct;
return c;
}
--
2.26.2


@@ -1,66 +0,0 @@
From a9f53e9958861e6a7a827bd852d72d51a6512396 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 25 Nov 2020 18:07:34 +0100
Subject: [PATCH] Issue 4297 - 2nd fix for on ADD replication URP issue
internal searches with filter containing unescaped chars (#4439)
Bug description:
The previous fix is buggy because slapi_filter_escape_filter_value returns
an escaped filter component, not an escaped assertion value.
Fix description:
Use the escaped filter component as-is when building the search filter.
relates: https://github.com/389ds/389-ds-base/issues/4297
Reviewed by: William Brown
Platforms tested: F31
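The subtle point is that slapi_filter_escape_filter_value() hands back a complete filter component of the form "(attr=escaped-value)" rather than a bare escaped value, so it has to be spliced into the template with a plain %s. A short sketch of the corrected pattern, mirroring the hunks below; it is a fragment, not a standalone program, and assumes the slapi plugin headers are available:

#include <slapi-plugin.h>  /* slapi_filter_escape_filter_value, slapi_filter_sprintf, slapi_ch_free */

static char *
build_tombstone_filter(const char *basedn)
{
    /* Returns a full component such as "(nscpentrydn=escaped-basedn)" ... */
    char *escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);

    /* ... so the template only needs "%s", not "(nscpentrydn=%s)". */
    char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);

    slapi_ch_free((void **)&escaped_filter);
    return filter; /* caller releases it with slapi_ch_free_string() */
}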
---
ldap/servers/plugins/replication/urp.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index f41dbc72d..ed340c9d8 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
char *basedn = slapi_entry_get_ndn(entry);
- char *escaped_basedn;
+ char *escaped_filter;
const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
slapi_search_internal_set_pb(newpb,
slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1605,15 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
- char *escaped_basedn;
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
+ char *escaped_filter;
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
CSN *tombstone_csn = NULL;
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
char *parent_dn = slapi_dn_parent (basedn);
slapi_search_internal_set_pb(newpb,
--
2.26.2


@@ -0,0 +1,412 @@
From 279bdb5148eb0b67ddab40c4dd9d08e9e1672f13 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Fri, 26 Jun 2020 10:27:56 +1000
Subject: [PATCH 07/12] Ticket 51175 - resolve plugin name leaking
Bug Description: Previously pblock.c assumed that all plugin
names were static C strings. Rust can't create static C
strings, so these were intentionally leaked.
Fix Description: Rather than leak these, we now dup/free them
through the slapdplugin struct instead, meaning we can use
ephemeral, properly managed strings in Rust. This does not
affect any other existing code, which will still handle
static strings correctly.
https://pagure.io/389-ds-base/issue/51175
Author: William Brown <william@blackhats.net.au>
Review by: mreynolds, tbordaz (Thanks!)
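From the caller's side, the dup-on-set behaviour means the registration strings only need to outlive the slapi_pblock_set() call itself. The following is a hedged sketch, not code from the patch: it assumes it runs from a syntax plugin's init callback (the type checks in pblock.c require that), and the names and OID are hypothetical placeholders.

#include <slapi-plugin.h>

static int
register_syntax_names(Slapi_PBlock *pb)
{
    char *names[] = {"exampleSyntaxName", NULL};   /* hypothetical names */
    char oid[] = "1.1.1.1";                        /* hypothetical OID   */

    if (slapi_pblock_set(pb, SLAPI_PLUGIN_SYNTAX_NAMES, (void *)names) != 0) {
        return -1;
    }
    if (slapi_pblock_set(pb, SLAPI_PLUGIN_SYNTAX_OID, (void *)oid) != 0) {
        return -1;
    }
    /* names and oid may now go out of scope: pblock.c duplicated them,
     * and plugin_free() releases the copies when the plugin is torn down. */
    return 0;
}

This is what allows the Rust side to drop the leaking name_to_leaking_char helpers in favour of the Charray type shown below, which only pins its CStrings for the duration of the registration call.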
---
Makefile.am | 1 +
configure.ac | 2 +-
ldap/servers/slapd/pagedresults.c | 6 +--
ldap/servers/slapd/pblock.c | 9 ++--
ldap/servers/slapd/plugin.c | 7 +++
ldap/servers/slapd/pw_verify.c | 1 +
ldap/servers/slapd/tools/pwenc.c | 2 +-
src/slapi_r_plugin/README.md | 6 +--
src/slapi_r_plugin/src/charray.rs | 32 ++++++++++++++
src/slapi_r_plugin/src/lib.rs | 8 ++--
src/slapi_r_plugin/src/macros.rs | 17 +++++---
src/slapi_r_plugin/src/syntax_plugin.rs | 57 +++++++------------------
12 files changed, 85 insertions(+), 63 deletions(-)
create mode 100644 src/slapi_r_plugin/src/charray.rs
diff --git a/Makefile.am b/Makefile.am
index 627953850..36434cf17 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1312,6 +1312,7 @@ rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a
libslapi_r_plugin_SOURCES = \
src/slapi_r_plugin/src/backend.rs \
src/slapi_r_plugin/src/ber.rs \
+ src/slapi_r_plugin/src/charray.rs \
src/slapi_r_plugin/src/constants.rs \
src/slapi_r_plugin/src/dn.rs \
src/slapi_r_plugin/src/entry.rs \
diff --git a/configure.ac b/configure.ac
index b3cf77d08..61bf35e4a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -122,7 +122,7 @@ if test "$enable_debug" = yes ; then
debug_defs="-DDEBUG -DMCC_DEBUG"
debug_cflags="-g3 -O0 -rdynamic"
debug_cxxflags="-g3 -O0 -rdynamic"
- debug_rust_defs="-C debuginfo=2"
+ debug_rust_defs="-C debuginfo=2 -Z macro-backtrace"
cargo_defs=""
rust_target_dir="debug"
else
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index d8b8798b6..e3444e944 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -738,10 +738,10 @@ pagedresults_cleanup(Connection *conn, int needlock)
int i;
PagedResults *prp = NULL;
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n");
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); */
if (NULL == conn) {
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n");
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); */
return 0;
}
@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
if (needlock) {
pthread_mutex_unlock(&(conn->c_mutex));
}
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc);
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */
return rc;
}
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 1ad9d0399..f7d1f8885 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -3351,13 +3351,15 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
- pblock->pb_plugin->plg_syntax_names = (char **)value;
+ PR_ASSERT(pblock->pb_plugin->plg_syntax_names == NULL);
+ pblock->pb_plugin->plg_syntax_names = slapi_ch_array_dup((char **)value);
break;
case SLAPI_PLUGIN_SYNTAX_OID:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
- pblock->pb_plugin->plg_syntax_oid = (char *)value;
+ PR_ASSERT(pblock->pb_plugin->plg_syntax_oid == NULL);
+ pblock->pb_plugin->plg_syntax_oid = slapi_ch_strdup((char *)value);
break;
case SLAPI_PLUGIN_SYNTAX_FLAGS:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
@@ -3806,7 +3808,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
- pblock->pb_plugin->plg_mr_names = (char **)value;
+ PR_ASSERT(pblock->pb_plugin->plg_mr_names == NULL);
+ pblock->pb_plugin->plg_mr_names = slapi_ch_array_dup((char **)value);
break;
case SLAPI_PLUGIN_MR_COMPARE:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 282b98738..e6b48de60 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -2694,6 +2694,13 @@ plugin_free(struct slapdplugin *plugin)
if (plugin->plg_type == SLAPI_PLUGIN_PWD_STORAGE_SCHEME || plugin->plg_type == SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME) {
slapi_ch_free_string(&plugin->plg_pwdstorageschemename);
}
+ if (plugin->plg_type == SLAPI_PLUGIN_SYNTAX) {
+ slapi_ch_free_string(&plugin->plg_syntax_oid);
+ slapi_ch_array_free(plugin->plg_syntax_names);
+ }
+ if (plugin->plg_type == SLAPI_PLUGIN_MATCHINGRULE) {
+ slapi_ch_array_free(plugin->plg_mr_names);
+ }
release_componentid(plugin->plg_identity);
slapi_counter_destroy(&plugin->plg_op_counter);
if (!plugin->plg_group) {
diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c
index 4f0944b73..4ff1fa2fd 100644
--- a/ldap/servers/slapd/pw_verify.c
+++ b/ldap/servers/slapd/pw_verify.c
@@ -111,6 +111,7 @@ pw_verify_token_dn(Slapi_PBlock *pb) {
if (fernet_verify_token(dn, cred->bv_val, key, tok_ttl) != 0) {
rc = SLAPI_BIND_SUCCESS;
}
+ slapi_ch_free_string(&key);
#endif
return rc;
}
diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c
index 1629c06cd..d89225e34 100644
--- a/ldap/servers/slapd/tools/pwenc.c
+++ b/ldap/servers/slapd/tools/pwenc.c
@@ -34,7 +34,7 @@
int ldap_syslog;
int ldap_syslog_level;
-int slapd_ldap_debug = LDAP_DEBUG_ANY;
+/* int slapd_ldap_debug = LDAP_DEBUG_ANY; */
int detached;
FILE *error_logfp;
FILE *access_logfp;
diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md
index af9743ec9..1c9bcbf17 100644
--- a/src/slapi_r_plugin/README.md
+++ b/src/slapi_r_plugin/README.md
@@ -15,7 +15,7 @@ the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html)
> warning about danger.
This document will not detail the specifics of unsafe or the invariants you must adhere to for rust
-to work with C.
+to work with C. Failure to uphold these invariants will lead to less than optimal consequences.
If you still want to see more about the plugin bindings, go on ...
@@ -135,7 +135,7 @@ associated functions.
Now, you may notice that not all members of the trait are implemented. This is due to a feature
of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide
template versions of these functions. If you "overwrite" them, your implementation is used. Unlike
-OO, you may not inherit or call the default function.
+OO, you may not inherit or call the default function.
If a default is not provided you *must* implement that function to be considered valid. Today (20200422)
this only applies to `start` and `close`.
@@ -183,7 +183,7 @@ It's important to understand how Rust manages memory both on the stack and the h
As a result, this means that we must express in code, assertions about the proper ownership of memory
and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible
for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or
-*hand waving* magical failures that are eXtReMeLy FuN to debug.
+*hand waving* magical failures that are `eXtReMeLy FuN` to debug.
### Reference Types
diff --git a/src/slapi_r_plugin/src/charray.rs b/src/slapi_r_plugin/src/charray.rs
new file mode 100644
index 000000000..d2e44693c
--- /dev/null
+++ b/src/slapi_r_plugin/src/charray.rs
@@ -0,0 +1,32 @@
+use std::ffi::CString;
+use std::iter::once;
+use std::os::raw::c_char;
+use std::ptr;
+
+pub struct Charray {
+ pin: Vec<CString>,
+ charray: Vec<*const c_char>,
+}
+
+impl Charray {
+ pub fn new(input: &[&str]) -> Result<Self, ()> {
+ let pin: Result<Vec<_>, ()> = input
+ .iter()
+ .map(|s| CString::new(*s).map_err(|_e| ()))
+ .collect();
+
+ let pin = pin?;
+
+ let charray: Vec<_> = pin
+ .iter()
+ .map(|s| s.as_ptr())
+ .chain(once(ptr::null()))
+ .collect();
+
+ Ok(Charray { pin, charray })
+ }
+
+ pub fn as_ptr(&self) -> *const *const c_char {
+ self.charray.as_ptr()
+ }
+}
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
index 076907bae..be28cac95 100644
--- a/src/slapi_r_plugin/src/lib.rs
+++ b/src/slapi_r_plugin/src/lib.rs
@@ -1,9 +1,11 @@
-// extern crate lazy_static;
+#[macro_use]
+extern crate lazy_static;
#[macro_use]
pub mod macros;
pub mod backend;
pub mod ber;
+pub mod charray;
mod constants;
pub mod dn;
pub mod entry;
@@ -20,6 +22,7 @@ pub mod value;
pub mod prelude {
pub use crate::backend::{BackendRef, BackendRefTxn};
pub use crate::ber::BerValRef;
+ pub use crate::charray::Charray;
pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS};
pub use crate::dn::{Sdn, SdnRef};
pub use crate::entry::EntryRef;
@@ -30,8 +33,7 @@ pub mod prelude {
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
pub use crate::search::{Search, SearchScope};
pub use crate::syntax_plugin::{
- matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr,
- SlapiSubMr, SlapiSyntaxPlugin1,
+ matchingrule_register, SlapiOrdMr, SlapiSubMr, SlapiSyntaxPlugin1,
};
pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef};
pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef};
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
index bc8dfa60f..97fc5d7ef 100644
--- a/src/slapi_r_plugin/src/macros.rs
+++ b/src/slapi_r_plugin/src/macros.rs
@@ -249,6 +249,7 @@ macro_rules! slapi_r_syntax_plugin_hooks {
paste::item! {
use libc;
use std::convert::TryFrom;
+ use std::ffi::CString;
#[no_mangle]
pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 {
@@ -261,15 +262,15 @@ macro_rules! slapi_r_syntax_plugin_hooks {
};
// Setup the names/oids that this plugin provides syntaxes for.
-
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) };
- match pb.register_syntax_names(name_ptr) {
+ // DS will clone these, so they can be ephemeral to this function.
+ let name_vec = Charray::new($hooks_ident::attr_supported_names().as_slice()).expect("invalid supported names");
+ match pb.register_syntax_names(name_vec.as_ptr()) {
0 => {},
e => return e,
};
- let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) };
- match pb.register_syntax_oid(name_ptr) {
+ let attr_oid = CString::new($hooks_ident::attr_oid()).expect("invalid attr oid");
+ match pb.register_syntax_oid(attr_oid.as_ptr()) {
0 => {},
e => return e,
};
@@ -430,7 +431,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
e => return e,
};
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) };
+ let name_vec = Charray::new($hooks_ident::eq_mr_supported_names().as_slice()).expect("invalid mr supported names");
+ let name_ptr = name_vec.as_ptr();
// SLAPI_PLUGIN_MR_NAMES
match pb.register_mr_names(name_ptr) {
0 => {},
@@ -672,7 +674,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
e => return e,
};
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) };
+ let name_vec = Charray::new($hooks_ident::ord_mr_supported_names().as_slice()).expect("invalid ord supported names");
+ let name_ptr = name_vec.as_ptr();
// SLAPI_PLUGIN_MR_NAMES
match pb.register_mr_names(name_ptr) {
0 => {},
diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs
index e7d5c01bd..86f84bdd8 100644
--- a/src/slapi_r_plugin/src/syntax_plugin.rs
+++ b/src/slapi_r_plugin/src/syntax_plugin.rs
@@ -1,11 +1,11 @@
use crate::ber::BerValRef;
// use crate::constants::FilterType;
+use crate::charray::Charray;
use crate::error::PluginError;
use crate::pblock::PblockRef;
use crate::value::{ValueArray, ValueArrayRef};
use std::cmp::Ordering;
use std::ffi::CString;
-use std::iter::once;
use std::os::raw::c_char;
use std::ptr;
@@ -26,37 +26,6 @@ struct slapi_matchingRuleEntry {
mr_compat_syntax: *const *const c_char,
}
-pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char {
- let n = CString::new(name)
- .expect("An invalid string has been hardcoded!")
- .into_boxed_c_str();
- let n_ptr = n.as_ptr();
- // Now we intentionally leak the name here, and the pointer will remain valid.
- Box::leak(n);
- n_ptr
-}
-
-pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char {
- let n_arr: Vec<CString> = names
- .iter()
- .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!"))
- .collect();
- let n_arr = n_arr.into_boxed_slice();
- let n_ptr_arr: Vec<*const c_char> = n_arr
- .iter()
- .map(|v| v.as_ptr())
- .chain(once(ptr::null()))
- .collect();
- let n_ptr_arr = n_ptr_arr.into_boxed_slice();
-
- // Now we intentionally leak these names here,
- let _r_n_arr = Box::leak(n_arr);
- let r_n_ptr_arr = Box::leak(n_ptr_arr);
-
- let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char;
- name_ptr
-}
-
// oid - the oid of the matching rule
// name - the name of the mr
// desc - description
@@ -69,20 +38,24 @@ pub unsafe fn matchingrule_register(
syntax: &str,
compat_syntax: &[&str],
) -> i32 {
- let oid_ptr = name_to_leaking_char(oid);
- let name_ptr = name_to_leaking_char(name);
- let desc_ptr = name_to_leaking_char(desc);
- let syntax_ptr = name_to_leaking_char(syntax);
- let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax);
+ // Make everything CStrings that live long enough.
+
+ let oid_cs = CString::new(oid).expect("invalid oid");
+ let name_cs = CString::new(name).expect("invalid name");
+ let desc_cs = CString::new(desc).expect("invalid desc");
+ let syntax_cs = CString::new(syntax).expect("invalid syntax");
+
+ // We have to do this so the cstrings live long enough.
+ let compat_syntax_ca = Charray::new(compat_syntax).expect("invalid compat_syntax");
let new_mr = slapi_matchingRuleEntry {
- mr_oid: oid_ptr,
+ mr_oid: oid_cs.as_ptr(),
_mr_oidalias: ptr::null(),
- mr_name: name_ptr,
- mr_desc: desc_ptr,
- mr_syntax: syntax_ptr,
+ mr_name: name_cs.as_ptr(),
+ mr_desc: desc_cs.as_ptr(),
+ mr_syntax: syntax_cs.as_ptr(),
_mr_obsolete: 0,
- mr_compat_syntax: compat_syntax_ptr,
+ mr_compat_syntax: compat_syntax_ca.as_ptr(),
};
let new_mr_ptr = &new_mr as *const _;
--
2.26.3


@@ -1,502 +0,0 @@
From 4faec52810e12070ef72da347bb590c57d8761e4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 20 Nov 2020 17:47:18 -0500
Subject: [PATCH 1/2] Issue 3657 - Add options to dsctl for dsrc file
Description: Add options to create, modify, delete, and display
the .dsrc CLI tool shortcut file.
Relates: https://github.com/389ds/389-ds-base/issues/3657
Reviewed by: firstyear(Thanks!)
---
dirsrvtests/tests/suites/clu/dsrc_test.py | 136 ++++++++++
src/lib389/cli/dsctl | 2 +
src/lib389/lib389/cli_ctl/dsrc.py | 312 ++++++++++++++++++++++
3 files changed, 450 insertions(+)
create mode 100644 dirsrvtests/tests/suites/clu/dsrc_test.py
create mode 100644 src/lib389/lib389/cli_ctl/dsrc.py
diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py
new file mode 100644
index 000000000..1b27700ec
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dsrc_test.py
@@ -0,0 +1,136 @@
+import logging
+import pytest
+import os
+from os.path import expanduser
+from lib389.cli_base import FakeArgs
+from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc
+from lib389._constants import DEFAULT_SUFFIX, DN_DM
+from lib389.topologies import topology_st as topo
+
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def setup(topo, request):
+ """Preserve any existing .dsrc file"""
+
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ backup_file = dsrc_file + ".original"
+ if os.path.exists(dsrc_file):
+ os.rename(dsrc_file, backup_file)
+
+ def fin():
+ if os.path.exists(backup_file):
+ os.rename(backup_file, dsrc_file)
+
+ request.addfinalizer(fin)
+
+
+def test_dsrc(topo, setup):
+ """Test "dsctl dsrc" command
+
+ :id: 0610de6c-e167-4761-bdab-3e677b2d44bb
+ :setup: Standalone Instance
+ :steps:
+ 1. Test creation works
+ 2. Test creating duplicate section
+ 3. Test adding an additional inst config works
+ 4. Test removing an instance works
+ 5. Test modify works
+ 6. Test delete works
+ 7. Test display fails when no file is present
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ inst = topo.standalone
+ serverid = inst.serverid
+ second_inst_name = "Second"
+ second_inst_basedn = "o=second"
+ different_suffix = "o=different"
+
+ # Setup our args
+ args = FakeArgs()
+ args.basedn = DEFAULT_SUFFIX
+ args.binddn = DN_DM
+ args.json = None
+ args.uri = None
+ args.saslmech = None
+ args.tls_cacertdir = None
+ args.tls_cert = None
+ args.tls_key = None
+ args.tls_reqcert = None
+ args.starttls = None
+ args.cancel_starttls = None
+ args.pwdfile = None
+ args.do_it = True
+
+ # Create a dsrc configuration entry
+ create_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains("basedn = " + args.basedn)
+ assert topo.logcap.contains("binddn = " + args.binddn)
+ assert topo.logcap.contains("[" + serverid + "]")
+ topo.logcap.flush()
+
+ # Attempt to add duplicate instance section
+ with pytest.raises(ValueError):
+ create_dsrc(inst, log, args)
+
+ # Test adding a second instance works correctly
+ inst.serverid = second_inst_name
+ args.basedn = second_inst_basedn
+ create_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains("basedn = " + args.basedn)
+ assert topo.logcap.contains("[" + second_inst_name + "]")
+ topo.logcap.flush()
+
+ # Delete second instance
+ delete_dsrc(inst, log, args)
+ inst.serverid = serverid # Restore original instance name
+ display_dsrc(inst, topo.logcap.log, args)
+ assert not topo.logcap.contains("[" + second_inst_name + "]")
+ assert not topo.logcap.contains("basedn = " + args.basedn)
+ # Make sure first instance config is still present
+ assert topo.logcap.contains("[" + serverid + "]")
+ assert topo.logcap.contains("binddn = " + args.binddn)
+ topo.logcap.flush()
+
+ # Modify the config
+ args.basedn = different_suffix
+ modify_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains(different_suffix)
+ topo.logcap.flush()
+
+ # Remove an arg from the config
+ args.basedn = ""
+ modify_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert not topo.logcap.contains(different_suffix)
+ topo.logcap.flush()
+
+ # Remove the last entry, which should delete the file
+ delete_dsrc(inst, log, args)
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ assert not os.path.exists(dsrc_file)
+
+ # Make sure display fails
+ with pytest.raises(ValueError):
+ display_dsrc(inst, log, args)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
+
diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
index fe9bc10e9..69f069297 100755
--- a/src/lib389/cli/dsctl
+++ b/src/lib389/cli/dsctl
@@ -23,6 +23,7 @@ from lib389.cli_ctl import tls as cli_tls
from lib389.cli_ctl import health as cli_health
from lib389.cli_ctl import nsstate as cli_nsstate
from lib389.cli_ctl import dbgen as cli_dbgen
+from lib389.cli_ctl import dsrc as cli_dsrc
from lib389.cli_ctl.instance import instance_remove_all
from lib389.cli_base import (
disconnect_instance,
@@ -61,6 +62,7 @@ cli_tls.create_parser(subparsers)
cli_health.create_parser(subparsers)
cli_nsstate.create_parser(subparsers)
cli_dbgen.create_parser(subparsers)
+cli_dsrc.create_parser(subparsers)
argcomplete.autocomplete(parser)
diff --git a/src/lib389/lib389/cli_ctl/dsrc.py b/src/lib389/lib389/cli_ctl/dsrc.py
new file mode 100644
index 000000000..e49c7f819
--- /dev/null
+++ b/src/lib389/lib389/cli_ctl/dsrc.py
@@ -0,0 +1,312 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import json
+from os.path import expanduser
+from os import path, remove
+from ldapurl import isLDAPUrl
+from ldap.dn import is_dn
+import configparser
+
+
+def create_dsrc(inst, log, args):
+ """Create the .dsrc file
+
+ [instance]
+ uri = ldaps://hostname:port
+ basedn = dc=example,dc=com
+ binddn = uid=user,....
+ saslmech = [EXTERNAL|PLAIN]
+ tls_cacertdir = /path/to/cacertdir
+ tls_cert = /path/to/user.crt
+ tls_key = /path/to/user.key
+ tls_reqcert = [never, hard, allow]
+ starttls = [true, false]
+ pwdfile = /path/to/file
+ """
+
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+
+ # Verify this section does not already exist
+ instances = config.sections()
+ if inst.serverid in instances:
+ raise ValueError("There is already a configuration section for this instance!")
+
+ # Process and validate the args
+ config[inst.serverid] = {}
+
+ if args.uri is not None:
+ if not isLDAPUrl(args.uri):
+ raise ValueError("The uri is not a valid LDAP URL!")
+ if args.uri.startswith("ldapi"):
+ # We must use EXTERNAL saslmech for LDAPI
+ args.saslmech = "EXTERNAL"
+ config[inst.serverid]['uri'] = args.uri
+ if args.basedn is not None:
+ if not is_dn(args.basedn):
+ raise ValueError("The basedn is not a valid DN!")
+ config[inst.serverid]['basedn'] = args.basedn
+ if args.binddn is not None:
+ if not is_dn(args.binddn):
+ raise ValueError("The binddn is not a valid DN!")
+ config[inst.serverid]['binddn'] = args.binddn
+ if args.saslmech is not None:
+ if args.saslmech not in ['EXTERNAL', 'PLAIN']:
+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!")
+ config[inst.serverid]['saslmech'] = args.saslmech
+ if args.tls_cacertdir is not None:
+ if not path.exists(args.tls_cacertdir):
+ raise ValueError('--tls-cacertdir directory does not exist!')
+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir
+ if args.tls_cert is not None:
+ if not path.exists(args.tls_cert):
+ raise ValueError('--tls-cert does not point to an existing file!')
+ config[inst.serverid]['tls_cert'] = args.tls_cert
+ if args.tls_key is not None:
+ if not path.exists(args.tls_key):
+ raise ValueError('--tls-key does not point to an existing file!')
+ config[inst.serverid]['tls_key'] = args.tls_key
+ if args.tls_reqcert is not None:
+ if args.tls_reqcert not in ['never', 'hard', 'allow']:
+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!')
+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert
+ if args.starttls:
+ config[inst.serverid]['starttls'] = 'true'
+ if args.pwdfile is not None:
+ if not path.exists(args.pwdfile):
+ raise ValueError('--pwdfile does not exist!')
+ config[inst.serverid]['pwdfile'] = args.pwdfile
+
+ if len(config[inst.serverid]) == 0:
+ # No args set
+ raise ValueError("You must set at least one argument for the new dsrc file!")
+
+ # Print a preview of the config
+ log.info(f'Updating "{dsrc_file}" with:\n')
+ log.info(f' [{inst.serverid}]')
+ for k, v in config[inst.serverid].items():
+ log.info(f' {k} = {v}')
+
+    # Ask for confirmation unless --do-it was given
+ if not args.do_it:
+ while 1:
+ val = input(f'\nUpdate "{dsrc_file}" ? [yes]: ').rstrip().lower()
+ if val == '' or val == 'y' or val == 'yes':
+ break
+ if val == 'n' or val == 'no':
+ return
+
+ # Now write the file
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+
+ log.info(f'Successfully updated: {dsrc_file}')
+
+
+def modify_dsrc(inst, log, args):
+ """Modify the instance config
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+
+ if path.exists(dsrc_file):
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+
+ # Verify we have a section to modify
+ instances = config.sections()
+ if inst.serverid not in instances:
+ raise ValueError("There is no configuration section for this instance to modify!")
+
+ # Process and validate the args
+ if args.uri is not None:
+ if not isLDAPUrl(args.uri):
+ raise ValueError("The uri is not a valid LDAP URL!")
+ if args.uri.startswith("ldapi"):
+ # We must use EXTERNAL saslmech for LDAPI
+ args.saslmech = "EXTERNAL"
+ if args.uri == '':
+ del config[inst.serverid]['uri']
+ else:
+ config[inst.serverid]['uri'] = args.uri
+ if args.basedn is not None:
+ if not is_dn(args.basedn):
+ raise ValueError("The basedn is not a valid DN!")
+ if args.basedn == '':
+ del config[inst.serverid]['basedn']
+ else:
+ config[inst.serverid]['basedn'] = args.basedn
+ if args.binddn is not None:
+ if not is_dn(args.binddn):
+ raise ValueError("The binddn is not a valid DN!")
+ if args.binddn == '':
+ del config[inst.serverid]['binddn']
+ else:
+ config[inst.serverid]['binddn'] = args.binddn
+ if args.saslmech is not None:
+ if args.saslmech not in ['EXTERNAL', 'PLAIN']:
+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!")
+ if args.saslmech == '':
+ del config[inst.serverid]['saslmech']
+ else:
+ config[inst.serverid]['saslmech'] = args.saslmech
+ if args.tls_cacertdir is not None:
+ if not path.exists(args.tls_cacertdir):
+ raise ValueError('--tls-cacertdir directory does not exist!')
+ if args.tls_cacertdir == '':
+ del config[inst.serverid]['tls_cacertdir']
+ else:
+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir
+ if args.tls_cert is not None:
+ if not path.exists(args.tls_cert):
+ raise ValueError('--tls-cert does not point to an existing file!')
+ if args.tls_cert == '':
+ del config[inst.serverid]['tls_cert']
+ else:
+ config[inst.serverid]['tls_cert'] = args.tls_cert
+ if args.tls_key is not None:
+ if not path.exists(args.tls_key):
+ raise ValueError('--tls-key does not point to an existing file!')
+ if args.tls_key == '':
+ del config[inst.serverid]['tls_key']
+ else:
+ config[inst.serverid]['tls_key'] = args.tls_key
+ if args.tls_reqcert is not None:
+ if args.tls_reqcert not in ['never', 'hard', 'allow']:
+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!')
+ if args.tls_reqcert == '':
+ del config[inst.serverid]['tls_reqcert']
+ else:
+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert
+ if args.starttls:
+ config[inst.serverid]['starttls'] = 'true'
+ if args.cancel_starttls:
+ config[inst.serverid]['starttls'] = 'false'
+ if args.pwdfile is not None:
+ if not path.exists(args.pwdfile):
+ raise ValueError('--pwdfile does not exist!')
+ if args.pwdfile == '':
+ del config[inst.serverid]['pwdfile']
+ else:
+ config[inst.serverid]['pwdfile'] = args.pwdfile
+
+ # Okay now rewrite the file
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+
+ log.info(f'Successfully updated: {dsrc_file}')
+ else:
+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!')
+
+
+def delete_dsrc(inst, log, args):
+ """Delete the .dsrc file
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ if path.exists(dsrc_file):
+ if not args.do_it:
+ # Get confirmation
+ while 1:
+                val = input("\nAre you sure you want to remove this instance's configuration? [no]: ").rstrip().lower()
+ if val == 'y' or val == 'yes':
+ break
+ if val == '' or val == 'n' or val == 'no':
+ return
+
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+ instances = config.sections()
+ if inst.serverid not in instances:
+ raise ValueError("The is no configuration for this instance")
+
+ # Update the config object
+ del config[inst.serverid]
+
+ if len(config.sections()) == 0:
+ # The file would be empty so just delete it
+ try:
+ remove(dsrc_file)
+ log.info(f'Successfully removed: {dsrc_file}')
+ return
+ except OSError as e:
+ raise ValueError(f'Failed to delete "{dsrc_file}", error: {str(e)}')
+ else:
+ # write the updated config
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+ else:
+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!')
+
+ log.info(f'Successfully updated: {dsrc_file}')
+
+def display_dsrc(inst, log, args):
+ """Display the contents of the ~/.dsrc file
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+
+ if not path.exists(dsrc_file):
+ raise ValueError(f'There is no dsrc file "{dsrc_file}" to display!')
+
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+ instances = config.sections()
+
+ for inst_section in instances:
+ if args.json:
+ log.info(json.dumps({inst_section: dict(config[inst_section])}, indent=4))
+ else:
+ log.info(f'[{inst_section}]')
+ for k, v in config[inst_section].items():
+ log.info(f'{k} = {v}')
+ log.info("")
+
+
+def create_parser(subparsers):
+ dsrc_parser = subparsers.add_parser('dsrc', help="Manage the .dsrc file")
+ subcommands = dsrc_parser.add_subparsers(help="action")
+
+ # Create .dsrc file
+ dsrc_create_parser = subcommands.add_parser('create', help='Generate the .dsrc file')
+ dsrc_create_parser.set_defaults(func=create_dsrc)
+ dsrc_create_parser.add_argument('--uri', help="The URI (LDAP URL) for the Directory Server instance.")
+ dsrc_create_parser.add_argument('--basedn', help="The default database suffix.")
+    dsrc_create_parser.add_argument('--binddn', help="The default Bind DN used for authentication.")
+ dsrc_create_parser.add_argument('--saslmech', help="The SASL mechanism to use: PLAIN or EXTERNAL.")
+ dsrc_create_parser.add_argument('--tls-cacertdir', help="The directory containing the Trusted Certificate Authority certificate.")
+ dsrc_create_parser.add_argument('--tls-cert', help="The absolute file name to the server certificate.")
+ dsrc_create_parser.add_argument('--tls-key', help="The absolute file name to the server certificate key.")
+ dsrc_create_parser.add_argument('--tls-reqcert', help="Request certificate strength: 'never', 'allow', 'hard'")
+ dsrc_create_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.")
+ dsrc_create_parser.add_argument('--pwdfile', help="The absolute path to a file containing the Bind DN's password.")
+ dsrc_create_parser.add_argument('--do-it', action='store_true', help="Create the file without any confirmation.")
+
+ dsrc_modify_parser = subcommands.add_parser('modify', help='Modify the .dsrc file')
+ dsrc_modify_parser.set_defaults(func=modify_dsrc)
+ dsrc_modify_parser.add_argument('--uri', nargs='?', const='', help="The URI (LDAP URL) for the Directory Server instance.")
+ dsrc_modify_parser.add_argument('--basedn', nargs='?', const='', help="The default database suffix.")
+    dsrc_modify_parser.add_argument('--binddn', nargs='?', const='', help="The default Bind DN used for authentication.")
+ dsrc_modify_parser.add_argument('--saslmech', nargs='?', const='', help="The SASL mechanism to use: PLAIN or EXTERNAL.")
+ dsrc_modify_parser.add_argument('--tls-cacertdir', nargs='?', const='', help="The directory containing the Trusted Certificate Authority certificate.")
+ dsrc_modify_parser.add_argument('--tls-cert', nargs='?', const='', help="The absolute file name to the server certificate.")
+ dsrc_modify_parser.add_argument('--tls-key', nargs='?', const='', help="The absolute file name to the server certificate key.")
+ dsrc_modify_parser.add_argument('--tls-reqcert', nargs='?', const='', help="Request certificate strength: 'never', 'allow', 'hard'")
+ dsrc_modify_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.")
+ dsrc_modify_parser.add_argument('--cancel-starttls', action='store_true', help="Do not use startTLS for connection to the server.")
+ dsrc_modify_parser.add_argument('--pwdfile', nargs='?', const='', help="The absolute path to a file containing the Bind DN's password.")
+ dsrc_modify_parser.add_argument('--do-it', action='store_true', help="Update the file without any confirmation.")
+
+ # Delete the instance from the .dsrc file
+ dsrc_delete_parser = subcommands.add_parser('delete', help='Delete instance configuration from the .dsrc file.')
+ dsrc_delete_parser.set_defaults(func=delete_dsrc)
+ dsrc_delete_parser.add_argument('--do-it', action='store_true',
+ help="Delete this instance's configuration from the .dsrc file.")
+
+ # Display .dsrc file
+ dsrc_display_parser = subcommands.add_parser('display', help='Display the contents of the .dsrc file.')
+ dsrc_display_parser.set_defaults(func=display_dsrc)
--
2.26.2
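
For reference, the create_dsrc() handler in the patch above boils down to writing one INI-style section per instance with configparser, typically driven by something like `dsctl <instance> dsrc create --uri ... --basedn ... --do-it`. The following is a minimal sketch of that write path, assuming an instance named "localhost" and using illustrative values; it deliberately writes to ~/.dsrc-example rather than the real ~/.dsrc:

    import configparser
    from os.path import expanduser

    config = configparser.ConfigParser()
    config["localhost"] = {                      # section name == instance serverid
        "uri": "ldaps://ds.example.com:636",     # validated with ldapurl.isLDAPUrl() in the real code
        "basedn": "dc=example,dc=com",           # validated with ldap.dn.is_dn() in the real code
        "binddn": "cn=Directory Manager",
        "tls_reqcert": "hard",                   # one of: never, allow, hard
    }

    with open(f'{expanduser("~")}/.dsrc-example', "w") as configfile:
        config.write(configfile)                 # produces the [localhost] block shown in the docstring
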

@@ -0,0 +1,37 @@
From 40e9a4835a6e95f021a711a7c42ce0c1bddc5ba4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 21 May 2021 13:09:12 -0400
Subject: [PATCH 08/12] Issue 4773 - Enable interval feature of DNA plugin
Description: Enable the dormant interval feature in DNA plugin
relates: https://github.com/389ds/389-ds-base/issues/4773
Review by: mreynolds (one line commit rule)
---
ldap/servers/plugins/dna/dna.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index bf6b74a99..928a3f54a 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1023,7 +1023,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
/* Set the default interval to 1 */
entry->interval = 1;
-#ifdef DNA_ENABLE_INTERVAL
value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
if (value) {
entry->interval = strtoull(value, 0, 0);
@@ -1032,7 +1031,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
"dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval);
-#endif
value = slapi_entry_attr_get_charptr(e, DNA_GENERATE);
if (value) {
--
2.26.3

@@ -1,902 +0,0 @@
From 201cb1147c0a34bddbd3e5c03aecd804c47a9905 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 19 Nov 2020 10:21:10 +0100
Subject: [PATCH 2/2] Issue 4440 - BUG - ldifgen with --start-idx option fails
with unsupported operand (#4444)
Bug description:
Got a TypeError exception when using:
dsctl -v slapd-localhost ldifgen users --suffix
dc=example,dc=com --parent ou=people,dc=example,dc=com
--number 100000 --generic --start-idx=50
The reason is that by default the Python parser provides
the value for numeric options:
as an integer if specified by "--option value" or
as a string if specified by "--option=value".
Fix description:
Convert the numeric parameters to integers when using them.
The options impacted are:
- in the users subcommand: --number, --start-idx
- in the mod-load subcommand: --num-users, --add-users,
--del-users, --modrdn-users, --mod-users
FYI: An alternative solution would have been to tell the
parser that these values are integers. But two reasons
led me to implement the first solution:
- The first solution fixes the problem for all users, while the
second one would fix only the dsctl command.
- The first solution is easier to test:
I just added a new test file generated by a script
that duplicated the existing ldifgen test, renamed the
test cases and replaced the numeric arguments with
strings.
The second solution would require redesigning the test framework
to be able to test the parser.
relates: https://github.com/389ds/389-ds-base/issues/4440
Reviewed by:
Platforms tested: F32
(cherry picked from commit 3c3e1f30cdb046a1aabb93aacebcf261a76a0892)
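
Given the behavior described above - the same option reaching the handler as an int for "--option value" but as a str for "--option=value" - the fix normalizes at the point of use rather than in the parser. A minimal sketch of that defensive conversion follows; the helper name as_int is illustrative and not part of lib389:

    def as_int(value, name):
        """Accept an int or a numeric string and always return an int,
        mirroring the int() calls added to dbgen.py and dbgen_create_mods()."""
        try:
            return int(value)
        except (TypeError, ValueError):
            raise ValueError(f"{name} must be a number, got {value!r}")

    # Both call styles end up as the same integer:
    assert as_int(50, "start-idx") == as_int("50", "start-idx") == 50
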
---
.../tests/suites/clu/dbgen_test_usan.py | 806 ++++++++++++++++++
src/lib389/lib389/cli_ctl/dbgen.py | 10 +-
src/lib389/lib389/dbgen.py | 3 +
3 files changed, 814 insertions(+), 5 deletions(-)
create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test_usan.py
diff --git a/dirsrvtests/tests/suites/clu/dbgen_test_usan.py b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py
new file mode 100644
index 000000000..80ff63417
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py
@@ -0,0 +1,806 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+
+"""
+ This file contains tests similar to dbgen_test.py
+    except that parameters that are numbers are expressed as strings
+    (to mimic the parameter parser's default behavior, which returns an
+    int when parsing "option value" and a string when parsing "option=value").
+    This file has been generated by using:
+sed '
+9r z1
+s/ test_/ test_usan/
+/args.*= [0-9]/s,[0-9]*$,"&",
+/:id:/s/.$/1/
+' dbgen_test.py > dbgen_test_usan.py
+ ( with z1 file containing this comment )
+"""
+
+
+
+import subprocess
+import pytest
+
+from lib389.cli_ctl.dbgen import *
+from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates
+from lib389.idm.account import Accounts
+from lib389.idm.group import Groups
+from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from lib389.cli_base import FakeArgs
+
+pytestmark = pytest.mark.tier0
+
+LOG_FILE = '/tmp/dbgen.log'
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def set_log_file_and_ldif(topology_st, request):
+ global ldif_file
+ ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif'
+
+ fh = logging.FileHandler(LOG_FILE)
+ fh.setLevel(logging.DEBUG)
+ log.addHandler(fh)
+
+ def fin():
+ log.info('Delete files')
+ os.remove(LOG_FILE)
+ os.remove(ldif_file)
+
+ request.addfinalizer(fin)
+
+
+def run_offline_import(instance, ldif_file):
+ log.info('Stopping the server and running offline import...')
+ instance.stop()
+ assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None,
+ import_file=ldif_file)
+ instance.start()
+
+
+def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None):
+ LDAP_MOD = '/usr/bin/ldapmodify'
+ log.info('Add entries from ldif file with ldapmodify')
+ result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD,
+ '-h', instance.host, '-p', str(instance.port), '-af', ldif_file])
+ if output_to_check is not None:
+ assert output_to_check in ensure_str(result)
+
+
+def check_value_in_log_and_reset(content_list):
+ with open(LOG_FILE, 'r+') as f:
+ file_content = f.read()
+ log.info('Check if content is present in output')
+ for item in content_list:
+ assert item in file_content
+
+ log.info('Reset log file for next test')
+ f.truncate(0)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create ldif with users
+
+ :id: 426b5b94-9923-454d-a736-7e71ca985e91
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with users
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.suffix = DEFAULT_SUFFIX
+ args.parent = 'ou=people,dc=example,dc=com'
+ args.number = "1000"
+ args.rdn_cn = False
+ args.generic = True
+ args.start_idx = "50"
+ args.localize = False
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'suffix={}'.format(args.suffix),
+ 'parent={}'.format(args.parent),
+ 'number={}'.format(args.number),
+ 'rdn-cn={}'.format(args.rdn_cn),
+ 'generic={}'.format(args.generic),
+ 'start-idx={}'.format(args.start_idx),
+ 'localize={}'.format(args.localize),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create users ldif')
+ dbgen_create_users(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ run_offline_import(standalone, ldif_file)
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create ldif with group
+
+ :id: 97207413-9a93-4065-a5ec-63aa93801a31
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with group
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+ LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'myGroup'
+ args.parent = 'ou=groups,dc=example,dc=com'
+ args.suffix = DEFAULT_SUFFIX
+ args.number = "1"
+ args.num_members = "1000"
+ args.create_members = True
+ args.member_attr = 'uniquemember'
+ args.member_parent = 'ou=people,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'number={}'.format(args.number),
+ 'suffix={}'.format(args.suffix),
+ 'num-members={}'.format(args.num_members),
+ 'create-members={}'.format(args.create_members),
+ 'member-parent={}'.format(args.member_parent),
+ 'member-attr={}'.format(args.member_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create group ldif')
+ dbgen_create_groups(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+ log.info('Check that group is imported')
+ groups = Groups(standalone, DEFAULT_SUFFIX)
+ assert groups.exists(args.NAME + '-1')
+ new_group = groups.get(args.NAME + '-1')
+ new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com')
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with classic COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'classic'
+ args.NAME = 'My_Postal_Def'
+ args.parent = 'ou=cos definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = 'businessCategory'
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-specifier={}'.format(args.cos_specifier),
+ 'cos-template={}'.format(args.cos_template),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosClassicDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosTemplateDN', args.cos_template)
+ assert new_cos.present('cosSpecifier', args.cos_specifier)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: 6b26ca6d-226a-4f93-925e-faf95cc20211
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with pointer COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'pointer'
+ args.NAME = 'My_Postal_Def_pointer'
+ args.parent = 'ou=cos pointer definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = None
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-template={}'.format(args.cos_template),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosPointerDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosTemplateDN', args.cos_template)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: ab4b799e-e801-432a-a61d-badad2628201
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with indirect COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'indirect'
+ args.NAME = 'My_Postal_Def_indirect'
+ args.parent = 'ou=cos indirect definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = 'businessCategory'
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-specifier={}'.format(args.cos_specifier),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosIndirectDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosIndirectSpecifier', args.cos_specifier)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS template
+
+ :id: 544017c7-4a82-4e7d-a047-00b68a28e071
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with COS template
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'My_Template'
+ args.parent = 'ou=cos templates,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_priority = "1"
+ args.cos_attr_val = 'postalcode:12345'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-priority={}'.format(args.cos_priority),
+ 'cos-attr-val={}'.format(args.cos_attr_val),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS template ldif')
+ dbgen_create_cos_tmp(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS template is imported')
+ cos_temp = CosTemplates(standalone, args.parent)
+ assert cos_temp.exists(args.NAME)
+ new_cos = cos_temp.get(args.NAME)
+ assert new_cos.present('cosPriority', str(args.cos_priority))
+ assert new_cos.present('postalcode', '12345')
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a managed role
+
+ :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with managed role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+
+ args.NAME = 'My_Managed_Role'
+ args.parent = 'ou=managed roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'managed'
+ args.filter = None
+ args.role_dn = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create managed role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that managed role is imported')
+ roles = ManagedRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a filtered role
+
+ :id: cb3c8ea8-4234-40e2-8810-fb6a25973921
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with filtered role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+
+ args.NAME = 'My_Filtered_Role'
+ args.parent = 'ou=filtered roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'filtered'
+ args.filter = '"objectclass=posixAccount"'
+ args.role_dn = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'filter={}'.format(args.filter),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create filtered role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that filtered role is imported')
+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+ new_role = roles.get(args.NAME)
+ assert new_role.present('nsRoleFilter', args.filter)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a nested role
+
+ :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with nested role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'My_Nested_Role'
+ args.parent = 'ou=nested roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'nested'
+ args.filter = None
+ args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com']
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'role-dn={}'.format(args.role_dn),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create nested role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that nested role is imported')
+ roles = NestedRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+ new_role = roles.get(args.NAME)
+ assert new_role.present('nsRoleDN', args.role_dn[0])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create mixed modification ldif
+
+ :id: 4a2e0901-2b48-452e-a4a0-507735132c81
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate modification ldif
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.parent = DEFAULT_SUFFIX
+ args.create_users = True
+ args.delete_users = True
+ args.create_parent = False
+ args.num_users = "1000"
+ args.add_users = "100"
+ args.del_users = "999"
+ args.modrdn_users = "100"
+ args.mod_users = "10"
+ args.mod_attrs = ['cn', 'uid', 'sn']
+ args.randomize = False
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'create-users={}'.format(args.create_users),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'delete-users={}'.format(args.delete_users),
+ 'num-users={}'.format(args.num_users),
+ 'add-users={}'.format(args.add_users),
+ 'del-users={}'.format(args.del_users),
+ 'modrdn-users={}'.format(args.modrdn_users),
+ 'mod-users={}'.format(args.mod_users),
+ 'mod-attrs={}'.format(args.mod_attrs),
+ 'randomize={}'.format(args.randomize),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create modification ldif')
+ dbgen_create_mods(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file)
+
+ log.info('Check that some accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create nested ldif
+
+ :id: 9c281c28-4169-45e0-8c07-c5502d9a7581
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate nested ldif
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.suffix = DEFAULT_SUFFIX
+ args.node_limit = "100"
+ args.num_users = "600"
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'suffix={}'.format(args.suffix),
+ 'node-limit={}'.format(args.node_limit),
+ 'num-users={}'.format(args.num_users),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create nested ldif')
+ dbgen_create_nested(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+ count_ou = len(accounts.filter('(ou=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file)
+
+ standalone.restart()
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+ assert len(accounts.filter('(ou=*)')) > count_ou
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/src/lib389/lib389/cli_ctl/dbgen.py b/src/lib389/lib389/cli_ctl/dbgen.py
index 7bc3892ba..058342fb1 100644
--- a/src/lib389/lib389/cli_ctl/dbgen.py
+++ b/src/lib389/lib389/cli_ctl/dbgen.py
@@ -451,13 +451,13 @@ def dbgen_create_mods(inst, log, args):
props = {
"createUsers": args.create_users,
"deleteUsers": args.delete_users,
- "numUsers": args.num_users,
+ "numUsers": int(args.num_users),
"parent": args.parent,
"createParent": args.create_parent,
- "addUsers": args.add_users,
- "delUsers": args.del_users,
- "modrdnUsers": args.modrdn_users,
- "modUsers": args.mod_users,
+ "addUsers": int(args.add_users),
+ "delUsers": int(args.del_users),
+ "modrdnUsers": int(args.modrdn_users),
+ "modUsers": int(args.mod_users),
"random": args.randomize,
"modAttrs": args.mod_attrs
}
diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py
index 6273781a2..10fb200f7 100644
--- a/src/lib389/lib389/dbgen.py
+++ b/src/lib389/lib389/dbgen.py
@@ -220,6 +220,9 @@ def dbgen_users(instance, number, ldif_file, suffix, generic=False, entry_name="
"""
Generate an LDIF of randomly named entries
"""
+    # Let's ensure that integer parameters are not strings
+ number=int(number)
+ startIdx=int(startIdx)
familyname_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-FamilyNames')
givename_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-GivenNames')
familynames = []
--
2.26.2
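
With the int() coercion added to dbgen_users() above, the generator now accepts its numeric arguments as either ints or numeric strings. A short usage sketch follows; it assumes `inst` is an already-connected DirSrv instance (as provided by the test fixtures) and keeps the positional argument order used by the tests:

    from lib389.dbgen import dbgen_users
    from lib389._constants import DEFAULT_SUFFIX

    # `inst` is assumed to come from an existing lib389 topology/fixture.
    ldif_file = "/tmp/created.ldif"

    # "1000" (a string, as produced by "--number=1000") and 1000 (an int)
    # now behave identically thanks to the number = int(number) coercion.
    dbgen_users(inst, "1000", ldif_file, DEFAULT_SUFFIX, generic=True, entry_name="userNew")
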

@@ -0,0 +1,926 @@
From 8df95679519364d0993572ecbea72ab89e5250a5 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 20 May 2021 14:24:25 +0200
Subject: [PATCH 09/12] Issue 4623 - RFE - Monitor the current DB locks (#4762)
Description: DB locks get exhausted because of unindexed internal searches
(under a transaction). Indexing those searches is the way to prevent exhaustion.
If DB locks get exhausted during a txn, it leads to a DB panic, and the later recovery
can possibly fail. That leads to a full reinit of the instance where the DB locks
got exhausted.
Add three attributes to global BDB config: "nsslapd-db-locks-monitoring-enabled",
"nsslapd-db-locks-monitoring-threshold" and "nsslapd-db-locks-monitoring-pause".
By default, nsslapd-db-locks-monitoring-enabled is turned on, nsslapd-db-locks-monitoring-threshold is set to 90%, and nsslapd-db-locks-monitoring-pause is set to 500ms.
When the current number of locks gets close to the configured threshold (90% of the maximum by default), returning
the next candidate will fail until the maximum number of locks is
increased or the current locks are released.
The monitoring thread runs with a configurable interval (500ms by default).
Add the settings to the UI and CLI tools.
Fixes: https://github.com/389ds/389-ds-base/issues/4623
Reviewed by: @Firstyear, @tbordaz, @jchapma, @mreynolds389 (Thank you!!)
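
Since all three attributes live in the global BDB config entry, they can be driven through the same BDB_LDBMConfig object the test below uses. A short sketch, assuming `inst` is an already-connected DirSrv instance; note that, per the bdb_config.c handlers, the enabled and threshold settings only take effect after a restart, while the pause interval is applied live:

    from lib389.config import BDB_LDBMConfig

    def tune_db_lock_monitoring(inst, enabled="on", threshold="90", pause="500"):
        # `inst` is assumed to be a connected DirSrv instance (e.g. from a topology fixture).
        bdb_config = BDB_LDBMConfig(inst)
        bdb_config.replace("nsslapd-db-locks-monitoring-enabled", enabled)
        bdb_config.replace("nsslapd-db-locks-monitoring-threshold", threshold)  # accepted range: 70-95 (%)
        bdb_config.replace("nsslapd-db-locks-monitoring-pause", pause)          # milliseconds between checks
        inst.restart()  # required for the enabled/threshold values to take effect
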
---
.../suites/monitor/db_locks_monitor_test.py | 251 ++++++++++++++++++
ldap/servers/slapd/back-ldbm/back-ldbm.h | 13 +-
.../slapd/back-ldbm/db-bdb/bdb_config.c | 99 +++++++
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 85 ++++++
ldap/servers/slapd/back-ldbm/init.c | 3 +
ldap/servers/slapd/back-ldbm/ldbm_config.c | 3 +
ldap/servers/slapd/back-ldbm/ldbm_config.h | 3 +
ldap/servers/slapd/back-ldbm/ldbm_search.c | 13 +
ldap/servers/slapd/libglobs.c | 4 +-
src/cockpit/389-console/src/css/ds.css | 4 +
src/cockpit/389-console/src/database.jsx | 7 +
src/cockpit/389-console/src/index.html | 2 +-
.../src/lib/database/databaseConfig.jsx | 88 +++++-
src/lib389/lib389/backend.py | 3 +
src/lib389/lib389/cli_conf/backend.py | 10 +
15 files changed, 576 insertions(+), 12 deletions(-)
create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
new file mode 100644
index 000000000..7f9938f30
--- /dev/null
+++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
@@ -0,0 +1,251 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import pytest
+import datetime
+import subprocess
+from multiprocessing import Process, Queue
+from lib389 import pid_from_file
+from lib389.utils import ldap, os
+from lib389._constants import DEFAULT_SUFFIX, ReplicaRole
+from lib389.cli_base import LogCapture
+from lib389.idm.user import UserAccounts
+from lib389.idm.organizationalunit import OrganizationalUnits
+from lib389.tasks import AccessLog
+from lib389.backend import Backends
+from lib389.ldclt import Ldclt
+from lib389.dbgen import dbgen_users
+from lib389.tasks import ImportTask
+from lib389.index import Indexes
+from lib389.plugins import AttributeUniquenessPlugin
+from lib389.config import BDB_LDBMConfig
+from lib389.monitor import MonitorLDBM
+from lib389.topologies import create_topology, _remove_ssca_db
+
+pytestmark = pytest.mark.tier2
+db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False),
+ reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. "
+ "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.")
+
+DEBUGGING = os.getenv('DEBUGGING', default=False)
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+def _kill_ns_slapd(inst):
+ pid = str(pid_from_file(inst.ds_paths.pid_file))
+ cmd = ['kill', '-9', pid]
+ subprocess.Popen(cmd, stdout=subprocess.PIPE)
+
+
+@pytest.fixture(scope="function")
+def topology_st_fn(request):
+ """Create DS standalone instance for each test case"""
+
+ topology = create_topology({ReplicaRole.STANDALONE: 1})
+
+ def fin():
+ # Kill the hanging process at the end of test to prevent failures in the following tests
+ if DEBUGGING:
+ [_kill_ns_slapd(inst) for inst in topology]
+ else:
+ [_kill_ns_slapd(inst) for inst in topology]
+ assert _remove_ssca_db(topology)
+ [inst.stop() for inst in topology if inst.exists()]
+ [inst.delete() for inst in topology if inst.exists()]
+ request.addfinalizer(fin)
+
+ topology.logcap = LogCapture()
+ return topology
+
+
+@pytest.fixture(scope="function")
+def setup_attruniq_index_be_import(topology_st_fn):
+ """Enable Attribute Uniqueness, disable indexes and
+ import 120000 entries to the default backend
+ """
+ inst = topology_st_fn.standalone
+
+ inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
+ inst.config.set('nsslapd-plugin-logging', 'on')
+ inst.restart()
+
+ attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
+ attruniq.create(properties={'cn': 'attruniq'})
+ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
+ attruniq.add_unique_attribute(cn)
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
+ attruniq.enable_all_subtrees()
+ attruniq.enable()
+
+ indexes = Indexes(inst)
+ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
+ indexes.ensure_state(properties={
+ 'cn': cn,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': 'none'})
+
+ bdb_config = BDB_LDBMConfig(inst)
+ bdb_config.replace("nsslapd-db-locks", "130000")
+ inst.restart()
+
+ ldif_dir = inst.get_ldif_dir()
+ import_ldif = ldif_dir + '/perf_import.ldif'
+
+ # Valid online import
+ import_task = ImportTask(inst)
+ dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew")
+ import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
+ import_task.wait()
+ assert import_task.is_complete()
+
+
+def create_user_wrapper(q, users):
+ try:
+ users.create_test_user()
+ except Exception as ex:
+ q.put(ex)
+
+
+def spawn_worker_thread(function, users, log, timeout, info):
+ log.info(f"Starting the thread - {info}")
+ q = Queue()
+ p = Process(target=function, args=(q,users,))
+ p.start()
+
+ log.info(f"Waiting for {timeout} seconds for the thread to finish")
+ p.join(timeout)
+
+ if p.is_alive():
+ log.info("Killing the thread as it's still running")
+ p.terminate()
+ p.join()
+ raise RuntimeError(f"Function call was aborted: {info}")
+ result = q.get()
+ if isinstance(result, Exception):
+ raise result
+ else:
+ return result
+
+
+@db_locks_monitoring_ack
+@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")])
+def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold):
+ """Test that when all of the locks are exhausted the instance still working
+ and database is not corrupted
+
+ :id: 299108cc-04d8-4ddc-b58e-99157fccd643
+ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
+ :steps: 1. Set nsslapd-db-locks to 11000
+ 2. Check that we stop acquiring new locks when the threshold is reached
+ 3. Check that we can regulate a pause interval for DB locks monitoring thread
+ 4. Make sure the feature works for different backends on the same suffix
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ inst = topology_st_fn.standalone
+ ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com'
+
+ backends = Backends(inst)
+ backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX,
+ 'name': ADDITIONAL_SUFFIX[-3:]})
+ ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
+ ous.create(properties={'ou': 'newpeople'})
+
+ bdb_config = BDB_LDBMConfig(inst)
+ bdb_config.replace("nsslapd-db-locks", "11000")
+
+ # Restart server
+ inst.restart()
+
+ for lock_enabled in ["on", "off"]:
+ for lock_pause in ["100", "500", "1000"]:
+ bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled)
+ bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold)
+ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
+ inst.restart()
+
+ if lock_enabled == "off":
+ raised_exception = (RuntimeError, ldap.SERVER_DOWN)
+ else:
+ raised_exception = ldap.OPERATIONS_ERROR
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ with pytest.raises(raised_exception):
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
+ # Restart because we already run out of locks and the next unindexed searches will fail eventually
+ if lock_enabled == "off":
+ _kill_ns_slapd(inst)
+ inst.restart()
+
+ users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None)
+ with pytest.raises(raised_exception):
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
+ # In case feature is disabled - restart for the clean up
+ if lock_enabled == "off":
+ _kill_ns_slapd(inst)
+ inst.restart()
+
+
+@db_locks_monitoring_ack
+def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import):
+ """Test that DB lock pause setting increases the wait interval value for the monitoring thread
+
+ :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6
+ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
+ :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%)
+ 2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds)
+ 3. Make sure that the pause is successfully increased a few times in a row
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ inst = topology_st_fn.standalone
+
+ bdb_config = BDB_LDBMConfig(inst)
+ bdb_config.replace("nsslapd-db-locks", "20000")
+ lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause")
+ assert lock_pause == 500
+ lock_pause = "10000"
+ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
+
+ # Restart server
+ inst.restart()
+
+ lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled")
+ lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold")
+ assert lock_enabled == "on"
+ assert lock_threshold == 90
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ start = datetime.datetime.now()
+ with pytest.raises(ldap.OPERATIONS_ERROR):
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'")
+ end = datetime.datetime.now()
+ time_delta = end - start
+ if time_delta.seconds < 9:
+ raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. "
+ f"Finished the execution in {time_delta.seconds} seconds")
+ # In case something has failed - restart for the clean up
+ inst.restart()
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 571b0a58b..afb831c32 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -155,6 +155,8 @@ typedef unsigned short u_int16_t;
#define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */
#define DEFAULT_DBCACHE_SIZE 33554432
#define DEFAULT_DBCACHE_SIZE_STR "33554432"
+#define DEFAULT_DBLOCK_PAUSE 500
+#define DEFAULT_DBLOCK_PAUSE_STR "500"
#define DEFAULT_MODE 0600
#define DEFAULT_ALLIDSTHRESHOLD 4000
#define DEFAULT_IDL_TUNE 1
@@ -575,12 +577,21 @@ struct ldbminfo
char *li_backend_implement; /* low layer backend implementation */
int li_noparentcheck; /* check if parent exists on add */
- /* the next 3 fields are for the params that don't get changed until
+ /* db lock monitoring */
+ /* if we decide to move the values to bdb_config, we can use slapi_back_get_info function to retrieve the values */
+ int32_t li_dblock_monitoring; /* enables db locks monitoring thread - requires restart */
+ uint32_t li_dblock_monitoring_pause; /* an interval for db locks monitoring thread */
+ uint32_t li_dblock_threshold; /* when the percentage is reached, abort the search in ldbm_back_next_search_entry - requires restart*/
+ uint32_t li_dblock_threshold_reached;
+
+ /* the next 4 fields are for the params that don't get changed until
* the server is restarted (used by the admin console)
*/
char *li_new_directory;
uint64_t li_new_dbcachesize;
int li_new_dblock;
+ int32_t li_new_dblock_monitoring;
+ uint64_t li_new_dblock_threshold;
int li_new_dbncache;
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
index 738b841aa..167644943 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
@@ -190,6 +190,102 @@ bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int ap
return retval;
}
+static void *
+bdb_config_db_lock_monitoring_get(void *arg)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+
+ return (void *)((intptr_t)(li->li_new_dblock_monitoring));
+}
+
+static int
+bdb_config_db_lock_monitoring_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+ int retval = LDAP_SUCCESS;
+ int val = (int32_t)((intptr_t)value);
+
+ if (apply) {
+ if (CONFIG_PHASE_RUNNING == phase) {
+ li->li_new_dblock_monitoring = val;
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_monitoring_set",
+ "New nsslapd-db-lock-monitoring value will not take affect until the server is restarted\n");
+ } else {
+ li->li_new_dblock_monitoring = val;
+ li->li_dblock_monitoring = val;
+ }
+ }
+
+ return retval;
+}
+
+static void *
+bdb_config_db_lock_pause_get(void *arg)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+
+ return (void *)((uintptr_t)(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED)));
+}
+
+static int
+bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+ int retval = LDAP_SUCCESS;
+ u_int32_t val = (u_int32_t)((uintptr_t)value);
+
+ if (val == 0) {
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set",
+ "%s was set to '0'. The default value will be used (%s)",
+ CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR);
+ val = DEFAULT_DBLOCK_PAUSE;
+ }
+
+ if (apply) {
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_monitoring_pause), val, __ATOMIC_RELAXED);
+ }
+ return retval;
+}
+
+static void *
+bdb_config_db_lock_threshold_get(void *arg)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+
+ return (void *)((uintptr_t)(li->li_new_dblock_threshold));
+}
+
+static int
+bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+ int retval = LDAP_SUCCESS;
+ u_int32_t val = (u_int32_t)((uintptr_t)value);
+
+ if (val < 70 || val > 95) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
+ CONFIG_DB_LOCKS_THRESHOLD, val);
+ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set",
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
+ CONFIG_DB_LOCKS_THRESHOLD, val);
+ retval = LDAP_OPERATIONS_ERROR;
+ return retval;
+ }
+
+ if (apply) {
+ if (CONFIG_PHASE_RUNNING == phase) {
+ li->li_new_dblock_threshold = val;
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_threshold_set",
+ "New nsslapd-db-lock-monitoring-threshold value will not take affect until the server is restarted\n");
+ } else {
+ li->li_new_dblock_threshold = val;
+ li->li_dblock_threshold = val;
+ }
+ }
+ return retval;
+}
+
static void *
bdb_config_dbcachesize_get(void *arg)
{
@@ -1409,6 +1505,9 @@ static config_info bdb_config_param[] = {
{CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0},
{CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_DB_LOCKS_MONITORING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_lock_monitoring_get, &bdb_config_db_lock_monitoring_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_DB_LOCKS_THRESHOLD, CONFIG_TYPE_INT, "90", &bdb_config_db_lock_threshold_get, &bdb_config_db_lock_threshold_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_DB_LOCKS_PAUSE, CONFIG_TYPE_INT, DEFAULT_DBLOCK_PAUSE_STR, &bdb_config_db_lock_pause_get, &bdb_config_db_lock_pause_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{NULL, 0, NULL, NULL, NULL, 0}};
void
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
index 6cccad8e6..2f25f67a2 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
@@ -35,6 +35,8 @@
(env)->txn_checkpoint((env), (kbyte), (min), (flags))
#define MEMP_STAT(env, gsp, fsp, flags, malloc) \
(env)->memp_stat((env), (gsp), (fsp), (flags))
+#define LOCK_STAT(env, statp, flags, malloc) \
+ (env)->lock_stat((env), (statp), (flags))
#define MEMP_TRICKLE(env, pct, nwrotep) \
(env)->memp_trickle((env), (pct), (nwrotep))
#define LOG_ARCHIVE(env, listp, flags, malloc) \
@@ -66,6 +68,7 @@
#define NEWDIR_MODE 0755
#define DB_REGION_PREFIX "__db."
+static int locks_monitoring_threadmain(void *param);
static int perf_threadmain(void *param);
static int checkpoint_threadmain(void *param);
static int trickle_threadmain(void *param);
@@ -84,6 +87,7 @@ static int bdb_start_checkpoint_thread(struct ldbminfo *li);
static int bdb_start_trickle_thread(struct ldbminfo *li);
static int bdb_start_perf_thread(struct ldbminfo *li);
static int bdb_start_txn_test_thread(struct ldbminfo *li);
+static int bdb_start_locks_monitoring_thread(struct ldbminfo *li);
static int trans_batch_count = 0;
static int trans_batch_limit = 0;
static int trans_batch_txn_min_sleep = 50; /* ms */
@@ -1299,6 +1303,10 @@ bdb_start(struct ldbminfo *li, int dbmode)
return return_value;
}
+ if (0 != (return_value = bdb_start_locks_monitoring_thread(li))) {
+ return return_value;
+ }
+
/* We need to free the memory to avoid a leak
* Also, we have to evaluate if the performance counter
* should be preserved or not for database restore.
@@ -2885,6 +2893,7 @@ bdb_start_perf_thread(struct ldbminfo *li)
return return_value;
}
+
/* Performance thread */
static int
perf_threadmain(void *param)
@@ -2910,6 +2919,82 @@ perf_threadmain(void *param)
return 0;
}
+
+/*
+ * create a thread for locks_monitoring_threadmain
+ */
+static int
+bdb_start_locks_monitoring_thread(struct ldbminfo *li)
+{
+ int return_value = 0;
+ if (li->li_dblock_monitoring) {
+ if (NULL == PR_CreateThread(PR_USER_THREAD,
+ (VFP)(void *)locks_monitoring_threadmain, li,
+ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ PR_UNJOINABLE_THREAD,
+ SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+ PRErrorCode prerr = PR_GetError();
+ slapi_log_err(SLAPI_LOG_ERR, "bdb_start_locks_monitoring_thread",
+ "Failed to create database locks monitoring thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+ prerr, slapd_pr_strerror(prerr));
+ return_value = -1;
+ }
+ }
+ return return_value;
+}
+
+
+/* DB Locks Monitoring thread */
+static int
+locks_monitoring_threadmain(void *param)
+{
+ int ret = 0;
+ uint64_t current_locks = 0;
+ uint64_t max_locks = 0;
+ uint32_t lock_exhaustion = 0;
+ PRIntervalTime interval;
+ struct ldbminfo *li = NULL;
+
+ PR_ASSERT(NULL != param);
+ li = (struct ldbminfo *)param;
+
+ dblayer_private *priv = li->li_dblayer_private;
+ bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+ PR_ASSERT(NULL != priv);
+
+ INCR_THREAD_COUNT(pEnv);
+
+ while (!BDB_CONFIG(li)->bdb_stop_threads) {
+ if (dblayer_db_uses_locking(pEnv->bdb_DB_ENV)) {
+ DB_LOCK_STAT *lockstat = NULL;
+ ret = LOCK_STAT(pEnv->bdb_DB_ENV, &lockstat, 0, (void *)slapi_ch_malloc);
+ if (0 == ret) {
+ current_locks = lockstat->st_nlocks;
+ max_locks = lockstat->st_maxlocks;
+ if (max_locks){
+ lock_exhaustion = (uint32_t)((double)current_locks / (double)max_locks * 100.0);
+ } else {
+ lock_exhaustion = 0;
+ }
+ if ((li->li_dblock_threshold) &&
+ (lock_exhaustion >= li->li_dblock_threshold)) {
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 1, __ATOMIC_RELAXED);
+ } else {
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 0, __ATOMIC_RELAXED);
+ }
+ }
+ slapi_ch_free((void **)&lockstat);
+ }
+ interval = PR_MillisecondsToInterval(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED));
+ DS_Sleep(interval);
+ }
+
+ DECR_THREAD_COUNT(pEnv);
+ slapi_log_err(SLAPI_LOG_TRACE, "locks_monitoring_threadmain", "Leaving locks_monitoring_threadmain\n");
+ return 0;
+}
+
+
/*
* create a thread for deadlock_threadmain
*/
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
index 893776699..4165c8fad 100644
--- a/ldap/servers/slapd/back-ldbm/init.c
+++ b/ldap/servers/slapd/back-ldbm/init.c
@@ -70,6 +70,9 @@ ldbm_back_init(Slapi_PBlock *pb)
/* Initialize the set of instances. */
li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor);
+ /* Init lock threshold value */
+ li->li_dblock_threshold_reached = 0;
+
/* ask the factory to give us space in the Connection object
* (only bulk import uses this)
*/
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 10cef250f..60884cf33 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -87,6 +87,9 @@ static char *ldbm_config_moved_attributes[] =
CONFIG_SERIAL_LOCK,
CONFIG_USE_LEGACY_ERRORCODE,
CONFIG_DB_DEADLOCK_POLICY,
+ CONFIG_DB_LOCKS_MONITORING,
+ CONFIG_DB_LOCKS_THRESHOLD,
+ CONFIG_DB_LOCKS_PAUSE,
""};
/* Used to add an array of entries, like the one above and
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h
index 58e64799c..6fa8292eb 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h
@@ -104,6 +104,9 @@ struct config_info
#define CONFIG_DB_VERBOSE "nsslapd-db-verbose"
#define CONFIG_DB_DEBUG "nsslapd-db-debug"
#define CONFIG_DB_LOCK "nsslapd-db-locks"
+#define CONFIG_DB_LOCKS_MONITORING "nsslapd-db-locks-monitoring-enabled"
+#define CONFIG_DB_LOCKS_THRESHOLD "nsslapd-db-locks-monitoring-threshold"
+#define CONFIG_DB_LOCKS_PAUSE "nsslapd-db-locks-monitoring-pause"
#define CONFIG_DB_NAMED_REGIONS "nsslapd-db-named-regions"
#define CONFIG_DB_PRIVATE_MEM "nsslapd-db-private-mem"
#define CONFIG_DB_PRIVATE_IMPORT_MEM "nsslapd-db-private-import-mem"
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 1a7b510d4..6e22debde 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -1472,6 +1472,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+
if ((reverse_list = operation_is_flag_set(op, OP_FLAG_REVERSE_CANDIDATE_ORDER))) {
/*
* Start at the end of the list and work our way forward. Since a single
@@ -1538,6 +1539,18 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
/* Find the next candidate entry and return it. */
while (1) {
+ if (li->li_dblock_monitoring &&
+ slapi_atomic_load_32((int32_t *)&(li->li_dblock_threshold_reached), __ATOMIC_RELAXED)) {
+ slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_next_search_entry",
+ "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold "
+ "under cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config). "
+ "Please, increase nsslapd-db-locks according to your needs.\n");
+ slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_ENTRY, NULL);
+ delete_search_result_set(pb, &sr);
+ rc = SLAPI_FAIL_GENERAL;
+ slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold)", 0, NULL);
+ goto bail;
+ }
/* check for abandon */
if (slapi_op_abandoned(pb) || (NULL == sr)) {
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 388616b36..db7d01bbc 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -8171,8 +8171,8 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply)
#if 0
debugHashTable(attr);
#endif
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored", attr);
- slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored", attr);
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored\n", attr);
+ slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored\n", attr);
return LDAP_NO_SUCH_ATTRIBUTE;
}
diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css
index 9248116e7..3cf50b593 100644
--- a/src/cockpit/389-console/src/css/ds.css
+++ b/src/cockpit/389-console/src/css/ds.css
@@ -639,6 +639,10 @@ option {
padding-right: 0 !important;
}
+.ds-vertical-scroll-auto {
+ overflow-y: auto !important;
+}
+
.alert {
max-width: 750px;
}
diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
index efa3ce6d5..11cae972c 100644
--- a/src/cockpit/389-console/src/database.jsx
+++ b/src/cockpit/389-console/src/database.jsx
@@ -157,6 +157,7 @@ export class Database extends React.Component {
const attrs = config.attrs;
let db_cache_auto = false;
let import_cache_auto = false;
+ let dblocksMonitoring = false;
let dbhome = "";
if ('nsslapd-db-home-directory' in attrs) {
@@ -168,6 +169,9 @@ export class Database extends React.Component {
if (attrs['nsslapd-import-cache-autosize'] != "0") {
import_cache_auto = true;
}
+ if (attrs['nsslapd-db-locks-monitoring-enabled'][0] == "on") {
+ dblocksMonitoring = true;
+ }
this.setState(() => (
{
@@ -187,6 +191,9 @@ export class Database extends React.Component {
txnlogdir: attrs['nsslapd-db-logdirectory'],
dbhomedir: dbhome,
dblocks: attrs['nsslapd-db-locks'],
+ dblocksMonitoring: dblocksMonitoring,
+ dblocksMonitoringThreshold: attrs['nsslapd-db-locks-monitoring-threshold'],
+ dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'],
chxpoint: attrs['nsslapd-db-checkpoint-interval'],
compactinterval: attrs['nsslapd-db-compactdb-interval'],
importcacheauto: attrs['nsslapd-import-cache-autosize'],
diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html
index 1278844fc..fd0eeb669 100644
--- a/src/cockpit/389-console/src/index.html
+++ b/src/cockpit/389-console/src/index.html
@@ -12,7 +12,7 @@
</head>
-<body>
+<body class="ds-vertical-scroll-auto">
<div id="dsinstance"></div>
<script src="index.js"></script>
</body>
diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
index f6e662bca..6a71c138d 100644
--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
@@ -31,6 +31,9 @@ export class GlobalDatabaseConfig extends React.Component {
txnlogdir: this.props.data.txnlogdir,
dbhomedir: this.props.data.dbhomedir,
dblocks: this.props.data.dblocks,
+ dblocksMonitoring: this.props.data.dblocksMonitoring,
+ dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
+ dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
chxpoint: this.props.data.chxpoint,
compactinterval: this.props.data.compactinterval,
importcachesize: this.props.data.importcachesize,
@@ -47,6 +50,9 @@ export class GlobalDatabaseConfig extends React.Component {
_txnlogdir: this.props.data.txnlogdir,
_dbhomedir: this.props.data.dbhomedir,
_dblocks: this.props.data.dblocks,
+ _dblocksMonitoring: this.props.data.dblocksMonitoring,
+ _dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
+ _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
_chxpoint: this.props.data.chxpoint,
_compactinterval: this.props.data.compactinterval,
_importcachesize: this.props.data.importcachesize,
@@ -55,6 +61,7 @@ export class GlobalDatabaseConfig extends React.Component {
_import_cache_auto: this.props.data.import_cache_auto,
};
this.handleChange = this.handleChange.bind(this);
+ this.select_db_locks_monitoring = this.select_db_locks_monitoring.bind(this);
this.select_auto_cache = this.select_auto_cache.bind(this);
this.select_auto_import_cache = this.select_auto_import_cache.bind(this);
this.save_db_config = this.save_db_config.bind(this);
@@ -76,6 +83,12 @@ export class GlobalDatabaseConfig extends React.Component {
}, this.handleChange(e));
}
+ select_db_locks_monitoring (val, e) {
+ this.setState({
+ dblocksMonitoring: !this.state.dblocksMonitoring
+ }, this.handleChange(val, e));
+ }
+
handleChange(e) {
// Generic
const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value;
@@ -150,6 +163,21 @@ export class GlobalDatabaseConfig extends React.Component {
cmd.push("--locks=" + this.state.dblocks);
requireRestart = true;
}
+ if (this.state._dblocksMonitoring != this.state.dblocksMonitoring) {
+ if (this.state.dblocksMonitoring) {
+ cmd.push("--locks-monitoring-enabled=on");
+ } else {
+ cmd.push("--locks-monitoring-enabled=off");
+ }
+ requireRestart = true;
+ }
+ if (this.state._dblocksMonitoringThreshold != this.state.dblocksMonitoringThreshold) {
+ cmd.push("--locks-monitoring-threshold=" + this.state.dblocksMonitoringThreshold);
+ requireRestart = true;
+ }
+ if (this.state._dblocksMonitoringPause != this.state.dblocksMonitoringPause) {
+ cmd.push("--locks-monitoring-pause=" + this.state.dblocksMonitoringPause);
+ }
if (this.state._chxpoint != this.state.chxpoint) {
cmd.push("--checkpoint-interval=" + this.state.chxpoint);
requireRestart = true;
@@ -216,6 +244,28 @@ export class GlobalDatabaseConfig extends React.Component {
let import_cache_form;
let db_auto_checked = false;
let import_auto_checked = false;
+ let dblocksMonitor = "";
+
+ if (this.state.dblocksMonitoring) {
+ dblocksMonitor = <div className="ds-margin-top">
+ <Row className="ds-margin-top" title="Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are acquired, the server will abort the searches while the number of locks are not decreased. It helps to avoid DB corruption and long recovery. (nsslapd-db-locks-monitoring-threshold)">
+ <Col componentClass={ControlLabel} sm={4}>
+ DB Locks Threshold Percentage
+ </Col>
+ <Col sm={8}>
+ <input className="ds-input" type="number" id="dblocksMonitoringThreshold" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringThreshold} />
+ </Col>
+ </Row>
+ <Row className="ds-margin-top" title="Sets the amount of time (milliseconds) that the monitoring thread spends waiting between checks. (nsslapd-db-locks-monitoring-pause)">
+ <Col componentClass={ControlLabel} sm={4}>
+ DB Locks Pause Milliseconds
+ </Col>
+ <Col sm={8}>
+ <input className="ds-input" type="number" id="dblocksMonitoringPause" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringPause} />
+ </Col>
+ </Row>
+ </div>;
+ }
if (this.state.db_cache_auto) {
db_cache_form = <div id="auto-cache-form" className="ds-margin-left">
@@ -422,14 +472,6 @@ export class GlobalDatabaseConfig extends React.Component {
<input id="dbhomedir" value={this.state.dbhomedir} onChange={this.handleChange} className="ds-input-auto" type="text" />
</Col>
</Row>
- <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
- <Col componentClass={ControlLabel} sm={4}>
- Database Locks
- </Col>
- <Col sm={8}>
- <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
- </Col>
- </Row>
<Row className="ds-margin-top" title="Amount of time in seconds after which the Directory Server sends a checkpoint entry to the database transaction log (nsslapd-db-checkpoint-interval).">
<Col componentClass={ControlLabel} sm={4}>
Database Checkpoint Interval
@@ -446,6 +488,36 @@ export class GlobalDatabaseConfig extends React.Component {
<input id="compactinterval" value={this.state.compactinterval} onChange={this.handleChange} className="ds-input-auto" type="text" />
</Col>
</Row>
+ <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
+ <Col componentClass={ControlLabel} sm={4}>
+ Database Locks
+ </Col>
+ <Col sm={8}>
+ <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
+ </Col>
+ </Row>
+ <Row>
+ <Col sm={12}>
+ <h5 className="ds-sub-header">DB Locks Monitoring</h5>
+ <hr />
+ </Col>
+ </Row>
+ <Row>
+ <Col sm={12}>
+ <Checkbox title="Set input to be set automatically"
+ id="dblocksMonitoring"
+ checked={this.state.dblocksMonitoring}
+ onChange={this.select_db_locks_monitoring}
+ >
+ Enable Monitoring
+ </Checkbox>
+ </Col>
+ </Row>
+ <Row>
+ <Col sm={12}>
+ {dblocksMonitor}
+ </Col>
+ </Row>
</Form>
</div>
</div>
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index bcd7b383f..13bb27842 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -1011,6 +1011,9 @@ class DatabaseConfig(DSLdapObject):
'nsslapd-db-transaction-batch-max-wait',
'nsslapd-db-logbuf-size',
'nsslapd-db-locks',
+ 'nsslapd-db-locks-monitoring-enabled',
+ 'nsslapd-db-locks-monitoring-threshold',
+ 'nsslapd-db-locks-monitoring-pause',
'nsslapd-db-private-import-mem',
'nsslapd-import-cache-autosize',
'nsslapd-cache-autosize',
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
index 6bfbcb036..722764d10 100644
--- a/src/lib389/lib389/cli_conf/backend.py
+++ b/src/lib389/lib389/cli_conf/backend.py
@@ -46,6 +46,9 @@ arg_to_attr = {
'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait',
'logbufsize': 'nsslapd-db-logbuf-size',
'locks': 'nsslapd-db-locks',
+ 'locks_monitoring_enabled': 'nsslapd-db-locks-monitoring-enabled',
+ 'locks_monitoring_threshold': 'nsslapd-db-locks-monitoring-threshold',
+ 'locks_monitoring_pause': 'nsslapd-db-locks-monitoring-pause',
'import_cache_autosize': 'nsslapd-import-cache-autosize',
'cache_autosize': 'nsslapd-cache-autosize',
'cache_autosize_split': 'nsslapd-cache-autosize-split',
@@ -998,6 +1001,13 @@ def create_parser(subparsers):
'the batch count (only works when txn-batch-val is set)')
set_db_config_parser.add_argument('--logbufsize', help='Specifies the transaction log information buffer size')
set_db_config_parser.add_argument('--locks', help='Sets the maximum number of database locks')
+ set_db_config_parser.add_argument('--locks-monitoring-enabled', help='Set to "on" or "off" to enable or disable DB lock monitoring against the '
+ 'percentage value set with "--locks-monitoring-threshold" ("on" by default)')
+ set_db_config_parser.add_argument('--locks-monitoring-threshold', help='Sets the DB lock exhaustion threshold as a percentage (valid range is 70-95). If too many locks are '
+ 'acquired, the server aborts searches until the number of locks '
+ 'decreases. It helps to avoid DB corruption and long recovery.')
+ set_db_config_parser.add_argument('--locks-monitoring-pause', help='Sets the amount of time in milliseconds that the monitoring thread '
+ 'waits between checks.')
set_db_config_parser.add_argument('--import-cache-autosize', help='Set to "on" or "off" to automatically set the size of the import '
'cache to be used during the the import process of LDIF files')
set_db_config_parser.add_argument('--cache-autosize', help='Sets the percentage of free memory that is used in total for the database '
--
2.26.3
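
The patch above adds three bdb configuration attributes (nsslapd-db-locks-monitoring-enabled,
-threshold and -pause) and a monitoring thread that polls the BDB lock statistics, computes the
percentage of locks in use, and sets a flag that makes ldbm_back_next_search_entry abort searches
with LDAP_UNWILLING_TO_PERFORM once the threshold is crossed. A minimal sketch of that polling
pattern in Python follows; get_lock_stats() and the module-level constants are placeholders, not
real 389-ds or libdb APIs.

import threading
import time

THRESHOLD_PCT = 90         # nsslapd-db-locks-monitoring-threshold (valid range 70-95)
PAUSE_MS = 500             # nsslapd-db-locks-monitoring-pause
threshold_reached = False  # flag the search code would consult


def get_lock_stats():
    """Placeholder for DB_ENV->lock_stat(); returns (locks_in_use, max_locks)."""
    return 4500, 10000


def monitor_locks(stop_event):
    """Poll lock usage and raise/clear the threshold flag until asked to stop."""
    global threshold_reached
    while not stop_event.is_set():
        in_use, maximum = get_lock_stats()
        pct = (in_use / maximum) * 100.0 if maximum else 0.0
        threshold_reached = pct >= THRESHOLD_PCT
        time.sleep(PAUSE_MS / 1000.0)


stop = threading.Event()
threading.Thread(target=monitor_locks, args=(stop,), daemon=True).start()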


@@ -1,127 +0,0 @@
From 2a2773d4bf8553ba64b396d567fe05506b22c94c Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 24 Nov 2020 19:22:49 +0100
Subject: [PATCH] Issue 4449 - dsconf replication monitor fails to retrieve
database RUV - consumer (Unavailable) (#4451)
Bug Description:
"dsconf replication monitor" fails to retrieve database RUV entry from consumer and this
appears into the Cockpit web UI too.
The problem is that the bind credentials are not rightly propagated when trying to get
the consumers agreement status. Then supplier credntials are used instead and RUV
is searched anonymously because there is no bind dn in ldapi case.
Fix Description:
- Propagates the bind credentials when computing agreement status
- Add a credential cache because now a replica password could get asked several times:
when discovering the topology and
when getting the agreement maxcsn
- No testcase in 1.4.3 branch as the file modfied in master does not exists
- Add a comment about nonlocal keyword
Relates: #4449
Reviewers:
firstyear
droideck
mreynolds
Issue 4449: Add a comment about nonlocal keyword
(cherry picked from commit 73ee04fa12cd1de3a5e47c109e79e31c1aaaa2ab)
---
src/lib389/lib389/cli_conf/replication.py | 13 +++++++++++--
src/lib389/lib389/replica.py | 16 ++++++++++++----
2 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 9dbaa320a..248972cba 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -369,9 +369,16 @@ def set_repl_config(inst, basedn, log, args):
def get_repl_monitor_info(inst, basedn, log, args):
connection_data = dsrc_to_repl_monitor(DSRC_HOME, log)
+ credentials_cache = {}
# Additional details for the connections to the topology
def get_credentials(host, port):
+ # credentials_cache is nonlocal so it refers to the instance
+ # from the enclosing function (get_repl_monitor_info)
+ nonlocal credentials_cache
+ key = f'{host}:{port}'
+ if key in credentials_cache:
+ return credentials_cache[key]
found = False
if args.connections:
connections = args.connections
@@ -406,8 +413,10 @@ def get_repl_monitor_info(inst, basedn, log, args):
binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip()
bindpw = getpass(f"Enter a password for {binddn} on {host}:{port}: ").rstrip()
- return {"binddn": binddn,
- "bindpw": bindpw}
+ credentials = {"binddn": binddn,
+ "bindpw": bindpw}
+ credentials_cache[key] = credentials
+ return credentials
repl_monitor = ReplicationMonitor(inst)
report_dict = repl_monitor.generate_report(get_credentials, args.json)
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index c2ad2104d..3d89e61fb 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -2487,9 +2487,10 @@ class ReplicationMonitor(object):
else:
self._log = logging.getLogger(__name__)
- def _get_replica_status(self, instance, report_data, use_json):
+ def _get_replica_status(self, instance, report_data, use_json, get_credentials=None):
"""Load all of the status data to report
and add new hostname:port pairs for future processing
+ :type get_credentials: function
"""
replicas_status = []
@@ -2503,6 +2504,13 @@ class ReplicationMonitor(object):
for agmt in agmts.list():
host = agmt.get_attr_val_utf8_l("nsds5replicahost")
port = agmt.get_attr_val_utf8_l("nsds5replicaport")
+ if get_credentials is not None:
+ credentials = get_credentials(host, port)
+ binddn = credentials["binddn"]
+ bindpw = credentials["bindpw"]
+ else:
+ binddn = instance.binddn
+ bindpw = instance.bindpw
protocol = agmt.get_attr_val_utf8_l('nsds5replicatransportinfo')
# Supply protocol here because we need it only for connection
# and agreement status is already preformatted for the user output
@@ -2510,9 +2518,9 @@ class ReplicationMonitor(object):
if consumer not in report_data:
report_data[f"{consumer}:{protocol}"] = None
if use_json:
- agmts_status.append(json.loads(agmt.status(use_json=True)))
+ agmts_status.append(json.loads(agmt.status(use_json=True, binddn=binddn, bindpw=bindpw)))
else:
- agmts_status.append(agmt.status())
+ agmts_status.append(agmt.status(binddn=binddn, bindpw=bindpw))
replicas_status.append({"replica_id": replica_id,
"replica_root": replica_root,
"replica_status": "Available",
@@ -2535,7 +2543,7 @@ class ReplicationMonitor(object):
initial_inst_key = f"{self._instance.config.get_attr_val_utf8_l('nsslapd-localhost')}:{self._instance.config.get_attr_val_utf8_l('nsslapd-port')}"
# Do this on an initial instance to get the agreements to other instances
try:
- report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json)
+ report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json, get_credentials)
except ldap.LDAPError as e:
self._log.debug(f"Connection to consumer ({supplier_hostname}:{supplier_port}) failed, error: {e}")
report_data[initial_inst_key] = [{"replica_status": f"Unavailable - {e.args[0]['desc']}"}]
--
2.26.2
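
The core of the fix above is a small memoizing closure: get_credentials() keeps a cache keyed by
host:port so each replica's bind DN and password are prompted for at most once, and the same
credentials are then reused when the agreement status (and therefore the consumer RUV) is fetched.
A minimal sketch of that pattern, with prompt() standing in for the interactive binddn/bindpw
questions in the real code:

def make_get_credentials(prompt):
    """Return a get_credentials(host, port) callable that caches prompt() answers."""
    cache = {}

    def get_credentials(host, port):
        key = f"{host}:{port}"
        if key not in cache:                 # ask the user at most once per replica
            cache[key] = prompt(host, port)  # expected to return {"binddn": ..., "bindpw": ...}
        return cache[key]

    return get_credentials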


@@ -0,0 +1,33 @@
From 7573c62a2e61293a4800e67919d79341fa1a1532 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Wed, 26 May 2021 16:07:43 +0200
Subject: [PATCH 10/12] Issue 4764 - replicated operation sometime checks ACI
(#4783)
(cherry picked from commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022)
---
ldap/servers/slapd/connection.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index c7a15e775..e0c1a52d2 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1771,6 +1771,14 @@ connection_threadmain()
}
}
+ /*
+ * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done
+ * before replication session is properly set).
+ */
+ if (replication_connection) {
+ operation_set_flag(op, OP_FLAG_REPLICATED);
+ }
+
/*
* Call the do_<operation> function to process this request.
*/
--
2.26.3


@@ -1,63 +0,0 @@
From e540effa692976c2eef766f1f735702ba5dc0950 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 30 Nov 2020 09:03:33 +0100
Subject: [PATCH] Issue 4243 - Fix test: SyncRepl plugin provides a wrong
cookie (#4467)
Bug description:
This test case was incorrect.
During a refreshPersistent search, a cookie is sent
with the intermediate message that indicates the end of the refresh phase.
Then a second cookie is sent with the updated entry (group10).
I believed this test was successful some time ago, but neither python-ldap
nor sync_repl has changed (the intermediate message is sent post refresh),
so the test case was never successful :(
Fix description:
The fix is just to take into account the two expected cookies
relates: https://github.com/389ds/389-ds-base/issues/4243
Reviewed by: Mark Reynolds
Platforms tested: F31
---
.../tests/suites/syncrepl_plugin/basic_test.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index 79ec374bc..7b35537d5 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -589,7 +589,7 @@ def test_sync_repl_cookie_with_failure(topology, request):
sync_repl.start()
time.sleep(5)
- # Add a test group just to check that sync_repl receives only one update
+ # Add a test group just to check that sync_repl receives that SyncControlInfo cookie
group.append(groups.create(properties={'cn': 'group%d' % 10}))
# create users, that automember/memberof will generate nested updates
@@ -610,13 +610,15 @@ def test_sync_repl_cookie_with_failure(topology, request):
time.sleep(10)
cookies = sync_repl.get_result()
- # checking that the cookie list contains only one entry
- assert len(cookies) == 1
- prev = 0
+ # checking that the cookie list contains only two entries
+ # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh
+ # and the one from SyncStateControl related to the only updated entry (group10)
+ assert len(cookies) == 2
+ prev = -1
for cookie in cookies:
log.info('Check cookie %s' % cookie)
- assert int(cookie) > 0
+ assert int(cookie) >= 0
assert int(cookie) < 1000
assert int(cookie) > prev
prev = int(cookie)
--
2.26.2

File diff suppressed because it is too large.


@@ -1,254 +0,0 @@
From 8b0ba11c3dfb577d1696f4b71a6f4e9f8d42349f Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Mon, 30 Nov 2020 12:42:17 +0100
Subject: [PATCH] Add dsconf replication monitor test case (gitHub issue 4449)
in 1.4.3 branch
---
.../tests/suites/clu/repl_monitor_test.py | 234 ++++++++++++++++++
1 file changed, 234 insertions(+)
create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
new file mode 100644
index 000000000..b03d170c8
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -0,0 +1,234 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+import subprocess
+import pytest
+
+from lib389.cli_conf.replication import get_repl_monitor_info
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_m2
+from lib389.cli_base import FakeArgs
+from lib389.cli_base.dsrc import dsrc_arg_concat
+from lib389.cli_base import connect_instance
+
+pytestmark = pytest.mark.tier0
+
+LOG_FILE = '/tmp/monitor.log'
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def set_log_file(request):
+ fh = logging.FileHandler(LOG_FILE)
+ fh.setLevel(logging.DEBUG)
+ log.addHandler(fh)
+
+ def fin():
+ log.info('Delete files')
+ os.remove(LOG_FILE)
+
+ config = os.path.expanduser(DSRC_HOME)
+ if os.path.exists(config):
+ os.remove(config)
+
+ request.addfinalizer(fin)
+
+
+def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None):
+ with open(LOG_FILE, 'r+') as f:
+ file_content = f.read()
+
+ for item in content_list:
+ log.info('Check that "{}" is present'.format(item))
+ assert item in file_content
+
+ if second_list is not None:
+ log.info('Check for "{}"'.format(second_list))
+ for item in second_list:
+ assert item in file_content
+
+ if single_value is not None:
+ log.info('Check for "{}"'.format(single_value))
+ assert single_value in file_content
+
+ if error_list is not None:
+ log.info('Check that "{}" is not present'.format(error_list))
+ for item in error_list:
+ assert item not in file_content
+
+ log.info('Reset log file')
+ f.truncate(0)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1739718
+@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented")
+def test_dsconf_replication_monitor(topology_m2, set_log_file):
+ """Test replication monitor that was ported from legacy tools
+
+ :id: ce48020d-7c30-41b7-8f68-144c9cd757f6
+ :setup: 2 MM topology
+ :steps:
+ 1. Create DS instance
+ 2. Run replication monitor with connections option
+ 3. Run replication monitor with aliases option
+ 4. Run replication monitor with --json option
+ 5. Run replication monitor with .dsrc file created
+ 6. Run replication monitor with connections option as if using dsconf CLI
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ """
+
+ m1 = topology_m2.ms["master1"]
+ m2 = topology_m2.ms["master2"]
+
+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
+
+ connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
+ content_list = ['Replica Root: dc=example,dc=com',
+ 'Replica ID: 1',
+ 'Replica Status: Available',
+ 'Max CSN',
+ 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')',
+ 'Replica Enabled: on',
+ 'Update In Progress: FALSE',
+ 'Last Update Start:',
+ 'Last Update End:',
+ 'Number Of Changes Sent:',
+ 'Number Of Changes Skipped: None',
+ 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded',
+ 'Last Init Start:',
+ 'Last Init End:',
+ 'Last Init Status:',
+ 'Reap Active: 0',
+ 'Replication Status: In Synchronization',
+ 'Replication Lag Time:',
+ 'Supplier: ',
+ m2.host + ':' + str(m2.port),
+ 'Replica Root: dc=example,dc=com',
+ 'Replica ID: 2',
+ 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')']
+
+ error_list = ['consumer (Unavailable)',
+ 'Failed to retrieve database RUV entry from consumer']
+
+ json_list = ['type',
+ 'list',
+ 'items',
+ 'name',
+ m1.host + ':' + str(m1.port),
+ 'data',
+ '"replica_id": "1"',
+ '"replica_root": "dc=example,dc=com"',
+ '"replica_status": "Available"',
+ 'maxcsn',
+ 'agmts_status',
+ 'agmt-name',
+ '002',
+ 'replica',
+ m2.host + ':' + str(m2.port),
+ 'replica-enabled',
+ 'update-in-progress',
+ 'last-update-start',
+ 'last-update-end',
+ 'number-changes-sent',
+ 'number-changes-skipped',
+ 'last-update-status',
+ 'Error (0) Replica acquired successfully: Incremental update succeeded',
+ 'last-init-start',
+ 'last-init-end',
+ 'last-init-status',
+ 'reap-active',
+ 'replication-status',
+ 'In Synchronization',
+ 'replication-lag-time',
+ '"replica_id": "2"',
+ '001',
+ m1.host + ':' + str(m1.port)]
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + m2.host + ':' + str(m2.port)
+
+ connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
+ m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
+
+ aliases = ['M1=' + m1.host + ':' + str(m1.port),
+ 'M2=' + m2.host + ':' + str(m2.port)]
+
+ args = FakeArgs()
+ args.connections = connections
+ args.aliases = None
+ args.json = False
+
+ log.info('Run replication monitor with connections option')
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+
+ log.info('Run replication monitor with aliases option')
+ args.aliases = aliases
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, alias_content)
+
+ log.info('Run replication monitor with --json option')
+ args.aliases = None
+ args.json = True
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(json_list)
+
+ with open(os.path.expanduser(DSRC_HOME), 'w+') as f:
+ f.write(dsrc_content)
+
+ args.connections = None
+ args.aliases = None
+ args.json = False
+
+ log.info('Run replication monitor when .dsrc file is present with content')
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, alias_content)
+ os.remove(os.path.expanduser(DSRC_HOME))
+
+ log.info('Run replication monitor with connections option as if using dsconf CLI')
+ # Perform the same test as step 2, but without using the topology instance directly,
+ # using instead an instance similar to the one the dsconf CLI generates:
+ # step 2 args
+ args.connections = connections
+ args.aliases = None
+ args.json = False
+ # args needed to generate an instance with dsrc_arg_concat
+ args.instance = 'master1'
+ args.basedn = None
+ args.binddn = None
+ args.bindpw = None
+ args.pwdfile = None
+ args.prompt = False
+ args.starttls = False
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, True, args)
+ get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
--
2.26.2


@@ -0,0 +1,155 @@
From 580880a598a8f9972994684c49593a4cf8b8969b Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Sat, 29 May 2021 13:19:53 -0400
Subject: [PATCH 12/12] Issue 4778 - RFE - Add changelog compaction task in
1.4.3
Description: In 1.4.3 the replication changelog is a separate database,
so it needs a separate "nsds5task" compaction task (COMPACT_CL5)
relates: https://github.com/389ds/389-ds-base/issues/4778
ASAN tested and approved
Reviewed by: mreynolds
---
ldap/servers/plugins/replication/cl5_api.c | 21 +++++++++----------
ldap/servers/plugins/replication/cl5_api.h | 1 +
.../replication/repl5_replica_config.c | 9 +++++++-
3 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 75a2f46f5..4c5077b48 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -266,7 +266,6 @@ static int _cl5TrimInit(void);
static void _cl5TrimCleanup(void);
static int _cl5TrimMain(void *param);
static void _cl5DoTrimming(void);
-static void _cl5CompactDBs(void);
static void _cl5PurgeRID(Object *file_obj, ReplicaId cleaned_rid);
static int _cl5PurgeGetFirstEntry(Object *file_obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key);
static int _cl5PurgeGetNextEntry(CL5Entry *entry, void *iterator, DBT *key);
@@ -3152,7 +3151,7 @@ _cl5TrimMain(void *param __attribute__((unused)))
if (slapi_current_utc_time() > compactdb_time) {
/* time to trim */
timeCompactPrev = timeNow;
- _cl5CompactDBs();
+ cl5CompactDBs();
compacting = PR_FALSE;
}
}
@@ -3250,8 +3249,8 @@ _cl5DoPurging(cleanruv_purge_data *purge_data)
}
/* clear free page files to reduce changelog */
-static void
-_cl5CompactDBs(void)
+void
+cl5CompactDBs(void)
{
int rc;
Object *fileObj = NULL;
@@ -3264,14 +3263,14 @@ _cl5CompactDBs(void)
rc = TXN_BEGIN(s_cl5Desc.dbEnv, NULL, &txnid, 0);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
- "_cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
+ "cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
rc, db_strerror(rc));
goto bail;
}
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
- "_cl5CompactDBs - compacting replication changelogs...\n");
+ "cl5CompactDBs - compacting replication changelogs...\n");
for (fileObj = objset_first_obj(s_cl5Desc.dbFiles);
fileObj;
fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) {
@@ -3284,17 +3283,17 @@ _cl5CompactDBs(void)
&c_data, DB_FREE_SPACE, NULL /*end*/);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
- "_cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
+ "cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
dbFile->replName, rc, db_strerror(rc));
goto bail;
}
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
- "_cl5CompactDBs - %s - %d pages freed\n",
+ "cl5CompactDBs - %s - %d pages freed\n",
dbFile->replName, c_data.compact_pages_free);
}
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
- "_cl5CompactDBs - compacting replication changelogs finished.\n");
+ "cl5CompactDBs - compacting replication changelogs finished.\n");
bail:
if (fileObj) {
object_release(fileObj);
@@ -3303,14 +3302,14 @@ bail:
rc = TXN_ABORT(txnid);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
- "_cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
+ "cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
rc, db_strerror(rc));
}
} else {
rc = TXN_COMMIT(txnid);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
- "_cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
+ "cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
rc, db_strerror(rc));
}
}
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
index 4b0949fb3..11db771f2 100644
--- a/ldap/servers/plugins/replication/cl5_api.h
+++ b/ldap/servers/plugins/replication/cl5_api.h
@@ -405,5 +405,6 @@ int cl5DeleteRUV(void);
void cl5CleanRUV(ReplicaId rid);
void cl5NotifyCleanup(int rid);
void trigger_cl_purging(cleanruv_purge_data *purge_data);
+void cl5CompactDBs(void);
#endif
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index a969ef82f..e708a1ccb 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -29,6 +29,8 @@
#define CLEANRUVLEN 8
#define CLEANALLRUV "CLEANALLRUV"
#define CLEANALLRUVLEN 11
+#define COMPACT_CL5 "COMPACT_CL5"
+#define COMPACT_CL5_LEN 11
#define REPLICA_RDN "cn=replica"
#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */
@@ -1050,7 +1052,6 @@ replica_config_change_flags(Replica *r, const char *new_flags, char *returntext
static int
replica_execute_task(Replica *r, const char *task_name, char *returntext, int apply_mods)
{
-
if (strcasecmp(task_name, CL2LDIF_TASK) == 0) {
if (apply_mods) {
return replica_execute_cl2ldif_task(r, returntext);
@@ -1084,6 +1085,12 @@ replica_execute_task(Replica *r, const char *task_name, char *returntext, int ap
return replica_execute_cleanall_ruv_task(r, (ReplicaId)temprid, empty_task, "no", PR_TRUE, returntext);
} else
return LDAP_SUCCESS;
+ } else if (strncasecmp(task_name, COMPACT_CL5, COMPACT_CL5_LEN) == 0) {
+ /* compact the replication changelogs */
+ if (apply_mods) {
+ cl5CompactDBs();
+ }
+ return LDAP_SUCCESS;
} else {
PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "Unsupported replica task - %s", task_name);
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
--
2.26.3
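
With the patch above, changelog compaction can be requested like any other replica task by writing
the COMPACT_CL5 keyword to the nsds5task attribute of the replica entry under cn=mapping tree,cn=config.
A hedged python-ldap example follows; the URL, suffix and credentials are placeholders to adjust for
your deployment.

import ldap

REPLICA_DN = "cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config"

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")
# Writing the task keyword triggers replica_execute_task() -> cl5CompactDBs()
conn.modify_s(REPLICA_DN, [(ldap.MOD_REPLACE, "nsds5task", [b"COMPACT_CL5"])])
conn.unbind_s()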


@@ -1,100 +0,0 @@
From 389b2c825742392365262a719be7c8f594e7e522 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 26 Nov 2020 09:08:13 +1000
Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy
Bug Description: Due to some changes in dsrc for tlsreqcert
and how def open was structured in lib389, the system ldap.conf
policy was ignored.
Fix Description: Default to using the system ldap.conf policy
if undefined in lib389 or the tls_reqcert param in dsrc.
fixes: #4460
Author: William Brown <william@blackhats.net.au>
Review by: ???
---
src/lib389/lib389/__init__.py | 11 +++++++----
src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++-------
2 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 99ea9cc6a..4e6a1905a 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object):
# Now, we are still an allocated ds object so we can be re-installed
self.state = DIRSRV_STATE_ALLOCATED
- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD,
+ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None,
usercert=None, userkey=None):
'''
It opens a ldap bound connection to dirsrv so that online
@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object):
try:
# Note this sets LDAP.OPT not SELF. Because once self has opened
# it can NOT change opts on reused (ie restart)
- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
- self.log.debug("Using certificate policy %s", reqcert)
- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert)
+ if reqcert is not None:
+ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
+ self.log.debug("Using lib389 certificate policy %s", reqcert)
+ else:
+ self.log.debug("Using /etc/openldap/ldap.conf certificate policy")
+ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT))
except ldap.LDAPError as e:
self.log.fatal('TLS negotiation failed: %s', e)
raise e
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
index fec18a5f9..9b09ea568 100644
--- a/src/lib389/lib389/cli_base/dsrc.py
+++ b/src/lib389/lib389/cli_base/dsrc.py
@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst):
'tls_cacertdir': None,
'tls_cert': None,
'tls_key': None,
- 'tls_reqcert': ldap.OPT_X_TLS_HARD,
+ 'tls_reqcert': None,
'starttls': args.starttls,
'prompt': False,
'pwdfile': None,
@@ -134,7 +134,7 @@ def dsrc_to_ldap(path, instance_name, log):
dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None)
dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None)
if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']:
- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
+ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None)
# At this point, we should check if the provided cacertdir is indeed, a dir. This can be a cause
@@ -145,16 +145,18 @@ def dsrc_to_ldap(path, instance_name, log):
dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None)
dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None)
- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard')
- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']:
- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name,
- path))
+ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None)
if dsrc_inst['tls_reqcert'] == 'never':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER
elif dsrc_inst['tls_reqcert'] == 'allow':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW
- else:
+ elif dsrc_inst['tls_reqcert'] == 'hard':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD
+ elif dsrc_inst['tls_reqcert'] is None:
+ # Use system value
+ pass
+ else:
+ raise ValueError("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path))
dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False)
dsrc_inst['pwdfile'] = None
dsrc_inst['prompt'] = False
--
2.26.2
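
The behavioural change above comes down to how tls_reqcert is resolved: an explicit 'never',
'allow' or 'hard' still maps to the matching python-ldap option, anything else is rejected, and an
absent value now means OPT_X_TLS_REQUIRE_CERT is left untouched so the system /etc/openldap/ldap.conf
policy applies. A small sketch of that resolution logic, not the literal lib389 code:

import ldap

_REQCERT = {
    "never": ldap.OPT_X_TLS_NEVER,
    "allow": ldap.OPT_X_TLS_ALLOW,
    "hard": ldap.OPT_X_TLS_HARD,
}

def resolve_reqcert(value):
    """Map a dsrc tls_reqcert string to a python-ldap constant, or None for 'use system policy'."""
    if value is None:
        return None  # do not call set_option(); ldap.conf TLS_REQCERT stays in effect
    if value not in _REQCERT:
        raise ValueError("tls_reqcert should be one of never, allow or hard")
    return _REQCERT[value]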


@@ -1,60 +0,0 @@
From 05b66529117d1cd85a636ab7d8fc84abdec814de Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 12 Nov 2020 13:04:21 +1000
Subject: [PATCH] Issue 4428 - BUG Paged Results with critical false causes
sigsegv in chaining
Bug Description: When a paged search through the chaining backend is
received with a false criticality (as SSSD sends), the chaining backend
will sigsegv due to a null context.
Fix Description: When a NULL ctx is received to be freed, the paged
results have already finished being sent, so we check for the NULL
ctx and move on.
fixes: #4428
Author: William Brown <william@blackhats.net.au>
Review by: @droideck, @mreynolds389
---
ldap/servers/plugins/chainingdb/cb_search.c | 6 ++++++
ldap/servers/plugins/chainingdb/cb_utils.c | 4 ++++
2 files changed, 10 insertions(+)
diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c
index 69d23a6b5..d47cbc8e4 100644
--- a/ldap/servers/plugins/chainingdb/cb_search.c
+++ b/ldap/servers/plugins/chainingdb/cb_search.c
@@ -740,6 +740,12 @@ chaining_back_search_results_release(void **sr)
slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM,
"chaining_back_search_results_release\n");
+ if (ctx == NULL) {
+ /* The paged search is already complete, just return */
+ /* Could we have a ctx state flag instead? */
+ return;
+ }
+
if (ctx->readahead != ctx->tobefreed) {
slapi_entry_free(ctx->readahead);
}
diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c
index dfd5dd92c..d52fd25a6 100644
--- a/ldap/servers/plugins/chainingdb/cb_utils.c
+++ b/ldap/servers/plugins/chainingdb/cb_utils.c
@@ -279,7 +279,11 @@ cb_add_suffix(cb_backend_instance *inst, struct berval **bvals, int apply_mod, c
return LDAP_SUCCESS;
}
+#ifdef DEBUG
+static int debug_on = 1;
+#else
static int debug_on = 0;
+#endif
int
cb_debug_on()
--
2.26.2


@@ -1,50 +0,0 @@
From 4c133d448f451b7c3b2ff1b42806c7516d623f09 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 7 Dec 2020 00:41:27 +0100
Subject: [PATCH] Issue 4315: performance search rate: nagle triggers high rate
of setsocketopt (#4437)
Bug description:
When a socket is set with NO_DELAY=0 (nagle), written PDUs are buffered
until the buffer is full or TCP_CORK is set. This reduces network traffic when
the application writes partial PDUs.
DS writes complete PDUs (results/entries/..), so nagle gives little benefit for DS.
In addition, with nagle 'on' by default, DS sets/unsets TCP_CORK on the socket to send
results/entries immediately at each operation, which adds syscall overhead.
Fix description:
Disable nagle by default
relates: https://github.com/389ds/389-ds-base/issues/4315
Reviewed by: @mreynolds389, @Firstyear
Platforms tested: F33
---
ldap/servers/slapd/libglobs.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 7d5374c90..f8cf162e6 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1635,12 +1635,11 @@ FrontendConfig_init(void)
#endif /* USE_SYSCONF */
init_accesscontrol = cfg->accesscontrol = LDAP_ON;
-#if defined(LINUX)
- /* On Linux, by default, we use TCP_CORK so we must enable nagle */
- init_nagle = cfg->nagle = LDAP_ON;
-#else
+
+ /* nagle triggers set/unset TCP_CORK setsockopt per operation
+ * as DS only sends complete PDU there is no benefit of nagle/tcp_cork
+ */
init_nagle = cfg->nagle = LDAP_OFF;
-#endif
init_security = cfg->security = LDAP_OFF;
init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON;
cfg->tls_check_crl = TLS_CHECK_NONE;
--
2.26.2
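
For reference, disabling Nagle on a socket is a single setsockopt() call; once TCP_NODELAY is set,
complete PDUs are pushed out as soon as they are written and there is no need to toggle TCP_CORK
around every operation. A minimal illustration with plain Python sockets, not DS code:

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)  # Nagle off: send each write immediately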


@@ -1,39 +0,0 @@
From 3007700a659ede03085f5390153cce483ce987a1 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Fri, 4 Dec 2020 10:14:33 +1000
Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in
SSCA (#4472)
Bug Description: During SSCA creation, the server cert did not have
the machine name, which meant that the cert would not work without
reqcert = never.
Fix Description: Add the machine name as an alt name during SSCA
creation. It is not guaranteed this value is correct, but it
is better than nothing.
relates: https://github.com/389ds/389-ds-base/issues/4460
Author: William Brown <william@blackhats.net.au>
Review by: mreynolds389, droideck
---
src/lib389/lib389/instance/setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 7d42ba292..e46f2d1e5 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -887,7 +887,7 @@ class SetupDs(object):
tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
tlsdb_inst.import_rsa_crt(ca)
- csr = tlsdb.create_rsa_key_and_csr()
+ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']])
(ca, crt) = ssca.rsa_ca_sign_csr(csr)
tlsdb.import_rsa_crt(ca, crt)
if general['selinux']:
--
2.26.2


@@ -1,50 +0,0 @@
From 1386b140d8cc81d37fdea6593487fe542587ccac Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Dec 2020 09:52:08 -0500
Subject: [PATCH] Issue 4483 - heap-use-after-free in slapi_be_getsuffix
Description: heap-use-after-free in slapi_be_getsuffix after disk
monitoring runs. The disk monitoring feature was freeing a list of
backends, which it does not need to do.
Fixes: https://github.com/389ds/389-ds-base/issues/4483
Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/servers/slapd/daemon.c | 13 +------------
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 49199e4df..691f77570 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -606,12 +606,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
now = start;
while ((now - start) < grace_period) {
if (g_get_shutdown()) {
- be_index = 0;
- if (be_list[be_index] != NULL) {
- while ((be = be_list[be_index++])) {
- slapi_be_free(&be);
- }
- }
slapi_ch_array_free(dirs);
dirs = NULL;
return;
@@ -706,12 +700,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
}
- be_index = 0;
- if (be_list[be_index] != NULL) {
- while ((be = be_list[be_index++])) {
- slapi_be_free(&be);
- }
- }
+
slapi_ch_array_free(dirs);
dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
--
2.26.2


@@ -1,65 +0,0 @@
From 6e827f6d5e64e0be316f4e17111b2884899d302c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 16 Dec 2020 16:30:28 +0100
Subject: [PATCH] Issue 4480 - Unexpected info returned to ldap request (#4491)
Bug description:
If the bind entry does not exist, the bind result info
reports 'No such entry'. It should not reveal whether
the target entry exists or not.
Fix description:
Do not return any additional information during a bind.
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31
---
dirsrvtests/tests/suites/basic/basic_test.py | 1 -
ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +-
ldap/servers/slapd/result.c | 2 +-
3 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 120207321..1ae82dcdd 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1400,7 +1400,6 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance):
assert not dscreate_long_instance.exists()
-
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 3fe86d567..10cef250f 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -1234,7 +1234,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
if (attrs) {
for (size_t i = 0; attrs[i]; i++) {
if (ldbm_config_moved_attr(attrs[i])) {
- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
+ slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
break;
}
}
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 9daf3b151..ab0d79454 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -355,7 +355,7 @@ send_ldap_result_ext(
if (text) {
pbtext = text;
} else {
- slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext);
+ slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext);
}
if (operation == NULL) {
--
2.26.2
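
With this change, a failed simple bind against a non-existent DN and a failed bind with a wrong
password look the same to the client: err=49 and no extra diagnostic text. A hedged python-ldap
illustration, where the URL and DN are placeholders:

import ldap

conn = ldap.initialize("ldap://localhost:389")
try:
    conn.simple_bind_s("uid=does-not-exist,dc=example,dc=com", "wrong-password")
except ldap.INVALID_CREDENTIALS as e:
    print("bind failed:", e)  # no 'No such entry' hint about whether the DN exists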


@@ -1,108 +0,0 @@
From 1fef5649ce05a17a741789cafb65269c099b396b Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Wed, 16 Dec 2020 16:21:35 +0100
Subject: [PATCH 2/3] Issue #4504 - Fix pytest test_dsconf_replication_monitor
(#4505)
(cherry picked from commit 0b08e6f35b000d1383580be59f902ac813e940f2)
---
.../tests/suites/clu/repl_monitor_test.py | 50 +++++++++++++------
1 file changed, 36 insertions(+), 14 deletions(-)
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index b03d170c8..eb18d2da2 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,6 +9,7 @@
import time
import subprocess
import pytest
+import re
from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -67,6 +68,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)
+def get_hostnames_from_log(port1, port2):
+ # Get the supplier host names as displayed in replication monitor output
+ with open(LOG_FILE, 'r') as logfile:
+ logtext = logfile.read()
+ # search for Supplier :hostname:port
+ # and use \D to insure there is no more number is after
+ # the matched port (i.e that 10 is not matching 101)
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m1 = 'localhost.localdomain'
+ if (match is not None):
+ host_m1 = match.group(2)
+ # Same for master 2
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m2 = 'localhost.localdomain'
+ if (match is not None):
+ host_m2 = match.group(2)
+ return (host_m1, host_m2)
@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -95,9 +115,6 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
m1 = topology_m2.ms["master1"]
m2 = topology_m2.ms["master2"]
- alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
- 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
-
connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
content_list = ['Replica Root: dc=example,dc=com',
'Replica ID: 1',
@@ -160,20 +177,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]
- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
- 'M2 = ' + m2.host + ':' + str(m2.port)
-
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
- aliases = ['M1=' + m1.host + ':' + str(m1.port),
- 'M2=' + m2.host + ':' + str(m2.port)]
-
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -181,8 +187,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+ # Prepare the data for next tests
+ aliases = ['M1=' + host_m1 + ':' + str(m1.port),
+ 'M2=' + host_m2 + ':' + str(m2.port)]
+
+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + host_m2 + ':' + str(m2.port)
+
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2
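
A standalone sketch of the log parsing that the get_hostnames_from_log() helper above performs; the sample log line and port number are invented for illustration.

import re

logtext = "Supplier: ldap1.example.com:39001 (M1)"  # invented sample line
port1 = 39001
# \D after the port ensures that 39001 does not also match 390011
match = re.search(r'(Supplier: )([^:]*)(:' + str(port1) + r'\D)', logtext)
host_m1 = match.group(2) if match else 'localhost.localdomain'
print(host_m1)  # -> ldap1.example.com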

View file

@ -1,374 +0,0 @@
From d7b49259ff2f9e0295bbfeaf128369ed33421974 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Mon, 30 Nov 2020 15:28:05 +0000
Subject: [PATCH 1/6] Issue 4418 - ldif2db - offline. Warn the user of skipped
entries
Bug Description: During an ldif2db import, entries that do not
conform to various constraints will be skipped and not imported.
On completion of an import with skipped entries, the server
returns a success exit code and logs the skipped entry detail to
the error logs. The success exit code could lead the user to
believe that all entries were successfully imported.
Fix Description: If a skipped entry occurs during import, the
import will continue and a warning will be returned to the user.
CLI tools for offline import updated to handle warning code.
Test added to generate an incorrect ldif entry and perform an
import.
Fixes: #4418
Reviewed by: Firstyear, droideck (Thanks)
(cherry picked from commit a98fe54292e9b183a2163efbc7bdfe208d4abfb0)
---
.../tests/suites/import/import_test.py | 54 ++++++++++++++++++-
.../slapd/back-ldbm/db-bdb/bdb_import.c | 22 ++++++--
ldap/servers/slapd/main.c | 8 +++
ldap/servers/slapd/pblock.c | 24 +++++++++
ldap/servers/slapd/pblock_v3.h | 1 +
ldap/servers/slapd/slapi-private.h | 14 +++++
src/lib389/lib389/__init__.py | 18 +++----
src/lib389/lib389/_constants.py | 7 +++
src/lib389/lib389/cli_ctl/dbtasks.py | 8 ++-
9 files changed, 140 insertions(+), 16 deletions(-)
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index 3803ecf43..b47db96ed 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -15,7 +15,7 @@ import pytest
import time
import glob
from lib389.topologies import topology_st as topo
-from lib389._constants import DEFAULT_SUFFIX
+from lib389._constants import DEFAULT_SUFFIX, TaskWarning
from lib389.dbgen import dbgen_users
from lib389.tasks import ImportTask
from lib389.index import Indexes
@@ -139,6 +139,38 @@ def _create_bogus_ldif(topo):
return import_ldif1
+def _create_syntax_err_ldif(topo):
+ """
+ Create an incorrect ldif entry that violates syntax check
+ """
+ ldif_dir = topo.standalone.get_ldif_dir()
+ line1 = """dn: dc=example,dc=com
+objectClass: top
+objectClass: domain
+dc: example
+dn: ou=groups,dc=example,dc=com
+objectClass: top
+objectClass: organizationalUnit
+ou: groups
+dn: uid=JHunt,ou=groups,dc=example,dc=com
+objectClass: top
+objectClass: person
+objectClass: organizationalPerson
+objectClass: inetOrgPerson
+objectclass: inetUser
+cn: James Hunt
+sn: Hunt
+uid: JHunt
+givenName:
+"""
+ with open(f'{ldif_dir}/syntax_err.ldif', 'w') as out:
+ out.write(f'{line1}')
+ os.chmod(out.name, 0o777)
+ out.close()
+ import_ldif1 = ldif_dir + '/syntax_err.ldif'
+ return import_ldif1
+
+
def test_import_with_index(topo, _import_clean):
"""
Add an index, then import via cn=tasks
@@ -214,6 +246,26 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl
topo.standalone.start()
+def test_ldif2db_syntax_check(topo):
+ """ldif2db should return a warning when a skipped entry has occured.
+ :id: 85e75670-42c5-4062-9edc-7f117c97a06f
+ :setup:
+ 1. Standalone Instance
+ 2. Ldif entry that violates syntax check rule (empty givenname)
+ :steps:
+ 1. Create an ldif file which violates the syntax checking rule
+ 2. Stop the server and import ldif file with ldif2db
+ :expected results:
+ 1. ldif2db import returns a warning to signify skipped entries
+ """
+ import_ldif1 = _create_syntax_err_ldif(topo)
+ # Import the offending LDIF data - offline
+ topo.standalone.stop()
+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1)
+ assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY
+ topo.standalone.start()
+
+
def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean):
"""Report during startup if nsslapd-cachememsize is too small
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
index e7da0517f..1e4830e99 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
@@ -2563,7 +2563,7 @@ error:
slapi_task_dec_refcount(job->task);
}
import_all_done(job, ret);
- ret = 1;
+ ret |= WARN_UPGARDE_DN_FORMAT_ALL;
} else if (NEED_DN_NORM == ret) {
import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main",
"%s complete. %s needs upgradednformat.",
@@ -2572,7 +2572,7 @@ error:
slapi_task_dec_refcount(job->task);
}
import_all_done(job, ret);
- ret = 2;
+ ret |= WARN_UPGRADE_DN_FORMAT;
} else if (NEED_DN_NORM_SP == ret) {
import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main",
"%s complete. %s needs upgradednformat spaces.",
@@ -2581,7 +2581,7 @@ error:
slapi_task_dec_refcount(job->task);
}
import_all_done(job, ret);
- ret = 3;
+ ret |= WARN_UPGRADE_DN_FORMAT_SPACE;
} else {
ret = -1;
if (job->task != NULL) {
@@ -2600,6 +2600,11 @@ error:
import_all_done(job, ret);
}
+ /* set task warning if there are no errors */
+ if((!ret) && (job->skipped)) {
+ ret |= WARN_SKIPPED_IMPORT_ENTRY;
+ }
+
/* This instance isn't busy anymore */
instance_set_not_busy(job->inst);
@@ -2637,6 +2642,7 @@ bdb_back_ldif2db(Slapi_PBlock *pb)
int total_files, i;
int up_flags = 0;
PRThread *thread = NULL;
+ int ret = 0;
slapi_pblock_get(pb, SLAPI_BACKEND, &be);
if (be == NULL) {
@@ -2764,7 +2770,15 @@ bdb_back_ldif2db(Slapi_PBlock *pb)
}
/* old style -- do it all synchronously (THIS IS GOING AWAY SOON) */
- return import_main_offline((void *)job);
+ ret = import_main_offline((void *)job);
+
+ /* no error just warning, reset ret */
+ if(ret &= WARN_SKIPPED_IMPORT_ENTRY) {
+ slapi_pblock_set_task_warning(pb, WARN_SKIPPED_IMPORT_ENTRY);
+ ret = 0;
+ }
+
+ return ret;
}
struct _import_merge_thang
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 694375b22..104f6826c 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -2069,6 +2069,14 @@ slapd_exemode_ldif2db(struct main_config *mcfg)
plugin->plg_name);
return_value = -1;
}
+
+ /* check for task warnings */
+ if(!return_value) {
+ if((return_value = slapi_pblock_get_task_warning(pb))) {
+ slapi_log_err(SLAPI_LOG_INFO, "slapd_exemode_ldif2db","returning task warning: %d\n", return_value);
+ }
+ }
+
slapi_pblock_destroy(pb);
charray_free(instances);
charray_free(mcfg->cmd_line_instance_names);
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 454ea9cc3..1ad9d0399 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -28,12 +28,14 @@
#define SLAPI_LDIF_DUMP_REPLICA 2003
#define SLAPI_PWDPOLICY 2004
#define SLAPI_PW_ENTRY 2005
+#define SLAPI_TASK_WARNING 2006
/* Used for checking assertions about pblocks in some cases. */
#define SLAPI_HINT 9999
static PRLock *pblock_analytics_lock = NULL;
+
static PLHashNumber
hash_int_func(const void *key)
{
@@ -4315,6 +4317,28 @@ slapi_pblock_set_ldif_dump_replica(Slapi_PBlock *pb, int32_t dump_replica)
pb->pb_task->ldif_dump_replica = dump_replica;
}
+int32_t
+slapi_pblock_get_task_warning(Slapi_PBlock *pb)
+{
+#ifdef PBLOCK_ANALYTICS
+ pblock_analytics_record(pb, SLAPI_TASK_WARNING);
+#endif
+ if (pb->pb_task != NULL) {
+ return pb->pb_task->task_warning;
+ }
+ return 0;
+}
+
+void
+slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warning)
+{
+#ifdef PBLOCK_ANALYTICS
+ pblock_analytics_record(pb, SLAPI_TASK_WARNING);
+#endif
+ _pblock_assert_pb_task(pb);
+ pb->pb_task->task_warning = warning;
+}
+
void *
slapi_pblock_get_vattr_context(Slapi_PBlock *pb)
{
diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h
index 90498c0b0..b35d78565 100644
--- a/ldap/servers/slapd/pblock_v3.h
+++ b/ldap/servers/slapd/pblock_v3.h
@@ -67,6 +67,7 @@ typedef struct _slapi_pblock_task
int ldif2db_noattrindexes;
int ldif_printkey;
int task_flags;
+ int32_t task_warning;
int import_state;
int server_running; /* indicate that server is running */
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index c98c1947c..31cb33472 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1465,6 +1465,20 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes);
void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag);
void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text);
+/* task warnings */
+typedef enum task_warning_t{
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+ WARN_UPGRADE_DN_FORMAT = (1 << 1),
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
+
+
+int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 4e6a1905a..5b36a79e1 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -2683,7 +2683,7 @@ class DirSrv(SimpleLDAPObject, object):
# server is stopped)
#
def ldif2db(self, bename, suffixes, excludeSuffixes, encrypt,
- import_file):
+ import_file, import_cl):
"""
@param bename - The backend name of the database to import
@param suffixes - List/tuple of suffixes to import
@@ -2731,14 +2731,14 @@ class DirSrv(SimpleLDAPObject, object):
try:
result = subprocess.check_output(cmd, encoding='utf-8')
except subprocess.CalledProcessError as e:
- self.log.debug("Command: %s failed with the return code %s and the error %s",
- format_cmd_list(cmd), e.returncode, e.output)
- return False
-
- self.log.debug("ldif2db output: BEGIN")
- for line in result.split("\n"):
- self.log.debug(line)
- self.log.debug("ldif2db output: END")
+ if e.returncode == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
+ self.log.debug("Command: %s skipped import entry warning %s",
+ format_cmd_list(cmd), e.returncode)
+ return e.returncode
+ else:
+ self.log.debug("Command: %s failed with the return code %s and the error %s",
+ format_cmd_list(cmd), e.returncode, e.output)
+ return False
return True
diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py
index e28c602a3..38ba04565 100644
--- a/src/lib389/lib389/_constants.py
+++ b/src/lib389/lib389/_constants.py
@@ -162,6 +162,13 @@ DB2BAK = 'db2bak'
DB2INDEX = 'db2index'
DBSCAN = 'dbscan'
+# Task warnings
+class TaskWarning(IntEnum):
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0)
+ WARN_UPGRADE_DN_FORMAT = (1 << 1)
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2)
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+
RDN_REPLICA = "cn=replica"
RETROCL_SUFFIX = "cn=changelog"
diff --git a/src/lib389/lib389/cli_ctl/dbtasks.py b/src/lib389/lib389/cli_ctl/dbtasks.py
index 590a1ea0e..02830239c 100644
--- a/src/lib389/lib389/cli_ctl/dbtasks.py
+++ b/src/lib389/lib389/cli_ctl/dbtasks.py
@@ -7,6 +7,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+from lib389._constants import TaskWarning
def dbtasks_db2index(inst, log, args):
if not inst.db2index(bename=args.backend):
@@ -44,10 +45,13 @@ def dbtasks_db2ldif(inst, log, args):
def dbtasks_ldif2db(inst, log, args):
- if not inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif,
- suffixes=None, excludeSuffixes=None):
+ ret = inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif,
+ suffixes=None, excludeSuffixes=None, import_cl=False)
+ if not ret:
log.fatal("ldif2db failed")
return False
+ elif ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
+ log.warn("ldif2db successful with skipped entries")
else:
log.info("ldif2db successful")
--
2.26.2
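
The warning values introduced above are bit flags, so several conditions can be reported in one return code. A small Python sketch of the intended combine-and-test pattern; the constants are copied from the patch (including its UPGARDE spelling).

from enum import IntEnum

class TaskWarning(IntEnum):            # mirrors lib389/_constants.py above
    WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0)
    WARN_UPGRADE_DN_FORMAT = (1 << 1)
    WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2)
    WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)

warnings = 0
warnings |= TaskWarning.WARN_SKIPPED_IMPORT_ENTRY       # set by the import code
if warnings & TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:    # checked by the CLI wrapper
    print("ldif2db successful with skipped entries")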


@ -1,52 +0,0 @@
From 97bdef2d562e447d521202beb485c3948b0e7214 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Mon, 30 Nov 2020 15:28:05 +0000
Subject: [PATCH 2/6] Issue 4418 - ldif2db - offline. Warn the user of skipped
entries
Bug Description: During an ldif2db import, entries that do not
conform to various constraints will be skipped and not imported.
On completion of an import with skipped entries, the server
returns a success exit code and logs the skipped entry detail to
the error logs. The success exit code could lead the user to
believe that all entries were successfully imported.
Fix Description: If a skipped entry occurs during import, the
import will continue and a warning will be returned to the user.
CLI tools for offline import updated to handle warning code.
Test added to generate an incorrect ldif entry and perform an
import.
Fixes: #4418
Reviewed by: Firstyear, droideck (Thanks)
---
ldap/servers/slapd/slapi-private.h | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 31cb33472..e0092d571 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1476,6 +1476,16 @@ typedef enum task_warning_t{
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
+/* task warnings */
+typedef enum task_warning_t{
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+ WARN_UPGRADE_DN_FORMAT = (1 << 1),
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);
--
2.26.2


@ -1,34 +0,0 @@
From 22fb8b2690a5fa364d252846f06b77b5fec8c602 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 7 Jan 2021 10:27:43 -0500
Subject: [PATCH 3/6] Fix cherry-pick error
---
ldap/servers/slapd/slapi-private.h | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index e0092d571..d5abe8ac1 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1476,17 +1476,6 @@ typedef enum task_warning_t{
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
-/* task warnings */
-typedef enum task_warning_t{
- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
- WARN_UPGRADE_DN_FORMAT = (1 << 1),
- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
-} task_warning;
-
-int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
-void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
-
int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);
#ifdef __cplusplus
--
2.26.2


@ -1,393 +0,0 @@
From 43f8a317bcd9040874b27cad905347a9e6bc8a6f Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 9 Dec 2020 22:42:59 +0000
Subject: [PATCH 4/6] Issue 4419 - Warn users of skipped entries during ldif2db
online import (#4476)
Bug Description: During an online ldif2db import, entries that do not
conform to various constraints will be skipped and
not imported. On completion of an import with skipped
entries, the server responds with a success message
and logs the skipped entry detail to the error logs.
The success message could lead the user to believe
that all entries were successfully imported.
Fix Description: If a skipped entry occurs during import, the import
will continue and a warning message will be displayed.
The schema is extended with a nsTaskWarning attribute
which is used to capture and retrieve any task
warnings.
CLI tools for online import updated.
Test added to generate an incorrect ldif entry and perform an
online import.
Fixes: https://github.com/389ds/389-ds-base/issues/4419
Reviewed by: tbordaz, mreynolds389, droideck, Firstyear (Thanks)
---
.../tests/suites/import/import_test.py | 39 +++++++++++++++++--
ldap/schema/02common.ldif | 3 +-
.../back-ldbm/db-bdb/bdb_import_threads.c | 5 +++
ldap/servers/slapd/slap.h | 1 +
ldap/servers/slapd/slapi-plugin.h | 11 ++++++
ldap/servers/slapd/slapi-private.h | 8 ----
ldap/servers/slapd/task.c | 29 +++++++++++++-
src/lib389/lib389/cli_conf/backend.py | 6 ++-
src/lib389/lib389/tasks.py | 23 +++++++++--
9 files changed, 108 insertions(+), 17 deletions(-)
diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index b47db96ed..77c915026 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -65,6 +65,9 @@ def _import_clean(request, topo):
import_ldif = ldif_dir + '/basic_import.ldif'
if os.path.exists(import_ldif):
os.remove(import_ldif)
+ syntax_err_ldif = ldif_dir + '/syntax_err.dif'
+ if os.path.exists(syntax_err_ldif):
+ os.remove(syntax_err_ldif)
request.addfinalizer(finofaci)
@@ -141,17 +144,19 @@ def _create_bogus_ldif(topo):
def _create_syntax_err_ldif(topo):
"""
- Create an incorrect ldif entry that violates syntax check
+ Create an ldif file, which contains an entry that violates syntax check
"""
ldif_dir = topo.standalone.get_ldif_dir()
line1 = """dn: dc=example,dc=com
objectClass: top
objectClass: domain
dc: example
+
dn: ou=groups,dc=example,dc=com
objectClass: top
objectClass: organizationalUnit
ou: groups
+
dn: uid=JHunt,ou=groups,dc=example,dc=com
objectClass: top
objectClass: person
@@ -201,6 +206,34 @@ def test_import_with_index(topo, _import_clean):
assert f'{place}/userRoot/roomNumber.db' in glob.glob(f'{place}/userRoot/*.db', recursive=True)
+def test_online_import_with_warning(topo, _import_clean):
+ """
+ Import an ldif file with syntax errors, verify skipped entry warning code
+
+ :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8
+ :setup: Standalone Instance
+ :steps:
+ 1. Create standalone Instance
+ 2. Create an ldif file with an entry that violates syntax check (empty givenname)
+ 3. Online import of troublesome ldif file
+ :expected results:
+ 1. Successful import with skipped entry warning
+ """
+ topo.standalone.restart()
+
+ import_task = ImportTask(topo.standalone)
+ import_ldif1 = _create_syntax_err_ldif(topo)
+
+ # Importing the offending ldif file - online
+ import_task.import_suffix_from_ldif(ldiffile=import_ldif1, suffix=DEFAULT_SUFFIX)
+
+ # There is just a single entry in this ldif
+ import_task.wait(5)
+
+ # Check for the task nsTaskWarning attr, make sure its set to skipped entry code
+ assert import_task.present('nstaskwarning')
+ assert TaskWarning.WARN_SKIPPED_IMPORT_ENTRY == import_task.get_task_warn()
+
def test_crash_on_ldif2db(topo, _import_clean):
"""
Delete the cn=monitor entry for an LDBM backend instance. Doing this will
@@ -246,7 +279,7 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl
topo.standalone.start()
-def test_ldif2db_syntax_check(topo):
+def test_ldif2db_syntax_check(topo, _import_clean):
"""ldif2db should return a warning when a skipped entry has occured.
:id: 85e75670-42c5-4062-9edc-7f117c97a06f
:setup:
@@ -261,7 +294,7 @@ def test_ldif2db_syntax_check(topo):
import_ldif1 = _create_syntax_err_ldif(topo)
# Import the offending LDIF data - offline
topo.standalone.stop()
- ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1)
+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1, None)
assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY
topo.standalone.start()
diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
index c6dc074db..821640d03 100644
--- a/ldap/schema/02common.ldif
+++ b/ldap/schema/02common.ldif
@@ -145,6 +145,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2356 NAME 'nsTaskExitCode' DESC 'Slapi T
attributeTypes: ( 2.16.840.1.113730.3.1.2357 NAME 'nsTaskCurrentItem' DESC 'Slapi Task item' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2358 NAME 'nsTaskTotalItems' DESC 'Slapi Task total items' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2359 NAME 'nsTaskCreated' DESC 'Slapi Task creation date' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2375 NAME 'nsTaskWarning' DESC 'Slapi Task warning code' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
#
# objectclasses:
#
@@ -177,5 +178,5 @@ objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement
objectClasses: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated ) X-ORIGIN '389 Directory Server' )
+objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated $ nsTaskWarning ) X-ORIGIN '389 Directory Server' )
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
index 310893884..5c7d9c8f7 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
@@ -747,6 +747,11 @@ import_producer(void *param)
}
}
+ /* capture skipped entry warnings for this task */
+ if((job) && (job->skipped)) {
+ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY);
+ }
+
slapi_value_free(&(job->usn_value));
import_free_ldif(&c);
info->state = FINISHED;
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 53c9161d1..be4d38739 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1753,6 +1753,7 @@ typedef struct slapi_task
int task_progress; /* number between 0 and task_work */
int task_work; /* "units" of work to be done */
int task_flags; /* (see above) */
+ task_warning task_warn; /* task warning */
char *task_status; /* transient status info */
char *task_log; /* appended warnings, etc */
char task_date[SLAPI_TIMESTAMP_BUFSIZE]; /* Date/time when task was created */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 96313ef2c..ddb11bc7c 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6638,6 +6638,15 @@ int slapi_config_remove_callback(int operation, int flags, const char *base, int
/* task flags (set by the task-control code) */
#define SLAPI_TASK_DESTROYING 0x01 /* queued event for destruction */
+/* task warnings */
+typedef enum task_warning_t{
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+ WARN_UPGRADE_DN_FORMAT = (1 << 1),
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+
int slapi_task_register_handler(const char *name, dseCallbackFn func);
int slapi_plugin_task_register_handler(const char *name, dseCallbackFn func, Slapi_PBlock *plugin_pb);
int slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func);
@@ -6654,6 +6663,8 @@ int slapi_task_get_refcount(Slapi_Task *task);
void slapi_task_set_destructor_fn(Slapi_Task *task, TaskCallbackFn func);
void slapi_task_set_cancel_fn(Slapi_Task *task, TaskCallbackFn func);
void slapi_task_status_changed(Slapi_Task *task);
+void slapi_task_set_warning(Slapi_Task *task, task_warning warn);
+int slapi_task_get_warning(Slapi_Task *task);
void slapi_task_log_status(Slapi_Task *task, char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 2, 3)));
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index d5abe8ac1..b956ebe63 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1465,14 +1465,6 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes);
void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag);
void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text);
-/* task warnings */
-typedef enum task_warning_t{
- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
- WARN_UPGRADE_DN_FORMAT = (1 << 1),
- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
-} task_warning;
-
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
index 936c64920..806077a16 100644
--- a/ldap/servers/slapd/task.c
+++ b/ldap/servers/slapd/task.c
@@ -46,6 +46,7 @@ static uint64_t shutting_down = 0;
#define TASK_PROGRESS_NAME "nsTaskCurrentItem"
#define TASK_WORK_NAME "nsTaskTotalItems"
#define TASK_DATE_NAME "nsTaskCreated"
+#define TASK_WARNING_NAME "nsTaskWarning"
#define DEFAULT_TTL "3600" /* seconds */
#define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */
@@ -332,7 +333,7 @@ slapi_task_status_changed(Slapi_Task *task)
LDAPMod modlist[20];
LDAPMod *mod[20];
int cur = 0, i;
- char s1[20], s2[20], s3[20];
+ char s1[20], s2[20], s3[20], s4[20];
if (shutting_down) {
/* don't care about task status updates anymore */
@@ -346,9 +347,11 @@ slapi_task_status_changed(Slapi_Task *task)
sprintf(s1, "%d", task->task_exitcode);
sprintf(s2, "%d", task->task_progress);
sprintf(s3, "%d", task->task_work);
+ sprintf(s4, "%d", task->task_warn);
NEXTMOD(TASK_PROGRESS_NAME, s2);
NEXTMOD(TASK_WORK_NAME, s3);
NEXTMOD(TASK_DATE_NAME, task->task_date);
+ NEXTMOD(TASK_WARNING_NAME, s4);
/* only add the exit code when the job is done */
if ((task->task_state == SLAPI_TASK_FINISHED) ||
(task->task_state == SLAPI_TASK_CANCELLED)) {
@@ -452,6 +455,30 @@ slapi_task_get_refcount(Slapi_Task *task)
return 0; /* return value not currently used */
}
+/*
+ * Return task warning
+ */
+int
+slapi_task_get_warning(Slapi_Task *task)
+{
+ if (task) {
+ return task->task_warn;
+ }
+
+ return 0; /* return value not currently used */
+}
+
+/*
+ * Set task warning
+ */
+void
+slapi_task_set_warning(Slapi_Task *task, task_warning warn)
+{
+ if (task) {
+ return task->task_warn |= warn;
+ }
+}
+
int
slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func)
{
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
index d7a6e670c..6bfbcb036 100644
--- a/src/lib389/lib389/cli_conf/backend.py
+++ b/src/lib389/lib389/cli_conf/backend.py
@@ -243,9 +243,13 @@ def backend_import(inst, basedn, log, args):
exclude_suffixes=args.exclude_suffixes)
task.wait(timeout=None)
result = task.get_exit_code()
+ warning = task.get_task_warn()
if task.is_complete() and result == 0:
- log.info("The import task has finished successfully")
+ if warning is None or (warning == 0):
+ log.info("The import task has finished successfully")
+ else:
+ log.info("The import task has finished successfully, with warning code {}, check the logs for more detail".format(warning))
else:
raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index dc7bb9206..bf20d1e61 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -38,6 +38,7 @@ class Task(DSLdapObject):
self._protected = False
self._exit_code = None
self._task_log = ""
+ self._task_warn = None
def status(self):
"""Return the decoded status of the task
@@ -49,6 +50,7 @@ class Task(DSLdapObject):
self._exit_code = self.get_attr_val_utf8("nsTaskExitCode")
self._task_log = self.get_attr_val_utf8("nsTaskLog")
+ self._task_warn = self.get_attr_val_utf8("nsTaskWarning")
if not self.exists():
self._log.debug("complete: task has self cleaned ...")
# The task cleaned it self up.
@@ -77,6 +79,15 @@ class Task(DSLdapObject):
return None
return None
+ def get_task_warn(self):
+ """Return task's warning code if task is complete, else None."""
+ if self.is_complete():
+ try:
+ return int(self._task_warn)
+ except TypeError:
+ return None
+ return None
+
def wait(self, timeout=120):
"""Wait until task is complete."""
@@ -390,14 +401,17 @@ class Tasks(object):
running, true if done - if true, second is the exit code - if dowait
is True, this function will block until the task is complete'''
attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
- 'nsTaskCurrentItem', 'nsTaskTotalItems']
+ 'nsTaskCurrentItem', 'nsTaskTotalItems', 'nsTaskWarning']
done = False
exitCode = 0
+ warningCode = 0
dn = entry.dn
while not done:
entry = self.conn.getEntry(dn, attrlist=attrlist)
self.log.debug("task entry %r", entry)
+ if entry.nsTaskWarning:
+ warningCode = int(entry.nsTaskWarning)
if entry.nsTaskExitCode:
exitCode = int(entry.nsTaskExitCode)
done = True
@@ -405,7 +419,7 @@ class Tasks(object):
time.sleep(1)
else:
break
- return (done, exitCode)
+ return (done, exitCode, warningCode)
def importLDIF(self, suffix=None, benamebase=None, input_file=None,
args=None):
@@ -461,8 +475,9 @@ class Tasks(object):
self.conn.add_s(entry)
exitCode = 0
+ warningCode = 0
if args and args.get(TASK_WAIT, False):
- (done, exitCode) = self.conn.tasks.checkTask(entry, True)
+ (done, exitCode, warningCode) = self.conn.tasks.checkTask(entry, True)
if exitCode:
self.log.error("Error: import task %s for file %s exited with %d",
@@ -470,6 +485,8 @@ class Tasks(object):
else:
self.log.info("Import task %s for file %s completed successfully",
cn, input_file)
+ if warningCode:
+ self.log.info("with warning code %d", warningCode)
self.dn = dn
self.entry = entry
return exitCode
--
2.26.2
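
A sketch of how a lib389 caller consumes the new nsTaskWarning value, following the test and dsconf code above. The instance object inst, the suffix and the LDIF path are placeholders.

from lib389.tasks import ImportTask
from lib389._constants import TaskWarning

# "inst" is assumed to be an already-connected DirSrv instance object.
task = ImportTask(inst)
task.import_suffix_from_ldif(ldiffile="/tmp/example.ldif", suffix="dc=example,dc=com")
task.wait(timeout=120)
if task.get_exit_code() == 0 and task.get_task_warn() == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
    print("import finished, but some entries were skipped")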


@ -1,149 +0,0 @@
From 61d82ef842e0e4e013937bf05d7f640be2d2fc09 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 16 Dec 2020 16:30:28 +0100
Subject: [PATCH 5/6] Issue 4480 - Unexpected info returned to ldap request
(#4491)
Bug description:
If the bind entry does not exist, the bind result info
reports 'No such entry'. It should not give any
information about whether the target entry exists or not.
Fix description:
Does not return any additional information during a bind
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31
---
dirsrvtests/tests/suites/basic/basic_test.py | 112 +++++++++++++++++++
1 file changed, 112 insertions(+)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 1ae82dcdd..02b73ee85 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1400,6 +1400,118 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance):
assert not dscreate_long_instance.exists()
+@pytest.fixture(scope="module", params=('c=uk', 'cn=test_user', 'dc=example,dc=com', 'o=south', 'ou=sales', 'wrong=some_value'))
+def dscreate_test_rdn_value(request):
+ template_file = "/tmp/dssetup.inf"
+ template_text = f"""[general]
+config_version = 2
+# This invalid hostname ...
+full_machine_name = localhost.localdomain
+# Means we absolutely require this.
+strict_host_checking = False
+# In tests, we can be run in containers, NEVER trust
+# that systemd is there, or functional in any capacity
+systemd = False
+
+[slapd]
+instance_name = test_different_rdn
+root_dn = cn=directory manager
+root_password = someLongPassword_123
+# We do not have access to high ports in containers,
+# so default to something higher.
+port = 38999
+secure_port = 63699
+
+[backend-userroot]
+create_suffix_entry = True
+suffix = {request.param}
+"""
+
+ with open(template_file, "w") as template_fd:
+ template_fd.write(template_text)
+
+ # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389
+ tmp_env = os.environ
+ if "PYTHONPATH" in tmp_env:
+ del tmp_env["PYTHONPATH"]
+
+ def fin():
+ os.remove(template_file)
+ if request.param != "wrong=some_value":
+ try:
+ subprocess.check_call(['dsctl', 'test_different_rdn', 'remove', '--do-it'])
+ except subprocess.CalledProcessError as e:
+ log.fatal(f"Failed to remove test instance Error ({e.returncode}) {e.output}")
+ else:
+ log.info("Wrong RDN is passed, instance not created")
+ request.addfinalizer(fin)
+ return template_file, tmp_env, request.param,
+
+
+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'),
+ reason="This test is only required with new admin cli, and requires root.")
+@pytest.mark.bz1807419
+@pytest.mark.ds50928
+def test_dscreate_with_different_rdn(dscreate_test_rdn_value):
+ """Test that dscreate works with different RDN attributes as suffix
+
+ :id: 77ed6300-6a2f-4e79-a862-1f1105f1e3ef
+ :parametrized: yes
+ :setup: None
+ :steps:
+ 1. Create template file for dscreate with different RDN attributes as suffix
+ 2. Create instance using template file
+ 3. Create instance with 'wrong=some_value' as suffix's RDN attribute
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should fail
+ """
+ try:
+ subprocess.check_call([
+ 'dscreate',
+ 'from-file',
+ dscreate_test_rdn_value[0]
+ ], env=dscreate_test_rdn_value[1])
+ except subprocess.CalledProcessError as e:
+ log.fatal(f"dscreate failed! Error ({e.returncode}) {e.output}")
+ if dscreate_test_rdn_value[2] != "wrong=some_value":
+ assert False
+ else:
+ assert True
+
+def test_bind_invalid_entry(topology_st):
+ """Test the failing bind does not return information about the entry
+
+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
+
+ :setup: Standalone instance
+
+ :steps:
+ 1: bind as non existing entry
+ 2: check that bind info does not report 'No such entry'
+
+ :expectedresults:
+ 1: pass
+ 2: pass
+ """
+
+ topology_st.standalone.restart()
+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX
+ try:
+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
+ except ldap.LDAPError as e:
+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
+ log.info('exception description: ' + e.args[0]['desc'])
+ if 'info' in e.args[0]:
+ log.info('exception info: ' + e.args[0]['info'])
+ assert e.args[0]['desc'] == 'Invalid credentials'
+ assert 'info' not in e.args[0]
+ pass
+
+ log.info('test_bind_invalid_entry: PASSED')
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
--
2.26.2


@ -1,99 +0,0 @@
From 3c74f736c657d007770fe866842b08d0a74772ca Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Dec 2020 15:21:11 -0500
Subject: [PATCH 6/6] Issue 4414 - disk monitoring - prevent division by zero
crash
Bug Description: If a disk mount has zero total space or zero used
space then a division by zero can occur and the
server will crash.
It has also been observed that sometimes a system
can return the wrong disk entirely, and when that
happens the incorrect disk also has zero available
space, which triggers the disk monitoring thread to
immediately shut the server down.
Fix Description: Check the total and used space for zero and do not
divide, just ignore it. As a preemptive measure
ignore disks from /dev, /proc, /sys (except /dev/shm).
Yes it's a bit hacky, but the true underlying cause
is not known yet. So better to be safe than sorry.
Relates: https://github.com/389ds/389-ds-base/issues/4414
Reviewed by: firstyear(Thanks!)
---
ldap/servers/slapd/daemon.c | 22 +++++++++++++++++++++-
ldap/servers/slapd/monitor.c | 13 +++++--------
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 691f77570..bfd965263 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -221,7 +221,27 @@ disk_mon_get_mount_point(char *dir)
}
if (s.st_dev == dev_id) {
endmntent(fp);
- return (slapi_ch_strdup(mnt->mnt_dir));
+
+ if ((strncmp(mnt->mnt_dir, "/dev", 4) == 0 && strncmp(mnt->mnt_dir, "/dev/shm", 8) != 0) ||
+ strncmp(mnt->mnt_dir, "/proc", 4) == 0 ||
+ strncmp(mnt->mnt_dir, "/sys", 4) == 0)
+ {
+ /*
+ * Ignore "mount directories" starting with /dev (except
+ * /dev/shm), /proc, /sys For some reason these mounts are
+ * occasionally/incorrectly returned. Only seen this at a
+ * customer site once. When it happens it causes disk
+ * monitoring to think the server has 0 disk space left, and
+ * it abruptly/unexpectedly shuts the server down. At this
+ * point it looks like a bug in stat(), setmntent(), or
+ * getmntent(), but there is no way to prove that since there
+ * is no way to reproduce the original issue. For now just
+ * return NULL to be safe.
+ */
+ return NULL;
+ } else {
+ return (slapi_ch_strdup(mnt->mnt_dir));
+ }
}
}
endmntent(fp);
diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c
index 562721bed..65f082986 100644
--- a/ldap/servers/slapd/monitor.c
+++ b/ldap/servers/slapd/monitor.c
@@ -131,7 +131,6 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)),
{
int32_t rc = LDAP_SUCCESS;
char **dirs = NULL;
- char buf[BUFSIZ];
struct berval val;
struct berval *vals[2];
uint64_t total_space;
@@ -143,15 +142,13 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)),
disk_mon_get_dirs(&dirs);
- for (uint16_t i = 0; dirs && dirs[i]; i++) {
+ for (size_t i = 0; dirs && dirs[i]; i++) {
+ char buf[BUFSIZ] = {0};
rc = disk_get_info(dirs[i], &total_space, &avail_space, &used_space);
- if (rc) {
- slapi_log_err(SLAPI_LOG_WARNING, "monitor_disk_info",
- "Unable to get 'cn=disk space,cn=monitor' stats for %s\n", dirs[i]);
- } else {
+ if (rc == 0 && total_space > 0 && used_space > 0) {
val.bv_len = snprintf(buf, sizeof(buf),
- "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"",
- dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space);
+ "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"",
+ dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space);
val.bv_val = buf;
attrlist_merge(&e->e_attrs, "dsDisk", vals);
}
--
2.26.2
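
A minimal Python rendering of the guard the monitor.c hunk above adds: a partition line is only emitted when the reported sizes are sane, so the use% calculation can never divide by zero. The sample numbers are made up.

def format_disk_line(partition, total, used, avail):
    # Skip bogus mounts that report zero total or zero used space.
    if total > 0 and used > 0:
        return ('partition="%s" size="%d" used="%d" available="%d" use%%="%d"'
                % (partition, total, used, avail, used * 100 // total))
    return None

print(format_disk_line("/", 100_000_000, 25_000_000, 75_000_000))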


@ -1,132 +0,0 @@
From 48b30739f33d1eb526dbdd45c820129c4a4c4bcb Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 12 Jan 2021 11:06:24 +0100
Subject: [PATCH] Issue 4504 - Ensure ldapi is enabled in repl_monitor_test.py
(Needed on RHEL) (#4527)
(cherry picked from commit 279556bc78ed743d7a053069621d999ec045866f)
---
.../tests/suites/clu/repl_monitor_test.py | 67 +++++++++----------
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index eb18d2da2..b2cb840b3 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,7 +9,6 @@
import time
import subprocess
import pytest
-import re
from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -18,6 +17,8 @@ from lib389.topologies import topology_m2
from lib389.cli_base import FakeArgs
from lib389.cli_base.dsrc import dsrc_arg_concat
from lib389.cli_base import connect_instance
+from lib389.replica import Replicas
+
pytestmark = pytest.mark.tier0
@@ -68,25 +69,6 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)
-def get_hostnames_from_log(port1, port2):
- # Get the supplier host names as displayed in replication monitor output
- with open(LOG_FILE, 'r') as logfile:
- logtext = logfile.read()
- # search for Supplier :hostname:port
- # and use \D to insure there is no more number is after
- # the matched port (i.e that 10 is not matching 101)
- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
- match=re.search(regexp, logtext)
- host_m1 = 'localhost.localdomain'
- if (match is not None):
- host_m1 = match.group(2)
- # Same for master 2
- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
- match=re.search(regexp, logtext)
- host_m2 = 'localhost.localdomain'
- if (match is not None):
- host_m2 = match.group(2)
- return (host_m1, host_m2)
@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -115,6 +97,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
m1 = topology_m2.ms["master1"]
m2 = topology_m2.ms["master2"]
+ # Enable ldapi if not already done.
+ for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]:
+ if not inst.can_autobind():
+ # Update ns-slapd instance
+ inst.config.set('nsslapd-ldapilisten', 'on')
+ inst.config.set('nsslapd-ldapiautobind', 'on')
+ inst.restart()
+ # Ensure that updates have been sent both ways.
+ replicas = Replicas(m1)
+ replica = replicas.get(DEFAULT_SUFFIX)
+ replica.test_replication([m2])
+ replicas = Replicas(m2)
+ replica = replicas.get(DEFAULT_SUFFIX)
+ replica.test_replication([m1])
+
+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
+
connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
content_list = ['Replica Root: dc=example,dc=com',
'Replica ID: 1',
@@ -177,9 +177,20 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + m2.host + ':' + str(m2.port)
+
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
+ aliases = ['M1=' + m1.host + ':' + str(m1.port),
+ 'M2=' + m2.host + ':' + str(m2.port)]
+
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -187,24 +198,8 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
- (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
- # Prepare the data for next tests
- aliases = ['M1=' + host_m1 + ':' + str(m1.port),
- 'M2=' + host_m2 + ':' + str(m2.port)]
-
- alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
- 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
-
- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
- 'M2 = ' + host_m2 + ':' + str(m2.port)
-
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2
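
The ldapi enablement added to the test boils down to the following lib389 calls; inst stands for an already-connected DirSrv object and is only a placeholder here.

# Enable LDAPI autobind if the instance cannot already autobind.
if not inst.can_autobind():
    inst.config.set('nsslapd-ldapilisten', 'on')
    inst.config.set('nsslapd-ldapiautobind', 'on')
    inst.restart()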


@ -1,51 +0,0 @@
From f84e75de9176218d3b47a447d07fe8fb7ca3d72f Mon Sep 17 00:00:00 2001
From: Barbora Simonova <bsmejkal@redhat.com>
Date: Mon, 11 Jan 2021 15:51:24 +0100
Subject: [PATCH] Issue 4315 - performance search rate: nagle triggers high
rate of setsocketopt
Description:
The config value of nsslapd-nagle is now set to 'off' by default.
Added a test case that checks the value.
Relates: https://github.com/389ds/389-ds-base/issues/4315
Reviewed by: droideck (Thanks!)
---
.../tests/suites/config/config_test.py | 20 +++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 38d1ed9ac..fda16a530 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -41,6 +41,26 @@ def big_file():
return TEMP_BIG_FILE
+@pytest.mark.bz1897248
+@pytest.mark.ds4315
+@pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher")
+def test_nagle_default_value(topo):
+ """Test that nsslapd-nagle attribute is off by default
+
+ :id: 00361f5d-d638-4d39-8231-66fa52637203
+ :setup: Standalone instance
+ :steps:
+ 1. Create instance
+ 2. Check the value of nsslapd-nagle
+ :expectedresults:
+ 1. Success
+ 2. The value of nsslapd-nagle should be off
+ """
+
+ log.info('Check the value of nsslapd-nagle attribute is off by default')
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off'
+
+
def test_maxbersize_repl(topology_m2, big_file):
"""maxbersize is ignored in the replicated operations.
--
2.26.2
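
Checking the new default outside of pytest needs only the config read the test above uses; inst is a placeholder for a connected lib389 DirSrv object.

# nsslapd-nagle defaults to 'off' in 1.4.3.16 and higher.
nagle = inst.config.get_attr_val_utf8('nsslapd-nagle')
assert nagle == 'off'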


@ -1,98 +0,0 @@
From 00ccec335792e3fa44712427463c64eb1ff9c5be Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 12 Jan 2021 17:45:41 +0100
Subject: [PATCH] Issue 4504 - ensure that repl_monitor_test uses ldapi (for
RHEL) - fix merge issue (#4533)
(cherry picked from commit a880fddc192414d6283ea6832491b7349e5471dc)
---
.../tests/suites/clu/repl_monitor_test.py | 47 ++++++++++++++-----
1 file changed, 36 insertions(+), 11 deletions(-)
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index b2cb840b3..caf6a9099 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,6 +9,7 @@
import time
import subprocess
import pytest
+import re
from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -69,6 +70,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)
+def get_hostnames_from_log(port1, port2):
+ # Get the supplier host names as displayed in replication monitor output
+ with open(LOG_FILE, 'r') as logfile:
+ logtext = logfile.read()
+ # search for Supplier :hostname:port
+ # and use \D to insure there is no more number is after
+ # the matched port (i.e that 10 is not matching 101)
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m1 = 'localhost.localdomain'
+ if (match is not None):
+ host_m1 = match.group(2)
+ # Same for master 2
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m2 = 'localhost.localdomain'
+ if (match is not None):
+ host_m2 = match.group(2)
+ return (host_m1, host_m2)
@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -177,20 +197,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]
- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
- 'M2 = ' + m2.host + ':' + str(m2.port)
-
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
- aliases = ['M1=' + m1.host + ':' + str(m1.port),
- 'M2=' + m2.host + ':' + str(m2.port)]
-
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -198,8 +207,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+ # Prepare the data for next tests
+ aliases = ['M1=' + host_m1 + ':' + str(m1.port),
+ 'M2=' + host_m2 + ':' + str(m2.port)]
+
+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + host_m2 + ':' + str(m2.port)
+
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2


@ -1,70 +0,0 @@
From 2afc65fd1750afcb1667545da5625f5a932aacdd Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 13 Jan 2021 15:16:08 +0100
Subject: [PATCH] Issue 4528 - Fix cn=monitor SCOPE_ONE search (#4529)
Bug Description: An ldapsearch on "cn=monitor" with -s one
throws err=32.
Fix Description: 'cn=monitor' is not a real entry, so we should not
try to check whether the searched suffix (cn=monitor or its children)
belongs to the searched backend.
Fixes: #4528
Reviewed by: @mreynolds389 @Firstyear @tbordaz (Thanks!)
---
ldap/servers/slapd/opshared.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index c0bc5dcd0..f5ed71144 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -240,6 +240,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int rc = 0;
int internal_op;
Slapi_DN *basesdn = NULL;
+ Slapi_DN monitorsdn = {0};
Slapi_DN *sdn = NULL;
Slapi_Operation *operation = NULL;
Slapi_Entry *referral = NULL;
@@ -765,9 +766,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
} else {
/* be_suffix null means that we are searching the default backend
- * -> don't change the search parameters in pblock
- */
- if (be_suffix != NULL) {
+ * -> don't change the search parameters in pblock
+ * Also, we skip this block for 'cn=monitor' search and its subsearches
+ * as they are done by callbacks from monitor.c */
+ slapi_sdn_init_dn_byref(&monitorsdn, "cn=monitor");
+ if (!((be_suffix == NULL) || slapi_sdn_issuffix(basesdn, &monitorsdn))) {
if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL)) {
/* one level searches
* - depending on the suffix of the backend we might have to
@@ -789,8 +792,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
} else if (slapi_sdn_issuffix(basesdn, be_suffix)) {
int tmp_scope = LDAP_SCOPE_ONELEVEL;
slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
- } else
+ } else {
+ slapi_sdn_done(&monitorsdn);
goto next_be;
+ }
}
/* subtree searches :
@@ -811,7 +816,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
}
}
-
+ slapi_sdn_done(&monitorsdn);
slapi_pblock_set(pb, SLAPI_BACKEND, be);
slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database);
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL);
--
2.26.2
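
A python-ldap sketch of the search that failed with err=32 before this fix and succeeds after it; the URL and credentials are placeholders.

import ldap

conn = ldap.initialize("ldap://localhost:389")          # placeholder server
conn.simple_bind_s("cn=directory manager", "password")  # placeholder credentials
# Before the fix a one-level search under cn=monitor returned err=32
# (noSuchObject); with the fix the monitor callbacks answer it.
entries = conn.search_s("cn=monitor", ldap.SCOPE_ONELEVEL, "(objectClass=*)")
print(len(entries))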


@ -44,8 +44,8 @@ ExcludeArch: i686
Summary: 389 Directory Server (base) Summary: 389 Directory Server (base)
Name: 389-ds-base Name: 389-ds-base
Version: 1.4.3.16 Version: 1.4.3.23
Release: %{?relprefix}8%{?prerel}%{?dist} Release: %{?relprefix}2%{?prerel}%{?dist}
License: GPLv3+ License: GPLv3+
URL: https://www.port389.org URL: https://www.port389.org
Group: System Environment/Daemons Group: System Environment/Daemons
@@ -54,6 +54,62 @@ Conflicts: freeipa-server < 4.0.3
Obsoletes: %{name} <= 1.4.0.9
Provides: ldif2ldbm >= 0
+##### Bundled cargo crates list - START #####
+Provides: bundled(crate(ansi_term)) = 0.11.0
+Provides: bundled(crate(atty)) = 0.2.14
+Provides: bundled(crate(autocfg)) = 1.0.1
+Provides: bundled(crate(base64)) = 0.10.1
+Provides: bundled(crate(bitflags)) = 1.2.1
+Provides: bundled(crate(byteorder)) = 1.4.2
+Provides: bundled(crate(cbindgen)) = 0.9.1
+Provides: bundled(crate(cc)) = 1.0.66
+Provides: bundled(crate(cfg-if)) = 0.1.10
+Provides: bundled(crate(cfg-if)) = 1.0.0
+Provides: bundled(crate(clap)) = 2.33.3
+Provides: bundled(crate(fernet)) = 0.1.3
+Provides: bundled(crate(foreign-types)) = 0.3.2
+Provides: bundled(crate(foreign-types-shared)) = 0.1.1
+Provides: bundled(crate(getrandom)) = 0.1.16
+Provides: bundled(crate(hermit-abi)) = 0.1.17
+Provides: bundled(crate(itoa)) = 0.4.7
+Provides: bundled(crate(lazy_static)) = 1.4.0
+Provides: bundled(crate(libc)) = 0.2.82
+Provides: bundled(crate(librnsslapd)) = 0.1.0
+Provides: bundled(crate(librslapd)) = 0.1.0
+Provides: bundled(crate(log)) = 0.4.11
+Provides: bundled(crate(openssl)) = 0.10.32
+Provides: bundled(crate(openssl-sys)) = 0.9.60
+Provides: bundled(crate(pkg-config)) = 0.3.19
+Provides: bundled(crate(ppv-lite86)) = 0.2.10
+Provides: bundled(crate(proc-macro2)) = 1.0.24
+Provides: bundled(crate(quote)) = 1.0.8
+Provides: bundled(crate(rand)) = 0.7.3
+Provides: bundled(crate(rand_chacha)) = 0.2.2
+Provides: bundled(crate(rand_core)) = 0.5.1
+Provides: bundled(crate(rand_hc)) = 0.2.0
+Provides: bundled(crate(redox_syscall)) = 0.1.57
+Provides: bundled(crate(remove_dir_all)) = 0.5.3
+Provides: bundled(crate(rsds)) = 0.1.0
+Provides: bundled(crate(ryu)) = 1.0.5
+Provides: bundled(crate(serde)) = 1.0.118
+Provides: bundled(crate(serde_derive)) = 1.0.118
+Provides: bundled(crate(serde_json)) = 1.0.61
+Provides: bundled(crate(slapd)) = 0.1.0
+Provides: bundled(crate(strsim)) = 0.8.0
+Provides: bundled(crate(syn)) = 1.0.58
+Provides: bundled(crate(tempfile)) = 3.1.0
+Provides: bundled(crate(textwrap)) = 0.11.0
+Provides: bundled(crate(toml)) = 0.5.8
+Provides: bundled(crate(unicode-width)) = 0.1.8
+Provides: bundled(crate(unicode-xid)) = 0.2.1
+Provides: bundled(crate(vcpkg)) = 0.2.11
+Provides: bundled(crate(vec_map)) = 0.8.2
+Provides: bundled(crate(wasi)) = 0.9.0+wasi_snapshot_preview1
+Provides: bundled(crate(winapi)) = 0.3.9
+Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
+Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
+##### Bundled cargo crates list - END #####
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
BuildRequires: perl-generators
@@ -174,37 +230,22 @@ Source2: %{name}-devel.README
%if %{bundle_jemalloc}
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
%endif
-Patch01: 0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch
-Patch02: 0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch
-Patch03: 0003-do-not-add-referrals-for-masters-with-different-data.patch
-Patch04: 0004-Ticket-50933-Update-2307compat.ldif.patch
-Patch05: 0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch
-Patch06: 0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch
-Patch07: 0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch
-Patch08: 0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch
-Patch09: 0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch
-Patch10: 0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch
-Patch11: 0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch
-Patch12: 0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch
-Patch13: 0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch
-Patch14: 0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch
-Patch15: 0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch
-Patch16: 0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch
-Patch17: 0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch
-Patch18: 0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch
-Patch19: 0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch
-Patch20: 0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch
-Patch21: 0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch
-Patch22: 0022-Fix-cherry-pick-erorr.patch
-Patch23: 0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch
-Patch24: 0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch
-Patch25: 0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch
-Patch26: 0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch
-Patch27: 0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch
-Patch28: 0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch
-Patch29: 0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch
-Patch30: 0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch
-Patch31: 0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch
+%if %{use_rust}
+Source4: vendor-%{version}-2.tar.gz
+Source5: Cargo.lock
+%endif
+Patch01: 0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch
+Patch02: 0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch
+Patch03: 0003-Ticket-137-Implement-EntryUUID-plugin.patch
+Patch04: 0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
+Patch05: 0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch
+Patch06: 0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch
+Patch07: 0007-Ticket-51175-resolve-plugin-name-leaking.patch
+Patch08: 0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch
+Patch09: 0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch
+Patch10: 0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
+Patch11: 0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch
+Patch12: 0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@@ -331,6 +372,10 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server
%prep
%autosetup -p1 -v -n %{name}-%{version}%{?prerel}
+%if %{use_rust}
+tar xvzf %{SOURCE4}
+cp %{SOURCE5} src/
+%endif
%if %{bundle_jemalloc}
%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3
%endif
@@ -348,7 +393,7 @@ ASAN_FLAGS="--enable-asan --enable-debug"
%endif
%if %{use_rust}
-RUST_FLAGS="--enable-rust"
+RUST_FLAGS="--enable-rust --enable-rust-offline"
%endif
%if %{use_legacy}
@@ -682,9 +727,6 @@ exit 0
%if %{bundle_jemalloc}
%{_libdir}/%{pkgname}/lib/libjemalloc.so.2
%endif
-%if %{use_rust}
-%{_libdir}/%{pkgname}/librsds.so
-%endif
%if %{use_legacy}
%files legacy-tools
@@ -822,135 +864,23 @@ exit 0
%doc README.md
%changelog
+* Sun May 30 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-2
+- Bump version to 1.4.3.23-2
+- Resolves: Bug 1812286 - RFE - Monitor the current DB locks ( nsslapd-db-current-locks )
+- Resolves: Bug 1748441 - RFE - Schedule execution of "compactdb" at specific date/time
+- Resolves: Bug 1938239 - RFE - Extend DNA plugin to support intervals sizes for subuids
+* Fri May 14 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-1
+- Bump version to 1.4.3.23-1
+- Resolves: Bug 1947044 - Rebase 389 DS with 389-ds-base-1.4.3.23 for RHEL 8.5
+- Resolves: Bug 1850664 - RFE - Add an option for the Retro Changelog to ignore some attributes
+- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)
* Wed Jan 13 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-8
- Bump version to 1.4.3.16-8
- Resolves: Bug 1903539 - cn=monitor is throwing err=32 with scope: -s one
- Resolves: Bug 1893870 - PR_WaitCondVar() issue causes replication delay when clock jumps backwards
* Thu Jan 7 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-7
- Bump version to 1.4.3.16-7
- Resolves: Bug 1890118 - SIGFPE crash in rhds disk monitoring routine
- Resolves: Bug 1904991 - 389-ds:1.4/389-ds-base: information disclosure during the binding of a DN
- Resolves: Bug 1627645 - ldif2db does not change exit code when there are skipped entries
- Resolves: Bug 1898541 - Changelog cache can upload updates from a wrong starting point (CSN)
- Resolves: Bug 1889562 - client psearch with multiple threads hangs if nsslapd-maxthreadsperconn is under sized
- Resolves: Bug 1924848 - Negative wtime on ldapcompare
- Resolves: Bug 1895460 - RFE - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value
- Resolves: Bug 1897614 - Performance search rate: change entry cache monitor to recursive pthread mutex
- Resolves: Bug 1939607 - hang because of incorrect accounting of readers in vattr rwlock
- Resolves: Bug 1626633 - [RFE] DS - Update the password policy to support a Temporary Password with expiration
- Resolves: Bug 1952804 - CVE-2021-3514 389-ds:1.4/389-ds-base: sync_repl NULL pointer dereference in sync_create_state_control()
* Wed Dec 16 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-6
- Bump version to 1.4.3.16-6
- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV - consumer (Unavailable) State (green) Reason (error (0)
- Resolves: Bug 1904991 - Unexpected info returned to ldap request
- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix
- Resolves: Bug 1903133 - Server-Cert.crt created using dscreate has Subject:CN =localhost instead of hostname.
* Wed Dec 9 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-5
- Bump version to 1.4.3.16-5
- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV
- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested
- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie get wrong changenumber
- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie
* Thu Dec 3 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-4
- Bump version to 1.4.3.16-4
- Resolves: Bug 1843517 - Using ldifgen with --start-idx option fails with unsupported operand
- Resolves: Bug 1801086 - [RFE] Generate dsrc file using dsconf
- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix
* Wed Nov 25 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-3
- Bump version to 1.4.3.16-3
- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema
- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection
- Resolves: Bug 1898850 - Entries conflict not resolved by replication
* Thu Nov 19 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-2
- Bump version to 1.4.3.16-2
- Resolves: Bug 1859227 - create keep alive entry after on line init
- Resolves: Bug 1888863 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32
- Resolves: Bug 1859228 - do not add referrals for masters with different data generation
* Mon Oct 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-1
- Bump version to 1.4.3.16-1
- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber
- Resolves: Bug 1859225 - suffix management in backends incorrect
* Mon Oct 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.14-1
- Bump version to 1.4.3.14-1
- Resolves: Bug 1862529 - Rebase 389-ds-base-1.4.3 in RHEL 8.4
- Resolves: Bug 1859301 - Misleading message in access log for idle timeout
- Resolves: Bug 1889782 - Missing closing quote when reporting the details of unindexed/paged search results
- Resolves: Bug 1862971 - dsidm user status fails with Error: 'nsUserAccount' object has no attribute 'is_locked'
- Resolves: Bug 1859878 - Managed Entries configuration not being enforced
- Resolves: Bug 1851973 - Duplicate entryUSN numbers for different LDAP entries in the same backend
- Resolves: Bug 1851967 - if dbhome directory is set online backup fails
- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested
- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber
- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie
- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection
- Resolves: Bug 1872930 - dscreate: Not possible to bind to a unix domain socket
- Resolves: Bug 1861504 - ds-replcheck crashes in offline mode
- Resolves: Bug 1859282 - remove ldbm_back_entry_release
- Resolves: Bug 1859225 - suffix management in backends incorrect
- Resolves: Bug 1859224 - remove unused or unnecessary database plugin functions
- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema
- Resolves: Bug 1851975 - Add option to reject internal unindexed searches
- Resolves: Bug 1851972 - Remove code duplication from the BDB backend separation work
- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1848359 - Add failover credentials to replication agreement
- Resolves: Bug 1837315 - Healthcheck code DSBLE0002 not returned on disabled suffix
* Wed Aug 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-5
- Bump version to 1.4.3.8-5
- Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1849418 - python3-lib389 pulls unnecessary bash-completion package
* Fri Jun 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-4
- Bump version to 1.4.3.8-4
- Resolves: Bug 1806978 - ns-slapd crashes during db2ldif
- Resolves: Bug 1450863 - Log warning when tuning of nsslapd-threadnumber above or below the optimal value
- Resolves: Bug 1647017 - A distinguished value of a single valued attribute can be missing in an entry
- Resolves: Bug 1806573 - Dsctl healthcheck doesn't work when using instance name with 'slapd-'
- Resolves: Bug 1807773 - dsctl healthcheck : typo in DSREPLLE0002 Lint error suggested resolution commands
- Resolves: Bug 1843567 - Healthcheck to find notes=F
- Resolves: Bug 1845094 - User/Directory Manager can modify Password Policy attribute "pwdReset"
- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1442386 - Recreating an index while changing case will create an indexfile with the old name (different case) and after restart the indexfile is abandoned
- Resolves: Bug 1672574 - nsIndexIDListScanLimit accepts any value
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
* Fri Jun 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-3
- Bump version to 1.4.3.8-3
- Resolves: Bug 1835619 - Healthcheck with --json option reports "Object of type 'bytes' is not JSON serializable" when mapping tree is deleted
- Resolves: Bug 1836428 - Directory Server ds-replcheck RFE to add a timeout command-line arg/value to wait longer when connecting to a replica server
- Resolves: Bug 1843090 - abort when a empty valueset is freed
- Resolves: Bug 1843156 - Prevent unnecessarily duplication of the target entry
- Resolves: Bug 1843157 - Check for clock errors and time skew
- Resolves: Bug 1843159 - RFE AD filter rewriter for ObjectCategory
- Resolves: Bug 1843162 - Creating Replication Manager fails if uid=repman is used
- Resolves: Bug 1816851 - Add option to healthcheck to list all the lint reports
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1748244 - dscreate doesn't sanitize instance name
* Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-2
- Bump version to 1.4.3.8-2
- Resolves: Bug 1833350 - Remove cockpit dependancies that are breaking builds
* Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-1
- Bump version to 1.4.3.8-1
- Resolves: Bug 1833350 - Rebase 389-ds-base for RHEL 8.3
- Resolves: Bug 1728943 - [RFE] Advance options in RHDS Disk Monitoring Framework
- Resolves: Bug 1775285 - [RFE] Implement the Password Policy attribute "pwdReset"
- Resolves: Bug 1638875 - [RFE] extract key/certs pem file into a private namespace
- Resolves: Bug 1758478 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev
- Resolves: Bug 1795943 - Port dbmon.sh from legacy tools package
- Resolves: Bug 1798394 - Port dbgen from legacy tools package
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1807419 - Unable to create a suffix with countryName either via dscreate or the admin console
- Resolves: Bug 1816848 - Database links: get_monitor() takes 1 positional argument but 2 were given
- Resolves: Bug 1816854 - Setting nsslapd-allowed-sasl-mechanisms truncates the value
- Resolves: Bug 1816857 - Searches on cn=config takes values with spaces and makes multiple attributes out of them
- Resolves: Bug 1816859 - lib389 - Replace exec() with setattr()
- Resolves: Bug 1816862 - Memory leak in indirect COS
- Resolves: Bug 1829071 - Installation of RHDS 11 fails on RHEL8 server with IPv6 disabled
- Resolves: Bug 1833515 - set 'nsslapd-enable-upgrade-hash: off' as this raises warnings in IPA
- Resolves: Bug 1790986 - cenotaph errors on modrdn operations
- Resolves: Bug 1769734 - Heavy StartTLS connection load can randomly fail with err=1
- Resolves: Bug 1758501 - LeakSanitizer: detected memory leaks in changelog5_init and perfctrs_init