import 389-ds-base-1.3.9.1-12.el7_7

Author: CentOS Sources
Date: 2019-11-26 05:40:55 -05:00
Parent: 26521db319
Commit: e79480e958
9 changed files with 1045 additions and 1 deletion

SOURCES/0034-CVE-deref-plugin-displays-restricted-attributes.patch

@@ -0,0 +1,52 @@
From 816175c782e708de8ae47d3788dba3a6ed0fe3d8 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 28 Oct 2019 11:01:33 -0400
Subject: [PATCH] CVE - deref plugin displays restricted attributes
Bug Description: If there is an ACI that allows "search" access to an attribute,
the deref plugin's access control check sees this as a "read"
privilege and returns the attribute's value.
Fix Description: For the deref plugin we are only concerned with "read" access, not
"search" access. Removed the SLAPI_ACL_SEARCH right flag when
checking access for an attribute.
Reviewed by: lkrispen
---
ldap/servers/plugins/deref/deref.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/ldap/servers/plugins/deref/deref.c b/ldap/servers/plugins/deref/deref.c
index cb5ebb830..ec1884ba3 100644
--- a/ldap/servers/plugins/deref/deref.c
+++ b/ldap/servers/plugins/deref/deref.c
@@ -573,7 +573,7 @@ deref_do_deref_attr(Slapi_PBlock *pb, BerElement *ctrlber, const char *derefdn,
Slapi_Entry **entries = NULL;
int rc;
- /* If the access check on the attributes is done without retrieveing the entry
+ /* If the access check on the attributes is done without retrieving the entry
* it cannot handle acis which need teh entry, eg to apply a targetfilter rule
* So the determination of attrs which can be dereferenced is delayed
*/
@@ -596,7 +596,7 @@ deref_do_deref_attr(Slapi_PBlock *pb, BerElement *ctrlber, const char *derefdn,
int ii;
int needattrvals = 1; /* need attrvals sequence? */
if (deref_check_access(pb, entries[0], derefdn, attrs, &retattrs,
- (SLAPI_ACL_SEARCH | SLAPI_ACL_READ))) {
+ SLAPI_ACL_READ)) {
slapi_log_err(SLAPI_LOG_PLUGIN, DEREF_PLUGIN_SUBSYSTEM,
"deref_do_deref_attr - The client does not have permission to read the requested "
"attributes in entry %s\n",
@@ -714,7 +714,7 @@ deref_pre_entry(Slapi_PBlock *pb)
attrs[1] = NULL;
if (deref_check_access(pb, ent, NULL, attrs, &retattrs,
- (SLAPI_ACL_SEARCH | SLAPI_ACL_READ))) {
+ SLAPI_ACL_READ)) {
slapi_log_err(SLAPI_LOG_PLUGIN, DEREF_PLUGIN_SUBSYSTEM,
"deref_pre_entry - The client does not have permission to read attribute %s in entry %s\n",
spec->derefattr, slapi_entry_get_dn_const(ent));
--
2.21.0
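
To see why dropping SLAPI_ACL_SEARCH matters, here is a minimal, self-contained C sketch of the failure mode. The bitmask values and the check_access() helper are invented stand-ins for illustration, not the real slapi ACL engine; the sketch assumes an access check that succeeds when the client holds any of the requested rights:

#include <stdio.h>

/* Hypothetical simplified rights mask. In 389-ds these are SLAPI_ACL_SEARCH
 * and SLAPI_ACL_READ; the values here are illustrative only. */
#define ACL_SEARCH 0x01
#define ACL_READ   0x02

/* Toy access check: passes if the client holds ANY requested right. This is
 * why requesting (SEARCH | READ) leaks values to search-only clients. */
static int check_access(unsigned granted, unsigned requested)
{
    return (granted & requested) != 0;
}

int main(void)
{
    unsigned granted = ACL_SEARCH; /* the ACI grants search only */

    /* Old deref check: passes, so the attribute value is returned. */
    printf("search|read requested: %s\n",
           check_access(granted, ACL_SEARCH | ACL_READ) ? "allowed" : "denied");

    /* Fixed deref check: read alone is required, so the value is withheld. */
    printf("read requested:        %s\n",
           check_access(granted, ACL_READ) ? "allowed" : "denied");
    return 0;
}

Under this model, a search-only ACI is enough to reveal the value when both rights are requested together, while requesting READ alone denies it, which is the behavior the patch restores.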

SOURCES/0035-Issue-49624-modrdn-silently-fails-if-DB-deadlock-occ.patch

@@ -0,0 +1,39 @@
From 40d1c78f85a40065e6c7d1399368885d7f684f54 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 22 Aug 2019 10:26:24 -0400
Subject: [PATCH] Issue 49624 - modrdn silently fails if DB deadlock occurs
Bug Description:
If a DB Deadlock error occurs during a modrdn operation the entry
cache gets updated (corrupted), but the update is not applied to
the database.
Fix Description:
Looks like there was a copy & paste error, and the wrong attribute
was updated during the retry of the modrdn operation.
relates: https://pagure.io/389-ds-base/issue/49624
Reviewed by: lkrispenz (Thanks!)
---
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 65610d613..433ed88fb 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -251,7 +251,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
slapi_pblock_get(pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, &dn_newsuperiordn);
slapi_sdn_free(&dn_newsuperiordn);
slapi_pblock_set(pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, orig_dn_newsuperiordn);
- orig_dn_newsuperiordn = slapi_sdn_dup(orig_dn_newsuperiordn);
+ dn_newsuperiordn = slapi_sdn_dup(orig_dn_newsuperiordn);
/* must duplicate ec before returning it to cache,
* which could free the entry. */
if ((tmpentry = backentry_dup(original_entry ? original_entry : ec)) == NULL) {
--
2.21.0
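
A toy sketch of the retry pattern makes the copy-and-paste bug visible. The names loosely mirror the patch but are hypothetical stand-ins (orig_dn / work_dn for orig_dn_newsuperiordn / dn_newsuperiordn, plain strdup() for slapi_sdn_dup()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* On each deadlock retry the operation frees its working copy, restores the
 * original, then must re-duplicate into the WORKING variable. Assigning the
 * duplicate back to the original (the copy-paste bug) leaves the working
 * pointer dangling on the next iteration. */
int main(void)
{
    char *orig_dn = strdup("ou=people,dc=example,dc=com"); /* preserved value */
    char *work_dn = strdup(orig_dn);                       /* per-try copy    */

    for (int attempt = 0; attempt < 3; attempt++) {
        /* ... DB deadlock detected, roll back and retry ... */
        free(work_dn);
        /* Correct: refresh the working copy from the preserved original.
         * The bug assigned the duplicate to orig_dn here instead, so the
         * working pointer kept referencing freed memory while the entry
         * cache had already been updated. */
        work_dn = strdup(orig_dn);
    }
    printf("final working dn: %s\n", work_dn);
    free(work_dn);
    free(orig_dn);
    return 0;
}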

SOURCES/0036-Issue-50536-Audit-log-heading-written-to-log-after-e.patch

@@ -0,0 +1,81 @@
From 2fb026b0fe7e35dfabcb90b79fae3e60f9f95340 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 7 Aug 2019 16:57:17 -0400
Subject: [PATCH] Issue 50536 - Audit log heading written to log after every
update
Bug Description: Once the audit log is rotated the log "title" is incorrectly
written to the log after every single update. This happened
because when we updated the state of the log it was applied
to a local variable, and not the log info structure itself.
Fix Description: After writing the "title", update the state of the log using
a pointer to the log info structure.
relates: https://pagure.io/389-ds-base/issue/50536
Reviewed by: lkrispenz(Thanks!)
---
ldap/servers/slapd/log.c | 14 +++++++-------
ldap/servers/slapd/proto-slap.h | 2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 2456abf1e..f308a4813 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -2073,11 +2073,11 @@ slapd_log_audit(
int retval = LDAP_SUCCESS;
int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
- int state = 0;
+ int *state;
if (sourcelog == SLAPD_AUDIT_LOG) {
- state = loginfo.log_audit_state;
+ state = &loginfo.log_audit_state;
} else if (sourcelog == SLAPD_AUDITFAIL_LOG) {
- state = loginfo.log_auditfail_state;
+ state = &loginfo.log_auditfail_state;
} else {
/* How did we even get here! */
return 1;
@@ -2106,9 +2106,9 @@ int
slapd_log_audit_internal(
char *buffer,
int buf_len,
- int state)
+ int *state)
{
- if ((state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) {
+ if ((*state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) {
LOG_AUDIT_LOCK_WRITE();
if (log__needrotation(loginfo.log_audit_fdes,
SLAPD_AUDIT_LOG) == LOG_ROTATE) {
@@ -2122,9 +2122,9 @@ slapd_log_audit_internal(
loginfo.log_audit_rotationsyncclock += PR_ABS(loginfo.log_audit_rotationtime_secs);
}
}
- if (state & LOGGING_NEED_TITLE) {
+ if (*state & LOGGING_NEED_TITLE) {
log_write_title(loginfo.log_audit_fdes);
- state &= ~LOGGING_NEED_TITLE;
+ *state &= ~LOGGING_NEED_TITLE;
}
LOG_WRITE_NOW_NO_ERR(loginfo.log_audit_fdes, buffer, buf_len, 0);
LOG_AUDIT_UNLOCK_WRITE();
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index a0648ca3c..e37f702ea 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -777,7 +777,7 @@ int slapi_log_access(int level, char *fmt, ...)
;
#endif
int slapd_log_audit(char *buffer, int buf_len, int sourcelog);
-int slapd_log_audit_internal(char *buffer, int buf_len, int state);
+int slapd_log_audit_internal(char *buffer, int buf_len, int *state);
int slapd_log_auditfail(char *buffer, int buf_len);
int slapd_log_auditfail_internal(char *buffer, int buf_len);
void log_access_flush(void);
--
2.21.0
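
The root cause is a classic pass-by-value mistake: the LOGGING_NEED_TITLE flag was cleared on a copy of the state, so it stayed set in loginfo. A minimal C demonstration of the two calling conventions (toy functions, not the real slapd logging code):

#include <stdio.h>

#define LOGGING_NEED_TITLE 0x1

/* Buggy shape: state is copied, so clearing the flag is lost and the
 * title is written again on every call. */
static void write_log_entry_by_value(int state)
{
    if (state & LOGGING_NEED_TITLE) {
        printf("-- log title --\n");
        state &= ~LOGGING_NEED_TITLE; /* only clears the local copy */
    }
    printf("entry\n");
}

/* Fixed shape: state is passed by pointer, so the flag is cleared in the
 * caller's structure and the title is written once per rotation. */
static void write_log_entry_by_pointer(int *state)
{
    if (*state & LOGGING_NEED_TITLE) {
        printf("-- log title --\n");
        *state &= ~LOGGING_NEED_TITLE;
    }
    printf("entry\n");
}

int main(void)
{
    int state = LOGGING_NEED_TITLE;
    write_log_entry_by_value(state);    /* title */
    write_log_entry_by_value(state);    /* title again: the bug */

    state = LOGGING_NEED_TITLE;
    write_log_entry_by_pointer(&state); /* title */
    write_log_entry_by_pointer(&state); /* no title: the fix */
    return 0;
}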

SOURCES/0037-Issue-50636-Crash-during-sasl-bind.patch

@@ -0,0 +1,47 @@
From b4b8c8adcda0168cc18e40045d0a25eaf74ba4e1 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 17 Oct 2019 09:42:02 -0400
Subject: [PATCH] Issue 50636 - Crash during sasl bind
Bug Description:
Sasl bind registers IO layers (sasl_IoMethods) that will be
pushed (and called) by the next incoming operation.
So the next incoming operation should synchronize itself
with the sasl bind.
Fix Description:
The call to connection_call_io_layer_callbacks, which pushes the
registered methods, must hold c_mutex so that it lets
a pending sasl bind fully register the methods.
https://pagure.io/389-ds-base/issue/50636
Reviewed by: Ludwig Krispenz, Mark Reynolds
---
ldap/servers/slapd/connection.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 945602f20..3599512af 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1584,12 +1584,14 @@ connection_threadmain()
*/
pb_conn->c_anonlimits_set = 1;
}
- PR_ExitMonitor(pb_conn->c_mutex);
-
+ /* must hold c_mutex so that it synchronizes the IO layer push
+ * with a potential pending sasl bind that is registering the IO layer
+ */
if (connection_call_io_layer_callbacks(pb_conn)) {
slapi_log_err(SLAPI_LOG_ERR, "connection_threadmain",
"Could not add/remove IO layers from connection\n");
}
+ PR_ExitMonitor(pb_conn->c_mutex);
break;
default:
break;
--
2.21.0
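
A condensed pthread sketch of the fixed locking follows. The Connection struct and io_layer_pending flag are simplified stand-ins for the real connection object and the sasl_IoMethods registration; the point is only the scope of the mutex:

#include <pthread.h>
#include <stdio.h>

/* Toy connection with a pending-IO-layer flag guarded by c_mutex. */
typedef struct {
    pthread_mutex_t c_mutex;
    int io_layer_pending; /* set by the sasl bind, consumed by the next op */
} Connection;

/* The fix: perform the IO-layer push while still holding c_mutex, so a
 * concurrent sasl bind cannot be halfway through registering its layer. */
static void handle_operation(Connection *conn)
{
    pthread_mutex_lock(&conn->c_mutex);
    /* ... per-operation bookkeeping done under the lock ... */
    if (conn->io_layer_pending) {
        conn->io_layer_pending = 0;
        printf("pushed sasl IO layer\n");
    }
    pthread_mutex_unlock(&conn->c_mutex); /* pre-fix this was released too early */
}

int main(void)
{
    Connection conn = {PTHREAD_MUTEX_INITIALIZER, 1};
    handle_operation(&conn);
    return 0;
}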

SOURCES/0038-Issue-49850-ldbm_get_nonleaf_ids-slow-for-databases-.patch

@@ -0,0 +1,70 @@
From 8355b844dbc7097ddc5639a1da0932c60ca50aee Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 16 Oct 2019 20:27:30 -0400
Subject: [PATCH] Issue 49850 - ldbm_get_nonleaf_ids() slow for databases with
many non-leaf entries
Bug Description: The logs from an LDIF import indicated that gathering non-leaf IDs
for creating the ancestorid index took an enormous amount of time,
over 10hrs. The root cause is that the parentid index btree ordering
is lexical, but the IDList being built up from it is sorted numerically.
In the existing code, the IDList is maintained in constantly sorted
order by idl_insert().
Fix Description: ldbm_get_nonleaf_ids() switches to idl_append_extend() instead of idl_insert()
for building up the IDList and then sorts the result only once, using
qsort with idl_sort_cmp, after the entire list has been gathered.
The improvement on identical hardware is for the operation to take 10
seconds rather than 10 hours.
Patch Author: Thomas Lackey <telackey@bozemanpass.com> Thanks for the great contribution!!!
relates: https://pagure.io/389-ds-base/issue/49850
Reviewed by: mreynolds, tbordaz, and firstyear
---
ldap/servers/slapd/back-ldbm/ancestorid.c | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/ldap/servers/slapd/back-ldbm/ancestorid.c b/ldap/servers/slapd/back-ldbm/ancestorid.c
index 24642923d..254a3aa3b 100644
--- a/ldap/servers/slapd/back-ldbm/ancestorid.c
+++ b/ldap/servers/slapd/back-ldbm/ancestorid.c
@@ -82,7 +82,14 @@ ldbm_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job)
ret = dbc->c_get(dbc, &key, &data, DB_NEXT_NODUP);
if ((ret == 0) && (*(char *)key.data == EQ_PREFIX)) {
id = (ID)strtoul((char *)key.data + 1, NULL, 10);
- idl_insert(&nodes, id);
+ /*
+ * TEL 20180711 - switch to idl_append instead of idl_insert because there is no
+ * no need to keep the list constantly sorted, which can be very expensive with
+ * large databases (exacerbated by the fact that the parentid btree ordering is
+ * lexical, but the idl_insert ordering is numeric). It is enough to gather them
+ * all together and sort them once at the end.
+ */
+ idl_append_extend(&nodes, id);
}
key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
@@ -107,6 +114,17 @@ ldbm_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job)
if (ret != 0)
ldbm_nasty("ldbm_get_nonleaf_ids", sourcefile, 13030, ret);
+ if (ret == 0) {
+ /* now sort it */
+ import_log_notice(job, SLAPI_LOG_INFO, "ldbm_get_nonleaf_ids",
+ "Starting sort of ancestorid non-leaf IDs...");
+
+ qsort((void *)&nodes->b_ids[0], nodes->b_nids, (size_t)sizeof(ID), idl_sort_cmp);
+
+ import_log_notice(job, SLAPI_LOG_INFO, "ldbm_get_nonleaf_ids",
+ "Finished sort of ancestorid non-leaf IDs.");
+ }
+
out:
/* Close the cursor */
if (dbc != NULL) {
--
2.21.0
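
The performance argument is the standard append-then-sort-once pattern: n cheap appends plus one O(n log n) qsort, instead of n sorted inserts costing O(n) shifting each, i.e. O(n^2) overall. A self-contained C sketch under that assumption, where a plain growable array stands in for the real IDList:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int ID;

/* Numeric comparator, analogous in spirit to idl_sort_cmp. */
static int id_cmp(const void *a, const void *b)
{
    ID x = *(const ID *)a, y = *(const ID *)b;
    return (x > y) - (x < y);
}

int main(void)
{
    size_t n = 0, cap = 8;
    ID *ids = malloc(cap * sizeof(ID));

    /* IDs arrive in lexical key order from the btree, e.g. 1, 10, 100, 2, 20. */
    ID incoming[] = {1, 10, 100, 2, 20};
    for (size_t i = 0; i < sizeof(incoming) / sizeof(incoming[0]); i++) {
        if (n == cap) {
            cap *= 2;
            ids = realloc(ids, cap * sizeof(ID));
        }
        ids[n++] = incoming[i]; /* O(1) append, no shifting to keep order */
    }

    qsort(ids, n, sizeof(ID), id_cmp); /* one O(n log n) pass at the end */

    for (size_t i = 0; i < n; i++)
        printf("%u ", ids[i]);
    printf("\n");
    free(ids);
    return 0;
}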

SOURCES/0039-Ticket-49850-cont-fix-crash-in-ldbm_non_leaf.patch

@@ -0,0 +1,29 @@
From c9e602ebdb0fb8b8ee526d272e8a6fdf23a26a4b Mon Sep 17 00:00:00 2001
From: Ludwig Krispenz <lkrispen@redhat.com>
Date: Thu, 24 Oct 2019 14:26:20 +0200
Subject: [PATCH] Ticket 49850 cont -fix crash in ldbm_non_leaf
Bug: if the LDIF to be imported contains only one entry there are no non-leaf
nodes, so the IDList stays NULL and the call to qsort crashes.
Fix: check that nodes is not NULL.
---
ldap/servers/slapd/back-ldbm/ancestorid.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/servers/slapd/back-ldbm/ancestorid.c b/ldap/servers/slapd/back-ldbm/ancestorid.c
index 254a3aa3b..f26ac1364 100644
--- a/ldap/servers/slapd/back-ldbm/ancestorid.c
+++ b/ldap/servers/slapd/back-ldbm/ancestorid.c
@@ -114,7 +114,7 @@ ldbm_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job)
if (ret != 0)
ldbm_nasty("ldbm_get_nonleaf_ids", sourcefile, 13030, ret);
- if (ret == 0) {
+ if (ret == 0 && nodes) {
/* now sort it */
import_log_notice(job, SLAPI_LOG_INFO, "ldbm_get_nonleaf_ids",
"Starting sort of ancestorid non-leaf IDs...");
--
2.21.0
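
The crash and its guard are easy to reproduce in miniature. In this hypothetical sketch (not the real IDList type), a single-entry import means the list is never allocated, and qsort must not be reached with a NULL base:

#include <stdio.h>
#include <stdlib.h>

/* Invented list type standing in for the backend IDList. */
struct idlist {
    size_t n;
    unsigned int ids[64];
};

static int cmp(const void *a, const void *b)
{
    unsigned int x = *(const unsigned int *)a, y = *(const unsigned int *)b;
    return (x > y) - (x < y);
}

static void sort_nonleaf(struct idlist *nodes)
{
    /* The fix: bail out when no non-leaf IDs were gathered, instead of
     * handing qsort a NULL base pointer. */
    if (nodes == NULL || nodes->n == 0) {
        printf("nothing to sort\n");
        return;
    }
    qsort(nodes->ids, nodes->n, sizeof(nodes->ids[0]), cmp);
}

int main(void)
{
    sort_nonleaf(NULL); /* single-entry import: the list was never built */
    return 0;
}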

SOURCES/0040-Issue-50538-cleanAllRUV-task-limit-is-not-enforced-f.patch

@@ -0,0 +1,682 @@
From 94703d5171853b69bb8ef9574f32bc9f0c051632 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 7 Aug 2019 20:36:53 -0400
Subject: [PATCH] Issue 50538 - cleanAllRUV task limit is not enforced for
replicated tasks
Bug Description:
There is a hard limit of 64 concurrent cleanAllRUV tasks, but this limit is
only enforced when creating "new" tasks. It was not enforced when a task was
received via an extended operation. There were also race conditions in the
existing logic that allowed the array of cleaned rids to get corrupted. This
allowed for a very large number of task threads to be created.
Fix Description:
Maintain a new counter to keep track of the number of clean and abort threads
to make sure it never overruns the rid array buffers.
relates: https://pagure.io/389-ds-base/issue/50538
Reviewed by: lkrispenz(Thanks!)
---
.../suites/replication/cleanallruv_test.py | 47 +++-
ldap/servers/plugins/replication/repl5.h | 7 +-
.../replication/repl5_replica_config.c | 247 ++++++++++--------
ldap/servers/plugins/replication/repl_extop.c | 19 +-
4 files changed, 202 insertions(+), 118 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index 620a53e1a..43801dd52 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2019 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,7 +7,6 @@
# --- END COPYRIGHT BLOCK ---
#
import threading
-
import pytest
from lib389.tasks import *
from lib389.utils import *
@@ -859,6 +858,50 @@ def test_multiple_tasks_with_force(topology_m4):
restore_master4(topology_m4)
+def test_max_tasks(topology_m4):
+ """Test we can not create more than 64 cleaning tasks
+
+ :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f
+ :setup: Replication setup with four masters
+ :steps:
+ 1. Stop masters 3 & 4
+ 2. Create over 64 tasks between m1 and m2
+ 3. Check logs to see if (>65) tasks were rejected
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ # Stop masters 3 & 4
+ m1 = topology_m4.ms["master1"]
+ m2 = topology_m4.ms["master2"]
+ m3 = topology_m4.ms["master3"]
+ m4 = topology_m4.ms["master4"]
+ m3.stop()
+ m4.stop()
+
+ # Add over 64 tasks between master1 & 2 to try to exceed the 64 task limit
+ for i in range(1, 64):
+ cruv_task = CleanAllRUVTask(m1)
+ cruv_task.create(properties={
+ 'replica-id': str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'no', # This forces these tasks to stick around
+ })
+ cruv_task = CleanAllRUVTask(m2)
+ cruv_task.create(properties={
+ 'replica-id': "10" + str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'yes', # This allows the tasks to propagate
+ })
+
+ # Check the errors log for our error message in master 1
+ assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks')
+>>>>>>> ab24aa4cb... Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index e08fec752..d414926c2 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -80,6 +80,8 @@
#define CLEANRUV_FINISHED "finished"
#define CLEANRUV_CLEANING "cleaning"
#define CLEANRUV_NO_MAXCSN "no maxcsn"
+#define CLEANALLRUV_ID "CleanAllRUV Task"
+#define ABORT_CLEANALLRUV_ID "Abort CleanAllRUV Task"
/* DS 5.0 replication protocol error codes */
#define NSDS50_REPL_REPLICA_READY 0x00 /* Replica ready, go ahead */
@@ -784,6 +786,7 @@ void multimaster_mtnode_construct_replicas(void);
void multimaster_be_state_change(void *handle, char *be_name, int old_be_state, int new_be_state);
#define CLEANRIDSIZ 64 /* maximum number for concurrent CLEANALLRUV tasks */
+#define CLEANRID_BUFSIZ 128
typedef struct _cleanruv_data
{
@@ -815,6 +818,8 @@ int get_replica_type(Replica *r);
int replica_execute_cleanruv_task_ext(Object *r, ReplicaId rid);
void add_cleaned_rid(cleanruv_data *data, char *maxcsn);
int is_cleaned_rid(ReplicaId rid);
+int32_t check_and_set_cleanruv_task_count(ReplicaId rid);
+int32_t check_and_set_abort_cleanruv_task_count(void);
int replica_cleanall_ruv_abort(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg);
void replica_cleanallruv_thread_ext(void *arg);
void stop_ruv_cleaning(void);
@@ -833,8 +838,6 @@ void set_cleaned_rid(ReplicaId rid);
void cleanruv_log(Slapi_Task *task, int rid, char *task_type, int sev_level, char *fmt, ...);
char *replica_cleanallruv_get_local_maxcsn(ReplicaId rid, char *base_dn);
-
-
/* replutil.c */
LDAPControl *create_managedsait_control(void);
LDAPControl *create_backend_control(Slapi_DN *sdn);
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index b4aff9eb4..0ba2cd976 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -30,17 +30,18 @@
#define CLEANALLRUV "CLEANALLRUV"
#define CLEANALLRUVLEN 11
#define REPLICA_RDN "cn=replica"
-#define CLEANALLRUV_ID "CleanAllRUV Task"
-#define ABORT_CLEANALLRUV_ID "Abort CleanAllRUV Task"
int slapi_log_urp = SLAPI_LOG_REPL;
-static ReplicaId cleaned_rids[CLEANRIDSIZ + 1] = {0};
-static ReplicaId pre_cleaned_rids[CLEANRIDSIZ + 1] = {0};
-static ReplicaId aborted_rids[CLEANRIDSIZ + 1] = {0};
-static Slapi_RWLock *rid_lock = NULL;
-static Slapi_RWLock *abort_rid_lock = NULL;
+static ReplicaId cleaned_rids[CLEANRID_BUFSIZ] = {0};
+static ReplicaId pre_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+static ReplicaId aborted_rids[CLEANRID_BUFSIZ] = {0};
+static PRLock *rid_lock = NULL;
+static PRLock *abort_rid_lock = NULL;
static PRLock *notify_lock = NULL;
static PRCondVar *notify_cvar = NULL;
+static PRLock *task_count_lock = NULL;
+static int32_t clean_task_count = 0;
+static int32_t abort_task_count = 0;
/* Forward Declartions */
static int replica_config_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg);
@@ -67,8 +68,6 @@ static int replica_cleanallruv_send_abort_extop(Repl_Agmt *ra, Slapi_Task *task,
static int replica_cleanallruv_check_maxcsn(Repl_Agmt *agmt, char *basedn, char *rid_text, char *maxcsn, Slapi_Task *task);
static int replica_cleanallruv_replica_alive(Repl_Agmt *agmt);
static int replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *ra, char *rid_text, Slapi_Task *task, char *force);
-static int get_cleanruv_task_count(void);
-static int get_abort_cleanruv_task_count(void);
static int replica_cleanup_task(Object *r, const char *task_name, char *returntext, int apply_mods);
static int replica_task_done(Replica *replica);
static void delete_cleaned_rid_config(cleanruv_data *data);
@@ -114,20 +113,27 @@ replica_config_init()
PR_GetError());
return -1;
}
- rid_lock = slapi_new_rwlock();
+ rid_lock = PR_NewLock();
if (rid_lock == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create rid_lock; NSPR error - %d\n",
PR_GetError());
return -1;
}
- abort_rid_lock = slapi_new_rwlock();
+ abort_rid_lock = PR_NewLock();
if (abort_rid_lock == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create abort_rid_lock; NSPR error - %d\n",
PR_GetError());
return -1;
}
+ task_count_lock = PR_NewLock();
+ if (task_count_lock == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
+ "Failed to create task_count_lock; NSPR error - %d\n",
+ PR_GetError());
+ return -1;
+ }
if ((notify_lock = PR_NewLock()) == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create notify lock; NSPR error - %d\n",
@@ -1484,12 +1490,6 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Initiating CleanAllRUV Task...");
- if (get_cleanruv_task_count() >= CLEANRIDSIZ) {
- /* we are already running the maximum number of tasks */
- cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
- "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
- return LDAP_UNWILLING_TO_PERFORM;
- }
/*
* Grab the replica
*/
@@ -1541,6 +1541,13 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
goto fail;
}
+ if (check_and_set_cleanruv_task_count(rid) != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto fail;
+ }
+
/*
* Launch the cleanallruv thread. Once all the replicas are cleaned it will release the rid
*/
@@ -1548,6 +1555,9 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
if (data == NULL) {
cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Failed to allocate cleanruv_data. Aborting task.");
rc = -1;
+ PR_Lock(task_count_lock);
+ clean_task_count--;
+ PR_Unlock(task_count_lock);
goto fail;
}
data->repl_obj = r;
@@ -1630,13 +1640,13 @@ replica_cleanallruv_thread(void *arg)
int aborted = 0;
int rc = 0;
- if (!data || slapi_is_shutting_down()) {
- return; /* no data */
- }
-
/* Increase active thread count to prevent a race condition at server shutdown */
g_incr_active_threadcnt();
+ if (!data || slapi_is_shutting_down()) {
+ goto done;
+ }
+
if (data->task) {
slapi_task_inc_refcount(data->task);
slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name,
@@ -1683,16 +1693,13 @@ replica_cleanallruv_thread(void *arg)
slapi_task_begin(data->task, 1);
}
/*
- * Presetting the rid prevents duplicate thread creation, but allows the db and changelog to still
- * process updates from the rid.
- * set_cleaned_rid() blocks updates, so we don't want to do that... yet unless we are in force mode.
- * If we are forcing a clean independent of state of other servers for this RID we can set_cleaned_rid()
+ * We have already preset this rid, but if we are forcing a clean independent of state
+ * of other servers for this RID we can set_cleaned_rid()
*/
if (data->force) {
set_cleaned_rid(data->rid);
- } else {
- preset_cleaned_rid(data->rid);
}
+
rid_text = slapi_ch_smprintf("%d", data->rid);
csn_as_string(data->maxcsn, PR_FALSE, csnstr);
/*
@@ -1862,6 +1869,9 @@ done:
/*
* If the replicas are cleaned, release the rid
*/
+ if (slapi_is_shutting_down()) {
+ stop_ruv_cleaning();
+ }
if (!aborted && !slapi_is_shutting_down()) {
/*
* Success - the rid has been cleaned!
@@ -1880,10 +1890,9 @@ done:
} else {
cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Propagated task does not delete Keep alive entry (%d).", data->rid);
}
-
clean_agmts(data);
remove_cleaned_rid(data->rid);
- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d).", data->rid);
+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d)", data->rid);
} else {
/*
* Shutdown or abort
@@ -1916,6 +1925,10 @@ done:
slapi_ch_free_string(&data->force);
slapi_ch_free_string(&rid_text);
slapi_ch_free((void **)&data);
+ /* decrement task count */
+ PR_Lock(task_count_lock);
+ clean_task_count--;
+ PR_Unlock(task_count_lock);
g_decr_active_threadcnt();
}
@@ -2415,16 +2428,14 @@ replica_send_cleanruv_task(Repl_Agmt *agmt, cleanruv_data *clean_data)
int
is_cleaned_rid(ReplicaId rid)
{
- int i;
-
- slapi_rwlock_rdlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && cleaned_rids[i] != 0; i++) {
+ PR_Lock(rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
if (rid == cleaned_rids[i]) {
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 0;
}
@@ -2432,16 +2443,14 @@ is_cleaned_rid(ReplicaId rid)
int
is_pre_cleaned_rid(ReplicaId rid)
{
- int i;
-
- slapi_rwlock_rdlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && pre_cleaned_rids[i] != 0; i++) {
+ PR_Lock(rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
if (rid == pre_cleaned_rids[i]) {
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 0;
}
@@ -2454,14 +2463,14 @@ is_task_aborted(ReplicaId rid)
if (rid == 0) {
return 0;
}
- slapi_rwlock_rdlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ && aborted_rids[i] != 0; i++) {
+ PR_Lock(abort_rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && aborted_rids[i] != 0; i++) {
if (rid == aborted_rids[i]) {
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
return 0;
}
@@ -2470,15 +2479,14 @@ preset_cleaned_rid(ReplicaId rid)
{
int i;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && pre_cleaned_rids[i] != rid; i++) {
if (pre_cleaned_rids[i] == 0) {
pre_cleaned_rids[i] = rid;
- pre_cleaned_rids[i + 1] = 0;
break;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2491,14 +2499,13 @@ set_cleaned_rid(ReplicaId rid)
{
int i;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && cleaned_rids[i] != rid; i++) {
if (cleaned_rids[i] == 0) {
cleaned_rids[i] = rid;
- cleaned_rids[i + 1] = 0;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2570,15 +2577,14 @@ add_aborted_rid(ReplicaId rid, Replica *r, char *repl_root)
int rc;
int i;
- slapi_rwlock_wrlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(abort_rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ; i++) {
if (aborted_rids[i] == 0) {
aborted_rids[i] = rid;
- aborted_rids[i + 1] = 0;
break;
}
}
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
/*
* Write the rid to the config entry
*/
@@ -2621,21 +2627,24 @@ delete_aborted_rid(Replica *r, ReplicaId rid, char *repl_root, int skip)
char *data;
char *dn;
int rc;
- int i;
if (r == NULL)
return;
if (skip) {
/* skip the deleting of the config, and just remove the in memory rid */
- slapi_rwlock_wrlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ && aborted_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- aborted_rids[i] = aborted_rids[i + 1];
- }
- slapi_rwlock_unlock(abort_rid_lock);
+ ReplicaId new_abort_rids[CLEANRID_BUFSIZ] = {0};
+ int32_t idx = 0;
+
+ PR_Lock(abort_rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (aborted_rids[i] != rid) {
+ new_abort_rids[idx] = aborted_rids[i];
+ idx++;
+ }
+ }
+ memcpy(aborted_rids, new_abort_rids, sizeof(new_abort_rids));
+ PR_Unlock(abort_rid_lock);
} else {
/* only remove the config, leave the in-memory rid */
dn = replica_get_dn(r);
@@ -2793,27 +2802,31 @@ bail:
void
remove_cleaned_rid(ReplicaId rid)
{
- int i;
- /*
- * Remove this rid, and optimize the array
- */
- slapi_rwlock_wrlock(rid_lock);
+ ReplicaId new_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+ ReplicaId new_pre_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+ size_t idx = 0;
+
+ PR_Lock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && cleaned_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- cleaned_rids[i] = cleaned_rids[i + 1];
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (cleaned_rids[i] != rid) {
+ new_cleaned_rids[idx] = cleaned_rids[i];
+ idx++;
+ }
}
+ memcpy(cleaned_rids, new_cleaned_rids, sizeof(new_cleaned_rids));
+
/* now do the preset cleaned rids */
- for (i = 0; i < CLEANRIDSIZ && pre_cleaned_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- pre_cleaned_rids[i] = pre_cleaned_rids[i + 1];
+ idx = 0;
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (pre_cleaned_rids[i] != rid) {
+ new_pre_cleaned_rids[idx] = pre_cleaned_rids[i];
+ idx++;
+ }
}
+ memcpy(pre_cleaned_rids, new_pre_cleaned_rids, sizeof(new_pre_cleaned_rids));
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2841,16 +2854,6 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
char *ridstr = NULL;
int rc = SLAPI_DSE_CALLBACK_OK;
- if (get_abort_cleanruv_task_count() >= CLEANRIDSIZ) {
- /* we are already running the maximum number of tasks */
- PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
- "Exceeded maximum number of active ABORT CLEANALLRUV tasks(%d)",
- CLEANRIDSIZ);
- cleanruv_log(task, -1, ABORT_CLEANALLRUV_ID, SLAPI_LOG_ERR, "%s", returntext);
- *returncode = LDAP_OPERATIONS_ERROR;
- return SLAPI_DSE_CALLBACK_ERROR;
- }
-
/* allocate new task now */
task = slapi_new_task(slapi_entry_get_ndn(e));
@@ -2935,6 +2938,16 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
*/
certify_all = "no";
}
+
+ if (check_and_set_abort_cleanruv_task_count() != LDAP_SUCCESS) {
+ /* we are already running the maximum number of tasks */
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Exceeded maximum number of active ABORT CLEANALLRUV tasks(%d)",
+ CLEANRIDSIZ);
+ cleanruv_log(task, -1, ABORT_CLEANALLRUV_ID, SLAPI_LOG_ERR, "%s", returntext);
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
+ goto out;
+ }
/*
* Create payload
*/
@@ -3143,6 +3156,9 @@ done:
slapi_ch_free_string(&data->certify);
slapi_sdn_free(&data->sdn);
slapi_ch_free((void **)&data);
+ PR_Lock(task_count_lock);
+ abort_task_count--;
+ PR_Unlock(task_count_lock);
g_decr_active_threadcnt();
}
@@ -3494,36 +3510,43 @@ replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *agmt, char *rid_text,
return rc;
}
-static int
-get_cleanruv_task_count(void)
+/*
+ * Before starting a cleanAllRUV task make sure there are not
+ * too many task threads already running. If everything is okay
+ * also pre-set the RID now so rebounding extended ops do not
+ * try to clean it over and over.
+ */
+int32_t
+check_and_set_cleanruv_task_count(ReplicaId rid)
{
- int i, count = 0;
+ int32_t rc = 0;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
- if (pre_cleaned_rids[i] != 0) {
- count++;
- }
+ PR_Lock(task_count_lock);
+ if (clean_task_count >= CLEANRIDSIZ) {
+ rc = -1;
+ } else {
+ clean_task_count++;
+ preset_cleaned_rid(rid);
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(task_count_lock);
- return count;
+ return rc;
}
-static int
-get_abort_cleanruv_task_count(void)
+int32_t
+check_and_set_abort_cleanruv_task_count(void)
{
- int i, count = 0;
+ int32_t rc = 0;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
- if (aborted_rids[i] != 0) {
- count++;
+ PR_Lock(task_count_lock);
+ if (abort_task_count > CLEANRIDSIZ) {
+ rc = -1;
+ } else {
+ abort_task_count++;
}
- }
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(task_count_lock);
- return count;
+ return rc;
}
/*
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 68e2544b4..0c2abb6d5 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1393,6 +1393,12 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
rc = LDAP_OPERATIONS_ERROR;
goto out;
}
+ if (check_and_set_abort_cleanruv_task_count() != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active abort CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto out;
+ }
/*
* Prepare the abort data
*/
@@ -1499,6 +1505,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
if (force == NULL) {
force = "no";
}
+
maxcsn = csn_new();
csn_init_by_string(maxcsn, csnstr);
/*
@@ -1535,13 +1542,21 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
goto free_and_return;
}
+ if (check_and_set_cleanruv_task_count((ReplicaId)rid) != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto free_and_return;
+ }
+
if (replica_get_type(r) != REPLICA_TYPE_READONLY) {
/*
* Launch the cleanruv monitoring thread. Once all the replicas are cleaned it will release the rid
*
* This will also release mtnode_ext->replica
*/
- slapi_log_err(SLAPI_LOG_INFO, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Launching cleanAllRUV thread...\n");
+
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread...\n");
data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data));
if (data == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate "
@@ -1635,7 +1650,7 @@ free_and_return:
ber_printf(resp_bere, "{s}", CLEANRUV_ACCEPTED);
ber_flatten(resp_bere, &resp_bval);
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
- slapi_send_ldap_result(pb, LDAP_SUCCESS, NULL, NULL, 0, NULL);
+ slapi_send_ldap_result(pb, rc, NULL, NULL, 0, NULL);
/* resp_bere */
if (NULL != resp_bere) {
ber_free(resp_bere, 1);
--
2.21.0
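
The essence of the fix is a check-and-set counter in which the limit test and the increment share one critical section, so two racing extended operations can no longer both observe 63 running tasks and both start. A self-contained pthread sketch of that pattern (MAX_TASKS, try_start_task(), and finish_task() are illustrative names, not the 389-ds API, which uses NSPR locks):

#include <pthread.h>
#include <stdio.h>

#define MAX_TASKS 64 /* mirrors CLEANRIDSIZ */

static pthread_mutex_t task_count_lock = PTHREAD_MUTEX_INITIALIZER;
static int task_count = 0;

/* Check-and-set under one lock: the test and the increment are atomic with
 * respect to each other, unlike the old separate count-then-start calls. */
static int try_start_task(void)
{
    int rc = 0;
    pthread_mutex_lock(&task_count_lock);
    if (task_count >= MAX_TASKS)
        rc = -1; /* reject: limit reached */
    else
        task_count++;
    pthread_mutex_unlock(&task_count_lock);
    return rc;
}

/* Every task thread decrements the counter on exit, as the patch does in
 * the done: paths of the clean and abort threads. */
static void finish_task(void)
{
    pthread_mutex_lock(&task_count_lock);
    task_count--;
    pthread_mutex_unlock(&task_count_lock);
}

int main(void)
{
    for (int i = 0; i < 70; i++) {
        if (try_start_task() != 0)
            printf("task %d rejected: limit reached\n", i);
    }
    finish_task(); /* one slot freed; the next attempt would succeed */
    return 0;
}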

SOURCES/0041-Fix-cherry-pick-error-for-cleanAllRUV-issue.patch

@@ -0,0 +1,24 @@
From cbbadc2d339eef9e3220cbaa8578d17b95b66265 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 1 Nov 2019 09:52:18 -0400
Subject: [PATCH 2/2] Fix cherry-pick error for cleanAllRUV issue
---
dirsrvtests/tests/suites/replication/cleanallruv_test.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index 43801dd52..caf214b19 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -899,7 +899,6 @@ def test_max_tasks(topology_m4):
# Check the errors log for our error message in master 1
assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks')
->>>>>>> ab24aa4cb... Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
if __name__ == '__main__':
--
2.21.0

SPECS/389-ds-base.spec

@@ -39,7 +39,7 @@
Summary: 389 Directory Server (%{variant})
Name: 389-ds-base
Version: 1.3.9.1
Release: %{?relprefix}10%{?prerel}%{?dist}
Release: %{?relprefix}12%{?prerel}%{?dist}
License: GPLv3+
URL: https://www.port389.org/
Group: System Environment/Daemons
@@ -179,6 +179,14 @@
Patch31: 0031-Issue-50123-with_tmpfiles_d-is-associated-with-syste.patch
Patch32: 0032-Issue-50426-nsSSL3Ciphers-is-limited-to-1024-charact.patch
Patch33: 0033-Ticket-50329-2nd-Possible-Security-Issue-DOS-due-to-.patch
Patch34: 0034-CVE-deref-plugin-displays-restricted-attributes.patch
Patch35: 0035-Issue-49624-modrdn-silently-fails-if-DB-deadlock-occ.patch
Patch36: 0036-Issue-50536-Audit-log-heading-written-to-log-after-e.patch
Patch37: 0037-Issue-50636-Crash-during-sasl-bind.patch
Patch38: 0038-Issue-49850-ldbm_get_nonleaf_ids-slow-for-databases-.patch
Patch39: 0039-Ticket-49850-cont-fix-crash-in-ldbm_non_leaf.patch
Patch40: 0040-Issue-50538-cleanAllRUV-task-limit-is-not-enforced-f.patch
Patch41: 0041-Fix-cherry-pick-error-for-cleanAllRUV-issue.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@@ -531,6 +539,18 @@
%{_sysconfdir}/%{pkgname}/dirsrvtests
%changelog
* Fri Nov 1 2019 Mark Reynolds <mreynolds@redhat.com> - 1.3.9.1-12
- Bump version to 1.3.9.1-12
- Resolves: Bug 1767622 - CleanAllRUV task limit not enforced
* Mon Oct 28 2019 Mark Reynolds <mreynolds@redhat.com> - 1.3.9.1-11
- Bump version to 1.3.9.1-11
- Resolves: Bug 1748198 - EMBARGOED CVE-2019-14824 389-ds-base: Read permission check bypass via the deref plugin
- Resolves: Bug 1754831 - After audit log file is rotated, DS version string is logged after each update
- Resolves: Bug 1763622 - Extremely slow LDIF import with ldif2db
- Resolves: Bug 1763627 - ns-slapd crash on concurrent SASL BINDs, connection_call_io_layer_callbacks must hold c_mutex
- Resolves: Bug 1749289 - DB Deadlock on modrdn appears to corrupt database and entry cache
* Thu Jun 13 2019 Mark Reynolds <mreynolds@redhat.com> - 1.3.9.1-10
- Bump version to 1.3.9.1-10
- Resolves: Bug 1668457 - CVE-2019-3883 389-ds-base: DoS via hanging secured connections