import 389-ds-base-1.3.10.2-14.el7_9

CentOS Sources 2021-11-23 09:37:21 -05:00
parent 22b3c56cda
commit fcc5433313
3 changed files with 751 additions and 2 deletions


@@ -0,0 +1,415 @@
From 4503d03aa3aa5b5f35f3db4cf1de838273e051a7 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 23 Sep 2021 10:48:50 +0200
Subject: [PATCH 1/2] Issue 4925 - Performance ACI: targetfilter evaluation
result can be reused (#4926)
Bug description:
An ACI may contain a targetfilter. For a given entry returned by a
SRCH request, the same targetfilter is evaluated for each of the
returned attributes.
Once the filter has been evaluated, it is useless to reevaluate
it for the next attribute.
Fix description:
The fix implements a very simple cache (linked list) that keeps
the results of the previously evaluated 'targetfilter'.
This cache is per-entry. For an operation, an aclpb is allocated
and used to evaluate ACIs against each successive entry.
Each time a candidate entry is added to the aclpb
(acl_access_allowed), the cache (aclpb_curr_entry_targetfilters)
is freed. Then each 'targetfilter' is looked up in the cache. If
this is its first evaluation, the result is stored in the cache
using the original targetfilter as the key.
The key used to look up/store a cache entry is the string
representation of the targetfilter. The buffer holding that string
contains a redzone to detect that the filter exceeds the maximum
size (2K). If it does, the key is invalid and the lookup/store is
a no-op.
relates: #4925
Reviewed by: Mark Reynolds, William Brown (Thanks)
Platforms tested: F34
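
As an illustration of the two mechanisms described above, here is a minimal,
self-contained C sketch (the names tf_cache_entry, FILTER_BUF_SIZE and REDZONE
are hypothetical stand-ins, not the 389-ds API): a per-entry linked list keyed
by the filter's string form, plus a redzone sentinel at the end of a fixed-size
buffer that detects when the string was truncated and therefore cannot be used
as a cache key.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

#define FILTER_BUF_SIZE 2048
#define REDZONE "the redzone"

struct tf_cache_entry {
    char *filter;                     /* string form of the target filter (the key) */
    int matching_result;              /* 0 = did not match, 1 = matched */
    struct tf_cache_entry *next;
};

/* Return the cached evaluation for this filter string, or NULL if not seen yet. */
static struct tf_cache_entry *
tf_cache_lookup(struct tf_cache_entry *head, const char *filter)
{
    for (struct tf_cache_entry *e = head; e; e = e->next) {
        if (strcmp(e->filter, filter) == 0) {
            return e;
        }
    }
    return NULL;
}

/* Prepend a new evaluation result; the caller owns the list and frees it per entry. */
static struct tf_cache_entry *
tf_cache_add(struct tf_cache_entry *head, const char *filter, int result)
{
    struct tf_cache_entry *e = calloc(1, sizeof(*e));
    e->filter = strdup(filter);
    e->matching_result = result;
    e->next = head;
    return e;
}

int
main(void)
{
    /* Put a sentinel ("redzone") at the very end of the buffer; if formatting the
     * filter overwrites it, the string was truncated and must not be used as a key. */
    char buf[FILTER_BUF_SIZE] = {0};
    size_t redzone_idx = sizeof(buf) - 1 - strlen(REDZONE);
    strcpy(&buf[redzone_idx], REDZONE);

    snprintf(buf, sizeof(buf),
             "(&(memberOf=cn=admins,dc=example,dc=com)(objectClass=person))");
    bool valid_key = (strcmp(&buf[redzone_idx], REDZONE) == 0);

    struct tf_cache_entry *cache = NULL;  /* per-entry cache, reset for every candidate entry */
    if (valid_key && tf_cache_lookup(cache, buf) == NULL) {
        int matched = 1;                  /* stand-in for the real filter evaluation */
        cache = tf_cache_add(cache, buf, matched);
    }

    struct tf_cache_entry *hit = valid_key ? tf_cache_lookup(cache, buf) : NULL;
    printf("cached result: %s\n",
           hit ? (hit->matching_result ? "match" : "no match") : "not cached");

    /* Free the list, as the server would when moving to the next candidate entry. */
    while (cache) {
        struct tf_cache_entry *next = cache->next;
        free(cache->filter);
        free(cache);
        cache = next;
    }
    return 0;
}
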
---
ldap/servers/plugins/acl/acl.c | 138 +++++++++++++++++++++++++++--
ldap/servers/plugins/acl/acl.h | 14 +++
ldap/servers/plugins/acl/acl_ext.c | 12 +++
ldap/servers/slapd/libglobs.c | 29 ++++++
ldap/servers/slapd/proto-slap.h | 2 +
ldap/servers/slapd/slap.h | 2 +
6 files changed, 191 insertions(+), 6 deletions(-)
diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
index ecd23aa88..646a369db 100644
--- a/ldap/servers/plugins/acl/acl.c
+++ b/ldap/servers/plugins/acl/acl.c
@@ -66,6 +66,9 @@ static void print_access_control_summary(char *source,
const char *edn,
aclResultReason_t *acl_reason);
static int check_rdn_access(Slapi_PBlock *pb, Slapi_Entry *e, const char *newrdn, int access);
+static struct targetfilter_cached_result *targetfilter_cache_lookup(struct acl_pblock *aclpb, char *filter, PRBool filter_valid);
+static void targetfilter_cache_add(struct acl_pblock *aclpb, char *filter, int result, PRBool filter_valid);
+
/*
@@ -175,6 +178,70 @@ check_rdn_access(Slapi_PBlock *pb, Slapi_Entry *e, const char *dn, int access)
return (retCode);
}
+/* Retrieve this filter from the targetfilter cache (list)
+ * in case it was already evaluated
+ *
+ * filter: key to retrieve the evaluation in the cache
+ * filter_valid: PR_FALSE means that the filter key is truncated, PR_TRUE otherwise
+ */
+static struct targetfilter_cached_result *
+targetfilter_cache_lookup(struct acl_pblock *aclpb, char *filter, PRBool filter_valid)
+{
+ struct targetfilter_cached_result *results;
+ if (! aclpb->targetfilter_cache_enabled) {
+ /* targetfilter cache is disabled */
+ return NULL;
+ }
+ if (filter == NULL) {
+ return NULL;
+ }
+ for(results = aclpb->aclpb_curr_entry_targetfilters; results; results = results->next) {
+ if (strcmp(results->filter, filter) == 0) {
+ return results;
+ }
+ }
+
+ return NULL;
+}
+
+/* Free all evaluations cached for this current entry */
+void
+targetfilter_cache_free(struct acl_pblock *aclpb)
+{
+ struct targetfilter_cached_result *results, *next;
+ if (aclpb == NULL) {
+ return;
+ }
+ for(results = aclpb->aclpb_curr_entry_targetfilters; results;) {
+ next = results->next;
+ slapi_ch_free_string(&results->filter);
+ slapi_ch_free((void **) &results);
+ results = next;
+ }
+ aclpb->aclpb_curr_entry_targetfilters = NULL;
+}
+
+/* add a new targetfilter evaluation into the cache (per entry)
+ * ATM just use a linked list of evaluations
+ *
+ * filter: key to retrieve the evaluation in the cache
+ * result: result of the evaluation
+ * filter_valid: PR_FALSE means that the filter key is truncated, PR_TRUE otherwise
+ */
+static void
+targetfilter_cache_add(struct acl_pblock *aclpb, char *filter, int result, PRBool filter_valid)
+{
+ struct targetfilter_cached_result *results;
+ if (! filter_valid || ! aclpb->targetfilter_cache_enabled) {
+ /* targetfilter cache is disabled or filter is truncated */
+ return;
+ }
+ results = (struct targetfilter_cached_result *) slapi_ch_calloc(1, (sizeof(struct targetfilter_cached_result)));
+ results->filter = slapi_ch_strdup(filter);
+ results->next = aclpb->aclpb_curr_entry_targetfilters;
+ results->matching_result = result;
+ aclpb->aclpb_curr_entry_targetfilters = results;
+}
/***************************************************************************
*
* acl_access_allowed
@@ -495,6 +562,7 @@ acl_access_allowed(
/* Keep the ptr to the current entry */
aclpb->aclpb_curr_entry = (Slapi_Entry *)e;
+ targetfilter_cache_free(aclpb);
/* Get the attr info */
deallocate_attrEval = acl__get_attrEval(aclpb, attr);
@@ -1922,7 +1990,7 @@ acl_modified(Slapi_PBlock *pb, int optype, Slapi_DN *e_sdn, void *change)
* None.
*
**************************************************************************/
-static int
+int
acl__scan_for_acis(Acl_PBlock *aclpb, int *err)
{
aci_t *aci;
@@ -2403,10 +2471,68 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
ACL_EVAL_TARGET_FILTER);
slapi_ch_free((void **)&lasinfo);
} else {
- if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
- aci->targetFilter,
- 0 /*don't do access check*/) != 0) {
- filter_matched = ACL_FALSE;
+ Slapi_DN *sdn;
+ char* attr_evaluated = "None";
+ char logbuf[2048] = {0};
+ char *redzone = "the redzone";
+ int32_t redzone_idx;
+ char *filterstr; /* key to retrieve/add targetfilter value in the cache */
+ PRBool valid_filter;
+ struct targetfilter_cached_result *previous_filter_test;
+
+ /* only useful for debugging purposes */
+ if (aclpb->aclpb_curr_attrEval && aclpb->aclpb_curr_attrEval->attrEval_name) {
+ attr_evaluated = aclpb->aclpb_curr_attrEval->attrEval_name;
+ }
+ sdn = slapi_entry_get_sdn(aclpb->aclpb_curr_entry);
+
+ /* The key for the cache is the string representation of the original filter.
+ * If the string can not fit into the provided buffer (it overwrites the redzone)
+ * then the filter is considered invalid (for the cache) and it will always be evaluated
+ */
+ redzone_idx = sizeof(logbuf) - 1 - strlen(redzone);
+ strcpy(&logbuf[redzone_idx], redzone);
+ filterstr = slapi_filter_to_string(aci->targetFilter, logbuf, sizeof(logbuf));
+
+ /* if the redzone was overwritten that means filterstr is truncated and not valid */
+ valid_filter = (strcmp(&logbuf[redzone_idx], redzone) == 0);
+ if (!valid_filter) {
+ strcpy(&logbuf[50], "...");
+ slapi_log_err(SLAPI_LOG_ACL, "acl__resource_match_aci", "targetfilter too large (can not be cached) %s\n", logbuf);
+ }
+
+ previous_filter_test = targetfilter_cache_lookup(aclpb, filterstr, valid_filter);
+ if (previous_filter_test) {
+ /* The filter was already evaluated against that same entry */
+ if (previous_filter_test->matching_result == 0) {
+ slapi_log_err(SLAPI_LOG_ACL, "acl__resource_match_aci", "cached result for entry %s did NOT match %s (%s)\n",
+ slapi_sdn_get_ndn(sdn),
+ filterstr,
+ attr_evaluated);
+ filter_matched = ACL_FALSE;
+ } else {
+ slapi_log_err(SLAPI_LOG_ACL, "acl__resource_match_aci", "cached result for entry %s did match %s (%s)\n",
+ slapi_sdn_get_ndn(sdn),
+ filterstr,
+ attr_evaluated);
+ }
+ } else {
+ /* The filter has not yet been evaluated against that entry;
+ * evaluate it and cache the result
+ */
+ if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
+ aci->targetFilter,
+ 0 /*don't do access check*/) != 0) {
+ filter_matched = ACL_FALSE;
+ targetfilter_cache_add(aclpb, filterstr, 0, valid_filter); /* does not match */
+ } else {
+ targetfilter_cache_add(aclpb, filterstr, 1, valid_filter); /* does match */
+ }
+ slapi_log_err(SLAPI_LOG_ACL, "acl__resource_match_aci", "entry %s %s match %s (%s)\n",
+ slapi_sdn_get_ndn(sdn),
+ filter_matched == ACL_FALSE ? "does not" : "does",
+ filterstr,
+ attr_evaluated);
}
}
@@ -2862,7 +2988,7 @@ acl__resource_match_aci_EXIT:
* None.
*
**************************************************************************/
-static int
+int
acl__TestRights(Acl_PBlock *aclpb, int access, const char **right, const char **map_generic, aclResultReason_t *result_reason)
{
ACLEvalHandle_t *acleval;
diff --git a/ldap/servers/plugins/acl/acl.h b/ldap/servers/plugins/acl/acl.h
index 5d453d825..cf74510e7 100644
--- a/ldap/servers/plugins/acl/acl.h
+++ b/ldap/servers/plugins/acl/acl.h
@@ -407,6 +407,17 @@ struct aci_container
};
typedef struct aci_container AciContainer;
+/* This structure is stored in the aclpb.
+ * It is a linked list containing the result of
+ * the filter matching against a specific entry.
+ *
+ * This list is freed for each new entry in the aclpb */
+struct targetfilter_cached_result {
+ char *filter; /* strdup of string representation of aci->targetFilter */
+ int matching_result; /* 0 does not match / 1 does match */
+ struct targetfilter_cached_result *next; /* next targetfilter already evaluated */
+};
+
struct acl_pblock
{
int aclpb_state;
@@ -476,6 +487,8 @@ struct acl_pblock
/* Current entry/dn/attr evaluation info */
Slapi_Entry *aclpb_curr_entry; /* current Entry being processed */
+ int32_t targetfilter_cache_enabled;
+ struct targetfilter_cached_result *aclpb_curr_entry_targetfilters;
int aclpb_num_entries;
Slapi_DN *aclpb_curr_entry_sdn; /* Entry's SDN */
Slapi_DN *aclpb_authorization_sdn; /* dn used for authorization */
@@ -723,6 +736,7 @@ void acl_modified(Slapi_PBlock *pb, int optype, Slapi_DN *e_sdn, void *change);
int acl_access_allowed_disjoint_resource(Slapi_PBlock *pb, Slapi_Entry *e, char *attr, struct berval *val, int access);
int acl_access_allowed_main(Slapi_PBlock *pb, Slapi_Entry *e, char **attrs, struct berval *val, int access, int flags, char **errbuf);
+void targetfilter_cache_free(struct acl_pblock *aclpb);
int acl_access_allowed(Slapi_PBlock *pb, Slapi_Entry *e, char *attr, struct berval *val, int access);
aclUserGroup *acl_get_usersGroup(struct acl_pblock *aclpb, char *n_dn);
void acl_print_acllib_err(NSErr_t *errp, char *str);
diff --git a/ldap/servers/plugins/acl/acl_ext.c b/ldap/servers/plugins/acl/acl_ext.c
index 31c61c2f4..73dd6c39d 100644
--- a/ldap/servers/plugins/acl/acl_ext.c
+++ b/ldap/servers/plugins/acl/acl_ext.c
@@ -187,6 +187,11 @@ acl_operation_ext_constructor(void *object __attribute__((unused)), void *parent
slapi_log_err(SLAPI_LOG_ERR, plugin_name,
"acl_operation_ext_constructor - Operation extension allocation Failed\n");
}
+ /* The targetfilter_cache toggle is set during aclpb allocation
+ * to avoid accessing configuration during the evaluation
+ * of each aci
+ */
+ aclpb->targetfilter_cache_enabled = config_get_targetfilter_cache();
TNF_PROBE_0_DEBUG(acl_operation_ext_constructor_end, "ACL", "");
@@ -711,6 +716,7 @@ acl__free_aclpb(Acl_PBlock **aclpb_ptr)
slapi_ch_free((void **)&(aclpb->aclpb_curr_entryEval_context.acle_handles_matched_target));
slapi_ch_free((void **)&(aclpb->aclpb_prev_entryEval_context.acle_handles_matched_target));
slapi_ch_free((void **)&(aclpb->aclpb_prev_opEval_context.acle_handles_matched_target));
+ targetfilter_cache_free(aclpb);
slapi_sdn_free(&aclpb->aclpb_authorization_sdn);
slapi_sdn_free(&aclpb->aclpb_curr_entry_sdn);
if (aclpb->aclpb_macro_ht) {
@@ -919,6 +925,12 @@ acl__done_aclpb(struct acl_pblock *aclpb)
aclpb->aclpb_acleval ? (char *)aclpb->aclpb_acleval : "NULL");
}
+ /* This aclpb returns to the aclpb pool; make sure
+ * the cached evaluations are freed and that
+ * aclpb_curr_entry_targetfilters is NULL
+ */
+ targetfilter_cache_free(aclpb);
+
/* Now Free the contents or clean it */
slapi_sdn_done(aclpb->aclpb_curr_entry_sdn);
if (aclpb->aclpb_Evalattr)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 91c3a4a89..89f7e5344 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -211,6 +211,7 @@ slapi_onoff_t init_return_exact_case;
slapi_onoff_t init_result_tweak;
slapi_onoff_t init_plugin_track;
slapi_onoff_t init_moddn_aci;
+slapi_onoff_t init_targetfilter_cache;
slapi_onoff_t init_lastmod;
slapi_onoff_t init_readonly;
slapi_onoff_t init_accesscontrol;
@@ -822,6 +823,11 @@ static struct config_get_and_set
(void **)&global_slapdFrontendConfig.moddn_aci,
CONFIG_ON_OFF, (ConfigGetFunc)config_get_moddn_aci,
&init_moddn_aci},
+ {CONFIG_TARGETFILTER_CACHE_ATTRIBUTE, config_set_targetfilter_cache,
+ NULL, 0,
+ (void **)&global_slapdFrontendConfig.targetfilter_cache,
+ CONFIG_ON_OFF, (ConfigGetFunc)config_get_targetfilter_cache,
+ &init_targetfilter_cache},
{CONFIG_ATTRIBUTE_NAME_EXCEPTION_ATTRIBUTE, config_set_attrname_exceptions,
NULL, 0,
(void **)&global_slapdFrontendConfig.attrname_exceptions,
@@ -1567,6 +1573,7 @@ FrontendConfig_init(void)
init_syntaxcheck = cfg->syntaxcheck = LDAP_ON;
init_plugin_track = cfg->plugin_track = LDAP_OFF;
init_moddn_aci = cfg->moddn_aci = LDAP_ON;
+ init_targetfilter_cache = cfg->targetfilter_cache = LDAP_ON;
init_syntaxlogging = cfg->syntaxlogging = LDAP_OFF;
init_dn_validate_strict = cfg->dn_validate_strict = LDAP_OFF;
init_ds4_compatible_schema = cfg->ds4_compatible_schema = LDAP_OFF;
@@ -3525,6 +3532,21 @@ config_set_moddn_aci(const char *attrname, char *value, char *errorbuf, int appl
return retVal;
}
+int32_t
+config_set_targetfilter_cache(const char *attrname, char *value, char *errorbuf, int apply)
+{
+ int32_t retVal = LDAP_SUCCESS;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ retVal = config_set_onoff(attrname,
+ value,
+ &(slapdFrontendConfig->targetfilter_cache),
+ errorbuf,
+ apply);
+
+ return retVal;
+}
+
int32_t
config_set_dynamic_plugins(const char *attrname, char *value, char *errorbuf, int apply)
{
@@ -5334,6 +5356,13 @@ config_get_moddn_aci(void)
return slapi_atomic_load_32(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE);
}
+int32_t
+config_get_targetfilter_cache(void)
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ return slapi_atomic_load_32(&(slapdFrontendConfig->targetfilter_cache), __ATOMIC_ACQUIRE);
+}
+
int32_t
config_get_security(void)
{
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index d9fb8fd08..13b41ffa4 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -262,6 +262,7 @@ int config_set_lastmod(const char *attrname, char *value, char *errorbuf, int ap
int config_set_nagle(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_accesscontrol(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_moddn_aci(const char *attrname, char *value, char *errorbuf, int apply);
+int32_t config_set_targetfilter_cache(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_security(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_readonly(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_schemacheck(const char *attrname, char *value, char *errorbuf, int apply);
@@ -450,6 +451,7 @@ int config_get_accesscontrol(void);
int config_get_return_exact_case(void);
int config_get_result_tweak(void);
int config_get_moddn_aci(void);
+int32_t config_get_targetfilter_cache(void);
int config_get_security(void);
int config_get_schemacheck(void);
int config_get_syntaxcheck(void);
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 007c50b86..b2de66754 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2157,6 +2157,7 @@ typedef struct _slapdEntryPoints
#define CONFIG_REWRITE_RFC1274_ATTRIBUTE "nsslapd-rewrite-rfc1274"
#define CONFIG_PLUGIN_BINDDN_TRACKING_ATTRIBUTE "nsslapd-plugin-binddn-tracking"
#define CONFIG_MODDN_ACI_ATTRIBUTE "nsslapd-moddn-aci"
+#define CONFIG_TARGETFILTER_CACHE_ATTRIBUTE "nsslapd-targetfilter-cache"
#define CONFIG_GLOBAL_BACKEND_LOCK "nsslapd-global-backend-lock"
#define CONFIG_ENABLE_NUNC_STANS "nsslapd-enable-nunc-stans"
#define CONFIG_CONFIG_ATTRIBUTE "nsslapd-config"
@@ -2315,6 +2316,7 @@ typedef struct _slapdFrontendConfig
char **plugin;
slapi_onoff_t plugin_track;
slapi_onoff_t moddn_aci;
+ slapi_onoff_t targetfilter_cache;
struct pw_scheme *pw_storagescheme;
slapi_onoff_t pwpolicy_local;
slapi_onoff_t pw_is_global_policy;
--
2.31.1


@@ -0,0 +1,327 @@
From 6a1af7917a86c0b7218d876852cd9c0d5027edeb Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 29 Apr 2021 09:29:44 +0200
Subject: [PATCH 2/2] Issue 4667 - incorrect accounting of readers in vattr
rwlock (#4732)
Bug description:
The fix for #2932 (Contention on virtual attribute lookup) reduced
contention on vattr by acquiring the vattr lock at the operation
level rather than at the attribute level (filter and
returned attributes).
The fix for #2932 is invalid: it can lead to a deadlock scenario
(3 threads). A vattr writer (new cos/schema) blocks
an update thread that holds DB pages and later needs vattr.
Then if a reader (holding vattr) blocks the vattr writer and later
needs the same DB pages, there is a deadlock.
The decisions are:
- revert #2932 (this issue)
- skip contention if the deployment has no vattr (#4678)
- reduce contention with new approaches
  (COW and/or caching the vattr struct in each thread);
  no issue opened yet
Fix description:
The fix reverts #2932.
relates: https://github.com/389ds/389-ds-base/issues/4667
Reviewed by: William Brown, Simon Pichugin
Platforms tested: F33
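
For illustration, a minimal standalone C sketch of the locking pattern the
revert restores (pthread calls stand in for the slapi_rwlock wrappers; the map
contents and names are hypothetical, not the 389-ds code): the map's read lock
is held only for the duration of the lookup itself, so it is never kept across
the rest of the operation while other locks (for example DB pages) are taken,
which is what made the three-thread cycle above possible.

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Toy "vattr map": a fixed table protected by a single rwlock. */
static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

struct map_entry {
    const char *type_name;
    const char *provider;
};

static struct map_entry the_map[] = {
    {"memberOf", "memberof plugin"},
    {"nsRole", "roles plugin"},
};

/* Reader: take the lock only around the lookup, then release it before doing
 * anything else (the reverted code instead kept a per-thread counter and held
 * the read lock for the whole search operation). */
static const struct map_entry *
map_lookup(const char *type)
{
    const struct map_entry *found = NULL;
    pthread_rwlock_rdlock(&map_lock);
    for (size_t i = 0; i < sizeof(the_map) / sizeof(the_map[0]); i++) {
        if (strcasecmp(type, the_map[i].type_name) == 0) {
            found = &the_map[i];
            break;
        }
    }
    pthread_rwlock_unlock(&map_lock);
    return found;   /* the lock is already released; the caller may now take DB locks */
}

/* Writer (e.g. a schema/CoS change): takes the write lock for a short update. */
static void
map_rebuild(void)
{
    pthread_rwlock_wrlock(&map_lock);
    /* ... rebuild the entries ... */
    pthread_rwlock_unlock(&map_lock);
}

int
main(void)
{
    const struct map_entry *e = map_lookup("memberOf");
    printf("memberOf -> %s\n", e ? e->provider : "(not registered)");
    map_rebuild();
    return 0;
}
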
---
ldap/servers/slapd/detach.c | 8 --
ldap/servers/slapd/opshared.c | 6 --
ldap/servers/slapd/proto-slap.h | 5 --
ldap/servers/slapd/vattr.c | 147 ++------------------------------
4 files changed, 8 insertions(+), 158 deletions(-)
diff --git a/ldap/servers/slapd/detach.c b/ldap/servers/slapd/detach.c
index d5c95a04f..8270b83c5 100644
--- a/ldap/servers/slapd/detach.c
+++ b/ldap/servers/slapd/detach.c
@@ -144,10 +144,6 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t *
}
break;
}
- /* The thread private counter needs to be allocated after the fork
- * it is not inherited from parent process
- */
- vattr_global_lock_create();
/* call this right after the fork, but before closing stdin */
if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) {
@@ -178,10 +174,6 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t *
g_set_detached(1);
} else { /* not detaching - call nss/ssl init */
- /* The thread private counter needs to be allocated after the fork
- * it is not inherited from parent process
- */
- vattr_global_lock_create();
if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) {
return 1;
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 05b9a1553..a1630e134 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -266,7 +266,6 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int pr_idx = -1;
Slapi_DN *orig_sdn = NULL;
int free_sdn = 0;
- PRBool vattr_lock_acquired = PR_FALSE;
be_list[0] = NULL;
referral_list[0] = NULL;
@@ -538,8 +537,6 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
slapi_pblock_set(pb, SLAPI_BACKEND_COUNT, &index);
- vattr_rdlock();
- vattr_lock_acquired = PR_TRUE;
if (be) {
slapi_pblock_set(pb, SLAPI_BACKEND, be);
@@ -1007,9 +1004,6 @@ free_and_return:
} else if (be_single) {
slapi_be_Unlock(be_single);
}
- if (vattr_lock_acquired) {
- vattr_rd_unlock();
- }
free_and_return_nolock:
slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc);
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 13b41ffa4..50f1ff75a 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1415,11 +1415,6 @@ void subentry_create_filter(Slapi_Filter **filter);
* vattr.c
*/
void vattr_init(void);
-void vattr_global_lock_create(void);
-void vattr_rdlock();
-void vattr_rd_unlock();
-void vattr_wrlock();
-void vattr_wr_unlock();
void vattr_cleanup(void);
/*
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index eef444270..b9a4ee965 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -110,7 +110,6 @@ struct _vattr_map
typedef struct _vattr_map vattr_map;
static vattr_map *the_map = NULL;
-static PRUintn thread_private_global_vattr_lock;
/* Housekeeping Functions, called by server startup/shutdown code */
@@ -125,136 +124,6 @@ vattr_init()
vattr_basic_sp_init();
#endif
}
-/*
- * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PR_NewThreadPrivateIndex
- * It is called each time:
- * - PR_SetThreadPrivate is call with a not NULL private value
- * - on thread exit
- */
-static void
-vattr_global_lock_free(void *ptr)
-{
- int *nb_acquired = ptr;
- if (nb_acquired) {
- slapi_ch_free((void **)&nb_acquired);
- }
-}
-/* Create a private variable for each individual thread of the current process */
-void
-vattr_global_lock_create()
-{
- if (PR_NewThreadPrivateIndex(&thread_private_global_vattr_lock, vattr_global_lock_free) != PR_SUCCESS) {
- slapi_log_err(SLAPI_LOG_ALERT,
- "vattr_global_lock_create", "Failure to create global lock for virtual attribute !\n");
- PR_ASSERT(0);
- }
-}
-static int
-global_vattr_lock_get_acquired_count()
-{
- int *nb_acquired;
- nb_acquired = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock);
- if (nb_acquired == NULL) {
- /* if it was not initialized set it to zero */
- nb_acquired = (int *) slapi_ch_calloc(1, sizeof(int));
- PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) nb_acquired);
- }
- return *nb_acquired;
-}
-static void
-global_vattr_lock_set_acquired_count(int nb_acquired)
-{
- int *val;
- val = (int *) PR_GetThreadPrivate(thread_private_global_vattr_lock);
- if (val == NULL) {
- /* if it was not initialized set it to zero */
- val = (int *) slapi_ch_calloc(1, sizeof(int));
- PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) val);
- }
- *val = nb_acquired;
-}
-/* The map lock can be acquired recursively. So only the first rdlock
- * will acquire the lock.
- * A optimization acquires it at high level (op_shared_search), so that
- * later calls during the operation processing will just increase/decrease a counter.
- */
-void
-vattr_rdlock()
-{
- int nb_acquire = global_vattr_lock_get_acquired_count();
-
- if (nb_acquire == 0) {
- /* The lock was not held just acquire it */
- slapi_rwlock_rdlock(the_map->lock);
- }
- nb_acquire++;
- global_vattr_lock_set_acquired_count(nb_acquire);
-
-}
-/* The map lock can be acquired recursively. So only the last unlock
- * will release the lock.
- * A optimization acquires it at high level (op_shared_search), so that
- * later calls during the operation processing will just increase/decrease a counter.
- */
-void
-vattr_rd_unlock()
-{
- int nb_acquire = global_vattr_lock_get_acquired_count();
-
- if (nb_acquire >= 1) {
- nb_acquire--;
- if (nb_acquire == 0) {
- slapi_rwlock_unlock(the_map->lock);
- }
- global_vattr_lock_set_acquired_count(nb_acquire);
- } else {
- /* this is likely the consequence of lock acquire in read during an internal search
- * but the search callback updated the map and release the readlock and acquired
- * it in write.
- * So after the update the lock was no longer held but when completing the internal
- * search we release the global read lock, that now has nothing to do
- */
- slapi_log_err(SLAPI_LOG_DEBUG,
- "vattr_rd_unlock", "vattr lock no longer acquired in read.\n");
- }
-}
-
-/* The map lock is acquired in write (updating the map)
- * It exists a possibility that lock is acquired in write while it is already
- * hold in read by this thread (internal search with updating callback)
- * In such situation, the we must abandon the read global lock and acquire in write
- */
-void
-vattr_wrlock()
-{
- int nb_read_acquire = global_vattr_lock_get_acquired_count();
-
- if (nb_read_acquire) {
- /* The lock was acquired in read but we need it in write
- * release it and set the global vattr_lock counter to 0
- */
- slapi_rwlock_unlock(the_map->lock);
- global_vattr_lock_set_acquired_count(0);
- }
- slapi_rwlock_wrlock(the_map->lock);
-}
-/* The map lock is release from a write write (updating the map)
- */
-void
-vattr_wr_unlock()
-{
- int nb_read_acquire = global_vattr_lock_get_acquired_count();
-
- if (nb_read_acquire) {
- /* The lock being acquired in write, the private thread counter
- * (that count the number of time it was acquired in read) should be 0
- */
- slapi_log_err(SLAPI_LOG_INFO,
- "vattr_unlock", "The lock was acquired in write. We should not be here\n");
- PR_ASSERT(nb_read_acquire == 0);
- }
- slapi_rwlock_unlock(the_map->lock);
-}
/* Called on server shutdown, free all structures, inform service providers that we're going down etc */
void
vattr_cleanup()
@@ -2069,11 +1938,11 @@ vattr_map_lookup(const char *type_to_find, vattr_map_entry **result)
}
/* Get the reader lock */
- vattr_rdlock();
+ slapi_rwlock_rdlock(the_map->lock);
*result = (vattr_map_entry *)PL_HashTableLookupConst(the_map->hashtable,
(void *)basetype);
/* Release ze lock */
- vattr_rd_unlock();
+ slapi_rwlock_unlock(the_map->lock);
if (tmp) {
slapi_ch_free_string(&tmp);
@@ -2092,13 +1961,13 @@ vattr_map_insert(vattr_map_entry *vae)
{
PR_ASSERT(the_map);
/* Get the writer lock */
- vattr_wrlock();
+ slapi_rwlock_wrlock(the_map->lock);
/* Insert the thing */
/* It's illegal to call this function if the entry is already there */
PR_ASSERT(NULL == PL_HashTableLookupConst(the_map->hashtable, (void *)vae->type_name));
PL_HashTableAdd(the_map->hashtable, (void *)vae->type_name, (void *)vae);
/* Unlock and we're done */
- vattr_wr_unlock();
+ slapi_rwlock_unlock(the_map->lock);
return 0;
}
@@ -2235,13 +2104,13 @@ schema_changed_callback(Slapi_Entry *e __attribute__((unused)),
void *caller_data __attribute__((unused)))
{
/* Get the writer lock */
- vattr_wrlock();
+ slapi_rwlock_wrlock(the_map->lock);
/* go through the list */
PL_HashTableEnumerateEntries(the_map->hashtable, vattr_map_entry_rebuild_schema, 0);
/* Unlock and we're done */
- vattr_wr_unlock();
+ slapi_rwlock_unlock(the_map->lock);
}
@@ -2261,7 +2130,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
objAttrValue *obj;
if (0 == vattr_map_lookup(type, &map_entry)) {
- vattr_rdlock();
+ slapi_rwlock_rdlock(the_map->lock);
obj = map_entry->objectclasses;
@@ -2278,7 +2147,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
obj = obj->pNext;
}
- vattr_rd_unlock();
+ slapi_rwlock_unlock(the_map->lock);
}
slapi_valueset_free(vs);
--
2.31.1


@@ -39,7 +39,7 @@
Summary: 389 Directory Server (%{variant})
Name: 389-ds-base
Version: 1.3.10.2
Release: %{?relprefix}13%{?prerel}%{?dist}
Release: %{?relprefix}14%{?prerel}%{?dist}
License: GPLv3+
URL: https://www.port389.org/
Group: System Environment/Daemons
@@ -175,6 +175,8 @@ Patch26: 0026-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patc
Patch27: 0027-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch
Patch28: 0028-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
Patch29: 0029-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch
Patch30: 0030-Issue-4925-Performance-ACI-targetfilter-evaluation-r.patch
Patch31: 0031-Issue-4667-incorrect-accounting-of-readers-in-vattr-.patch
%description
@@ -529,6 +531,11 @@ fi
%{_sysconfdir}/%{pkgname}/dirsrvtests
%changelog
* Fri Oct 29 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.3.10.2-14
- Bump version to 1.3.10.2-14
- Resolves: Bug 2018257 - hang because of incorrect accounting of readers in vattr rwlock
- Resolves: Bug 2010976 - IPA server (389ds) is very slow in execution of some searches (`&(memberOf=...)(objectClass=ipaHost)` in particular)
* Mon Sep 20 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.3.10.2-13
- Bump version to 1.3.10.2-13
- Resolves: Bug 2005399 - Internal unindexed searches in syncrepl
@@ -539,7 +546,7 @@ fi
* Fri May 7 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.3.10.2-12
- Bump version to 1.3.10.2-12
* Mon May 5 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.3.10.2-11
* Wed May 5 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.3.10.2-11
- Bump version to 1.3.10.2-11
- Resolves: Bug 1953673 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1931182 - information disclosure during the binding of a DN