From 222f885df38c3abd34ee239a721654155609631b Mon Sep 17 00:00:00 2001
From: AlexeiFedorov
Date: Wed, 13 Mar 2024 12:16:51 +0000
Subject: [PATCH] feat(locks): add bitlock

This patch adds the 'bitlock_t' type and the bit_lock() and
bit_unlock() functions to support lock/release functionality based
on an individual bit position within a byte. These functions use the
atomic bit set and clear instructions, which require FEAT_LSE,
mandatory from Armv8.1.

Change-Id: I3eb0f29bbccefe6c0f69061aa701187a6364df0c
Signed-off-by: AlexeiFedorov
---
 include/lib/spinlock.h                 | 12 +++++--
 lib/locks/exclusive/aarch64/spinlock.S | 44 +++++++++++++++++++++++++-
 2 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/include/lib/spinlock.h b/include/lib/spinlock.h
index 9fd3fc65f..055a911d0 100644
--- a/include/lib/spinlock.h
+++ b/include/lib/spinlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2018, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -15,15 +15,21 @@ typedef struct spinlock {
 	volatile uint32_t lock;
 } spinlock_t;
 
+typedef struct bitlock {
+	volatile uint8_t lock;
+} bitlock_t;
+
 void spin_lock(spinlock_t *lock);
 void spin_unlock(spinlock_t *lock);
 
+void bit_lock(bitlock_t *lock, uint8_t mask);
+void bit_unlock(bitlock_t *lock, uint8_t mask);
+
 #else
 
 /* Spin lock definitions for use in assembly */
 #define SPINLOCK_ASM_ALIGN	2
 #define SPINLOCK_ASM_SIZE	4
 
-#endif
-
+#endif /* __ASSEMBLER__ */
 #endif /* SPINLOCK_H */
diff --git a/lib/locks/exclusive/aarch64/spinlock.S b/lib/locks/exclusive/aarch64/spinlock.S
index 5144bf7a9..77bb7fe45 100644
--- a/lib/locks/exclusive/aarch64/spinlock.S
+++ b/lib/locks/exclusive/aarch64/spinlock.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2019, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,8 @@
 
 	.globl	spin_lock
 	.globl	spin_unlock
+	.globl	bit_lock
+	.globl	bit_unlock
 
 #if USE_SPINLOCK_CAS
 #if !ARM_ARCH_AT_LEAST(8, 1)
@@ -73,3 +75,43 @@ func spin_unlock
 	stlr	wzr, [x0]
 	ret
 endfunc spin_unlock
+
+/*
+ * Atomic bit clear and set instructions require FEAT_LSE, which is
+ * mandatory from Armv8.1.
+ */
+#if ARM_ARCH_AT_LEAST(8, 1)
+
+/*
+ * Acquire the bitlock using an atomic bit set on a byte. If the original
+ * read value already has the bit set, use load exclusive semantics to
+ * monitor the address and enter WFE.
+ *
+ * void bit_lock(bitlock_t *lock, uint8_t mask);
+ */
+func bit_lock
+1:	ldsetab	w1, w2, [x0]
+	tst	w2, w1
+	b.eq	2f
+	ldxrb	w2, [x0]
+	tst	w2, w1
+	b.eq	1b
+	wfe
+	b	1b
+2:
+	ret
+endfunc bit_lock
+
+/*
+ * Use an atomic bit clear with store-release to unconditionally clear the
+ * bitlock variable. The store operation generates an event for all cores
+ * waiting in WFE when the address is monitored by the global monitor.
+ *
+ * void bit_unlock(bitlock_t *lock, uint8_t mask);
+ */
+func bit_unlock
+	stclrlb	w1, [x0]
+	ret
+endfunc bit_unlock
+
+#endif /* ARM_ARCH_AT_LEAST(8, 1) */
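
Reviewer note (not part of the patch): the behaviour of the two new functions
can be modelled in C11 atomics roughly as below. This is an illustrative
sketch only: atomic_fetch_or_explicit() stands in for LDSETAB,
atomic_fetch_and_explicit() for STCLRLB, and a plain spin approximates the
LDXRB/WFE wait; the model_* names are invented for this note.

#include <stdatomic.h>
#include <stdint.h>

/* Model of bit_lock(): atomically set the bit; the returned old value
 * tells us whether the lock was already held. */
static void model_bit_lock(_Atomic uint8_t *lock, uint8_t mask)
{
	while (atomic_fetch_or_explicit(lock, mask, memory_order_acquire) & mask) {
		/* Bit was already set: another CPU holds the lock. The real
		 * code arms the exclusive monitor with LDXRB and sleeps in
		 * WFE until the unlocking store generates an event. */
		while (atomic_load_explicit(lock, memory_order_relaxed) & mask)
			; /* spin: stand-in for WFE */
	}
}

/* Model of bit_unlock(): clear the bit with release semantics so writes
 * made in the critical section are visible to the next lock owner. */
static void model_bit_unlock(_Atomic uint8_t *lock, uint8_t mask)
{
	atomic_fetch_and_explicit(lock, (uint8_t)~mask, memory_order_release);
}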
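
For completeness, a minimal usage sketch of the new API: one bitlock_t byte
can serve up to eight independent locks, one per bit. The rsrc_locks and
RSRC3_LOCK_BIT names below are hypothetical, chosen only for illustration.

#include <stdint.h>
#include <lib/spinlock.h>

static bitlock_t rsrc_locks;			/* all bits start clear (unlocked) */
#define RSRC3_LOCK_BIT	((uint8_t)(1U << 3))	/* bit 3 guards resource 3 */

void rsrc3_update(void)
{
	/* Waits (in WFE) until bit 3 is observed clear, then owns it. */
	bit_lock(&rsrc_locks, RSRC3_LOCK_BIT);
	/* ... critical section for resource 3 ... */
	bit_unlock(&rsrc_locks, RSRC3_LOCK_BIT);	/* clear bit 3, release */
}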