Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug             44
-rw-r--r--  lib/Makefile                   6
-rw-r--r--  lib/dec_and_lock.c             8
-rw-r--r--  lib/lockref.c                  1
-rw-r--r--  lib/rhashtable.c               5
-rw-r--r--  lib/stackdepot.c              20
-rw-r--r--  lib/test_context-analysis.c  598
7 files changed, 669 insertions(+), 13 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 34783f13843e..a0a4760bb20b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -616,6 +616,36 @@ config DEBUG_FORCE_WEAK_PER_CPU
To ensure that generic code follows the above rules, this
option forces all percpu variables to be defined as weak.
+config WARN_CONTEXT_ANALYSIS
+ bool "Compiler context-analysis warnings"
+ depends on CC_IS_CLANG && CLANG_VERSION >= 220000
+ # Branch profiling re-defines "if", which messes with the compiler's
+ # ability to analyze __cond_acquires(..), resulting in false positives.
+ depends on !TRACE_BRANCH_PROFILING
+ default y
+ help
+ Context analysis is a language extension that enables statically
+ checking that required contexts are active (or inactive); contexts
+ are entered and exited by acquiring and releasing user-definable
+ "context locks".
+
+ Clang's name for this feature is "Thread Safety Analysis". It
+ requires Clang 22 or later.
+
+ Produces warnings by default. Select CONFIG_WERROR if you wish to
+ turn these warnings into errors.
+
+ For more details, see Documentation/dev-tools/context-analysis.rst.
+
+config WARN_CONTEXT_ANALYSIS_ALL
+ bool "Enable context analysis for all source files"
+ depends on WARN_CONTEXT_ANALYSIS
+ depends on EXPERT && !COMPILE_TEST
+ help
+ Enable tree-wide context analysis. This is likely to produce a
+ large number of false positive warnings; enable at your own risk.
+
+ If unsure, say N.
+
endmenu # "Compiler options"
menu "Generic Kernel Debugging Instruments"
@@ -2813,6 +2843,20 @@ config LINEAR_RANGES_TEST
If unsure, say N.
+config CONTEXT_ANALYSIS_TEST
+ bool "Compiler context-analysis warnings test"
+ depends on EXPERT
+ help
+ This builds the test for compiler-based context analysis. The test
+ does not add executable code to the kernel, but is meant to test that
+ common patterns supported by the analysis do not result in false
+ positive warnings.
+
+ When adding support for new context locks, it is strongly recommended
+ to add supported patterns to this test.
+
+ If unsure, say N.
+
config CMDLINE_KUNIT_TEST
tristate "KUnit test for cmdline API" if !KUNIT_ALL_TESTS
depends on KUNIT
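
For orientation (not part of the patch): a minimal sketch of the kind of code WARN_CONTEXT_ANALYSIS checks. The struct and function names are invented for the example; only the annotation (__guarded_by) and the lock primitives are the ones exercised by the diffs and the test below.

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct example_stats {
		spinlock_t lock;
		u64 hits __guarded_by(&lock);	/* may only be touched with lock held */
	};

	static void example_hit(struct example_stats *s)
	{
		spin_lock(&s->lock);
		s->hits++;			/* OK: lock is held */
		spin_unlock(&s->lock);
	}

	static void example_hit_buggy(struct example_stats *s)
	{
		s->hits++;			/* expected to trigger a context-analysis warning */
	}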
diff --git a/lib/Makefile b/lib/Makefile
index aaf677cf4527..22d8742bba57 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -50,6 +50,8 @@ lib-$(CONFIG_MIN_HEAP) += min_heap.o
lib-y += kobject.o klist.o
obj-y += lockref.o
+CONTEXT_ANALYSIS_rhashtable.o := y
+
obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
list_sort.o uuid.o iov_iter.o clz_ctz.o \
@@ -250,6 +252,7 @@ obj-$(CONFIG_POLYNOMIAL) += polynomial.o
# Prevent the compiler from calling builtins like memcmp() or bcmp() from this
# file.
CFLAGS_stackdepot.o += -fno-builtin
+CONTEXT_ANALYSIS_stackdepot.o := y
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
# In particular, instrumenting stackdepot.c with KMSAN will result in infinite
@@ -331,4 +334,7 @@ obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
+CONTEXT_ANALYSIS_test_context-analysis.o := y
+obj-$(CONFIG_CONTEXT_ANALYSIS_TEST) += test_context-analysis.o
+
subdir-$(CONFIG_FORTIFY_SOURCE) += test_fortify
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 1dcca8f2e194..8c7c398fd770 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -18,7 +18,7 @@
* because the spin-lock and the decrement must be
* "atomic".
*/
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
@@ -32,7 +32,7 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
return 0;
}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
+EXPORT_SYMBOL(atomic_dec_and_lock);
int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
unsigned long *flags)
@@ -50,7 +50,7 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
-int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
@@ -63,7 +63,7 @@ int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
raw_spin_unlock(lock);
return 0;
}
-EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+EXPORT_SYMBOL(atomic_dec_and_raw_lock);
int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
unsigned long *flags)
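
For illustration (not part of the patch): with the wrapper macros gone, the renamed atomic_dec_and_lock() can carry its conditional-acquisition annotation directly (the Kconfig comment above refers to __cond_acquires()). A hedged sketch of a typical caller, using an invented 'struct obj':

	#include <linux/atomic.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct obj {
		atomic_t refcount;
		struct list_head node;
	};

	/* Drop a reference; unlink under @list_lock only if this was the last one. */
	static void obj_put(struct obj *o, spinlock_t *list_lock)
	{
		/*
		 * atomic_dec_and_lock() returns true with *list_lock held only
		 * on the path where the count dropped to zero, so the unlock
		 * below is required on exactly that path.
		 */
		if (atomic_dec_and_lock(&o->refcount, list_lock)) {
			list_del(&o->node);
			spin_unlock(list_lock);
			kfree(o);
		}
	}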
diff --git a/lib/lockref.c b/lib/lockref.c
index 9210fc6ae714..5d8e3ef3860e 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -105,7 +105,6 @@ EXPORT_SYMBOL(lockref_put_return);
* @lockref: pointer to lockref structure
* Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
*/
-#undef lockref_put_or_lock
bool lockref_put_or_lock(struct lockref *lockref)
{
CMPXCHG_LOOP(
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index fde0f0e556f8..6074ed5f66f3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -358,6 +358,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
static int rhashtable_rehash_alloc(struct rhashtable *ht,
struct bucket_table *old_tbl,
unsigned int size)
+ __must_hold(&ht->mutex)
{
struct bucket_table *new_tbl;
int err;
@@ -392,6 +393,7 @@ static int rhashtable_rehash_alloc(struct rhashtable *ht,
* bucket locks or concurrent RCU protected lookups and traversals.
*/
static int rhashtable_shrink(struct rhashtable *ht)
+ __must_hold(&ht->mutex)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
unsigned int nelems = atomic_read(&ht->nelems);
@@ -724,7 +726,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
* resize events and always continue.
*/
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
- __acquires(RCU)
+ __acquires_shared(RCU)
{
struct rhashtable *ht = iter->ht;
bool rhlist = ht->rhlist;
@@ -940,7 +942,6 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
* hash table.
*/
void rhashtable_walk_stop(struct rhashtable_iter *iter)
- __releases(RCU)
{
struct rhashtable *ht;
struct bucket_table *tbl = iter->walker.tbl;
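
For illustration (not part of the patch): the annotations used above, __must_hold() for callers that already own a lock and the shared RCU variants, look roughly like this on a made-up 'struct cache'; only the annotation names are taken from the diff and the test file.

	#include <linux/mutex.h>
	#include <linux/rcupdate.h>

	struct cache {
		struct mutex resize_mutex;
		long __rcu_guarded *entry;
		int nelems __guarded_by(&resize_mutex);
	};

	/* Internal helper: callers must already hold the resize mutex. */
	static void cache_account(struct cache *c)
		__must_hold(&c->resize_mutex)
	{
		c->nelems++;		/* no warning: the annotation asserts the mutex is held */
	}

	/* Read-side helper: callers must be in an RCU read-side critical section. */
	static long *cache_lookup(struct cache *c)
		__must_hold_shared(RCU)
	{
		return rcu_dereference(c->entry);
	}

	static void cache_user(struct cache *c)
	{
		rcu_read_lock();
		(void)cache_lookup(c);
		rcu_read_unlock();

		mutex_lock(&c->resize_mutex);
		cache_account(c);
		mutex_unlock(&c->resize_mutex);
	}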
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index de0b0025af2b..166f50ad8391 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -61,18 +61,18 @@ static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
/* Array of memory regions that store stack records. */
-static void **stack_pools;
+static void **stack_pools __pt_guarded_by(&pool_lock);
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset = DEPOT_POOL_SIZE;
+static size_t pool_offset __guarded_by(&pool_lock) = DEPOT_POOL_SIZE;
/* Freelist of stack records within stack_pools. */
-static LIST_HEAD(free_stacks);
-/* The lock must be held when performing pool or freelist modifications. */
-static DEFINE_RAW_SPINLOCK(pool_lock);
+static __guarded_by(&pool_lock) LIST_HEAD(free_stacks);
/* Statistics counters for debugfs. */
enum depot_counter_id {
@@ -291,6 +291,7 @@ EXPORT_SYMBOL_GPL(stack_depot_init);
* Initializes new stack pool, and updates the list of pools.
*/
static bool depot_init_pool(void **prealloc)
+ __must_hold(&pool_lock)
{
lockdep_assert_held(&pool_lock);
@@ -338,6 +339,7 @@ static bool depot_init_pool(void **prealloc)
/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
+ __must_hold(&pool_lock)
{
lockdep_assert_held(&pool_lock);
@@ -357,6 +359,7 @@ static void depot_keep_new_pool(void **prealloc)
* the current pre-allocation.
*/
static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
+ __must_hold(&pool_lock)
{
struct stack_record *stack;
void *current_pool;
@@ -391,6 +394,7 @@ static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
/* Try to find next free usable entry from the freelist. */
static struct stack_record *depot_pop_free(void)
+ __must_hold(&pool_lock)
{
struct stack_record *stack;
@@ -428,6 +432,7 @@ static inline size_t depot_stack_record_size(struct stack_record *s, unsigned in
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
+ __must_hold(&pool_lock)
{
struct stack_record *stack = NULL;
size_t record_size;
@@ -486,6 +491,7 @@ depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, dep
}
static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+ __must_not_hold(&pool_lock)
{
const int pools_num_cached = READ_ONCE(pools_num);
union handle_parts parts = { .handle = handle };
@@ -502,7 +508,8 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
return NULL;
}
- pool = stack_pools[pool_index];
+ /* @pool_index is either valid, or the caller passed in a corrupted value. */
+ pool = context_unsafe(stack_pools[pool_index]);
if (WARN_ON(!pool))
return NULL;
@@ -515,6 +522,7 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
/* Links stack into the freelist. */
static void depot_free_stack(struct stack_record *stack)
+ __must_not_hold(&pool_lock)
{
unsigned long flags;
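
For illustration (not part of the patch): the stackdepot changes show the remaining two pieces, guarding file-scope state and deliberately opting an access out with context_unsafe(). A small hedged sketch with invented names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(stats_lock);
	static unsigned long nr_events __guarded_by(&stats_lock);

	static void record_event(void)
	{
		spin_lock(&stats_lock);
		nr_events++;			/* OK: lock held */
		spin_unlock(&stats_lock);
	}

	static unsigned long read_events_racy(void)
	{
		/*
		 * Deliberately lockless read for a best-effort statistics dump;
		 * context_unsafe() documents that the access is intentionally
		 * outside the analysis, like the stack_pools[] read in
		 * depot_fetch_stack() above.
		 */
		return context_unsafe(READ_ONCE(nr_events));
	}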
diff --git a/lib/test_context-analysis.c b/lib/test_context-analysis.c
new file mode 100644
index 000000000000..140efa8a9763
--- /dev/null
+++ b/lib/test_context-analysis.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Compile-only tests for common patterns that should not generate false
+ * positive errors when compiled with Clang's context analysis.
+ */
+
+#include <linux/bit_spinlock.h>
+#include <linux/build_bug.h>
+#include <linux/local_lock.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/rwsem.h>
+#include <linux/seqlock.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/ww_mutex.h>
+
+/*
+ * Test that helper macros work as expected.
+ */
+static void __used test_common_helpers(void)
+{
+ BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
+ BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colon */
+ BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
+ context_unsafe(do { } while (0)); /* works with void statements */
+}
+
+#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op) \
+ struct test_##class##_data { \
+ type lock; \
+ int counter __guarded_by(&lock); \
+ int *pointer __pt_guarded_by(&lock); \
+ }; \
+ static void __used test_##class##_init(struct test_##class##_data *d) \
+ { \
+ guard(type_init)(&d->lock); \
+ d->counter = 0; \
+ } \
+ static void __used test_##class(struct test_##class##_data *d) \
+ { \
+ unsigned long flags; \
+ d->pointer++; /* only the pointee is guarded, not the pointer itself */ \
+ type_lock(&d->lock); \
+ op(d->counter); \
+ op(*d->pointer); \
+ type_unlock(&d->lock); \
+ type_lock##_irq(&d->lock); \
+ op(d->counter); \
+ op(*d->pointer); \
+ type_unlock##_irq(&d->lock); \
+ type_lock##_bh(&d->lock); \
+ op(d->counter); \
+ op(*d->pointer); \
+ type_unlock##_bh(&d->lock); \
+ type_lock##_irqsave(&d->lock, flags); \
+ op(d->counter); \
+ op(*d->pointer); \
+ type_unlock##_irqrestore(&d->lock, flags); \
+ } \
+ static void __used test_##class##_trylock(struct test_##class##_data *d) \
+ { \
+ if (type_trylock(&d->lock)) { \
+ op(d->counter); \
+ type_unlock(&d->lock); \
+ } \
+ } \
+ static void __used test_##class##_assert(struct test_##class##_data *d) \
+ { \
+ lockdep_assert_held(&d->lock); \
+ op(d->counter); \
+ } \
+ static void __used test_##class##_guard(struct test_##class##_data *d) \
+ { \
+ { guard(class)(&d->lock); op(d->counter); } \
+ { guard(class##_irq)(&d->lock); op(d->counter); } \
+ { guard(class##_irqsave)(&d->lock); op(d->counter); } \
+ }
+
+#define TEST_OP_RW(x) (x)++
+#define TEST_OP_RO(x) ((void)(x))
+
+TEST_SPINLOCK_COMMON(raw_spinlock,
+ raw_spinlock_t,
+ raw_spinlock_init,
+ raw_spin_lock,
+ raw_spin_unlock,
+ raw_spin_trylock,
+ TEST_OP_RW);
+static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
+{
+ unsigned long flags;
+
+ data_race(d->counter++); /* no warning */
+
+ if (raw_spin_trylock_irq(&d->lock)) {
+ d->counter++;
+ raw_spin_unlock_irq(&d->lock);
+ }
+ if (raw_spin_trylock_irqsave(&d->lock, flags)) {
+ d->counter++;
+ raw_spin_unlock_irqrestore(&d->lock, flags);
+ }
+ scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
+ d->counter++;
+ }
+}
+
+TEST_SPINLOCK_COMMON(spinlock,
+ spinlock_t,
+ spinlock_init,
+ spin_lock,
+ spin_unlock,
+ spin_trylock,
+ TEST_OP_RW);
+static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
+{
+ unsigned long flags;
+
+ if (spin_trylock_irq(&d->lock)) {
+ d->counter++;
+ spin_unlock_irq(&d->lock);
+ }
+ if (spin_trylock_irqsave(&d->lock, flags)) {
+ d->counter++;
+ spin_unlock_irqrestore(&d->lock, flags);
+ }
+ scoped_cond_guard(spinlock_try, return, &d->lock) {
+ d->counter++;
+ }
+}
+
+TEST_SPINLOCK_COMMON(write_lock,
+ rwlock_t,
+ rwlock_init,
+ write_lock,
+ write_unlock,
+ write_trylock,
+ TEST_OP_RW);
+static void __used test_write_trylock_extra(struct test_write_lock_data *d)
+{
+ unsigned long flags;
+
+ if (write_trylock_irqsave(&d->lock, flags)) {
+ d->counter++;
+ write_unlock_irqrestore(&d->lock, flags);
+ }
+}
+
+TEST_SPINLOCK_COMMON(read_lock,
+ rwlock_t,
+ rwlock_init,
+ read_lock,
+ read_unlock,
+ read_trylock,
+ TEST_OP_RO);
+
+struct test_mutex_data {
+ struct mutex mtx;
+ int counter __guarded_by(&mtx);
+};
+
+static void __used test_mutex_init(struct test_mutex_data *d)
+{
+ guard(mutex_init)(&d->mtx);
+ d->counter = 0;
+}
+
+static void __used test_mutex_lock(struct test_mutex_data *d)
+{
+ mutex_lock(&d->mtx);
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ mutex_lock_io(&d->mtx);
+ d->counter++;
+ mutex_unlock(&d->mtx);
+}
+
+static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
+{
+ if (!mutex_lock_interruptible(&d->mtx)) {
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ }
+ if (!mutex_lock_killable(&d->mtx)) {
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ }
+ if (mutex_trylock(&d->mtx)) {
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ }
+ if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ }
+}
+
+static void __used test_mutex_assert(struct test_mutex_data *d)
+{
+ lockdep_assert_held(&d->mtx);
+ d->counter++;
+}
+
+static void __used test_mutex_guard(struct test_mutex_data *d)
+{
+ guard(mutex)(&d->mtx);
+ d->counter++;
+}
+
+static void __used test_mutex_cond_guard(struct test_mutex_data *d)
+{
+ scoped_cond_guard(mutex_try, return, &d->mtx) {
+ d->counter++;
+ }
+ scoped_cond_guard(mutex_intr, return, &d->mtx) {
+ d->counter++;
+ }
+}
+
+struct test_seqlock_data {
+ seqlock_t sl;
+ int counter __guarded_by(&sl);
+};
+
+static void __used test_seqlock_init(struct test_seqlock_data *d)
+{
+ guard(seqlock_init)(&d->sl);
+ d->counter = 0;
+}
+
+static void __used test_seqlock_reader(struct test_seqlock_data *d)
+{
+ unsigned int seq;
+
+ do {
+ seq = read_seqbegin(&d->sl);
+ (void)d->counter;
+ } while (read_seqretry(&d->sl, seq));
+}
+
+static void __used test_seqlock_writer(struct test_seqlock_data *d)
+{
+ unsigned long flags;
+
+ write_seqlock(&d->sl);
+ d->counter++;
+ write_sequnlock(&d->sl);
+
+ write_seqlock_irq(&d->sl);
+ d->counter++;
+ write_sequnlock_irq(&d->sl);
+
+ write_seqlock_bh(&d->sl);
+ d->counter++;
+ write_sequnlock_bh(&d->sl);
+
+ write_seqlock_irqsave(&d->sl, flags);
+ d->counter++;
+ write_sequnlock_irqrestore(&d->sl, flags);
+}
+
+static void __used test_seqlock_scoped(struct test_seqlock_data *d)
+{
+ scoped_seqlock_read (&d->sl, ss_lockless) {
+ (void)d->counter;
+ }
+}
+
+struct test_rwsem_data {
+ struct rw_semaphore sem;
+ int counter __guarded_by(&sem);
+};
+
+static void __used test_rwsem_init(struct test_rwsem_data *d)
+{
+ guard(rwsem_init)(&d->sem);
+ d->counter = 0;
+}
+
+static void __used test_rwsem_reader(struct test_rwsem_data *d)
+{
+ down_read(&d->sem);
+ (void)d->counter;
+ up_read(&d->sem);
+
+ if (down_read_trylock(&d->sem)) {
+ (void)d->counter;
+ up_read(&d->sem);
+ }
+}
+
+static void __used test_rwsem_writer(struct test_rwsem_data *d)
+{
+ down_write(&d->sem);
+ d->counter++;
+ up_write(&d->sem);
+
+ down_write(&d->sem);
+ d->counter++;
+ downgrade_write(&d->sem);
+ (void)d->counter;
+ up_read(&d->sem);
+
+ if (down_write_trylock(&d->sem)) {
+ d->counter++;
+ up_write(&d->sem);
+ }
+}
+
+static void __used test_rwsem_assert(struct test_rwsem_data *d)
+{
+ rwsem_assert_held_nolockdep(&d->sem);
+ d->counter++;
+}
+
+static void __used test_rwsem_guard(struct test_rwsem_data *d)
+{
+ { guard(rwsem_read)(&d->sem); (void)d->counter; }
+ { guard(rwsem_write)(&d->sem); d->counter++; }
+}
+
+static void __used test_rwsem_cond_guard(struct test_rwsem_data *d)
+{
+ scoped_cond_guard(rwsem_read_try, return, &d->sem) {
+ (void)d->counter;
+ }
+ scoped_cond_guard(rwsem_write_try, return, &d->sem) {
+ d->counter++;
+ }
+}
+
+struct test_bit_spinlock_data {
+ unsigned long bits;
+ int counter __guarded_by(__bitlock(3, &bits));
+};
+
+static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
+{
+ /*
+ * Note: the analysis may have false negatives here, because it does not
+ * precisely track which bit the fake __bitlock() token refers to.
+ */
+ bit_spin_lock(3, &d->bits);
+ d->counter++;
+ bit_spin_unlock(3, &d->bits);
+
+ bit_spin_lock(3, &d->bits);
+ d->counter++;
+ __bit_spin_unlock(3, &d->bits);
+
+ if (bit_spin_trylock(3, &d->bits)) {
+ d->counter++;
+ bit_spin_unlock(3, &d->bits);
+ }
+}
+
+/*
+ * Test that we can mark a variable guarded by RCU, and we can dereference and
+ * write to the pointer with RCU's primitives.
+ */
+struct test_rcu_data {
+ long __rcu_guarded *data;
+};
+
+static void __used test_rcu_guarded_reader(struct test_rcu_data *d)
+{
+ rcu_read_lock();
+ (void)rcu_dereference(d->data);
+ rcu_read_unlock();
+
+ rcu_read_lock_bh();
+ (void)rcu_dereference(d->data);
+ rcu_read_unlock_bh();
+
+ rcu_read_lock_sched();
+ (void)rcu_dereference(d->data);
+ rcu_read_unlock_sched();
+}
+
+static void __used test_rcu_guard(struct test_rcu_data *d)
+{
+ guard(rcu)();
+ (void)rcu_dereference(d->data);
+}
+
+static void __used test_rcu_guarded_updater(struct test_rcu_data *d)
+{
+ rcu_assign_pointer(d->data, NULL);
+ RCU_INIT_POINTER(d->data, NULL);
+ (void)unrcu_pointer(d->data);
+}
+
+static void wants_rcu_held(void) __must_hold_shared(RCU) { }
+static void wants_rcu_held_bh(void) __must_hold_shared(RCU_BH) { }
+static void wants_rcu_held_sched(void) __must_hold_shared(RCU_SCHED) { }
+
+static void __used test_rcu_lock_variants(void)
+{
+ rcu_read_lock();
+ wants_rcu_held();
+ rcu_read_unlock();
+
+ rcu_read_lock_bh();
+ wants_rcu_held_bh();
+ rcu_read_unlock_bh();
+
+ rcu_read_lock_sched();
+ wants_rcu_held_sched();
+ rcu_read_unlock_sched();
+}
+
+static void __used test_rcu_lock_reentrant(void)
+{
+ rcu_read_lock();
+ rcu_read_lock();
+ rcu_read_lock_bh();
+ rcu_read_lock_bh();
+ rcu_read_lock_sched();
+ rcu_read_lock_sched();
+
+ rcu_read_unlock_sched();
+ rcu_read_unlock_sched();
+ rcu_read_unlock_bh();
+ rcu_read_unlock_bh();
+ rcu_read_unlock();
+ rcu_read_unlock();
+}
+
+static void __used test_rcu_assert_variants(void)
+{
+ lockdep_assert_in_rcu_read_lock();
+ wants_rcu_held();
+
+ lockdep_assert_in_rcu_read_lock_bh();
+ wants_rcu_held_bh();
+
+ lockdep_assert_in_rcu_read_lock_sched();
+ wants_rcu_held_sched();
+}
+
+struct test_srcu_data {
+ struct srcu_struct srcu;
+ long __rcu_guarded *data;
+};
+
+static void __used test_srcu(struct test_srcu_data *d)
+{
+ init_srcu_struct(&d->srcu);
+
+ int idx = srcu_read_lock(&d->srcu);
+ long *data = srcu_dereference(d->data, &d->srcu);
+ (void)data;
+ srcu_read_unlock(&d->srcu, idx);
+
+ rcu_assign_pointer(d->data, NULL);
+}
+
+static void __used test_srcu_guard(struct test_srcu_data *d)
+{
+ { guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
+ { guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
+ { guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
+}
+
+struct test_local_lock_data {
+ local_lock_t lock;
+ int counter __guarded_by(&lock);
+};
+
+static DEFINE_PER_CPU(struct test_local_lock_data, test_local_lock_data) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
+
+static void __used test_local_lock_init(struct test_local_lock_data *d)
+{
+ guard(local_lock_init)(&d->lock);
+ d->counter = 0;
+}
+
+static void __used test_local_lock(void)
+{
+ unsigned long flags;
+
+ local_lock(&test_local_lock_data.lock);
+ this_cpu_add(test_local_lock_data.counter, 1);
+ local_unlock(&test_local_lock_data.lock);
+
+ local_lock_irq(&test_local_lock_data.lock);
+ this_cpu_add(test_local_lock_data.counter, 1);
+ local_unlock_irq(&test_local_lock_data.lock);
+
+ local_lock_irqsave(&test_local_lock_data.lock, flags);
+ this_cpu_add(test_local_lock_data.counter, 1);
+ local_unlock_irqrestore(&test_local_lock_data.lock, flags);
+
+ local_lock_nested_bh(&test_local_lock_data.lock);
+ this_cpu_add(test_local_lock_data.counter, 1);
+ local_unlock_nested_bh(&test_local_lock_data.lock);
+}
+
+static void __used test_local_lock_guard(void)
+{
+ { guard(local_lock)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
+ { guard(local_lock_irq)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
+ { guard(local_lock_irqsave)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
+ { guard(local_lock_nested_bh)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
+}
+
+struct test_local_trylock_data {
+ local_trylock_t lock;
+ int counter __guarded_by(&lock);
+};
+
+static DEFINE_PER_CPU(struct test_local_trylock_data, test_local_trylock_data) = {
+ .lock = INIT_LOCAL_TRYLOCK(lock),
+};
+
+static void __used test_local_trylock_init(struct test_local_trylock_data *d)
+{
+ guard(local_trylock_init)(&d->lock);
+ d->counter = 0;
+}
+
+static void __used test_local_trylock(void)
+{
+ local_lock(&test_local_trylock_data.lock);
+ this_cpu_add(test_local_trylock_data.counter, 1);
+ local_unlock(&test_local_trylock_data.lock);
+
+ if (local_trylock(&test_local_trylock_data.lock)) {
+ this_cpu_add(test_local_trylock_data.counter, 1);
+ local_unlock(&test_local_trylock_data.lock);
+ }
+}
+
+static DEFINE_WD_CLASS(ww_class);
+
+struct test_ww_mutex_data {
+ struct ww_mutex mtx;
+ int counter __guarded_by(&mtx);
+};
+
+static void __used test_ww_mutex_lock_noctx(struct test_ww_mutex_data *d)
+{
+ if (!ww_mutex_lock(&d->mtx, NULL)) {
+ d->counter++;
+ ww_mutex_unlock(&d->mtx);
+ }
+
+ if (!ww_mutex_lock_interruptible(&d->mtx, NULL)) {
+ d->counter++;
+ ww_mutex_unlock(&d->mtx);
+ }
+
+ if (ww_mutex_trylock(&d->mtx, NULL)) {
+ d->counter++;
+ ww_mutex_unlock(&d->mtx);
+ }
+
+ ww_mutex_lock_slow(&d->mtx, NULL);
+ d->counter++;
+ ww_mutex_unlock(&d->mtx);
+
+ ww_mutex_destroy(&d->mtx);
+}
+
+static void __used test_ww_mutex_lock_ctx(struct test_ww_mutex_data *d)
+{
+ struct ww_acquire_ctx ctx;
+
+ ww_acquire_init(&ctx, &ww_class);
+
+ if (!ww_mutex_lock(&d->mtx, &ctx)) {
+ d->counter++;
+ ww_mutex_unlock(&d->mtx);
+ }
+
+ if (!ww_mutex_lock_interruptible(&d->mtx, &ctx)) {
+ d->counter++;
+ ww_mutex_unlock(&d->mtx);
+ }
+
+ if (ww_mutex_trylock(&d->mtx, &ctx)) {
+ d->counter++;
+ ww_mutex_unlock(&d->mtx);
+ }
+
+ ww_mutex_lock_slow(&d->mtx, &ctx);
+ d->counter++;
+ ww_mutex_unlock(&d->mtx);
+
+ ww_acquire_done(&ctx);
+ ww_acquire_fini(&ctx);
+
+ ww_mutex_destroy(&d->mtx);
+}
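
Per the CONFIG_CONTEXT_ANALYSIS_TEST help text, new context locks should come with compile-only patterns in this file. A purely hypothetical sketch of what such an addition could look like for an invented 'foo_lock' type (none of the foo_* identifiers exist in the kernel):

	struct test_foo_lock_data {
		struct foo_lock lock;			/* hypothetical lock type */
		int counter __guarded_by(&lock);
	};

	static void __used test_foo_lock(struct test_foo_lock_data *d)
	{
		foo_lock(&d->lock);			/* hypothetical acquire */
		d->counter++;
		foo_unlock(&d->lock);			/* hypothetical release */

		if (foo_trylock(&d->lock)) {		/* hypothetical trylock */
			d->counter++;
			foo_unlock(&d->lock);
		}
	}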