// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN test with various race scenarios to test runtime behaviour. Since the
 * interface with which KCSAN's reports are obtained is via the console, this is
 * the output we should verify. Each test case checks for the presence (or
 * absence) of generated reports. Relies on the 'console' tracepoint to capture
 * reports as they appear in the kernel log.
 *
 * Makes use of KUnit for test organization, and the Torture framework for test
 * thread control.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Marco Elver <elver@google.com>
 */

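/*
 * To run this test, boot a KCSAN-enabled kernel (CONFIG_KCSAN=y) with
 * CONFIG_KCSAN_KUNIT_TEST enabled (built-in or as a module); results appear
 * in the regular KUnit output in the kernel log.
 */
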
#define pr_fmt(fmt) "kcsan_test: " fmt

#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/kcsan-checks.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/torture.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
#include <trace/events/printk.h>

#define KCSAN_TEST_REQUIRES(test, cond) do {			\
	if (!(cond))						\
		kunit_skip((test), "Test requires: " #cond);	\
} while (0)

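/*
 * With compiler support for compound read-before-write instrumentation,
 * plain read-modify-writes such as test_var++ are reported as compound
 * "read-write" accesses; without it, the expected access type falls back
 * to @alt.
 */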
#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
#define __KCSAN_ACCESS_RW(alt) (alt)
#endif

/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);

static struct task_struct **threads; /* Lists of threads. */
static unsigned long end_time;       /* End time of test. */

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[3][512];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Setup test checking loop. */
static __no_kcsan inline void
begin_test_checks(void (*func1)(void), void (*func2)(void))
{
	kcsan_disable_current();

	/*
	 * Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at
	 * least one race is reported.
	 */
	end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500);

	/* Signal start; release potential initialization of shared data. */
	smp_store_release(&access_kernels[0], func1);
	smp_store_release(&access_kernels[1], func2);
}

/* End test checking loop. */
static __no_kcsan inline bool
end_test_checks(bool stop)
{
	if (!stop && time_before(jiffies, end_time)) {
		/* Continue checking */
		might_sleep();
		return false;
	}

	kcsan_enable_current();
	return true;
}
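
/*
 * Typical test-case pattern (test_kernel_a/b and expect stand in for a
 * test's access kernels and expected report):
 *
 *	begin_test_checks(test_kernel_a, test_kernel_b);
 *	do {
 *		match = report_matches(&expect);
 *	} while (!end_test_checks(match));
 *	KUNIT_EXPECT_TRUE(test, match);
 */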

/*
 * Probe for console output: checks if a race was reported, and obtains observed
 * lines of interest.
 */
__no_kcsan
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	/*
	 * Note that KCSAN reports under a global lock, so we do not risk the
	 * possibility of having multiple reports interleaved. If that were the
	 * case, we'd expect tests to fail.
	 */

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KCSAN: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * A KCSAN report, and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if ((nlines == 1 || nlines == 2) && strnstr(buf, "bytes by", len)) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));

		if (strnstr(buf, "race at unknown origin", len)) {
			if (WARN_ON(nlines != 2))
				goto out;

			/* No second line of interest. */
			strcpy(observed.lines[nlines++], "<none>");
		}
	}

out:
	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
__no_kcsan
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Report information we expect in a report. */
struct expect_report {
	/* Access information of both accesses. */
	struct {
		void *fn;    /* Function pointer to expected function of top frame. */
		void *addr;  /* Address of access; unchecked if NULL. */
		size_t size; /* Size of access; unchecked if @addr is NULL. */
		int type;    /* Access type, see KCSAN_ACCESS definitions. */
	} access[2];
};
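
/*
 * Example (from test_basic() below): a plain write racing with a plain read
 * of test_var is described as:
 *
 *	.access = {
 *		{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
 *		{ test_kernel_read, &test_var, sizeof(test_var), 0 },
 *	},
 */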

/* Check observed report matches information in @r. */
__no_kcsan
static bool __report_matches(const struct expect_report *r)
{
	const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
	bool ret = false;
	unsigned long flags;
	typeof(*observed.lines) *expect;
	const char *end;
	char *cur;
	int i;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	expect = kmalloc(sizeof(observed.lines), GFP_KERNEL);
	if (WARN_ON(!expect))
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
			 is_assert ? "assert: race" : "data-race");
	if (r->access[1].fn) {
		char tmp[2][64];
		int cmp;

		/* Expect lexicographically sorted function names in title. */
		scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);
		scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);
		cmp = strcmp(tmp[0], tmp[1]);
		cur += scnprintf(cur, end - cur, "%ps / %ps",
				 cmp < 0 ? r->access[0].fn : r->access[1].fn,
				 cmp < 0 ? r->access[1].fn : r->access[0].fn);
	} else {
		scnprintf(cur, end - cur, "%pS", r->access[0].fn);
		/* The exact offset won't match, remove it. */
		cur = strchr(expect[0], '+');
		if (cur)
			*cur = '\0';
	}

	/* Access 1 */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];
	if (!r->access[1].fn)
		cur += scnprintf(cur, end - cur, "race at unknown origin, with ");

	/* Access 1 & 2 */
	for (i = 0; i < 2; ++i) {
		const int ty = r->access[i].type;
		const char *const access_type =
			(ty & KCSAN_ACCESS_ASSERT) ?
				      ((ty & KCSAN_ACCESS_WRITE) ?
					       "assert no accesses" :
					       "assert no writes") :
				      ((ty & KCSAN_ACCESS_WRITE) ?
					       ((ty & KCSAN_ACCESS_COMPOUND) ?
							"read-write" :
							"write") :
					       "read");
		const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC);
		const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED);
		const char *const access_type_aux =
				(is_atomic && is_scoped)	? " (marked, reordered)"
				: (is_atomic			? " (marked)"
				   : (is_scoped			? " (reordered)" : ""));

		if (i == 1) {
			/* Access 2 */
			cur = expect[2];
			end = &expect[2][sizeof(expect[2]) - 1];

			if (!r->access[1].fn) {
				/* Dummy string if no second access is available. */
				strcpy(cur, "<none>");
				break;
			}
		}

		cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
				 access_type_aux);

		if (r->access[i].addr) /* Address is optional. */
			cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
					 r->access[i].addr, r->access[i].size);
	}

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) &&
	      /* Access info may appear in any order. */
	      ((strstr(observed.lines[1], expect[1]) &&
		strstr(observed.lines[2], expect[2])) ||
	       (strstr(observed.lines[1], expect[2]) &&
		strstr(observed.lines[2], expect[1])));
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	kfree(expect);
	return ret;
}

static __always_inline const struct expect_report *
__report_set_scoped(struct expect_report *r, int accesses)
{
	BUILD_BUG_ON(accesses > 3);

	if (accesses & 1)
		r->access[0].type |= KCSAN_ACCESS_SCOPED;
	else
		r->access[0].type &= ~KCSAN_ACCESS_SCOPED;

	if (accesses & 2)
		r->access[1].type |= KCSAN_ACCESS_SCOPED;
	else
		r->access[1].type &= ~KCSAN_ACCESS_SCOPED;

	return r;
}

__no_kcsan
static bool report_matches_any_reordered(struct expect_report *r)
{
	return __report_matches(__report_set_scoped(r, 0)) ||
	       __report_matches(__report_set_scoped(r, 1)) ||
	       __report_matches(__report_set_scoped(r, 2)) ||
	       __report_matches(__report_set_scoped(r, 3));
}

#ifdef CONFIG_KCSAN_WEAK_MEMORY
/* Due to reordering accesses, any access may appear as "(reordered)". */
#define report_matches report_matches_any_reordered
#else
#define report_matches __report_matches
#endif

/* ===== Test kernels ===== */

static long test_sink;
static long test_var;
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
static struct {
	long val[8];
} test_struct;
static DEFINE_SEQLOCK(test_seqlock);
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_MUTEX(test_mutex);

/*
 * Helper to avoid the compiler optimizing out reads, and to generate source
 * values for writes.
 */
__no_kcsan
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }

/*
 * Generates a delay and some accesses that enter the runtime but do not produce
 * data races.
 */
static noinline void test_delay(int iter)
{
	while (iter--)
		sink_value(READ_ONCE(test_sink));
}

static noinline void test_kernel_read(void) { sink_value(test_var); }

static noinline void test_kernel_write(void)
{
	test_var = READ_ONCE_NOCHECK(test_sink) + 1;
}

static noinline void test_kernel_write_nochange(void) { test_var = 42; }

/* Suffixed by value-change exception filter. */
static noinline void test_kernel_write_nochange_rcu(void) { test_var = 42; }

static noinline void test_kernel_read_atomic(void)
{
	sink_value(READ_ONCE(test_var));
}

static noinline void test_kernel_write_atomic(void)
{
	WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
}

static noinline void test_kernel_atomic_rmw(void)
{
	/* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
	__atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}

__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) { test_var++; }

static noinline void test_kernel_data_race(void) { data_race(test_var++); }

static noinline void test_kernel_assert_writer(void)
{
	ASSERT_EXCLUSIVE_WRITER(test_var);
}

static noinline void test_kernel_assert_access(void)
{
	ASSERT_EXCLUSIVE_ACCESS(test_var);
}

#define TEST_CHANGE_BITS 0xff00ff00
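
/*
 * test_kernel_change_bits() only ever flips the bits in TEST_CHANGE_BITS; the
 * ASSERT_EXCLUSIVE_BITS() kernels below monitor either this mask (changes
 * expected, so a report is expected) or its complement (never changed, so no
 * report is expected).
 */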

static noinline void test_kernel_change_bits(void)
{
	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		/*
		 * Avoid race of unknown origin for this test, just pretend they
		 * are atomic.
		 */
		kcsan_nestable_atomic_begin();
		test_var ^= TEST_CHANGE_BITS;
		kcsan_nestable_atomic_end();
	} else
		WRITE_ONCE(test_var, READ_ONCE(test_var) ^ TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_change(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_nochange(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}

/*
 * Scoped assertions do trigger anywhere in scope. However, the report should
 * still only point at the start of the scope.
 */
static noinline void test_enter_scope(void)
{
	int x = 0;

	/* Unrelated accesses to scoped assert. */
	READ_ONCE(test_sink);
	kcsan_check_read(&x, sizeof(x));
}

static noinline void test_kernel_assert_writer_scoped(void)
{
	ASSERT_EXCLUSIVE_WRITER_SCOPED(test_var);
	test_enter_scope();
}

static noinline void test_kernel_assert_access_scoped(void)
{
	ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_var);
	test_enter_scope();
}

static noinline void test_kernel_rmw_array(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(test_array); ++i)
		test_array[i]++;
}

static noinline void test_kernel_write_struct(void)
{
	kcsan_check_write(&test_struct, sizeof(test_struct));
	kcsan_disable_current();
	test_struct.val[3]++; /* induce value change */
	kcsan_enable_current();
}

static noinline void test_kernel_write_struct_part(void)
{
	test_struct.val[3] = 42;
}

static noinline void test_kernel_read_struct_zero_size(void)
{
	kcsan_check_read(&test_struct.val[3], 0);
}

static noinline void test_kernel_jiffies_reader(void)
{
	sink_value((long)jiffies);
}

static noinline void test_kernel_seqlock_reader(void)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&test_seqlock);
		sink_value(test_var);
	} while (read_seqretry(&test_seqlock, seq));
}

static noinline void test_kernel_seqlock_writer(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&test_seqlock, flags);
	test_var++;
	write_sequnlock_irqrestore(&test_seqlock, flags);
}

static noinline void test_kernel_atomic_builtins(void)
{
	/*
	 * Generate concurrent accesses, expecting no reports, ensuring KCSAN
	 * treats builtin atomics as actually atomic.
	 */
	__atomic_load_n(&test_var, __ATOMIC_RELAXED);
}

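/*
 * Used by test_1bit_value_change(): with CONFIG_KCSAN_PERMISSIVE, races where
 * a plain read observes only a single-bit value change may be filtered, so
 * that test only expects a report in non-permissive configurations.
 */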
static noinline void test_kernel_xor_1bit(void)
{
	/* Do not report data races between the read-writes. */
	kcsan_nestable_atomic_begin();
	test_var ^= 0x10000;
	kcsan_nestable_atomic_end();
}

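/*
 * Generates a minimal test-and-set "lock" around a counter increment: @acquire
 * must atomically take the flag in test_struct.val[0], and @release must drop
 * it again. The "wrong_memorder" instantiations below use relaxed atomics, so
 * with CONFIG_KCSAN_WEAK_MEMORY the critical-section accesses may be modelled
 * as reordered past the lock and reported as racy.
 */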
#define TEST_KERNEL_LOCKED(name, acquire, release)		\
	static noinline void test_kernel_##name(void)		\
	{							\
		long *flag = &test_struct.val[0];		\
		long v = 0;					\
		if (!(acquire))					\
			return;					\
		while (v++ < 100) {				\
			test_var++;				\
			barrier();				\
		}						\
		release;					\
		test_delay(10);					\
	}

TEST_KERNEL_LOCKED(with_memorder,
		   cmpxchg_acquire(flag, 0, 1) == 0,
		   smp_store_release(flag, 0));
TEST_KERNEL_LOCKED(wrong_memorder,
		   cmpxchg_relaxed(flag, 0, 1) == 0,
		   WRITE_ONCE(*flag, 0));
TEST_KERNEL_LOCKED(atomic_builtin_with_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELEASE));
TEST_KERNEL_LOCKED(atomic_builtin_wrong_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELAXED));

/* ===== Test cases ===== */

/*
 * Tests that various barriers have the expected effect on internal state. Not
 * exhaustive on atomic_t operations. Unlike the selftest, also checks for
 * too-strict barrier instrumentation; such cases can be tolerated, because
 * they do not cause false positives, but at least we should be aware of them.
 */
static void test_barrier_nothreads(struct kunit *test)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	struct kcsan_scoped_access *reorder_access = &current->kcsan_ctx.reorder_access;
#else
	struct kcsan_scoped_access *reorder_access = NULL;
#endif
	arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
	atomic_t dummy;

	KCSAN_TEST_REQUIRES(test, reorder_access != NULL);
	KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP));

#define __KCSAN_EXPECT_BARRIER(access_type, barrier, order_before, name)			\
	do {											\
		reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED;			\
		reorder_access->size = sizeof(test_var);					\
		barrier;									\
		KUNIT_EXPECT_EQ_MSG(test, reorder_access->size,					\
				    order_before ? 0 : sizeof(test_var),			\
				    "improperly instrumented type=(" #access_type "): " name);	\
	} while (0)
#define KCSAN_EXPECT_READ_BARRIER(b, o)  __KCSAN_EXPECT_BARRIER(0, b, o, #b)
#define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b)
#define KCSAN_EXPECT_RW_BARRIER(b, o)    __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b)
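
/*
 * Each KCSAN_EXPECT_*_BARRIER() primes the reorder slot with an access of the
 * given type, executes the barrier, and then checks reorder_access->size: a
 * correctly instrumented barrier flushes the slot (size becomes 0) exactly
 * when it is expected to order that access type.
 */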

	/*
	 * Lockdep initialization can strengthen certain locking operations due
	 * to calling into instrumented files; "warm up" our locks.
	 */
	spin_lock(&test_spinlock);
	spin_unlock(&test_spinlock);
	mutex_lock(&test_mutex);
	mutex_unlock(&test_mutex);

	/* Force creating a valid entry in reorder_access first. */
	test_var = 0;
	while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var))
		__kcsan_check_read(&test_var, sizeof(test_var));
	KUNIT_ASSERT_EQ(test, reorder_access->size, sizeof(test_var));

	kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. */

	KCSAN_EXPECT_READ_BARRIER(mb(), true);
	KCSAN_EXPECT_READ_BARRIER(wmb(), false);
	KCSAN_EXPECT_READ_BARRIER(rmb(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_mb(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_wmb(), false);
	KCSAN_EXPECT_READ_BARRIER(smp_rmb(), true);
	KCSAN_EXPECT_READ_BARRIER(dma_wmb(), false);
	KCSAN_EXPECT_READ_BARRIER(dma_rmb(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_mb__before_atomic(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_mb__after_atomic(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_store_mb(test_var, 0), true);
	KCSAN_EXPECT_READ_BARRIER(smp_load_acquire(&test_var), false);
	KCSAN_EXPECT_READ_BARRIER(smp_store_release(&test_var, 0), true);
	KCSAN_EXPECT_READ_BARRIER(xchg(&test_var, 0), true);
	KCSAN_EXPECT_READ_BARRIER(xchg_release(&test_var, 0), true);
	KCSAN_EXPECT_READ_BARRIER(xchg_relaxed(&test_var, 0), false);
	KCSAN_EXPECT_READ_BARRIER(cmpxchg(&test_var, 0, 0), true);
	KCSAN_EXPECT_READ_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
	KCSAN_EXPECT_READ_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_read(&dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_read_acquire(&dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_set(&dummy, 0), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_set_release(&dummy, 0), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_add(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_add_return(1, &dummy), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_add_return_acquire(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_add_return_release(1, &dummy), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add(1, &dummy), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_release(1, &dummy), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(test_and_set_bit(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(test_and_clear_bit(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(test_and_change_bit(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(__clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false);
	KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true);
	KCSAN_EXPECT_READ_BARRIER(spin_lock(&test_spinlock), false);
	KCSAN_EXPECT_READ_BARRIER(spin_unlock(&test_spinlock), true);
	KCSAN_EXPECT_READ_BARRIER(mutex_lock(&test_mutex), false);
	KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&test_mutex), true);

	KCSAN_EXPECT_WRITE_BARRIER(mb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(wmb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(rmb(), false);
	KCSAN_EXPECT_WRITE_BARRIER(smp_mb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_wmb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_rmb(), false);
	KCSAN_EXPECT_WRITE_BARRIER(dma_wmb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(dma_rmb(), false);
	KCSAN_EXPECT_WRITE_BARRIER(smp_mb__before_atomic(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_atomic(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_store_mb(test_var, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_load_acquire(&test_var), false);
	KCSAN_EXPECT_WRITE_BARRIER(smp_store_release(&test_var, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(xchg(&test_var, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(xchg_release(&test_var, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(xchg_relaxed(&test_var, 0), false);
	KCSAN_EXPECT_WRITE_BARRIER(cmpxchg(&test_var, 0, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_read(&dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_read_acquire(&dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_set(&dummy, 0), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_set_release(&dummy, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return(1, &dummy), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_acquire(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_release(1, &dummy), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add(1, &dummy), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(test_and_set_bit(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(test_and_clear_bit(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(test_and_change_bit(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(__clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false);
	KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true);
	KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&test_spinlock), false);
	KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&test_spinlock), true);
	KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&test_mutex), false);
	KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&test_mutex), true);

	KCSAN_EXPECT_RW_BARRIER(mb(), true);
	KCSAN_EXPECT_RW_BARRIER(wmb(), true);
	KCSAN_EXPECT_RW_BARRIER(rmb(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_mb(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_wmb(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_rmb(), true);
	KCSAN_EXPECT_RW_BARRIER(dma_wmb(), true);
	KCSAN_EXPECT_RW_BARRIER(dma_rmb(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_mb__before_atomic(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_mb__after_atomic(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_store_mb(test_var, 0), true);
	KCSAN_EXPECT_RW_BARRIER(smp_load_acquire(&test_var), false);
	KCSAN_EXPECT_RW_BARRIER(smp_store_release(&test_var, 0), true);
	KCSAN_EXPECT_RW_BARRIER(xchg(&test_var, 0), true);
	KCSAN_EXPECT_RW_BARRIER(xchg_release(&test_var, 0), true);
	KCSAN_EXPECT_RW_BARRIER(xchg_relaxed(&test_var, 0), false);
	KCSAN_EXPECT_RW_BARRIER(cmpxchg(&test_var, 0, 0), true);
	KCSAN_EXPECT_RW_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
	KCSAN_EXPECT_RW_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_read(&dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_read_acquire(&dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_set(&dummy, 0), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_set_release(&dummy, 0), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_add(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_add_return(1, &dummy), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_add_return_acquire(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_add_return_release(1, &dummy), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add(1, &dummy), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_release(1, &dummy), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(test_and_set_bit(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(test_and_clear_bit(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(test_and_change_bit(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(__clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false);
	KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true);
	KCSAN_EXPECT_RW_BARRIER(spin_lock(&test_spinlock), false);
	KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true);
	KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
	KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);

#ifdef clear_bit_unlock_is_negative_byte
	KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
#endif
	kcsan_nestable_atomic_end();
}

/* Simple test with normal data race. */
__no_kcsan
static void test_basic(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	struct expect_report never = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write, test_kernel_read);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect);
	KUNIT_EXPECT_FALSE(test, match_never);
}

/*
 * Stress KCSAN with lots of concurrent races on different addresses until
 * timeout.
 */
__no_kcsan
static void test_concurrent_races(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			/* NULL will match any address. */
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
		},
	};
	struct expect_report never = {
		.access = {
			{ test_kernel_rmw_array, NULL, 0, 0 },
			{ test_kernel_rmw_array, NULL, 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_rmw_array, test_kernel_rmw_array);
	do {
		match_expect |= report_matches(&expect);
		match_never |= report_matches(&never);
	} while (!end_test_checks(false));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check matches exist. */
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test the KCSAN_REPORT_VALUE_CHANGE_ONLY option. */
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
	struct expect_report expect_rw = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	struct expect_report expect_ww = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	test_kernel_write_nochange(); /* Reset value. */
	begin_test_checks(test_kernel_write_nochange, test_kernel_read);
	do {
		match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that the rules exempting certain accesses from the
 * KCSAN_REPORT_VALUE_CHANGE_ONLY option (where it must never apply) work.
 */
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
	struct expect_report expect_rw = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	struct expect_report expect_ww = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	test_kernel_write_nochange_rcu(); /* Reset value. */
	begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
	do {
		match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that data races of unknown origin are reported. */
__no_kcsan
static void test_unknown_origin(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ NULL },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_uninstrumented, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}

/* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
__no_kcsan
static void test_write_write_assume_atomic(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write, test_kernel_write);
	do {
		sink_value(READ_ONCE(test_var)); /* induce value-change */
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that data races with writes larger than word-size are always reported,
 * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that data races where only one write is larger than word-size are always
 * reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct_part(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct_part);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that races with atomic accesses never result in reports. */
__no_kcsan
static void test_read_atomic_write_atomic(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_read_atomic, test_kernel_write_atomic);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test that a race between an atomic and a plain access results in a report. */
__no_kcsan
static void test_read_plain_atomic_write(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));

	begin_test_checks(test_kernel_read, test_kernel_write_atomic);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that atomic RMWs generate a correct report. */
__no_kcsan
static void test_read_plain_atomic_rmw(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_atomic_rmw, &test_var, sizeof(test_var),
				KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));

	begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Zero-sized accesses should never cause data race reports. */
__no_kcsan
static void test_zero_size_access(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	struct expect_report never = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_read_struct_zero_size);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check. */
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test the data_race() macro. */
__no_kcsan
static void test_data_race(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_data_race, test_kernel_data_race);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_writer(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_writer, test_kernel_write_nochange);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_access(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_access_writer(struct kunit *test)
{
	struct expect_report expect_access_writer = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	struct expect_report expect_access_access = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
		},
	};
	struct expect_report never = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	bool match_expect_access_writer = false;
	bool match_expect_access_access = false;
	bool match_never = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_assert_writer);
	do {
		match_expect_access_writer |= report_matches(&expect_access_writer);
		match_expect_access_access |= report_matches(&expect_access_access);
		match_never |= report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect_access_writer);
	KUNIT_EXPECT_TRUE(test, match_expect_access_access);
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_bits_change(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_change_bits, &test_var, sizeof(test_var),
				KCSAN_ACCESS_WRITE | (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) ? 0 : KCSAN_ACCESS_ATOMIC) },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_bits_change, test_kernel_change_bits);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_bits_nochange(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_assert_bits_nochange, test_kernel_change_bits);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_writer_scoped(struct kunit *test)
{
	struct expect_report expect_start = {
		.access = {
			{ test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	struct expect_report expect_inscope = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect_start = false;
	bool match_expect_inscope = false;

	begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
	do {
		match_expect_start |= report_matches(&expect_start);
		match_expect_inscope |= report_matches(&expect_inscope);
	} while (!end_test_checks(match_expect_inscope));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}

__no_kcsan
static void test_assert_exclusive_access_scoped(struct kunit *test)
{
	struct expect_report expect_start1 = {
		.access = {
			{ test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	struct expect_report expect_start2 = {
		.access = { expect_start1.access[0], expect_start1.access[0] },
	};
	struct expect_report expect_inscope = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect_start = false;
	bool match_expect_inscope = false;

	begin_test_checks(test_kernel_assert_access_scoped, test_kernel_read);
	end_time += msecs_to_jiffies(1000); /* This test requires a bit more time. */
	do {
		match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
		match_expect_inscope |= report_matches(&expect_inscope);
	} while (!end_test_checks(match_expect_inscope));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}

/*
 * jiffies is special (declared to be volatile) and its accesses are typically
 * not marked; this test ensures that neither the compiler nor KCSAN gets
 * confused by jiffies' declaration on different architectures.
 */
__no_kcsan
static void test_jiffies_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_jiffies_reader, test_kernel_jiffies_reader);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test that racing accesses in seqlock critical sections are not reported. */
__no_kcsan
static void test_seqlock_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_seqlock_reader, test_kernel_seqlock_writer);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/*
 * Test atomic builtins work and required instrumentation functions exist. We
 * also test that KCSAN understands they're atomic by racing with them via
 * test_kernel_atomic_builtins(), and expect no reports.
 *
 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
 */
static void test_atomic_builtins(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
	do {
		long tmp;

		kcsan_enable_current();

		__atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
		KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));

		KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 20L, test_var);

		tmp = 20L;
		KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
								    0, __ATOMIC_RELAXED,
								    __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 20L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);
		KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
								     1, __ATOMIC_RELAXED,
								     __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 30L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);

		/* Each __atomic_fetch_* operation returns the previous value. */
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, -2L, test_var);

		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		__atomic_signal_fence(__ATOMIC_SEQ_CST);

		kcsan_disable_current();

		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_1bit_value_change(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
		},
	};
	bool match = false;

	begin_test_checks(test_kernel_read, test_kernel_xor_1bit);
	do {
		match = IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)
				? report_available()
				: report_matches(&expect);
	} while (!end_test_checks(match));
	if (IS_ENABLED(CONFIG_KCSAN_PERMISSIVE))
		KUNIT_EXPECT_FALSE(test, match);
	else
		KUNIT_EXPECT_TRUE(test, match);
}

__no_kcsan
static void test_correct_barrier(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
		},
	};
	bool match_expect = false;

	test_struct.val[0] = 0; /* init unlocked */
	begin_test_checks(test_kernel_with_memorder, test_kernel_with_memorder);
	do {
		match_expect = report_matches_any_reordered(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_missing_barrier(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
		},
	};
	bool match_expect = false;

	test_struct.val[0] = 0; /* init unlocked */
	begin_test_checks(test_kernel_wrong_memorder, test_kernel_wrong_memorder);
	do {
		match_expect = report_matches_any_reordered(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_atomic_builtins_correct_barrier(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
		},
	};
	bool match_expect = false;

	test_struct.val[0] = 0; /* init unlocked */
	begin_test_checks(test_kernel_atomic_builtin_with_memorder,
			  test_kernel_atomic_builtin_with_memorder);
	do {
		match_expect = report_matches_any_reordered(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_atomic_builtins_missing_barrier(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
		},
	};
	bool match_expect = false;

	test_struct.val[0] = 0; /* init unlocked */
	begin_test_checks(test_kernel_atomic_builtin_wrong_memorder,
			  test_kernel_atomic_builtin_wrong_memorder);
	do {
		match_expect = report_matches_any_reordered(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}

/*
 * Generate thread counts for all test cases. Values generated are in interval
 * [2, 5] followed by exponentially increasing thread counts from 8 to 32 (i.e.
 * the sequence 2, 3, 4, 5, 8, 16, 32).
 *
 * The thread counts are chosen to cover potentially interesting boundaries and
 * corner cases (2 to 5), and then stress the system with larger counts.
 */
static const void *nthreads_gen_params(const void *prev, char *desc)
{
	long nthreads = (long)prev;

	if (nthreads < 0 || nthreads >= 32)
		nthreads = 0; /* stop */
	else if (!nthreads)
		nthreads = 2; /* initial value */
	else if (nthreads < 5)
		nthreads++;
	else if (nthreads == 5)
		nthreads = 8;
	else
		nthreads *= 2;

	if (!preempt_model_preemptible() ||
	    !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) {
		/*
		 * Without any preemption, keep 2 CPUs free for other tasks, one
		 * of which is the main test case function checking for
		 * completion or failure.
		 */
		const long min_unused_cpus = preempt_model_none() ? 2 : 0;
		const long min_required_cpus = 2 + min_unused_cpus;

		if (num_online_cpus() < min_required_cpus) {
			pr_err_once("Too few online CPUs (%u < %ld) for test\n",
				    num_online_cpus(), min_required_cpus);
			nthreads = 0;
		} else if (nthreads >= num_online_cpus() - min_unused_cpus) {
			/* Use negative value to indicate last param. */
			nthreads = -(num_online_cpus() - min_unused_cpus);
			pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n",
				     -nthreads, num_online_cpus());
		}
	}

	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads));
	return (void *)nthreads;
}

#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
static struct kunit_case kcsan_test_cases[] = {
	KUNIT_CASE(test_barrier_nothreads),
	KCSAN_KUNIT_CASE(test_basic),
	KCSAN_KUNIT_CASE(test_concurrent_races),
	KCSAN_KUNIT_CASE(test_novalue_change),
	KCSAN_KUNIT_CASE(test_novalue_change_exception),
	KCSAN_KUNIT_CASE(test_unknown_origin),
	KCSAN_KUNIT_CASE(test_write_write_assume_atomic),
	KCSAN_KUNIT_CASE(test_write_write_struct),
	KCSAN_KUNIT_CASE(test_write_write_struct_part),
	KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
	KCSAN_KUNIT_CASE(test_zero_size_access),
	KCSAN_KUNIT_CASE(test_data_race),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_change),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_nochange),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer_scoped),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
	KCSAN_KUNIT_CASE(test_jiffies_noreport),
	KCSAN_KUNIT_CASE(test_seqlock_noreport),
	KCSAN_KUNIT_CASE(test_atomic_builtins),
	KCSAN_KUNIT_CASE(test_1bit_value_change),
	KCSAN_KUNIT_CASE(test_correct_barrier),
	KCSAN_KUNIT_CASE(test_missing_barrier),
	KCSAN_KUNIT_CASE(test_atomic_builtins_correct_barrier),
	KCSAN_KUNIT_CASE(test_atomic_builtins_missing_barrier),
	{},
};

/* ===== End test cases ===== */

/* Concurrent accesses from interrupts. */
__no_kcsan
static void access_thread_timer(struct timer_list *timer)
{
	static atomic_t cnt = ATOMIC_INIT(0);
	unsigned int idx;
	void (*func)(void);

	idx = (unsigned int)atomic_inc_return(&cnt) % ARRAY_SIZE(access_kernels);
	/* Acquire potential initialization. */
	func = smp_load_acquire(&access_kernels[idx]);
	if (func)
		func();
}

/* The main loop for each thread. */
__no_kcsan
static int access_thread(void *arg)
{
	struct timer_list timer;
	unsigned int cnt = 0;
	unsigned int idx;
	void (*func)(void);

	timer_setup_on_stack(&timer, access_thread_timer, 0);
	do {
		might_sleep();

		if (!timer_pending(&timer))
			mod_timer(&timer, jiffies + 1);
		else {
			/* Iterate through all kernels. */
			idx = cnt++ % ARRAY_SIZE(access_kernels);
			/* Acquire potential initialization. */
			func = smp_load_acquire(&access_kernels[idx]);
			if (func)
				func();
		}
	} while (!torture_must_stop());
	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	torture_kthread_stopping("access_thread");
	return 0;
}

__no_kcsan
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int nthreads;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); ++i)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	if (strstr(test->name, "nothreads"))
		return 0;

	if (!torture_init_begin((char *)test->name, 1))
		return -EBUSY;

	if (WARN_ON(threads))
		goto err;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i) {
		if (WARN_ON(access_kernels[i]))
			goto err;
	}

	nthreads = abs((long)test->param_value);
	if (WARN_ON(!nthreads))
		goto err;

	threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL);
	if (WARN_ON(!threads))
		goto err;

	threads[nthreads] = NULL;
	for (i = 0; i < nthreads; ++i) {
		if (torture_create_kthread(access_thread, NULL, threads[i]))
			goto err;
	}

	torture_init_end();

	return 0;

err:
	kfree(threads);
	threads = NULL;
	torture_init_end();
	return -EINVAL;
}

__no_kcsan
static void test_exit(struct kunit *test)
{
	struct task_struct **stop_thread;
	int i;

	if (strstr(test->name, "nothreads"))
		return;

	if (torture_cleanup_begin())
		return;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i)
		WRITE_ONCE(access_kernels[i], NULL);

	if (threads) {
		for (stop_thread = threads; *stop_thread; stop_thread++)
			torture_stop_kthread(access_thread, *stop_thread);

		kfree(threads);
		threads = NULL;
	}

	torture_cleanup_end();
}

__no_kcsan
static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

__no_kcsan
static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

static int kcsan_suite_init(struct kunit_suite *suite)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return 0;
}

static void kcsan_suite_exit(struct kunit_suite *suite)
{
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

static struct kunit_suite kcsan_test_suite = {
	.name = "kcsan",
	.test_cases = kcsan_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kcsan_suite_init,
	.suite_exit = kcsan_suite_exit,
};

kunit_test_suites(&kcsan_test_suite);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Marco Elver <elver@google.com>");