// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan: test: " fmt

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

#include <asm/page.h>

#include "kasan.h"

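/*
 * With the tag-based modes, out-of-bounds accesses are only caught at
 * KASAN_GRANULE_SIZE granularity, so several tests below add this offset
 * to push the bad access past the granule that covers the object.
 */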
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

static bool multishot;

/* Fields set based on lines observed in the console. */
static struct {
	bool report_found;
	bool async_fault;
} test_status;

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

/* Probe for console output: obtains test_status lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	if (strnstr(buf, "BUG: KASAN: ", len))
		WRITE_ONCE(test_status.report_found, true);
	else if (strnstr(buf, "Asynchronous fault: ", len))
		WRITE_ONCE(test_status.async_fault, true);
}

static int kasan_suite_init(struct kunit_suite *suite)
{
	if (!kasan_enabled()) {
		pr_err("Can't run KASAN tests with KASAN disabled");
		return -1;
	}

	/* Stop failing KUnit tests on KASAN reports. */
	kasan_kunit_test_suite_start();

	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	multishot = kasan_save_enable_multi_shot();

	register_trace_console(probe_console, NULL);
	return 0;
}

static void kasan_suite_exit(struct kunit_suite *suite)
{
	kasan_kunit_test_suite_end();
	kasan_restore_multi_shot(multishot);
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}

static void kasan_test_exit(struct kunit *test)
{
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
}

/**
 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
 * KASAN report; causes a KUnit test failure otherwise.
 *
 * @test: Currently executing KUnit test.
 * @expression: Expression that must produce a KASAN report.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    !READ_ONCE(test_status.async_fault))		\
			kasan_enable_hw_tags();				\
		migrate_enable();					\
	}								\
	WRITE_ONCE(test_status.report_found, false);			\
	WRITE_ONCE(test_status.async_fault, false);			\
} while (0)
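
/*
 * Illustrative usage (mirrors the tests below): expect a report from a
 * use-after-free read.
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	kfree(ptr);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
 */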

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
		break;  /* No compiler instrumentation. */		\
	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX))	\
		break;  /* Should always be instrumented! */		\
	if (IS_ENABLED(CONFIG_GENERIC_ENTRY))				\
		kunit_skip((test), "Test requires checked mem*()");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * Check that KASAN detects an out-of-bounds access for a big object allocated
 * via kmalloc(). But not as big as to trigger the page_alloc fallback.
 */
static void kmalloc_big_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
 * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
 * that does not fit into the largest slab cache and therefore is allocated via
 * the page_alloc fallback.
 */

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_large_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void page_alloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void page_alloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes all size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_large_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_large_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_NULL(test, ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only check
 * the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 4;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 8;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 16;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

/*
 * Check that KASAN detects use-after-free when another object was allocated in
 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
 */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

/* Check that ksize() does NOT unpoison whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	size_t real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	ptr[0] = 'x';
	ptr[size - 1] = 'x';

	/* These must trigger a KASAN report. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

/*
 * The two tests below check that Generic KASAN prints auxiliary stack traces
 * for RCU callbacks and workqueues. The reports need to be inspected manually.
 *
 * These tests are still enabled for other KASAN modes to make sure that all
 * modes report bad accesses in tested scenarios.
 */

static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;

static void rcu_uaf_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp =
		container_of(rp, struct kasan_rcu_info, rcu);

	kfree(fp);
	((volatile struct kasan_rcu_info *)fp)->i;
}

static void rcu_uaf(struct kunit *test)
{
	struct kasan_rcu_info *ptr;

	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	global_rcu_ptr = rcu_dereference_protected(
				(struct kasan_rcu_info __rcu *)ptr, NULL);

	KUNIT_EXPECT_KASAN_FAIL(test,
		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
		rcu_barrier());
}

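/* The work handler frees its own work_struct; any later access is a UAF. */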
static void workqueue_uaf_work(struct work_struct *work)
{
	kfree(work);
}

static void workqueue_uaf(struct kunit *test)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_workqueue_test");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

	INIT_WORK(work, workqueue_uaf_work);
	queue_work(workqueue, work);
	destroy_workqueue(workqueue);

	KUNIT_EXPECT_KASAN_FAIL(test,
		((volatile struct work_struct *)work)->data);
}

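/*
 * The two tests below check that kfree() accepts pointers recovered through
 * page and physical-address translations without raising false positives.
 */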
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_kmalloc_pool(pool, pool_size, size);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Allocate one element to prevent mempool from freeing elements to the
	 * underlying allocator and instead make it add them to the element
	 * list when the tests trigger double-free and invalid-free bugs.
	 * This allows testing KASAN annotations in add_element().
	 */
	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}

static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
{
	struct kmem_cache *cache;
	int pool_size = 4;
	int ret;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_slab_pool(pool, pool_size, cache);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Do not allocate one preallocated element, as we skip the double-free
	 * and invalid-free tests for slab mempool for simplicity.
	 */

	return cache;
}

static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_page_pool(pool, pool_size, order);
	KUNIT_ASSERT_EQ(test, ret, 0);

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}

static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	OPTIMIZER_HIDE_VAR(elem);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[size])[0]);
	else
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);

	mempool_free(elem, pool);
}

static void mempool_kmalloc_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_large_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_slab_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = 123;
	struct kmem_cache *cache;

	cache = mempool_prepare_slab(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_exit(&pool);
	kmem_cache_destroy(cache);
}

/*
 * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
 * allocations have no redzones, and thus the out-of-bounds detection is not
 * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
 * the tag-based KASAN modes, the neighboring allocation might have the same
 * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
 */

static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
{
	char *elem, *ptr;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	ptr = page ? page_address((struct page *)elem) : elem;
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void mempool_kmalloc_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_large_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_slab_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 123;
	struct kmem_cache *cache;

	cache = mempool_prepare_slab(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_exit(&pool);
	kmem_cache_destroy(cache);
}

static void mempool_page_alloc_uaf(struct kunit *test)
{
	mempool_t pool;
	int order = 2;
	void *extra_elem;

	extra_elem = mempool_prepare_page(test, &pool, order);

	mempool_uaf_helper(test, &pool, true);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
}

static void mempool_kmalloc_double_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_large_double_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_page_alloc_double_free(struct kunit *test)
{
	mempool_t pool;
	int order = 2;
	char *extra_elem;

	extra_elem = mempool_prepare_page(test, &pool, order);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));

	mempool_free(elem, pool);
}

static void mempool_kmalloc_invalid_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_kmalloc_invalid_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_large_invalid_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_kmalloc_invalid_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

/*
 * Skip the invalid-free test for page mempool. The invalid-free detection only
 * works for compound pages and mempool preallocates all page elements without
 * the __GFP_COMP flag.
 */

static char global_array[10];

static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_global_oob_left(struct kunit *test)
{
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
	if (nr < 7)
		KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				xor_unlock_is_negative_byte(1 << nr, addr));
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
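	/* set_memory_*() helpers are not exported to modules, hence this guard. */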
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}

static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}

static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
	char __percpu *ptr;
	int cpu;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons percpu mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		char *c_ptr = per_cpu_ptr(ptr, cpu);

		KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

		/* Make sure that in-bounds accesses don't crash the kernel. */
		*c_ptr = 0;
	}

	free_percpu(ptr);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = get_random_u32_inclusive(1, 4);
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!kasan_vmalloc_enabled())
		return;

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		/*
		 * For Software Tag-Based KASAN, skip the majority of tag
		 * values to avoid the test printing too many reports.
		 */
		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
		    tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
			continue;

		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_big_oob_right),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_large_uaf),
	KUNIT_CASE(kmalloc_large_invalid_free),
	KUNIT_CASE(page_alloc_oob_right),
	KUNIT_CASE(page_alloc_uaf),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_large_more_oob),
	KUNIT_CASE(krealloc_large_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(mempool_kmalloc_oob_right),
	KUNIT_CASE(mempool_kmalloc_large_oob_right),
	KUNIT_CASE(mempool_slab_oob_right),
	KUNIT_CASE(mempool_kmalloc_uaf),
	KUNIT_CASE(mempool_kmalloc_large_uaf),
	KUNIT_CASE(mempool_slab_uaf),
	KUNIT_CASE(mempool_page_alloc_uaf),
	KUNIT_CASE(mempool_kmalloc_double_free),
	KUNIT_CASE(mempool_kmalloc_large_double_free),
	KUNIT_CASE(mempool_page_alloc_double_free),
	KUNIT_CASE(mempool_kmalloc_invalid_free),
	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");