   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * KFENCE guarded object allocator and fault handling.
   4 *
   5 * Copyright (C) 2020, Google LLC.
   6 */
   7
   8#define pr_fmt(fmt) "kfence: " fmt
   9
  10#include <linux/atomic.h>
  11#include <linux/bug.h>
  12#include <linux/debugfs.h>
  13#include <linux/hash.h>
  14#include <linux/irq_work.h>
  15#include <linux/jhash.h>
  16#include <linux/kcsan-checks.h>
  17#include <linux/kfence.h>
  18#include <linux/kmemleak.h>
  19#include <linux/list.h>
  20#include <linux/lockdep.h>
  21#include <linux/log2.h>
  22#include <linux/memblock.h>
  23#include <linux/moduleparam.h>
  24#include <linux/notifier.h>
  25#include <linux/panic_notifier.h>
  26#include <linux/random.h>
  27#include <linux/rcupdate.h>
  28#include <linux/sched/clock.h>
  29#include <linux/seq_file.h>
  30#include <linux/slab.h>
  31#include <linux/spinlock.h>
  32#include <linux/string.h>
  33
  34#include <asm/kfence.h>
  35
  36#include "kfence.h"
  37
  38/* Disables KFENCE on the first warning assuming an irrecoverable error. */
  39#define KFENCE_WARN_ON(cond)                                                   \
  40	({                                                                     \
  41		const bool __cond = WARN_ON(cond);                             \
  42		if (unlikely(__cond)) {                                        \
  43			WRITE_ONCE(kfence_enabled, false);                     \
  44			disabled_by_warn = true;                               \
  45		}                                                              \
  46		__cond;                                                        \
  47	})
  48
  49/* === Data ================================================================= */
  50
  51static bool kfence_enabled __read_mostly;
  52static bool disabled_by_warn __read_mostly;
  53
  54unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
  55EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
  56
  57#ifdef MODULE_PARAM_PREFIX
  58#undef MODULE_PARAM_PREFIX
  59#endif
  60#define MODULE_PARAM_PREFIX "kfence."
  61
  62static int kfence_enable_late(void);
  63static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
  64{
  65	unsigned long num;
  66	int ret = kstrtoul(val, 0, &num);
  67
  68	if (ret < 0)
  69		return ret;
  70
  71	/* Using 0 to indicate KFENCE is disabled. */
  72	if (!num && READ_ONCE(kfence_enabled)) {
  73		pr_info("disabled\n");
  74		WRITE_ONCE(kfence_enabled, false);
  75	}
  76
  77	*((unsigned long *)kp->arg) = num;
  78
  79	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
  80		return disabled_by_warn ? -EINVAL : kfence_enable_late();
  81	return 0;
  82}
  83
  84static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
  85{
  86	if (!READ_ONCE(kfence_enabled))
  87		return sprintf(buffer, "0\n");
  88
  89	return param_get_ulong(buffer, kp);
  90}
  91
  92static const struct kernel_param_ops sample_interval_param_ops = {
  93	.set = param_set_sample_interval,
  94	.get = param_get_sample_interval,
  95};
  96module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
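/*
 * Usage example: the sample interval can be set at boot time on the kernel
 * command line, e.g. "kfence.sample_interval=100" (in milliseconds; 0
 * disables KFENCE), or changed at runtime by root via the parameter
 * registered above (mode 0600):
 *
 *	echo 500 > /sys/module/kfence/parameters/sample_interval
 *	echo 0 > /sys/module/kfence/parameters/sample_interval    # disable
 */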
  97
   98/* Pool usage threshold in percent above which currently covered allocations are skipped. */
  99static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
 100module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
 101
 102/* If true, use a deferrable timer. */
 103static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
 104module_param_named(deferrable, kfence_deferrable, bool, 0444);
 105
 106/* If true, check all canary bytes on panic. */
 107static bool kfence_check_on_panic __read_mostly;
 108module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
 109
 110/* The pool of pages used for guard pages and objects. */
 111char *__kfence_pool __read_mostly;
 112EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
 113
 114/*
 115 * Per-object metadata, with one-to-one mapping of object metadata to
 116 * backing pages (in __kfence_pool).
 117 */
 118static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
 119struct kfence_metadata *kfence_metadata __read_mostly;
 120
 121/*
 122 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
 123 * So introduce kfence_metadata_init to initialize metadata, and then make
 124 * kfence_metadata visible after initialization is successful. This prevents
 125 * potential UAF or access to uninitialized metadata.
 126 */
 127static struct kfence_metadata *kfence_metadata_init __read_mostly;
 128
 129/* Freelist with available objects. */
 130static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
 131static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
 132
 133/*
 134 * The static key to set up a KFENCE allocation; or if static keys are not used
 135 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 136 */
 137DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
 138
 139/* Gates the allocation, ensuring only one succeeds in a given period. */
 140atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
 141
 142/*
 143 * A Counting Bloom filter of allocation coverage: limits currently covered
 144 * allocations of the same source filling up the pool.
 145 *
 146 * Assuming a range of 15%-85% unique allocations in the pool at any point in
  147 * time, the below parameters provide a probability of 0.02-0.33 for false
 148 * positive hits respectively:
 149 *
  150 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE))) ^ HNUM
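 *
 * Worked example (illustrative; assumes the default CONFIG_KFENCE_NUM_OBJECTS
 * of 255, i.e. SIZE = 512): at 85% unique allocations (~217 tracked sources),
 * P = (1 - e^(-2 * 217/512))^2 ~= 0.33; at 15% (~38 sources), P ~= 0.02.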
 151 */
 152#define ALLOC_COVERED_HNUM	2
 153#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
 154#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
 155#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
 156#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
 157static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
 158
 159/* Stack depth used to determine uniqueness of an allocation. */
 160#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
 161
 162/*
 163 * Randomness for stack hashes, making the same collisions across reboots and
 164 * different machines less likely.
 165 */
 166static u32 stack_hash_seed __ro_after_init;
 167
 168/* Statistics counters for debugfs. */
 169enum kfence_counter_id {
 170	KFENCE_COUNTER_ALLOCATED,
 171	KFENCE_COUNTER_ALLOCS,
 172	KFENCE_COUNTER_FREES,
 173	KFENCE_COUNTER_ZOMBIES,
 174	KFENCE_COUNTER_BUGS,
 175	KFENCE_COUNTER_SKIP_INCOMPAT,
 176	KFENCE_COUNTER_SKIP_CAPACITY,
 177	KFENCE_COUNTER_SKIP_COVERED,
 178	KFENCE_COUNTER_COUNT,
 179};
 180static atomic_long_t counters[KFENCE_COUNTER_COUNT];
 181static const char *const counter_names[] = {
 182	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
 183	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
 184	[KFENCE_COUNTER_FREES]		= "total frees",
 185	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
 186	[KFENCE_COUNTER_BUGS]		= "total bugs",
 187	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
 188	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
 189	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
 190};
 191static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
 192
 193/* === Internals ============================================================ */
 194
 195static inline bool should_skip_covered(void)
 196{
 197	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
 198
 199	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
 200}
 201
 202static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
 203{
 204	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
 205	num_entries = filter_irq_stacks(stack_entries, num_entries);
 206	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
 207}
 208
 209/*
 210 * Adds (or subtracts) count @val for allocation stack trace hash
 211 * @alloc_stack_hash from Counting Bloom filter.
 212 */
 213static void alloc_covered_add(u32 alloc_stack_hash, int val)
 214{
 215	int i;
 216
 217	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
 218		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
 219		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
 220	}
 221}
 222
 223/*
 224 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 225 * currently contained (non-zero count) in Counting Bloom filter.
 226 */
 227static bool alloc_covered_contains(u32 alloc_stack_hash)
 228{
 229	int i;
 230
 231	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
 232		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
 233			return false;
 234		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
 235	}
 236
 237	return true;
 238}
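/*
 * Note: allocations add to the filter via alloc_covered_add(hash, 1) and frees
 * remove themselves via alloc_covered_add(hash, -1) (see kfence_guarded_alloc()
 * and kfence_guarded_free() below). A non-zero count therefore means "possibly
 * still covered": false positives are possible, false negatives are not.
 */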
 239
 240static bool kfence_protect(unsigned long addr)
 241{
 242	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
 243}
 244
 245static bool kfence_unprotect(unsigned long addr)
 246{
 247	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 248}
 249
 250static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 251{
 252	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
 253	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
 254
 255	/* The checks do not affect performance; only called from slow-paths. */
 256
 257	/* Only call with a pointer into kfence_metadata. */
 258	if (KFENCE_WARN_ON(meta < kfence_metadata ||
 259			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
 260		return 0;
 261
 262	/*
 263	 * This metadata object only ever maps to 1 page; verify that the stored
 264	 * address is in the expected range.
 265	 */
 266	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
 267		return 0;
 268
 269	return pageaddr;
 270}
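/*
 * Illustrative layout (assuming 4 KiB pages): the pool starts with 2 guard
 * pages, and each object i then owns one data page followed by one guard
 * page, i.e. data page index 2 * (i + 1). Object 0 therefore maps to
 * __kfence_pool + 0x2000, object 1 to __kfence_pool + 0x4000, and so on,
 * matching the offset computed above.
 */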
 271
 272/*
 273 * Update the object's metadata state, including updating the alloc/free stacks
 274 * depending on the state transition.
 275 */
 276static noinline void
 277metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
 278		      unsigned long *stack_entries, size_t num_stack_entries)
 279{
 280	struct kfence_track *track =
 281		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
 282
 283	lockdep_assert_held(&meta->lock);
 284
 285	if (stack_entries) {
 286		memcpy(track->stack_entries, stack_entries,
 287		       num_stack_entries * sizeof(stack_entries[0]));
 288	} else {
 289		/*
  290		 * Skip over 1 (this) function; noinline ensures we do not
 291		 * accidentally skip over the caller by never inlining.
 292		 */
 293		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
 294	}
 295	track->num_stack_entries = num_stack_entries;
 296	track->pid = task_pid_nr(current);
 297	track->cpu = raw_smp_processor_id();
 298	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
 299
 300	/*
 301	 * Pairs with READ_ONCE() in
 302	 *	kfence_shutdown_cache(),
 303	 *	kfence_handle_page_fault().
 304	 */
 305	WRITE_ONCE(meta->state, next);
 306}
 307
 308/* Check canary byte at @addr. */
 309static inline bool check_canary_byte(u8 *addr)
 310{
 311	struct kfence_metadata *meta;
 312	unsigned long flags;
 313
 314	if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
 315		return true;
 316
 317	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
 318
 319	meta = addr_to_metadata((unsigned long)addr);
 320	raw_spin_lock_irqsave(&meta->lock, flags);
 321	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
 322	raw_spin_unlock_irqrestore(&meta->lock, flags);
 323
 324	return false;
 325}
 326
 327static inline void set_canary(const struct kfence_metadata *meta)
 328{
 329	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
 330	unsigned long addr = pageaddr;
 331
 332	/*
  333	 * The canary pattern may be written over part of the object memory; this
  334	 * is harmless: the user is expected to initialize the object before use.
 335	 */
 336	for (; addr < meta->addr; addr += sizeof(u64))
 337		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
 338
 339	addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
 340	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
 341		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
 342}
 343
 344static inline void check_canary(const struct kfence_metadata *meta)
 345{
 346	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
 347	unsigned long addr = pageaddr;
 348
 349	/*
 350	 * We'll iterate over each canary byte per-side until a corrupted byte
 351	 * is found. However, we'll still iterate over the canary bytes to the
 352	 * right of the object even if there was an error in the canary bytes to
 353	 * the left of the object. Specifically, if check_canary_byte()
 354	 * generates an error, showing both sides might give more clues as to
 355	 * what the error is about when displaying which bytes were corrupted.
 356	 */
 357
 358	/* Apply to left of object. */
 359	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
 360		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
 361			break;
 362	}
 363
 364	/*
  365	 * If a corruption was found within one of the u64 words above, or the
  366	 * remaining canary bytes before the object do not fill a whole u64 word,
  367	 * check the remaining bytes individually.
 368	 */
 369	for (; addr < meta->addr; addr++) {
 370		if (unlikely(!check_canary_byte((u8 *)addr)))
 371			break;
 372	}
 373
 374	/* Apply to right of object. */
 375	for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
 376		if (unlikely(!check_canary_byte((u8 *)addr)))
 377			return;
 378	}
 379	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
 380		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
 381
 382			for (; addr - pageaddr < PAGE_SIZE; addr++) {
 383				if (!check_canary_byte((u8 *)addr))
 384					return;
 385			}
 386		}
 387	}
 388}
 389
 390static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
 391				  unsigned long *stack_entries, size_t num_stack_entries,
 392				  u32 alloc_stack_hash)
 393{
 394	struct kfence_metadata *meta = NULL;
 395	unsigned long flags;
 396	struct slab *slab;
 397	void *addr;
 398	const bool random_right_allocate = get_random_u32_below(2);
 399	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
 400				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 401
 402	/* Try to obtain a free object. */
 403	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
 404	if (!list_empty(&kfence_freelist)) {
 405		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
 406		list_del_init(&meta->list);
 407	}
 408	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
 409	if (!meta) {
 410		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
 411		return NULL;
 412	}
 413
 414	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
 415		/*
 416		 * This is extremely unlikely -- we are reporting on a
 417		 * use-after-free, which locked meta->lock, and the reporting
 418		 * code via printk calls kmalloc() which ends up in
 419		 * kfence_alloc() and tries to grab the same object that we're
 420		 * reporting on. While it has never been observed, lockdep does
 421		 * report that there is a possibility of deadlock. Fix it by
 422		 * using trylock and bailing out gracefully.
 423		 */
 424		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
 425		/* Put the object back on the freelist. */
 426		list_add_tail(&meta->list, &kfence_freelist);
 427		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
 428
 429		return NULL;
 430	}
 431
 432	meta->addr = metadata_to_pageaddr(meta);
 433	/* Unprotect if we're reusing this page. */
 434	if (meta->state == KFENCE_OBJECT_FREED)
 435		kfence_unprotect(meta->addr);
 436
 437	/*
  438	 * Note: for allocations made before RNG initialization, the RNG will
  439	 * always return zero. We still benefit from enabling KFENCE as early as
 440	 * possible, even when the RNG is not yet available, as this will allow
 441	 * KFENCE to detect bugs due to earlier allocations. The only downside
 442	 * is that the out-of-bounds accesses detected are deterministic for
 443	 * such allocations.
 444	 */
 445	if (random_right_allocate) {
 446		/* Allocate on the "right" side, re-calculate address. */
 447		meta->addr += PAGE_SIZE - size;
 448		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
 449	}
 450
 451	addr = (void *)meta->addr;
 452
 453	/* Update remaining metadata. */
 454	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
 455	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
 456	WRITE_ONCE(meta->cache, cache);
 457	meta->size = size;
 458	meta->alloc_stack_hash = alloc_stack_hash;
 459	raw_spin_unlock_irqrestore(&meta->lock, flags);
 460
 461	alloc_covered_add(alloc_stack_hash, 1);
 462
 463	/* Set required slab fields. */
 464	slab = virt_to_slab((void *)meta->addr);
 465	slab->slab_cache = cache;
 466	slab->objects = 1;
 467
 468	/* Memory initialization. */
 469	set_canary(meta);
 470
 471	/*
 472	 * We check slab_want_init_on_alloc() ourselves, rather than letting
 473	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
 474	 * redzone.
 475	 */
 476	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
 477		memzero_explicit(addr, size);
 478	if (cache->ctor)
 479		cache->ctor(addr);
 480
 481	if (random_fault)
 482		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
 483
 484	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
 485	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);
 486
 487	return addr;
 488}
 489
 490static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
 491{
 492	struct kcsan_scoped_access assert_page_exclusive;
 493	unsigned long flags;
 494	bool init;
 495
 496	raw_spin_lock_irqsave(&meta->lock, flags);
 497
 498	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
 499		/* Invalid or double-free, bail out. */
 500		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
 501		kfence_report_error((unsigned long)addr, false, NULL, meta,
 502				    KFENCE_ERROR_INVALID_FREE);
 503		raw_spin_unlock_irqrestore(&meta->lock, flags);
 504		return;
 505	}
 506
 507	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
 508	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
 509				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
 510				  &assert_page_exclusive);
 511
 512	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
 513		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */
 514
 515	/* Restore page protection if there was an OOB access. */
 516	if (meta->unprotected_page) {
 517		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
 518		kfence_protect(meta->unprotected_page);
 519		meta->unprotected_page = 0;
 520	}
 521
 522	/* Mark the object as freed. */
 523	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
 524	init = slab_want_init_on_free(meta->cache);
 525	raw_spin_unlock_irqrestore(&meta->lock, flags);
 526
 527	alloc_covered_add(meta->alloc_stack_hash, -1);
 528
 529	/* Check canary bytes for memory corruption. */
 530	check_canary(meta);
 531
 532	/*
 533	 * Clear memory if init-on-free is set. While we protect the page, the
 534	 * data is still there, and after a use-after-free is detected, we
 535	 * unprotect the page, so the data is still accessible.
 536	 */
 537	if (!zombie && unlikely(init))
 538		memzero_explicit(addr, meta->size);
 539
 540	/* Protect to detect use-after-frees. */
 541	kfence_protect((unsigned long)addr);
 542
 543	kcsan_end_scoped_access(&assert_page_exclusive);
 544	if (!zombie) {
 545		/* Add it to the tail of the freelist for reuse. */
 546		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
 547		KFENCE_WARN_ON(!list_empty(&meta->list));
 548		list_add_tail(&meta->list, &kfence_freelist);
 549		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
 550
 551		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
 552		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
 553	} else {
 554		/* See kfence_shutdown_cache(). */
 555		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
 556	}
 557}
 558
 559static void rcu_guarded_free(struct rcu_head *h)
 560{
 561	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
 562
 563	kfence_guarded_free((void *)meta->addr, meta, false);
 564}
 565
 566/*
 567 * Initialization of the KFENCE pool after its allocation.
 568 * Returns 0 on success; otherwise returns the address up to
 569 * which partial initialization succeeded.
 570 */
 571static unsigned long kfence_init_pool(void)
 572{
 573	unsigned long addr;
 574	struct page *pages;
 575	int i;
 576
 577	if (!arch_kfence_init_pool())
 578		return (unsigned long)__kfence_pool;
 579
 580	addr = (unsigned long)__kfence_pool;
 581	pages = virt_to_page(__kfence_pool);
 582
 583	/*
 584	 * Set up object pages: they must have PG_slab set, to avoid freeing
 585	 * these as real pages.
 586	 *
 587	 * We also want to avoid inserting kfence_free() in the kfree()
 588	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
 589	 * enters __slab_free() slow-path.
 590	 */
 591	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
 592		struct slab *slab = page_slab(nth_page(pages, i));
 593
 594		if (!i || (i % 2))
 595			continue;
 596
 597		__folio_set_slab(slab_folio(slab));
 598#ifdef CONFIG_MEMCG
 599		slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg |
 600				   MEMCG_DATA_OBJCGS;
 601#endif
 602	}
 603
 604	/*
 605	 * Protect the first 2 pages. The first page is mostly unnecessary, and
 606	 * merely serves as an extended guard page. However, adding one
 607	 * additional page in the beginning gives us an even number of pages,
 608	 * which simplifies the mapping of address to metadata index.
 609	 */
 610	for (i = 0; i < 2; i++) {
 611		if (unlikely(!kfence_protect(addr)))
 612			return addr;
 613
 614		addr += PAGE_SIZE;
 615	}
 616
 617	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
 618		struct kfence_metadata *meta = &kfence_metadata_init[i];
 619
 620		/* Initialize metadata. */
 621		INIT_LIST_HEAD(&meta->list);
 622		raw_spin_lock_init(&meta->lock);
 623		meta->state = KFENCE_OBJECT_UNUSED;
 624		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
 625		list_add_tail(&meta->list, &kfence_freelist);
 626
 627		/* Protect the right redzone. */
 628		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
 629			goto reset_slab;
 630
 631		addr += 2 * PAGE_SIZE;
 632	}
 633
 634	/*
 635	 * Make kfence_metadata visible only when initialization is successful.
 636	 * Otherwise, if the initialization fails and kfence_metadata is freed,
 637	 * it may cause UAF in kfence_shutdown_cache().
 638	 */
 639	smp_store_release(&kfence_metadata, kfence_metadata_init);
 640	return 0;
 641
 642reset_slab:
 643	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
 644		struct slab *slab = page_slab(nth_page(pages, i));
 645
 646		if (!i || (i % 2))
 647			continue;
 648#ifdef CONFIG_MEMCG
 649		slab->memcg_data = 0;
 650#endif
 651		__folio_clear_slab(slab_folio(slab));
 652	}
 653
 654	return addr;
 655}
 656
 657static bool __init kfence_init_pool_early(void)
 658{
 659	unsigned long addr;
 660
 661	if (!__kfence_pool)
 662		return false;
 663
 664	addr = kfence_init_pool();
 665
 666	if (!addr) {
 667		/*
 668		 * The pool is live and will never be deallocated from this point on.
 669		 * Ignore the pool object from the kmemleak phys object tree, as it would
 670		 * otherwise overlap with allocations returned by kfence_alloc(), which
 671		 * are registered with kmemleak through the slab post-alloc hook.
 672		 */
 673		kmemleak_ignore_phys(__pa(__kfence_pool));
 674		return true;
 675	}
 676
 677	/*
 678	 * Only release unprotected pages, and do not try to go back and change
 679	 * page attributes due to risk of failing to do so as well. If changing
 680	 * page attributes for some pages fails, it is very likely that it also
 681	 * fails for the first page, and therefore expect addr==__kfence_pool in
 682	 * most failure cases.
 683	 */
 684	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
 685	__kfence_pool = NULL;
 686
 687	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
 688	kfence_metadata_init = NULL;
 689
 690	return false;
 691}
 692
 693/* === DebugFS Interface ==================================================== */
 694
 695static int stats_show(struct seq_file *seq, void *v)
 696{
 697	int i;
 698
 699	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
 700	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
 701		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));
 702
 703	return 0;
 704}
 705DEFINE_SHOW_ATTRIBUTE(stats);
 706
 707/*
 708 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 709 * start_object() and next_object() return the object index + 1, because NULL is used
 710 * to stop iteration.
 711 */
 712static void *start_object(struct seq_file *seq, loff_t *pos)
 713{
 714	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
 715		return (void *)((long)*pos + 1);
 716	return NULL;
 717}
 718
 719static void stop_object(struct seq_file *seq, void *v)
 720{
 721}
 722
 723static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
 724{
 725	++*pos;
 726	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
 727		return (void *)((long)*pos + 1);
 728	return NULL;
 729}
 730
 731static int show_object(struct seq_file *seq, void *v)
 732{
 733	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
 734	unsigned long flags;
 735
 736	raw_spin_lock_irqsave(&meta->lock, flags);
 737	kfence_print_object(seq, meta);
 738	raw_spin_unlock_irqrestore(&meta->lock, flags);
 739	seq_puts(seq, "---------------------------------\n");
 740
 741	return 0;
 742}
 743
 744static const struct seq_operations objects_sops = {
 745	.start = start_object,
 746	.next = next_object,
 747	.stop = stop_object,
 748	.show = show_object,
 749};
 750DEFINE_SEQ_ATTRIBUTE(objects);
 751
 752static int kfence_debugfs_init(void)
 753{
 754	struct dentry *kfence_dir;
 755
 756	if (!READ_ONCE(kfence_enabled))
 757		return 0;
 758
 759	kfence_dir = debugfs_create_dir("kfence", NULL);
 760	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
 761	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
 762	return 0;
 763}
 764
 765late_initcall(kfence_debugfs_init);
 766
 767/* === Panic Notifier ====================================================== */
 768
 769static void kfence_check_all_canary(void)
 770{
 771	int i;
 772
 773	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
 774		struct kfence_metadata *meta = &kfence_metadata[i];
 775
 776		if (meta->state == KFENCE_OBJECT_ALLOCATED)
 777			check_canary(meta);
 778	}
 779}
 780
 781static int kfence_check_canary_callback(struct notifier_block *nb,
 782					unsigned long reason, void *arg)
 783{
 784	kfence_check_all_canary();
 785	return NOTIFY_OK;
 786}
 787
 788static struct notifier_block kfence_check_canary_notifier = {
 789	.notifier_call = kfence_check_canary_callback,
 790};
 791
 792/* === Allocation Gate Timer ================================================ */
 793
 794static struct delayed_work kfence_timer;
 795
 796#ifdef CONFIG_KFENCE_STATIC_KEYS
 797/* Wait queue to wake up allocation-gate timer task. */
 798static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
 799
 800static void wake_up_kfence_timer(struct irq_work *work)
 801{
 802	wake_up(&allocation_wait);
 803}
 804static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
 805#endif
 806
 807/*
 808 * Set up delayed work, which will enable and disable the static key. We need to
 809 * use a work queue (rather than a simple timer), since enabling and disabling a
 810 * static key cannot be done from an interrupt.
 811 *
 812 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
  813 * with a total of 2 IPIs to all CPUs. If this becomes a problem in the future (with
 814 * more aggressive sampling intervals), we could get away with a variant that
 815 * avoids IPIs, at the cost of not immediately capturing allocations if the
 816 * instructions remain cached.
 817 */
 818static void toggle_allocation_gate(struct work_struct *work)
 819{
 820	if (!READ_ONCE(kfence_enabled))
 821		return;
 822
 823	atomic_set(&kfence_allocation_gate, 0);
 824#ifdef CONFIG_KFENCE_STATIC_KEYS
 825	/* Enable static key, and await allocation to happen. */
 826	static_branch_enable(&kfence_allocation_key);
 827
 828	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
 829
 830	/* Disable static key and reset timer. */
 831	static_branch_disable(&kfence_allocation_key);
 832#endif
 833	queue_delayed_work(system_unbound_wq, &kfence_timer,
 834			   msecs_to_jiffies(kfence_sample_interval));
 835}
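/*
 * Resulting sampling behaviour, as an example: with kfence.sample_interval=100,
 * the gate is reset at most once every 100 ms, and only the first allocation
 * that reaches kfence_alloc() after the reset wins the gate (see the
 * atomic_inc_return() check in __kfence_alloc() below); all later allocations
 * in the same interval take the regular slab path.
 */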
 836
 837/* === Public interface ===================================================== */
 838
 839void __init kfence_alloc_pool_and_metadata(void)
 840{
 841	if (!kfence_sample_interval)
 842		return;
 843
 844	/*
 845	 * If the pool has already been initialized by arch, there is no need to
 846	 * re-allocate the memory pool.
 847	 */
 848	if (!__kfence_pool)
 849		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
 850
 851	if (!__kfence_pool) {
 852		pr_err("failed to allocate pool\n");
 853		return;
 854	}
 855
 856	/* The memory allocated by memblock has been zeroed out. */
 857	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
 858	if (!kfence_metadata_init) {
 859		pr_err("failed to allocate metadata\n");
 860		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
 861		__kfence_pool = NULL;
 862	}
 863}
 864
 865static void kfence_init_enable(void)
 866{
 867	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
 868		static_branch_enable(&kfence_allocation_key);
 869
 870	if (kfence_deferrable)
 871		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
 872	else
 873		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);
 874
 875	if (kfence_check_on_panic)
 876		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
 877
 878	WRITE_ONCE(kfence_enabled, true);
 879	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
 880
 881	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
 882		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
 883		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
 884}
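/*
 * With the defaults (255 objects, 4 KiB pages) the message above looks roughly
 * like the following; the exact pool addresses vary from boot to boot:
 *
 *	kfence: initialized - using 2097152 bytes for 255 objects at 0x...-0x...
 */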
 885
 886void __init kfence_init(void)
 887{
 888	stack_hash_seed = get_random_u32();
 889
 890	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
 891	if (!kfence_sample_interval)
 892		return;
 893
 894	if (!kfence_init_pool_early()) {
 895		pr_err("%s failed\n", __func__);
 896		return;
 897	}
 898
 899	kfence_init_enable();
 900}
 901
 902static int kfence_init_late(void)
 903{
 904	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
 905	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
 906	unsigned long addr = (unsigned long)__kfence_pool;
 907	unsigned long free_size = KFENCE_POOL_SIZE;
 908	int err = -ENOMEM;
 909
 910#ifdef CONFIG_CONTIG_ALLOC
 911	struct page *pages;
 912
 913	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
 914				   NULL);
 915	if (!pages)
 916		return -ENOMEM;
 917
 918	__kfence_pool = page_to_virt(pages);
 919	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
 920				   NULL);
 921	if (pages)
 922		kfence_metadata_init = page_to_virt(pages);
 923#else
 924	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
 925	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
 926		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
 927		return -EINVAL;
 928	}
 929
 930	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
 931	if (!__kfence_pool)
 932		return -ENOMEM;
 933
 934	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
 935#endif
 936
 937	if (!kfence_metadata_init)
 938		goto free_pool;
 939
 940	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
 941	addr = kfence_init_pool();
 942	if (!addr) {
 943		kfence_init_enable();
 944		kfence_debugfs_init();
 945		return 0;
 946	}
 947
 948	pr_err("%s failed\n", __func__);
 949	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
 950	err = -EBUSY;
 951
 952#ifdef CONFIG_CONTIG_ALLOC
 953	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
 954			  nr_pages_meta);
 955free_pool:
 956	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
 957			  free_size / PAGE_SIZE);
 958#else
 959	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
 960free_pool:
 961	free_pages_exact((void *)addr, free_size);
 962#endif
 963
 964	kfence_metadata_init = NULL;
 965	__kfence_pool = NULL;
 966	return err;
 967}
 968
 969static int kfence_enable_late(void)
 970{
 971	if (!__kfence_pool)
 972		return kfence_init_late();
 973
 974	WRITE_ONCE(kfence_enabled, true);
 975	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
 976	pr_info("re-enabled\n");
 977	return 0;
 978}
 979
 980void kfence_shutdown_cache(struct kmem_cache *s)
 981{
 982	unsigned long flags;
 983	struct kfence_metadata *meta;
 984	int i;
 985
 986	/* Pairs with release in kfence_init_pool(). */
 987	if (!smp_load_acquire(&kfence_metadata))
 988		return;
 989
 990	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
 991		bool in_use;
 992
 993		meta = &kfence_metadata[i];
 994
 995		/*
 996		 * If we observe some inconsistent cache and state pair where we
 997		 * should have returned false here, cache destruction is racing
 998		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
 999		 * the lock will not help, as different critical section
1000		 * serialization will have the same outcome.
1001		 */
1002		if (READ_ONCE(meta->cache) != s ||
1003		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
1004			continue;
1005
1006		raw_spin_lock_irqsave(&meta->lock, flags);
1007		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
1008		raw_spin_unlock_irqrestore(&meta->lock, flags);
1009
1010		if (in_use) {
1011			/*
1012			 * This cache still has allocations, and we should not
1013			 * release them back into the freelist so they can still
1014			 * safely be used and retain the kernel's default
1015			 * behaviour of keeping the allocations alive (leak the
1016			 * cache); however, they effectively become "zombie
1017			 * allocations" as the KFENCE objects are the only ones
1018			 * still in use and the owning cache is being destroyed.
1019			 *
1020			 * We mark them freed, so that any subsequent use shows
1021			 * more useful error messages that will include stack
1022			 * traces of the user of the object, the original
1023			 * allocation, and caller to shutdown_cache().
1024			 */
1025			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
1026		}
1027	}
1028
1029	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1030		meta = &kfence_metadata[i];
1031
1032		/* See above. */
1033		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
1034			continue;
1035
1036		raw_spin_lock_irqsave(&meta->lock, flags);
1037		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
1038			meta->cache = NULL;
1039		raw_spin_unlock_irqrestore(&meta->lock, flags);
1040	}
1041}
1042
1043void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
1044{
1045	unsigned long stack_entries[KFENCE_STACK_DEPTH];
1046	size_t num_stack_entries;
1047	u32 alloc_stack_hash;
1048
1049	/*
1050	 * Perform size check before switching kfence_allocation_gate, so that
1051	 * we don't disable KFENCE without making an allocation.
1052	 */
1053	if (size > PAGE_SIZE) {
1054		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1055		return NULL;
1056	}
1057
1058	/*
1059	 * Skip allocations from non-default zones, including DMA. We cannot
1060	 * guarantee that pages in the KFENCE pool will have the requested
1061	 * properties (e.g. reside in DMAable memory).
1062	 */
1063	if ((flags & GFP_ZONEMASK) ||
1064	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
1065		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1066		return NULL;
1067	}
1068
1069	/*
1070	 * Skip allocations for this slab, if KFENCE has been disabled for
1071	 * this slab.
1072	 */
1073	if (s->flags & SLAB_SKIP_KFENCE)
1074		return NULL;
1075
1076	if (atomic_inc_return(&kfence_allocation_gate) > 1)
1077		return NULL;
1078#ifdef CONFIG_KFENCE_STATIC_KEYS
1079	/*
1080	 * waitqueue_active() is fully ordered after the update of
1081	 * kfence_allocation_gate per atomic_inc_return().
1082	 */
1083	if (waitqueue_active(&allocation_wait)) {
1084		/*
1085		 * Calling wake_up() here may deadlock when allocations happen
1086		 * from within timer code. Use an irq_work to defer it.
1087		 */
1088		irq_work_queue(&wake_up_kfence_timer_work);
1089	}
1090#endif
1091
1092	if (!READ_ONCE(kfence_enabled))
1093		return NULL;
1094
1095	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
1096
1097	/*
1098	 * Do expensive check for coverage of allocation in slow-path after
1099	 * allocation_gate has already become non-zero, even though it might
1100	 * mean not making any allocation within a given sample interval.
1101	 *
1102	 * This ensures reasonable allocation coverage when the pool is almost
1103	 * full, including avoiding long-lived allocations of the same source
1104	 * filling up the pool (e.g. pagecache allocations).
1105	 */
1106	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
1107	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
1108		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
1109		return NULL;
1110	}
1111
1112	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
1113				    alloc_stack_hash);
1114}
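/*
 * For context, a simplified sketch of the fast-path wrapper that slab callers
 * actually use; the real definition lives in include/linux/kfence.h and
 * differs in detail (e.g. the static-key variants described near
 * kfence_allocation_key above):
 *
 *	static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 *	{
 *		if (!static_branch_unlikely(&kfence_allocation_key))
 *			return NULL;	// sampling gate not armed
 *		if (likely(atomic_read(&kfence_allocation_gate)))
 *			return NULL;	// gate already claimed this interval
 *		return __kfence_alloc(s, size, flags);
 *	}
 */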
1115
1116size_t kfence_ksize(const void *addr)
1117{
1118	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1119
1120	/*
1121	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1122	 * either a use-after-free or invalid access.
1123	 */
1124	return meta ? meta->size : 0;
1125}
1126
1127void *kfence_object_start(const void *addr)
1128{
1129	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1130
1131	/*
1132	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1133	 * either a use-after-free or invalid access.
1134	 */
1135	return meta ? (void *)meta->addr : NULL;
1136}
1137
1138void __kfence_free(void *addr)
1139{
1140	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1141
1142#ifdef CONFIG_MEMCG
1143	KFENCE_WARN_ON(meta->objcg);
1144#endif
1145	/*
1146	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
1147	 * the object, as the object page may be recycled for other-typed
1148	 * objects once it has been freed. meta->cache may be NULL if the cache
1149	 * was destroyed.
1150	 */
1151	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
1152		call_rcu(&meta->rcu_head, rcu_guarded_free);
1153	else
1154		kfence_guarded_free(addr, meta, false);
1155}
1156
1157bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
1158{
1159	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
1160	struct kfence_metadata *to_report = NULL;
1161	enum kfence_error_type error_type;
1162	unsigned long flags;
1163
1164	if (!is_kfence_address((void *)addr))
1165		return false;
1166
1167	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
1168		return kfence_unprotect(addr); /* ... unprotect and proceed. */
1169
1170	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
1171
1172	if (page_index % 2) {
1173		/* This is a redzone, report a buffer overflow. */
1174		struct kfence_metadata *meta;
1175		int distance = 0;
1176
1177		meta = addr_to_metadata(addr - PAGE_SIZE);
1178		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1179			to_report = meta;
1180			/* Data race ok; distance calculation approximate. */
1181			distance = addr - data_race(meta->addr + meta->size);
1182		}
1183
1184		meta = addr_to_metadata(addr + PAGE_SIZE);
1185		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1186			/* Data race ok; distance calculation approximate. */
1187			if (!to_report || distance > data_race(meta->addr) - addr)
1188				to_report = meta;
1189		}
1190
1191		if (!to_report)
1192			goto out;
1193
1194		raw_spin_lock_irqsave(&to_report->lock, flags);
1195		to_report->unprotected_page = addr;
1196		error_type = KFENCE_ERROR_OOB;
1197
1198		/*
 1199		 * If the object was freed before we took the lock, we can still
1200		 * report this as an OOB -- the report will simply show the
1201		 * stacktrace of the free as well.
1202		 */
1203	} else {
1204		to_report = addr_to_metadata(addr);
1205		if (!to_report)
1206			goto out;
1207
1208		raw_spin_lock_irqsave(&to_report->lock, flags);
1209		error_type = KFENCE_ERROR_UAF;
1210		/*
1211		 * We may race with __kfence_alloc(), and it is possible that a
1212		 * freed object may be reallocated. We simply report this as a
1213		 * use-after-free, with the stack trace showing the place where
1214		 * the object was re-allocated.
1215		 */
1216	}
1217
1218out:
1219	if (to_report) {
1220		kfence_report_error(addr, is_write, regs, to_report, error_type);
1221		raw_spin_unlock_irqrestore(&to_report->lock, flags);
1222	} else {
1223		/* This may be a UAF or OOB access, but we can't be sure. */
1224		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
1225	}
1226
1227	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
1228}