// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * through which KFENCE's reports are obtained is the console, that output is
 * what we must verify. Each test case checks for the presence (or absence) of
 * generated reports, and relies on the 'console' tracepoint to capture reports
 * as they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

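/* Skip the current test case if @cond does not hold. */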
#define KFENCE_TEST_REQUIRES(test, cond) do {			\
	if (!(cond))						\
		kunit_skip((test), "Test requires: " #cond);	\
} while (0)

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * KFENCE report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Is access a write. */
};

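/* Return the printable access type for an expected report. */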
static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
	 * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
	 * allocate via memcg, if enabled.
	 */
	flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

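/* Alignment of the kmalloc cache that an allocation of @size maps to. */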
static inline size_t kmalloc_cache_alignment(size_t size)
{
	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and if so, on which side of the
 * allocation the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY, /* KFENCE, any side. */
	ALLOCATE_LEFT, /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE, /* No KFENCE allocation. */
};

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && PAGE_ALIGNED(alloc))
				return alloc;
			if (policy == ALLOCATE_RIGHT && !PAGE_ALIGNED(alloc))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

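/* Test that an out-of-bounds read on either side of an allocation is reported. */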
static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

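/* Test that an out-of-bounds write is reported (only the left side is exercised here). */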
static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

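/* Test that a read from an already-freed object is reported as a use-after-free. */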
static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

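/* Test that freeing the same object twice is reported as an invalid free. */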
static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

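/* Test that freeing a pointer into the middle of an object is reported as an invalid free. */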
static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

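/* Test that writes next to the object (into the canary bytes) are detected as corruption on free. */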
static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB and SLAB,
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
						     0, (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
	/* Assume it hasn't been disabled on command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	/* Skip if we think it'd take too long. */
	KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
		cond_resched();
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

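/* Test that an access to a protected page in the KFENCE pool that belongs to no object is reported as invalid. */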
static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not support passing arguments to tests, so we encode additional
 * info in the test name. Set up 2 tests per test case: one using the default
 * allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

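/* Per-test setup: clear previously observed report lines, and select the allocator via the test name. */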
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	if (!__kfence_pool)
		return -EINVAL;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

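/* The 'console' tracepoint is the only one we care about; (un)register our probe on it. */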
static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

static int kfence_suite_init(struct kunit_suite *suite)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return 0;
}

static void kfence_suite_exit(struct kunit_suite *suite)
{
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kfence_suite_init,
	.suite_exit = kfence_suite_exit,
};

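/*
 * A minimal sketch of how this suite might be run (assumes a typical
 * kunit_tool setup; adjust the architecture and configs for your tree):
 *
 *   ./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *           --kconfig_add CONFIG_KFENCE=y \
 *           --kconfig_add CONFIG_KFENCE_KUNIT_TEST=y 'kfence*'
 */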
kunit_test_suites(&kfence_test_suite);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");