Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
6 */
7
8#include <linux/bitops.h>
9#include <linux/delay.h>
10#include <linux/kasan.h>
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/mman.h>
14#include <linux/module.h>
15#include <linux/printk.h>
16#include <linux/random.h>
17#include <linux/slab.h>
18#include <linux/string.h>
19#include <linux/uaccess.h>
20#include <linux/io.h>
21#include <linux/vmalloc.h>
22
23#include <asm/page.h>
24
25#include <kunit/test.h>
26
27#include "../mm/kasan/kasan.h"
28
29#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
30
31/*
32 * Some tests use these global variables to store return values from function
33 * calls that could otherwise be eliminated by the compiler as dead code.
34 */
35void *kasan_ptr_result;
36int kasan_int_result;
37
38static struct kunit_resource resource;
39static struct kunit_kasan_expectation fail_data;
40static bool multishot;
41
42/*
43 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
44 * first detected bug and panic the kernel if panic_on_warn is enabled. For
45 * hardware tag-based KASAN also allow tag checking to be reenabled for each
46 * test, see the comment for KUNIT_EXPECT_KASAN_FAIL().
47 */
48static int kasan_test_init(struct kunit *test)
49{
50 if (!kasan_enabled()) {
51 kunit_err(test, "can't run KASAN tests with KASAN disabled");
52 return -1;
53 }
54
55 multishot = kasan_save_enable_multi_shot();
56 kasan_set_tagging_report_once(false);
57 fail_data.report_found = false;
58 kunit_add_named_resource(test, NULL, NULL, &resource,
59 "kasan_data", &fail_data);
60 return 0;
61}
62
63static void kasan_test_exit(struct kunit *test)
64{
65 kasan_set_tagging_report_once(true);
66 kasan_restore_multi_shot(multishot);
67 KUNIT_EXPECT_FALSE(test, fail_data.report_found);
68}
69
70/**
71 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
72 * KASAN report; causes a test failure otherwise. This relies on a KUnit
73 * resource named "kasan_data". Do not use this name for KUnit resources
74 * outside of KASAN tests.
75 *
76 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
77 * checking is auto-disabled. When this happens, this test handler reenables
78 * tag checking. As tag checking can be only disabled or enabled per CPU,
79 * this handler disables migration (preemption).
80 *
81 * Since the compiler doesn't see that the expression can change the fail_data
82 * fields, it can reorder or optimize away the accesses to those fields.
83 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
84 * expression to prevent that.
85 *
86 * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
87 * false. This allows detecting KASAN reports that happen outside of the checks
88 * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
89 * and in kasan_test_exit.
90 */
91#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
92 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
93 !kasan_async_mode_enabled()) \
94 migrate_disable(); \
95 KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
96 barrier(); \
97 expression; \
98 barrier(); \
99 if (!READ_ONCE(fail_data.report_found)) { \
100 KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
101 "expected in \"" #expression \
102 "\", but none occurred"); \
103 } \
104 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
105 if (READ_ONCE(fail_data.report_found)) \
106 kasan_enable_tagging_sync(); \
107 migrate_enable(); \
108 } \
109 WRITE_ONCE(fail_data.report_found, false); \
110} while (0)
111
112#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
113 if (!IS_ENABLED(config)) \
114 kunit_skip((test), "Test requires " #config "=y"); \
115} while (0)
116
117#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
118 if (IS_ENABLED(config)) \
119 kunit_skip((test), "Test requires " #config "=n"); \
120} while (0)
121
122static void kmalloc_oob_right(struct kunit *test)
123{
124 char *ptr;
125 size_t size = 123;
126
127 ptr = kmalloc(size, GFP_KERNEL);
128 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
129
130 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
131 kfree(ptr);
132}
133
134static void kmalloc_oob_left(struct kunit *test)
135{
136 char *ptr;
137 size_t size = 15;
138
139 ptr = kmalloc(size, GFP_KERNEL);
140 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
141
142 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
143 kfree(ptr);
144}
145
146static void kmalloc_node_oob_right(struct kunit *test)
147{
148 char *ptr;
149 size_t size = 4096;
150
151 ptr = kmalloc_node(size, GFP_KERNEL, 0);
152 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
153
154 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
155 kfree(ptr);
156}
157
158/*
159 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
160 * fit into a slab cache and therefore is allocated via the page allocator
161 * fallback. Since this kind of fallback is only implemented for SLUB, these
162 * tests are limited to that allocator.
163 */
164static void kmalloc_pagealloc_oob_right(struct kunit *test)
165{
166 char *ptr;
167 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
168
169 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
170
171 ptr = kmalloc(size, GFP_KERNEL);
172 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
173
174 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
175
176 kfree(ptr);
177}
178
179static void kmalloc_pagealloc_uaf(struct kunit *test)
180{
181 char *ptr;
182 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
183
184 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
185
186 ptr = kmalloc(size, GFP_KERNEL);
187 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
188 kfree(ptr);
189
190 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
191}
192
193static void kmalloc_pagealloc_invalid_free(struct kunit *test)
194{
195 char *ptr;
196 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
197
198 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
199
200 ptr = kmalloc(size, GFP_KERNEL);
201 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
202
203 KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
204}
205
206static void pagealloc_oob_right(struct kunit *test)
207{
208 char *ptr;
209 struct page *pages;
210 size_t order = 4;
211 size_t size = (1UL << (PAGE_SHIFT + order));
212
213 /*
214 * With generic KASAN page allocations have no redzones, thus
215 * out-of-bounds detection is not guaranteed.
216 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
217 */
218 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
219
220 pages = alloc_pages(GFP_KERNEL, order);
221 ptr = page_address(pages);
222 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
223
224 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
225 free_pages((unsigned long)ptr, order);
226}
227
228static void pagealloc_uaf(struct kunit *test)
229{
230 char *ptr;
231 struct page *pages;
232 size_t order = 4;
233
234 pages = alloc_pages(GFP_KERNEL, order);
235 ptr = page_address(pages);
236 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
237 free_pages((unsigned long)ptr, order);
238
239 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
240}
241
242static void kmalloc_large_oob_right(struct kunit *test)
243{
244 char *ptr;
245 size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
246
247 /*
248 * Allocate a chunk that is large enough, but still fits into a slab
249 * and does not trigger the page allocator fallback in SLUB.
250 */
251 ptr = kmalloc(size, GFP_KERNEL);
252 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
253
254 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
255 kfree(ptr);
256}
257
258static void krealloc_more_oob_helper(struct kunit *test,
259 size_t size1, size_t size2)
260{
261 char *ptr1, *ptr2;
262 size_t middle;
263
264 KUNIT_ASSERT_LT(test, size1, size2);
265 middle = size1 + (size2 - size1) / 2;
266
267 ptr1 = kmalloc(size1, GFP_KERNEL);
268 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
269
270 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
271 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
272
273 /* All offsets up to size2 must be accessible. */
274 ptr2[size1 - 1] = 'x';
275 ptr2[size1] = 'x';
276 ptr2[middle] = 'x';
277 ptr2[size2 - 1] = 'x';
278
279 /* Generic mode is precise, so unaligned size2 must be inaccessible. */
280 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
281 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
282
283 /* For all modes first aligned offset after size2 must be inaccessible. */
284 KUNIT_EXPECT_KASAN_FAIL(test,
285 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
286
287 kfree(ptr2);
288}
289
290static void krealloc_less_oob_helper(struct kunit *test,
291 size_t size1, size_t size2)
292{
293 char *ptr1, *ptr2;
294 size_t middle;
295
296 KUNIT_ASSERT_LT(test, size2, size1);
297 middle = size2 + (size1 - size2) / 2;
298
299 ptr1 = kmalloc(size1, GFP_KERNEL);
300 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
301
302 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
303 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
304
305 /* Must be accessible for all modes. */
306 ptr2[size2 - 1] = 'x';
307
308 /* Generic mode is precise, so unaligned size2 must be inaccessible. */
309 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
310 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
311
312 /* For all modes first aligned offset after size2 must be inaccessible. */
313 KUNIT_EXPECT_KASAN_FAIL(test,
314 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
315
316 /*
317 * For all modes all size2, middle, and size1 should land in separate
318 * granules and thus the latter two offsets should be inaccessible.
319 */
320 KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
321 round_down(middle, KASAN_GRANULE_SIZE));
322 KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
323 round_down(size1, KASAN_GRANULE_SIZE));
324 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
325 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
326 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
327
328 kfree(ptr2);
329}
330
331static void krealloc_more_oob(struct kunit *test)
332{
333 krealloc_more_oob_helper(test, 201, 235);
334}
335
336static void krealloc_less_oob(struct kunit *test)
337{
338 krealloc_less_oob_helper(test, 235, 201);
339}
340
341static void krealloc_pagealloc_more_oob(struct kunit *test)
342{
343 /* page_alloc fallback in only implemented for SLUB. */
344 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
345
346 krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
347 KMALLOC_MAX_CACHE_SIZE + 235);
348}
349
350static void krealloc_pagealloc_less_oob(struct kunit *test)
351{
352 /* page_alloc fallback in only implemented for SLUB. */
353 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
354
355 krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
356 KMALLOC_MAX_CACHE_SIZE + 201);
357}
358
359/*
360 * Check that krealloc() detects a use-after-free, returns NULL,
361 * and doesn't unpoison the freed object.
362 */
363static void krealloc_uaf(struct kunit *test)
364{
365 char *ptr1, *ptr2;
366 int size1 = 201;
367 int size2 = 235;
368
369 ptr1 = kmalloc(size1, GFP_KERNEL);
370 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
371 kfree(ptr1);
372
373 KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
374 KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
375 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
376}
377
378static void kmalloc_oob_16(struct kunit *test)
379{
380 struct {
381 u64 words[2];
382 } *ptr1, *ptr2;
383
384 /* This test is specifically crafted for the generic mode. */
385 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
386
387 ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
388 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
389
390 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
391 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
392
393 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
394 kfree(ptr1);
395 kfree(ptr2);
396}
397
398static void kmalloc_uaf_16(struct kunit *test)
399{
400 struct {
401 u64 words[2];
402 } *ptr1, *ptr2;
403
404 ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
405 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
406
407 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
408 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
409 kfree(ptr2);
410
411 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
412 kfree(ptr1);
413}
414
415static void kmalloc_oob_memset_2(struct kunit *test)
416{
417 char *ptr;
418 size_t size = 8;
419
420 ptr = kmalloc(size, GFP_KERNEL);
421 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
422
423 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
424 kfree(ptr);
425}
426
427static void kmalloc_oob_memset_4(struct kunit *test)
428{
429 char *ptr;
430 size_t size = 8;
431
432 ptr = kmalloc(size, GFP_KERNEL);
433 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
434
435 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
436 kfree(ptr);
437}
438
439
440static void kmalloc_oob_memset_8(struct kunit *test)
441{
442 char *ptr;
443 size_t size = 8;
444
445 ptr = kmalloc(size, GFP_KERNEL);
446 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
447
448 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
449 kfree(ptr);
450}
451
452static void kmalloc_oob_memset_16(struct kunit *test)
453{
454 char *ptr;
455 size_t size = 16;
456
457 ptr = kmalloc(size, GFP_KERNEL);
458 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
459
460 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
461 kfree(ptr);
462}
463
464static void kmalloc_oob_in_memset(struct kunit *test)
465{
466 char *ptr;
467 size_t size = 666;
468
469 ptr = kmalloc(size, GFP_KERNEL);
470 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
471
472 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
473 kfree(ptr);
474}
475
476static void kmalloc_memmove_invalid_size(struct kunit *test)
477{
478 char *ptr;
479 size_t size = 64;
480 volatile size_t invalid_size = -2;
481
482 ptr = kmalloc(size, GFP_KERNEL);
483 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
484
485 memset((char *)ptr, 0, 64);
486
487 KUNIT_EXPECT_KASAN_FAIL(test,
488 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
489 kfree(ptr);
490}
491
492static void kmalloc_uaf(struct kunit *test)
493{
494 char *ptr;
495 size_t size = 10;
496
497 ptr = kmalloc(size, GFP_KERNEL);
498 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
499
500 kfree(ptr);
501 KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
502}
503
504static void kmalloc_uaf_memset(struct kunit *test)
505{
506 char *ptr;
507 size_t size = 33;
508
509 ptr = kmalloc(size, GFP_KERNEL);
510 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
511
512 kfree(ptr);
513 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
514}
515
516static void kmalloc_uaf2(struct kunit *test)
517{
518 char *ptr1, *ptr2;
519 size_t size = 43;
520 int counter = 0;
521
522again:
523 ptr1 = kmalloc(size, GFP_KERNEL);
524 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
525
526 kfree(ptr1);
527
528 ptr2 = kmalloc(size, GFP_KERNEL);
529 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
530
531 /*
532 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
533 * Allow up to 16 attempts at generating different tags.
534 */
535 if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
536 kfree(ptr2);
537 goto again;
538 }
539
540 KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
541 KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
542
543 kfree(ptr2);
544}
545
546static void kfree_via_page(struct kunit *test)
547{
548 char *ptr;
549 size_t size = 8;
550 struct page *page;
551 unsigned long offset;
552
553 ptr = kmalloc(size, GFP_KERNEL);
554 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
555
556 page = virt_to_page(ptr);
557 offset = offset_in_page(ptr);
558 kfree(page_address(page) + offset);
559}
560
561static void kfree_via_phys(struct kunit *test)
562{
563 char *ptr;
564 size_t size = 8;
565 phys_addr_t phys;
566
567 ptr = kmalloc(size, GFP_KERNEL);
568 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
569
570 phys = virt_to_phys(ptr);
571 kfree(phys_to_virt(phys));
572}
573
574static void kmem_cache_oob(struct kunit *test)
575{
576 char *p;
577 size_t size = 200;
578 struct kmem_cache *cache;
579
580 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
581 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
582
583 p = kmem_cache_alloc(cache, GFP_KERNEL);
584 if (!p) {
585 kunit_err(test, "Allocation failed: %s\n", __func__);
586 kmem_cache_destroy(cache);
587 return;
588 }
589
590 KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
591
592 kmem_cache_free(cache, p);
593 kmem_cache_destroy(cache);
594}
595
596static void kmem_cache_accounted(struct kunit *test)
597{
598 int i;
599 char *p;
600 size_t size = 200;
601 struct kmem_cache *cache;
602
603 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
604 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
605
606 /*
607 * Several allocations with a delay to allow for lazy per memcg kmem
608 * cache creation.
609 */
610 for (i = 0; i < 5; i++) {
611 p = kmem_cache_alloc(cache, GFP_KERNEL);
612 if (!p)
613 goto free_cache;
614
615 kmem_cache_free(cache, p);
616 msleep(100);
617 }
618
619free_cache:
620 kmem_cache_destroy(cache);
621}
622
623static void kmem_cache_bulk(struct kunit *test)
624{
625 struct kmem_cache *cache;
626 size_t size = 200;
627 char *p[10];
628 bool ret;
629 int i;
630
631 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
632 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
633
634 ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
635 if (!ret) {
636 kunit_err(test, "Allocation failed: %s\n", __func__);
637 kmem_cache_destroy(cache);
638 return;
639 }
640
641 for (i = 0; i < ARRAY_SIZE(p); i++)
642 p[i][0] = p[i][size - 1] = 42;
643
644 kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
645 kmem_cache_destroy(cache);
646}
647
648static char global_array[10];
649
650static void kasan_global_oob(struct kunit *test)
651{
652 /*
653 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
654 * from failing here and panicking the kernel, access the array via a
655 * volatile pointer, which will prevent the compiler from being able to
656 * determine the array bounds.
657 *
658 * This access uses a volatile pointer to char (char *volatile) rather
659 * than the more conventional pointer to volatile char (volatile char *)
660 * because we want to prevent the compiler from making inferences about
661 * the pointer itself (i.e. its array bounds), not the data that it
662 * refers to.
663 */
664 char *volatile array = global_array;
665 char *p = &array[ARRAY_SIZE(global_array) + 3];
666
667 /* Only generic mode instruments globals. */
668 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
669
670 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
671}
672
673/* Check that ksize() makes the whole object accessible. */
674static void ksize_unpoisons_memory(struct kunit *test)
675{
676 char *ptr;
677 size_t size = 123, real_size;
678
679 ptr = kmalloc(size, GFP_KERNEL);
680 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
681 real_size = ksize(ptr);
682
683 /* This access shouldn't trigger a KASAN report. */
684 ptr[size] = 'x';
685
686 /* This one must. */
687 KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
688
689 kfree(ptr);
690}
691
692/*
693 * Check that a use-after-free is detected by ksize() and via normal accesses
694 * after it.
695 */
696static void ksize_uaf(struct kunit *test)
697{
698 char *ptr;
699 int size = 128 - KASAN_GRANULE_SIZE;
700
701 ptr = kmalloc(size, GFP_KERNEL);
702 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
703 kfree(ptr);
704
705 KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
706 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
707 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
708}
709
710static void kasan_stack_oob(struct kunit *test)
711{
712 char stack_array[10];
713 /* See comment in kasan_global_oob. */
714 char *volatile array = stack_array;
715 char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
716
717 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
718
719 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
720}
721
722static void kasan_alloca_oob_left(struct kunit *test)
723{
724 volatile int i = 10;
725 char alloca_array[i];
726 /* See comment in kasan_global_oob. */
727 char *volatile array = alloca_array;
728 char *p = array - 1;
729
730 /* Only generic mode instruments dynamic allocas. */
731 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
732 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
733
734 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
735}
736
737static void kasan_alloca_oob_right(struct kunit *test)
738{
739 volatile int i = 10;
740 char alloca_array[i];
741 /* See comment in kasan_global_oob. */
742 char *volatile array = alloca_array;
743 char *p = array + i;
744
745 /* Only generic mode instruments dynamic allocas. */
746 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
747 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
748
749 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
750}
751
752static void kmem_cache_double_free(struct kunit *test)
753{
754 char *p;
755 size_t size = 200;
756 struct kmem_cache *cache;
757
758 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
759 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
760
761 p = kmem_cache_alloc(cache, GFP_KERNEL);
762 if (!p) {
763 kunit_err(test, "Allocation failed: %s\n", __func__);
764 kmem_cache_destroy(cache);
765 return;
766 }
767
768 kmem_cache_free(cache, p);
769 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
770 kmem_cache_destroy(cache);
771}
772
773static void kmem_cache_invalid_free(struct kunit *test)
774{
775 char *p;
776 size_t size = 200;
777 struct kmem_cache *cache;
778
779 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
780 NULL);
781 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
782
783 p = kmem_cache_alloc(cache, GFP_KERNEL);
784 if (!p) {
785 kunit_err(test, "Allocation failed: %s\n", __func__);
786 kmem_cache_destroy(cache);
787 return;
788 }
789
790 /* Trigger invalid free, the object doesn't get freed. */
791 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
792
793 /*
794 * Properly free the object to prevent the "Objects remaining in
795 * test_cache on __kmem_cache_shutdown" BUG failure.
796 */
797 kmem_cache_free(cache, p);
798
799 kmem_cache_destroy(cache);
800}
801
802static void kasan_memchr(struct kunit *test)
803{
804 char *ptr;
805 size_t size = 24;
806
807 /*
808 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
809 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
810 */
811 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
812
813 if (OOB_TAG_OFF)
814 size = round_up(size, OOB_TAG_OFF);
815
816 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
817 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
818
819 KUNIT_EXPECT_KASAN_FAIL(test,
820 kasan_ptr_result = memchr(ptr, '1', size + 1));
821
822 kfree(ptr);
823}
824
825static void kasan_memcmp(struct kunit *test)
826{
827 char *ptr;
828 size_t size = 24;
829 int arr[9];
830
831 /*
832 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
833 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
834 */
835 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
836
837 if (OOB_TAG_OFF)
838 size = round_up(size, OOB_TAG_OFF);
839
840 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
841 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
842 memset(arr, 0, sizeof(arr));
843
844 KUNIT_EXPECT_KASAN_FAIL(test,
845 kasan_int_result = memcmp(ptr, arr, size+1));
846 kfree(ptr);
847}
848
849static void kasan_strings(struct kunit *test)
850{
851 char *ptr;
852 size_t size = 24;
853
854 /*
855 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
856 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
857 */
858 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
859
860 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
861 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
862
863 kfree(ptr);
864
865 /*
866 * Try to cause only 1 invalid access (less spam in dmesg).
867 * For that we need ptr to point to zeroed byte.
868 * Skip metadata that could be stored in freed object so ptr
869 * will likely point to zeroed byte.
870 */
871 ptr += 16;
872 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
873
874 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
875
876 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
877
878 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
879
880 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
881
882 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
883}
884
885static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
886{
887 KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
888 KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
889 KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
890 KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
891 KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
892 KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
893 KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
894 KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
895}
896
897static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
898{
899 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
900 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
901 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
902 KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
903 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
904 KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
905 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
906 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
907
908#if defined(clear_bit_unlock_is_negative_byte)
909 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
910 clear_bit_unlock_is_negative_byte(nr, addr));
911#endif
912}
913
914static void kasan_bitops_generic(struct kunit *test)
915{
916 long *bits;
917
918 /* This test is specifically crafted for the generic mode. */
919 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
920
921 /*
922 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
923 * this way we do not actually corrupt other memory.
924 */
925 bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
926 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
927
928 /*
929 * Below calls try to access bit within allocated memory; however, the
930 * below accesses are still out-of-bounds, since bitops are defined to
931 * operate on the whole long the bit is in.
932 */
933 kasan_bitops_modify(test, BITS_PER_LONG, bits);
934
935 /*
936 * Below calls try to access bit beyond allocated memory.
937 */
938 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
939
940 kfree(bits);
941}
942
943static void kasan_bitops_tags(struct kunit *test)
944{
945 long *bits;
946
947 /* This test is specifically crafted for tag-based modes. */
948 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
949
950 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
951 bits = kzalloc(48, GFP_KERNEL);
952 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
953
954 /* Do the accesses past the 48 allocated bytes, but within the redone. */
955 kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
956 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
957
958 kfree(bits);
959}
960
961static void kmalloc_double_kzfree(struct kunit *test)
962{
963 char *ptr;
964 size_t size = 16;
965
966 ptr = kmalloc(size, GFP_KERNEL);
967 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
968
969 kfree_sensitive(ptr);
970 KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
971}
972
973static void vmalloc_oob(struct kunit *test)
974{
975 void *area;
976
977 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
978
979 /*
980 * We have to be careful not to hit the guard page.
981 * The MMU will catch that and crash us.
982 */
983 area = vmalloc(3000);
984 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);
985
986 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
987 vfree(area);
988}
989
990/*
991 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
992 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
993 * modes.
994 */
995static void match_all_not_assigned(struct kunit *test)
996{
997 char *ptr;
998 struct page *pages;
999 int i, size, order;
1000
1001 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1002
1003 for (i = 0; i < 256; i++) {
1004 size = (get_random_int() % 1024) + 1;
1005 ptr = kmalloc(size, GFP_KERNEL);
1006 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1007 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1008 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1009 kfree(ptr);
1010 }
1011
1012 for (i = 0; i < 256; i++) {
1013 order = (get_random_int() % 4) + 1;
1014 pages = alloc_pages(GFP_KERNEL, order);
1015 ptr = page_address(pages);
1016 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1017 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1018 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1019 free_pages((unsigned long)ptr, order);
1020 }
1021}
1022
1023/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
1024static void match_all_ptr_tag(struct kunit *test)
1025{
1026 char *ptr;
1027 u8 tag;
1028
1029 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1030
1031 ptr = kmalloc(128, GFP_KERNEL);
1032 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1033
1034 /* Backup the assigned tag. */
1035 tag = get_tag(ptr);
1036 KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
1037
1038 /* Reset the tag to 0xff.*/
1039 ptr = set_tag(ptr, KASAN_TAG_KERNEL);
1040
1041 /* This access shouldn't trigger a KASAN report. */
1042 *ptr = 0;
1043
1044 /* Recover the pointer tag and free. */
1045 ptr = set_tag(ptr, tag);
1046 kfree(ptr);
1047}
1048
1049/* Check that there are no match-all memory tags for tag-based modes. */
1050static void match_all_mem_tag(struct kunit *test)
1051{
1052 char *ptr;
1053 int tag;
1054
1055 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1056
1057 ptr = kmalloc(128, GFP_KERNEL);
1058 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1059 KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1060
1061 /* For each possible tag value not matching the pointer tag. */
1062 for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
1063 if (tag == get_tag(ptr))
1064 continue;
1065
1066 /* Mark the first memory granule with the chosen memory tag. */
1067 kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
1068
1069 /* This access must cause a KASAN report. */
1070 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
1071 }
1072
1073 /* Recover the memory tag and free. */
1074 kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
1075 kfree(ptr);
1076}
1077
1078static struct kunit_case kasan_kunit_test_cases[] = {
1079 KUNIT_CASE(kmalloc_oob_right),
1080 KUNIT_CASE(kmalloc_oob_left),
1081 KUNIT_CASE(kmalloc_node_oob_right),
1082 KUNIT_CASE(kmalloc_pagealloc_oob_right),
1083 KUNIT_CASE(kmalloc_pagealloc_uaf),
1084 KUNIT_CASE(kmalloc_pagealloc_invalid_free),
1085 KUNIT_CASE(pagealloc_oob_right),
1086 KUNIT_CASE(pagealloc_uaf),
1087 KUNIT_CASE(kmalloc_large_oob_right),
1088 KUNIT_CASE(krealloc_more_oob),
1089 KUNIT_CASE(krealloc_less_oob),
1090 KUNIT_CASE(krealloc_pagealloc_more_oob),
1091 KUNIT_CASE(krealloc_pagealloc_less_oob),
1092 KUNIT_CASE(krealloc_uaf),
1093 KUNIT_CASE(kmalloc_oob_16),
1094 KUNIT_CASE(kmalloc_uaf_16),
1095 KUNIT_CASE(kmalloc_oob_in_memset),
1096 KUNIT_CASE(kmalloc_oob_memset_2),
1097 KUNIT_CASE(kmalloc_oob_memset_4),
1098 KUNIT_CASE(kmalloc_oob_memset_8),
1099 KUNIT_CASE(kmalloc_oob_memset_16),
1100 KUNIT_CASE(kmalloc_memmove_invalid_size),
1101 KUNIT_CASE(kmalloc_uaf),
1102 KUNIT_CASE(kmalloc_uaf_memset),
1103 KUNIT_CASE(kmalloc_uaf2),
1104 KUNIT_CASE(kfree_via_page),
1105 KUNIT_CASE(kfree_via_phys),
1106 KUNIT_CASE(kmem_cache_oob),
1107 KUNIT_CASE(kmem_cache_accounted),
1108 KUNIT_CASE(kmem_cache_bulk),
1109 KUNIT_CASE(kasan_global_oob),
1110 KUNIT_CASE(kasan_stack_oob),
1111 KUNIT_CASE(kasan_alloca_oob_left),
1112 KUNIT_CASE(kasan_alloca_oob_right),
1113 KUNIT_CASE(ksize_unpoisons_memory),
1114 KUNIT_CASE(ksize_uaf),
1115 KUNIT_CASE(kmem_cache_double_free),
1116 KUNIT_CASE(kmem_cache_invalid_free),
1117 KUNIT_CASE(kasan_memchr),
1118 KUNIT_CASE(kasan_memcmp),
1119 KUNIT_CASE(kasan_strings),
1120 KUNIT_CASE(kasan_bitops_generic),
1121 KUNIT_CASE(kasan_bitops_tags),
1122 KUNIT_CASE(kmalloc_double_kzfree),
1123 KUNIT_CASE(vmalloc_oob),
1124 KUNIT_CASE(match_all_not_assigned),
1125 KUNIT_CASE(match_all_ptr_tag),
1126 KUNIT_CASE(match_all_mem_tag),
1127 {}
1128};
1129
1130static struct kunit_suite kasan_kunit_test_suite = {
1131 .name = "kasan",
1132 .init = kasan_test_init,
1133 .test_cases = kasan_kunit_test_cases,
1134 .exit = kasan_test_exit,
1135};
1136
1137kunit_test_suite(kasan_kunit_test_suite);
1138
1139MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
6 */
7
8#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
9
10#include <linux/bitops.h>
11#include <linux/delay.h>
12#include <linux/kasan.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/mman.h>
16#include <linux/module.h>
17#include <linux/printk.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/uaccess.h>
21#include <linux/io.h>
22
23#include <asm/page.h>
24
25/*
26 * Note: test functions are marked noinline so that their names appear in
27 * reports.
28 */
29
30static noinline void __init kmalloc_oob_right(void)
31{
32 char *ptr;
33 size_t size = 123;
34
35 pr_info("out-of-bounds to right\n");
36 ptr = kmalloc(size, GFP_KERNEL);
37 if (!ptr) {
38 pr_err("Allocation failed\n");
39 return;
40 }
41
42 ptr[size] = 'x';
43 kfree(ptr);
44}
45
46static noinline void __init kmalloc_oob_left(void)
47{
48 char *ptr;
49 size_t size = 15;
50
51 pr_info("out-of-bounds to left\n");
52 ptr = kmalloc(size, GFP_KERNEL);
53 if (!ptr) {
54 pr_err("Allocation failed\n");
55 return;
56 }
57
58 *ptr = *(ptr - 1);
59 kfree(ptr);
60}
61
62static noinline void __init kmalloc_node_oob_right(void)
63{
64 char *ptr;
65 size_t size = 4096;
66
67 pr_info("kmalloc_node(): out-of-bounds to right\n");
68 ptr = kmalloc_node(size, GFP_KERNEL, 0);
69 if (!ptr) {
70 pr_err("Allocation failed\n");
71 return;
72 }
73
74 ptr[size] = 0;
75 kfree(ptr);
76}
77
78#ifdef CONFIG_SLUB
79static noinline void __init kmalloc_pagealloc_oob_right(void)
80{
81 char *ptr;
82 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
83
84 /* Allocate a chunk that does not fit into a SLUB cache to trigger
85 * the page allocator fallback.
86 */
87 pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
88 ptr = kmalloc(size, GFP_KERNEL);
89 if (!ptr) {
90 pr_err("Allocation failed\n");
91 return;
92 }
93
94 ptr[size] = 0;
95 kfree(ptr);
96}
97
98static noinline void __init kmalloc_pagealloc_uaf(void)
99{
100 char *ptr;
101 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
102
103 pr_info("kmalloc pagealloc allocation: use-after-free\n");
104 ptr = kmalloc(size, GFP_KERNEL);
105 if (!ptr) {
106 pr_err("Allocation failed\n");
107 return;
108 }
109
110 kfree(ptr);
111 ptr[0] = 0;
112}
113
114static noinline void __init kmalloc_pagealloc_invalid_free(void)
115{
116 char *ptr;
117 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
118
119 pr_info("kmalloc pagealloc allocation: invalid-free\n");
120 ptr = kmalloc(size, GFP_KERNEL);
121 if (!ptr) {
122 pr_err("Allocation failed\n");
123 return;
124 }
125
126 kfree(ptr + 1);
127}
128#endif
129
130static noinline void __init kmalloc_large_oob_right(void)
131{
132 char *ptr;
133 size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
134 /* Allocate a chunk that is large enough, but still fits into a slab
135 * and does not trigger the page allocator fallback in SLUB.
136 */
137 pr_info("kmalloc large allocation: out-of-bounds to right\n");
138 ptr = kmalloc(size, GFP_KERNEL);
139 if (!ptr) {
140 pr_err("Allocation failed\n");
141 return;
142 }
143
144 ptr[size] = 0;
145 kfree(ptr);
146}
147
148static noinline void __init kmalloc_oob_krealloc_more(void)
149{
150 char *ptr1, *ptr2;
151 size_t size1 = 17;
152 size_t size2 = 19;
153
154 pr_info("out-of-bounds after krealloc more\n");
155 ptr1 = kmalloc(size1, GFP_KERNEL);
156 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
157 if (!ptr1 || !ptr2) {
158 pr_err("Allocation failed\n");
159 kfree(ptr1);
160 return;
161 }
162
163 ptr2[size2] = 'x';
164 kfree(ptr2);
165}
166
167static noinline void __init kmalloc_oob_krealloc_less(void)
168{
169 char *ptr1, *ptr2;
170 size_t size1 = 17;
171 size_t size2 = 15;
172
173 pr_info("out-of-bounds after krealloc less\n");
174 ptr1 = kmalloc(size1, GFP_KERNEL);
175 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
176 if (!ptr1 || !ptr2) {
177 pr_err("Allocation failed\n");
178 kfree(ptr1);
179 return;
180 }
181 ptr2[size2] = 'x';
182 kfree(ptr2);
183}
184
185static noinline void __init kmalloc_oob_16(void)
186{
187 struct {
188 u64 words[2];
189 } *ptr1, *ptr2;
190
191 pr_info("kmalloc out-of-bounds for 16-bytes access\n");
192 ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
193 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
194 if (!ptr1 || !ptr2) {
195 pr_err("Allocation failed\n");
196 kfree(ptr1);
197 kfree(ptr2);
198 return;
199 }
200 *ptr1 = *ptr2;
201 kfree(ptr1);
202 kfree(ptr2);
203}
204
205static noinline void __init kmalloc_oob_memset_2(void)
206{
207 char *ptr;
208 size_t size = 8;
209
210 pr_info("out-of-bounds in memset2\n");
211 ptr = kmalloc(size, GFP_KERNEL);
212 if (!ptr) {
213 pr_err("Allocation failed\n");
214 return;
215 }
216
217 memset(ptr+7, 0, 2);
218 kfree(ptr);
219}
220
221static noinline void __init kmalloc_oob_memset_4(void)
222{
223 char *ptr;
224 size_t size = 8;
225
226 pr_info("out-of-bounds in memset4\n");
227 ptr = kmalloc(size, GFP_KERNEL);
228 if (!ptr) {
229 pr_err("Allocation failed\n");
230 return;
231 }
232
233 memset(ptr+5, 0, 4);
234 kfree(ptr);
235}
236
237
238static noinline void __init kmalloc_oob_memset_8(void)
239{
240 char *ptr;
241 size_t size = 8;
242
243 pr_info("out-of-bounds in memset8\n");
244 ptr = kmalloc(size, GFP_KERNEL);
245 if (!ptr) {
246 pr_err("Allocation failed\n");
247 return;
248 }
249
250 memset(ptr+1, 0, 8);
251 kfree(ptr);
252}
253
254static noinline void __init kmalloc_oob_memset_16(void)
255{
256 char *ptr;
257 size_t size = 16;
258
259 pr_info("out-of-bounds in memset16\n");
260 ptr = kmalloc(size, GFP_KERNEL);
261 if (!ptr) {
262 pr_err("Allocation failed\n");
263 return;
264 }
265
266 memset(ptr+1, 0, 16);
267 kfree(ptr);
268}
269
270static noinline void __init kmalloc_oob_in_memset(void)
271{
272 char *ptr;
273 size_t size = 666;
274
275 pr_info("out-of-bounds in memset\n");
276 ptr = kmalloc(size, GFP_KERNEL);
277 if (!ptr) {
278 pr_err("Allocation failed\n");
279 return;
280 }
281
282 memset(ptr, 0, size+5);
283 kfree(ptr);
284}
285
286static noinline void __init kmalloc_uaf(void)
287{
288 char *ptr;
289 size_t size = 10;
290
291 pr_info("use-after-free\n");
292 ptr = kmalloc(size, GFP_KERNEL);
293 if (!ptr) {
294 pr_err("Allocation failed\n");
295 return;
296 }
297
298 kfree(ptr);
299 *(ptr + 8) = 'x';
300}
301
302static noinline void __init kmalloc_uaf_memset(void)
303{
304 char *ptr;
305 size_t size = 33;
306
307 pr_info("use-after-free in memset\n");
308 ptr = kmalloc(size, GFP_KERNEL);
309 if (!ptr) {
310 pr_err("Allocation failed\n");
311 return;
312 }
313
314 kfree(ptr);
315 memset(ptr, 0, size);
316}
317
318static noinline void __init kmalloc_uaf2(void)
319{
320 char *ptr1, *ptr2;
321 size_t size = 43;
322
323 pr_info("use-after-free after another kmalloc\n");
324 ptr1 = kmalloc(size, GFP_KERNEL);
325 if (!ptr1) {
326 pr_err("Allocation failed\n");
327 return;
328 }
329
330 kfree(ptr1);
331 ptr2 = kmalloc(size, GFP_KERNEL);
332 if (!ptr2) {
333 pr_err("Allocation failed\n");
334 return;
335 }
336
337 ptr1[40] = 'x';
338 if (ptr1 == ptr2)
339 pr_err("Could not detect use-after-free: ptr1 == ptr2\n");
340 kfree(ptr2);
341}
342
343static noinline void __init kfree_via_page(void)
344{
345 char *ptr;
346 size_t size = 8;
347 struct page *page;
348 unsigned long offset;
349
350 pr_info("invalid-free false positive (via page)\n");
351 ptr = kmalloc(size, GFP_KERNEL);
352 if (!ptr) {
353 pr_err("Allocation failed\n");
354 return;
355 }
356
357 page = virt_to_page(ptr);
358 offset = offset_in_page(ptr);
359 kfree(page_address(page) + offset);
360}
361
362static noinline void __init kfree_via_phys(void)
363{
364 char *ptr;
365 size_t size = 8;
366 phys_addr_t phys;
367
368 pr_info("invalid-free false positive (via phys)\n");
369 ptr = kmalloc(size, GFP_KERNEL);
370 if (!ptr) {
371 pr_err("Allocation failed\n");
372 return;
373 }
374
375 phys = virt_to_phys(ptr);
376 kfree(phys_to_virt(phys));
377}
378
379static noinline void __init kmem_cache_oob(void)
380{
381 char *p;
382 size_t size = 200;
383 struct kmem_cache *cache = kmem_cache_create("test_cache",
384 size, 0,
385 0, NULL);
386 if (!cache) {
387 pr_err("Cache allocation failed\n");
388 return;
389 }
390 pr_info("out-of-bounds in kmem_cache_alloc\n");
391 p = kmem_cache_alloc(cache, GFP_KERNEL);
392 if (!p) {
393 pr_err("Allocation failed\n");
394 kmem_cache_destroy(cache);
395 return;
396 }
397
398 *p = p[size];
399 kmem_cache_free(cache, p);
400 kmem_cache_destroy(cache);
401}
402
403static noinline void __init memcg_accounted_kmem_cache(void)
404{
405 int i;
406 char *p;
407 size_t size = 200;
408 struct kmem_cache *cache;
409
410 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
411 if (!cache) {
412 pr_err("Cache allocation failed\n");
413 return;
414 }
415
416 pr_info("allocate memcg accounted object\n");
417 /*
418 * Several allocations with a delay to allow for lazy per memcg kmem
419 * cache creation.
420 */
421 for (i = 0; i < 5; i++) {
422 p = kmem_cache_alloc(cache, GFP_KERNEL);
423 if (!p)
424 goto free_cache;
425
426 kmem_cache_free(cache, p);
427 msleep(100);
428 }
429
430free_cache:
431 kmem_cache_destroy(cache);
432}
433
434static char global_array[10];
435
436static noinline void __init kasan_global_oob(void)
437{
438 volatile int i = 3;
439 char *p = &global_array[ARRAY_SIZE(global_array) + i];
440
441 pr_info("out-of-bounds global variable\n");
442 *(volatile char *)p;
443}
444
445static noinline void __init kasan_stack_oob(void)
446{
447 char stack_array[10];
448 volatile int i = 0;
449 char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
450
451 pr_info("out-of-bounds on stack\n");
452 *(volatile char *)p;
453}
454
455static noinline void __init ksize_unpoisons_memory(void)
456{
457 char *ptr;
458 size_t size = 123, real_size;
459
460 pr_info("ksize() unpoisons the whole allocated chunk\n");
461 ptr = kmalloc(size, GFP_KERNEL);
462 if (!ptr) {
463 pr_err("Allocation failed\n");
464 return;
465 }
466 real_size = ksize(ptr);
467 /* This access doesn't trigger an error. */
468 ptr[size] = 'x';
469 /* This one does. */
470 ptr[real_size] = 'y';
471 kfree(ptr);
472}
473
474static noinline void __init copy_user_test(void)
475{
476 char *kmem;
477 char __user *usermem;
478 size_t size = 10;
479 int unused;
480
481 kmem = kmalloc(size, GFP_KERNEL);
482 if (!kmem)
483 return;
484
485 usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
486 PROT_READ | PROT_WRITE | PROT_EXEC,
487 MAP_ANONYMOUS | MAP_PRIVATE, 0);
488 if (IS_ERR(usermem)) {
489 pr_err("Failed to allocate user memory\n");
490 kfree(kmem);
491 return;
492 }
493
494 pr_info("out-of-bounds in copy_from_user()\n");
495 unused = copy_from_user(kmem, usermem, size + 1);
496
497 pr_info("out-of-bounds in copy_to_user()\n");
498 unused = copy_to_user(usermem, kmem, size + 1);
499
500 pr_info("out-of-bounds in __copy_from_user()\n");
501 unused = __copy_from_user(kmem, usermem, size + 1);
502
503 pr_info("out-of-bounds in __copy_to_user()\n");
504 unused = __copy_to_user(usermem, kmem, size + 1);
505
506 pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
507 unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
508
509 pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
510 unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
511
512 pr_info("out-of-bounds in strncpy_from_user()\n");
513 unused = strncpy_from_user(kmem, usermem, size + 1);
514
515 vm_munmap((unsigned long)usermem, PAGE_SIZE);
516 kfree(kmem);
517}
518
519static noinline void __init kasan_alloca_oob_left(void)
520{
521 volatile int i = 10;
522 char alloca_array[i];
523 char *p = alloca_array - 1;
524
525 pr_info("out-of-bounds to left on alloca\n");
526 *(volatile char *)p;
527}
528
529static noinline void __init kasan_alloca_oob_right(void)
530{
531 volatile int i = 10;
532 char alloca_array[i];
533 char *p = alloca_array + i;
534
535 pr_info("out-of-bounds to right on alloca\n");
536 *(volatile char *)p;
537}
538
539static noinline void __init kmem_cache_double_free(void)
540{
541 char *p;
542 size_t size = 200;
543 struct kmem_cache *cache;
544
545 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
546 if (!cache) {
547 pr_err("Cache allocation failed\n");
548 return;
549 }
550 pr_info("double-free on heap object\n");
551 p = kmem_cache_alloc(cache, GFP_KERNEL);
552 if (!p) {
553 pr_err("Allocation failed\n");
554 kmem_cache_destroy(cache);
555 return;
556 }
557
558 kmem_cache_free(cache, p);
559 kmem_cache_free(cache, p);
560 kmem_cache_destroy(cache);
561}
562
563static noinline void __init kmem_cache_invalid_free(void)
564{
565 char *p;
566 size_t size = 200;
567 struct kmem_cache *cache;
568
569 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
570 NULL);
571 if (!cache) {
572 pr_err("Cache allocation failed\n");
573 return;
574 }
575 pr_info("invalid-free of heap object\n");
576 p = kmem_cache_alloc(cache, GFP_KERNEL);
577 if (!p) {
578 pr_err("Allocation failed\n");
579 kmem_cache_destroy(cache);
580 return;
581 }
582
583 /* Trigger invalid free, the object doesn't get freed */
584 kmem_cache_free(cache, p + 1);
585
586 /*
587 * Properly free the object to prevent the "Objects remaining in
588 * test_cache on __kmem_cache_shutdown" BUG failure.
589 */
590 kmem_cache_free(cache, p);
591
592 kmem_cache_destroy(cache);
593}
594
595static noinline void __init kasan_memchr(void)
596{
597 char *ptr;
598 size_t size = 24;
599
600 pr_info("out-of-bounds in memchr\n");
601 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
602 if (!ptr)
603 return;
604
605 memchr(ptr, '1', size + 1);
606 kfree(ptr);
607}
608
609static noinline void __init kasan_memcmp(void)
610{
611 char *ptr;
612 size_t size = 24;
613 int arr[9];
614
615 pr_info("out-of-bounds in memcmp\n");
616 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
617 if (!ptr)
618 return;
619
620 memset(arr, 0, sizeof(arr));
621 memcmp(ptr, arr, size+1);
622 kfree(ptr);
623}
624
625static noinline void __init kasan_strings(void)
626{
627 char *ptr;
628 size_t size = 24;
629
630 pr_info("use-after-free in strchr\n");
631 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
632 if (!ptr)
633 return;
634
635 kfree(ptr);
636
637 /*
638 * Try to cause only 1 invalid access (less spam in dmesg).
639 * For that we need ptr to point to zeroed byte.
640 * Skip metadata that could be stored in freed object so ptr
641 * will likely point to zeroed byte.
642 */
643 ptr += 16;
644 strchr(ptr, '1');
645
646 pr_info("use-after-free in strrchr\n");
647 strrchr(ptr, '1');
648
649 pr_info("use-after-free in strcmp\n");
650 strcmp(ptr, "2");
651
652 pr_info("use-after-free in strncmp\n");
653 strncmp(ptr, "2", 1);
654
655 pr_info("use-after-free in strlen\n");
656 strlen(ptr);
657
658 pr_info("use-after-free in strnlen\n");
659 strnlen(ptr, 1);
660}
661
662static noinline void __init kasan_bitops(void)
663{
664 /*
665 * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
666 * this way we do not actually corrupt other memory.
667 */
668 long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
669 if (!bits)
670 return;
671
672 /*
673 * Below calls try to access bit within allocated memory; however, the
674 * below accesses are still out-of-bounds, since bitops are defined to
675 * operate on the whole long the bit is in.
676 */
677 pr_info("out-of-bounds in set_bit\n");
678 set_bit(BITS_PER_LONG, bits);
679
680 pr_info("out-of-bounds in __set_bit\n");
681 __set_bit(BITS_PER_LONG, bits);
682
683 pr_info("out-of-bounds in clear_bit\n");
684 clear_bit(BITS_PER_LONG, bits);
685
686 pr_info("out-of-bounds in __clear_bit\n");
687 __clear_bit(BITS_PER_LONG, bits);
688
689 pr_info("out-of-bounds in clear_bit_unlock\n");
690 clear_bit_unlock(BITS_PER_LONG, bits);
691
692 pr_info("out-of-bounds in __clear_bit_unlock\n");
693 __clear_bit_unlock(BITS_PER_LONG, bits);
694
695 pr_info("out-of-bounds in change_bit\n");
696 change_bit(BITS_PER_LONG, bits);
697
698 pr_info("out-of-bounds in __change_bit\n");
699 __change_bit(BITS_PER_LONG, bits);
700
701 /*
702 * Below calls try to access bit beyond allocated memory.
703 */
704 pr_info("out-of-bounds in test_and_set_bit\n");
705 test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
706
707 pr_info("out-of-bounds in __test_and_set_bit\n");
708 __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
709
710 pr_info("out-of-bounds in test_and_set_bit_lock\n");
711 test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
712
713 pr_info("out-of-bounds in test_and_clear_bit\n");
714 test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
715
716 pr_info("out-of-bounds in __test_and_clear_bit\n");
717 __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
718
719 pr_info("out-of-bounds in test_and_change_bit\n");
720 test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
721
722 pr_info("out-of-bounds in __test_and_change_bit\n");
723 __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
724
725 pr_info("out-of-bounds in test_bit\n");
726 (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
727
728#if defined(clear_bit_unlock_is_negative_byte)
729 pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
730 clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
731#endif
732 kfree(bits);
733}
734
735static noinline void __init kmalloc_double_kzfree(void)
736{
737 char *ptr;
738 size_t size = 16;
739
740 pr_info("double-free (kzfree)\n");
741 ptr = kmalloc(size, GFP_KERNEL);
742 if (!ptr) {
743 pr_err("Allocation failed\n");
744 return;
745 }
746
747 kzfree(ptr);
748 kzfree(ptr);
749}
750
751static int __init kmalloc_tests_init(void)
752{
753 /*
754 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
755 * report for the first case.
756 */
757 bool multishot = kasan_save_enable_multi_shot();
758
759 kmalloc_oob_right();
760 kmalloc_oob_left();
761 kmalloc_node_oob_right();
762#ifdef CONFIG_SLUB
763 kmalloc_pagealloc_oob_right();
764 kmalloc_pagealloc_uaf();
765 kmalloc_pagealloc_invalid_free();
766#endif
767 kmalloc_large_oob_right();
768 kmalloc_oob_krealloc_more();
769 kmalloc_oob_krealloc_less();
770 kmalloc_oob_16();
771 kmalloc_oob_in_memset();
772 kmalloc_oob_memset_2();
773 kmalloc_oob_memset_4();
774 kmalloc_oob_memset_8();
775 kmalloc_oob_memset_16();
776 kmalloc_uaf();
777 kmalloc_uaf_memset();
778 kmalloc_uaf2();
779 kfree_via_page();
780 kfree_via_phys();
781 kmem_cache_oob();
782 memcg_accounted_kmem_cache();
783 kasan_stack_oob();
784 kasan_global_oob();
785 kasan_alloca_oob_left();
786 kasan_alloca_oob_right();
787 ksize_unpoisons_memory();
788 copy_user_test();
789 kmem_cache_double_free();
790 kmem_cache_invalid_free();
791 kasan_memchr();
792 kasan_memcmp();
793 kasan_strings();
794 kasan_bitops();
795 kmalloc_double_kzfree();
796
797 kasan_restore_multi_shot(multishot);
798
799 return -EAGAIN;
800}
801
802module_init(kmalloc_tests_init);
803MODULE_LICENSE("GPL");