// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include "../mm/slab.h"

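/*
 * KUnit tests for SLUB's debug consistency checks (red zoning, object
 * poisoning, free-list integrity). Each case deliberately corrupts an
 * object, runs validate_slab_cache() and then checks how many errors were
 * accumulated in slab_errors; the counter is bumped from mm/slub.c whenever
 * a check fires while a KUnit test is running. The suite is normally built
 * via the SLUB KUnit test Kconfig option, which requires SLUB debugging
 * support.
 */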
static struct kunit_resource resource;
static int slab_errors;

/*
 * Wrapper around kmem_cache_create() that drops the 'align' and 'ctor'
 * parameters and sets the SLAB_SKIP_KFENCE flag, so that objects never come
 * from the KFENCE pool, where an access could be caught by both our test
 * and the KFENCE sanity checks.
 */
static struct kmem_cache *test_kmem_cache_create(const char *name,
				unsigned int size, slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_create(name, size, 0,
					(flags | SLAB_NO_USER_FLAGS), NULL);
	s->flags |= SLAB_SKIP_KFENCE;
	return s;
}

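/*
 * Write one byte past a live 64-byte object in a red-zoned cache and expect
 * validation to report two errors: one for the overwritten right red zone
 * and one for fixing the broken cache. KASAN is disabled around the access
 * so that it does not trap the out-of-bounds write itself.
 */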
static void test_clobber_zone(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
							SLAB_RED_ZONE);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	p[64] = 0x12;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

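/*
 * The next three cases scribble over objects after freeing them without
 * disabling KASAN, so they are only built when KASAN is not configured;
 * KASAN's own handling of freed objects would otherwise interfere with the
 * SLUB poison checks being exercised here.
 */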
#ifndef CONFIG_KASAN
static void test_next_pointer(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
	unsigned long tmp;
	unsigned long *ptr_addr;

	kmem_cache_free(s, p);

	ptr_addr = (unsigned long *)(p + s->offset);
	tmp = *ptr_addr;
	p[s->offset] = ~p[s->offset];

	/*
	 * Expecting three errors: one for the corrupted freechain, one for
	 * the wrong count of objects in use, and one for fixing the broken
	 * cache.
	 */
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 3, slab_errors);

	/*
	 * Try to repair the corrupted freepointer. Still expecting two
	 * errors: one for the wrong count of objects in use and one for
	 * fixing the broken cache.
	 */
	*ptr_addr = tmp;
	slab_errors = 0;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	/*
	 * Previous validation repaired the count of objects in use.
	 * Now expecting no error.
	 */
	slab_errors = 0;
	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 0, slab_errors);

	kmem_cache_destroy(s);
}

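/*
 * Overwrite the first byte of a freed object in a poisoned cache; the
 * free-poison check is expected to flag it (two errors: the overwrite plus
 * the fix-up).
 */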
static void test_first_word(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	*p = 0x78;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}

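/*
 * Same idea as above, but clobber a byte in the middle (offset 50) of a
 * freed, poisoned object.
 */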
static void test_clobber_50th_byte(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
							64, SLAB_POISON);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kmem_cache_free(s, p);
	p[50] = 0x9a;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kmem_cache_destroy(s);
}
#endif

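/*
 * Write into the red zone of an object after it has been freed and expect
 * validation to report two errors, as in test_clobber_zone().
 */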
static void test_clobber_redzone_free(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
							SLAB_RED_ZONE);
	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);

	kasan_disable_current();
	kmem_cache_free(s, p);
	p[64] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_destroy(s);
}

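/*
 * Allocate 18 bytes from a 32-byte kmalloc-style cache, so the unused tail
 * of the slot is treated as a red zone, then write into that tail. Two
 * errors are expected from validation.
 */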
static void test_kmalloc_redzone_access(struct kunit *test)
{
	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
	u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);

	kasan_disable_current();

	/* Suppress the -Warray-bounds warning */
	OPTIMIZER_HIDE_VAR(p);
	p[18] = 0xab;
	p[19] = 0xab;

	validate_slab_cache(s);
	KUNIT_EXPECT_EQ(test, 2, slab_errors);

	kasan_enable_current();
	kmem_cache_free(s, p);
	kmem_cache_destroy(s);
}

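/*
 * Reset the error counter before each test and publish it as a named KUnit
 * resource, so the SLUB debug code can find it by name and bump it when a
 * check fires during the test rather than printing a full report.
 */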
static int test_init(struct kunit *test)
{
	slab_errors = 0;

	kunit_add_named_resource(test, NULL, NULL, &resource,
					"slab_errors", &slab_errors);
	return 0;
}

static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_clobber_zone),

#ifndef CONFIG_KASAN
	KUNIT_CASE(test_next_pointer),
	KUNIT_CASE(test_first_word),
	KUNIT_CASE(test_clobber_50th_byte),
#endif

	KUNIT_CASE(test_clobber_redzone_free),
	KUNIT_CASE(test_kmalloc_redzone_access),
	{}
};

static struct kunit_suite test_suite = {
	.name = "slub_test",
	.init = test_init,
	.test_cases = test_cases,
};
kunit_test_suite(test_suite);

MODULE_LICENSE("GPL");