drivers/misc/lkdtm/heap.c (v6.2)
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to heap memory, including
 * page allocation and slab allocations.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;

/*
 * If there aren't guard pages, it's likely that a consecutive allocation will
 * let us overflow into the second allocation without overwriting something real.
 *
 * This should always be caught because there is an unconditional unmapped
 * page after vmap allocations.
 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	two = vzalloc(PAGE_SIZE);

	pr_info("Attempting vmalloc linear overflow ...\n");
	memset(one, 0xAA, PAGE_SIZE + __offset);

	vfree(two);
	vfree(one);
}

/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 *
 * This should get caught by either memory tagging, KASan, or by using
 * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	OPTIMIZER_HIDE_VAR(data);
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}

static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}

static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configurations. Store in the second word to
	 * avoid running into the freelist.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}

static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}

static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = (u8 *)__get_free_page(GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate first free page\n");
		return;
	}

	memset(first, 0xAB, PAGE_SIZE);
	free_page((unsigned long)first);

	val = (u8 *)__get_free_page(GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate second free page\n");
		return;
	}

	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	free_page((unsigned long)val);
}

static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}

static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}

static void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	pr_info("Attempting non-Slab slab free ...\n");
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}

/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }

void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}

void __exit lkdtm_heap_exit(void)
{
	kmem_cache_destroy(double_free_cache);
	kmem_cache_destroy(a_cache);
	kmem_cache_destroy(b_cache);
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};

struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};
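
The handlers above are not called directly; LKDTM dispatches them through its debugfs interface. The following is a minimal userspace sketch, assuming CONFIG_LKDTM=y and debugfs mounted at /sys/kernel/debug: writing one of the CRASHTYPE names listed above to the provoke-crash/DIRECT file runs the matching lkdtm_* handler, and the expected result is a KASAN/slub_debug report (or an oops) in the kernel log rather than a clean return.

/*
 * Hypothetical trigger program: writes a crash type name to LKDTM's
 * debugfs control file (path assumes debugfs is mounted at
 * /sys/kernel/debug).
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	const char *test = argc > 1 ? argv[1] : "SLAB_LINEAR_OVERFLOW";
	FILE *f = fopen("/sys/kernel/debug/provoke-crash/DIRECT", "w");

	if (!f) {
		perror("provoke-crash/DIRECT");
		return EXIT_FAILURE;
	}
	fprintf(f, "%s\n", test);	/* e.g. WRITE_AFTER_FREE or SLAB_FREE_DOUBLE */
	fclose(f);
	return EXIT_SUCCESS;
}

Afterward, check dmesg for the pr_info()/pr_err() lines emitted by the handler and for any splat from the configured detector.
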
drivers/misc/lkdtm/heap.c (v5.14.15)
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to heap memory, including
 * page allocation and slab allocations.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * If there aren't guard pages, it's likely that a consecutive allocation will
 * let us overflow into the second allocation without overwriting something real.
 */
void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	two = vzalloc(PAGE_SIZE);

	pr_info("Attempting vmalloc linear overflow ...\n");
	memset(one, 0xAA, PAGE_SIZE + 1);

	vfree(two);
	vfree(one);
}

/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 */
void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}

void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}

void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configurations. Store in the second word to
	 * avoid running into the freelist.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}

void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}

void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = (u8 *)__get_free_page(GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate first free page\n");
		return;
	}

	memset(first, 0xAB, PAGE_SIZE);
	free_page((unsigned long)first);

	val = (u8 *)__get_free_page(GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate second free page\n");
		return;
	}

	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	free_page((unsigned long)val);
}

void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}

void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}

void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	pr_info("Attempting non-Slab slab free ...\n");
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}

/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }

void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}

void __exit lkdtm_heap_exit(void)
{
	kmem_cache_destroy(double_free_cache);
	kmem_cache_destroy(a_cache);
	kmem_cache_destroy(b_cache);
}
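
One structural difference between the two versions: in v5.14.15 the handlers are non-static and this file carries no crashtypes[] table of its own, whereas v6.2 keeps the handlers static and exports them through the heap_crashtypes category above. As a sketch of the older layout (an assumption about code that is not shown in this file), the handlers were declared in lkdtm.h and listed in the global table in drivers/misc/lkdtm/core.c, roughly:

/* Sketch of the assumed pre-v6.x registration in drivers/misc/lkdtm/core.c. */
static const struct crashtype crashtypes[] = {
	/* ... other LKDTM tests ... */
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
	/* ... */
};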