/* drivers/misc/lkdtm/heap.c — scraped copy; the version below is from Linux v6.8. */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This is for all the tests relating directly to heap memory, including
  4 * page allocation and slab allocations.
  5 */
  6#include "lkdtm.h"
  7#include <linux/kfence.h>
  8#include <linux/slab.h>
  9#include <linux/vmalloc.h>
 10#include <linux/sched.h>
 11
/* Dedicated caches used by the SLAB_FREE_DOUBLE and SLAB_FREE_CROSS tests. */
static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;
 22
 23/*
 24 * If there aren't guard pages, it's likely that a consecutive allocation will
 25 * let us overflow into the second allocation without overwriting something real.
 26 *
 27 * This should always be caught because there is an unconditional unmapped
 28 * page after vmap allocations.
 29 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	/*
	 * Hide "one" from the optimizer so the overflowing memset()
	 * below cannot be proven out-of-bounds and elided (or turned
	 * into a compile-time diagnostic) by the compiler.
	 */
	OPTIMIZER_HIDE_VAR(one);
	two = vzalloc(PAGE_SIZE);

	pr_info("Attempting vmalloc linear overflow ...\n");
	/* __offset is a volatile 1: write a single byte past the first page. */
	memset(one, 0xAA, PAGE_SIZE + __offset);

	vfree(two);
	vfree(one);
}
 44
 45/*
 46 * This tries to stay within the next largest power-of-2 kmalloc cache
 47 * to avoid actually overwriting anything important if it's not detected
 48 * correctly.
 49 *
 50 * This should get caught by either memory tagging, KASan, or by using
 51 * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 52 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	/* Keep the compiler from proving the index below is out of bounds. */
	OPTIMIZER_HIDE_VAR(data);
	/*
	 * Index 1024/4 == 256 writes 4 bytes at byte offset 1024, i.e.
	 * just past the power-of-2 object backing the 1020-byte request.
	 */
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}
 65
/*
 * Write into a slab allocation after freeing it. Detection relies on
 * slab poisoning/sanity checks noticing the stale store.
 */
static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	/* The actual use-after-free under test. */
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}
 91
/*
 * Read from a slab allocation after it has been freed; with poisoning
 * or init_on_free the planted value must no longer be visible.
 */
static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use the either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configurations. Store in the second word to
	 * avoid running into the freelist.
	 *
	 * NOTE(review): offset is used as an int index below, so
	 * base[offset] is actually word 4 (byte 16), not the second
	 * word -- still clear of both freelist locations, but comment
	 * and code disagree; confirm intent before changing either.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	/* Plant a known value, then free the buffer that holds it. */
	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
135
/*
 * Same read-after-free probe as lkdtm_READ_AFTER_FREE(), but retried
 * until kmalloc() hands back a KFENCE-guarded object, so that KFENCE
 * itself is what must catch the stale read.
 */
static void lkdtm_KFENCE_READ_AFTER_FREE(void)
{
	int *base, val, saw;
	unsigned long timeout, resched_after;
	size_t len = 1024;
	/*
	 * The slub allocator will use the either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configurations. Store in the second word to
	 * avoid running into the freelist.
	 */
	size_t offset = sizeof(*base);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		base = kmalloc(len, GFP_KERNEL);
		if (!base) {
			pr_err("FAIL: Unable to allocate kfence memory!\n");
			return;
		}

		if (is_kfence_address(base)) {
			/* Got a guarded object: plant a value, free, re-read. */
			val = 0x12345678;
			base[offset] = val;
			pr_info("Value in memory before free: %x\n", base[offset]);

			kfree(base);

			pr_info("Attempting bad read from freed memory\n");
			saw = base[offset];
			if (saw != val) {
				/* Good! Poisoning happened, so declare a win. */
				pr_info("Memory correctly poisoned (%x)\n", saw);
			} else {
				pr_err("FAIL: Memory was not poisoned!\n");
				pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
			}
			return;
		}

		/* Not a KFENCE object: give it back and try again. */
		kfree(base);
		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	pr_err("FAIL: kfence memory never allocated!\n");
}
193
/*
 * Write to a page-allocator (buddy) page after freeing it. schedule()
 * gives the freed page a chance to be reused before the stale write.
 */
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	/* The actual use-after-free under test. */
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}
213
/*
 * Read from a buddy page after freeing it; with page poisoning or
 * init_on_free the planted value must no longer be visible.
 */
static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	/* Separate slab buffer holds the reference copy of the value. */
	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
250
/*
 * Dirty a slab object, free it, reallocate the same size, and check
 * that the 0xAB pattern is gone (i.e. init_on_alloc zeroed it).
 */
static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	/* If we didn't get the same object back, the check below is weak. */
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}
282
/*
 * Same init_on_alloc check as lkdtm_SLAB_INIT_ON_ALLOC(), but against
 * the page (buddy) allocator.
 */
static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = (u8 *)__get_free_page(GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate first free page\n");
		return;
	}

	memset(first, 0xAB, PAGE_SIZE);
	free_page((unsigned long)first);

	val = (u8 *)__get_free_page(GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate second free page\n");
		return;
	}

	/* If we didn't get the same page back, the check below is weak. */
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		/*
		 * NOTE(review): message says "Slab" but this is a buddy
		 * page; left as-is since selftests may grep this string.
		 */
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	free_page((unsigned long)val);
}
315
/*
 * Free the same slab object twice; the second kmem_cache_free() is the
 * deliberate double-free under test.
 */
static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}
332
/*
 * Allocate from a_cache but free into b_cache: a deliberate
 * cross-cache free that the allocator's sanity checks should catch.
 */
static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}
348
349static void lkdtm_SLAB_FREE_PAGE(void)
350{
351	unsigned long p = __get_free_page(GFP_KERNEL);
352
353	pr_info("Attempting non-Slab slab free ...\n");
354	kmem_cache_free(NULL, (void *)p);
355	free_page(p);
356}
357
358/*
359 * We have constructors to keep the caches distinctly separated without
360 * needing to boot with "slab_nomerge".
361 */
362static void ctor_double_free(void *region)
363{ }
364static void ctor_a(void *region)
365{ }
366static void ctor_b(void *region)
367{ }
368
/* Create the dedicated test caches used by the SLAB_FREE_* tests. */
void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}
376
377void __exit lkdtm_heap_exit(void)
378{
379	kmem_cache_destroy(double_free_cache);
380	kmem_cache_destroy(a_cache);
381	kmem_cache_destroy(b_cache);
382}
383
/* Registry of the heap crash tests exported to the lkdtm core. */
static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(KFENCE_READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};
398
/* Category descriptor consumed by the lkdtm core. */
struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};
/* ---- Older duplicate of the same file, as shipped in Linux v6.2 ---- */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This is for all the tests relating directly to heap memory, including
  4 * page allocation and slab allocations.
  5 */
  6#include "lkdtm.h"
 
  7#include <linux/slab.h>
  8#include <linux/vmalloc.h>
  9#include <linux/sched.h>
 10
/* Caches used by the SLAB_FREE_DOUBLE and SLAB_FREE_CROSS tests. */
static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;
 21
 22/*
 23 * If there aren't guard pages, it's likely that a consecutive allocation will
 24 * let us overflow into the second allocation without overwriting something real.
 25 *
 26 * This should always be caught because there is an unconditional unmapped
 27 * page after vmap allocations.
 28 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	two = vzalloc(PAGE_SIZE);

	pr_info("Attempting vmalloc linear overflow ...\n");
	/* __offset is a volatile 1: write one byte past the first page. */
	memset(one, 0xAA, PAGE_SIZE + __offset);

	vfree(two);
	vfree(one);
}
 42
 43/*
 44 * This tries to stay within the next largest power-of-2 kmalloc cache
 45 * to avoid actually overwriting anything important if it's not detected
 46 * correctly.
 47 *
 48 * This should get caught by either memory tagging, KASan, or by using
 49 * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 50 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	/* Keep the compiler from proving the index below out of bounds. */
	OPTIMIZER_HIDE_VAR(data);
	/* Writes 4 bytes at offset 1024, just past the backing object. */
	data[1024 / sizeof(u32)] = 0x12345678;
	kfree(data);
}
 63
/* Write into a slab allocation after freeing it (deliberate UAF). */
static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	/* The actual use-after-free under test. */
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}
 89
/* Read from a freed slab allocation; poisoning should hide the value. */
static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use the either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configurations. Store in the second word to
	 * avoid running into the freelist.
	 *
	 * NOTE(review): offset is an int index, so base[offset] is
	 * word 4 (byte 16), not literally the second word.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
133
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Write to a buddy page after freeing it (deliberate UAF). */
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	/* The actual use-after-free under test. */
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}
153
/* Read from a freed buddy page; poisoning should hide the value. */
static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	/* Separate slab buffer holds the reference copy of the value. */
	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}
190
/* Dirty, free, and reallocate a slab object; init_on_alloc must zero it. */
static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	/* If we didn't get the same object back, the check below is weak. */
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}
222
/* Same init_on_alloc check, but against the page (buddy) allocator. */
static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = (u8 *)__get_free_page(GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate first free page\n");
		return;
	}

	memset(first, 0xAB, PAGE_SIZE);
	free_page((unsigned long)first);

	val = (u8 *)__get_free_page(GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate second free page\n");
		return;
	}

	/* If we didn't get the same page back, the check below is weak. */
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		/* NOTE(review): "Slab" wording is odd for a buddy page. */
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	free_page((unsigned long)val);
}
255
/* Free the same slab object twice (deliberate double-free). */
static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}
272
/* Allocate from a_cache, free into b_cache (deliberate cross-cache free). */
static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}
288
289static void lkdtm_SLAB_FREE_PAGE(void)
290{
291	unsigned long p = __get_free_page(GFP_KERNEL);
292
293	pr_info("Attempting non-Slab slab free ...\n");
294	kmem_cache_free(NULL, (void *)p);
295	free_page(p);
296}
297
/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge". The bodies are intentionally
 * empty: merely having distinct ctors prevents cache merging.
 */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }
308
/* Create the dedicated test caches used by the SLAB_FREE_* tests. */
void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}
316
317void __exit lkdtm_heap_exit(void)
318{
319	kmem_cache_destroy(double_free_cache);
320	kmem_cache_destroy(a_cache);
321	kmem_cache_destroy(b_cache);
322}
323
/* Registry of the heap crash tests (v6.2: no KFENCE test yet). */
static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};
337
/* Category descriptor consumed by the lkdtm core. */
struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};