v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/cpu.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif
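
/*
 * Usage sketch (illustrative, assuming CONFIG_LKDTM and debugfs mounted
 * at /sys/kernel/debug): each lkdtm_*() handler below is registered in
 * the crashtypes[] table at the bottom of this file and can be triggered
 * from userspace, e.g.:
 *
 *	echo CORRUPT_STACK > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * See Documentation/fault-injection/provoke-crashes.rst for details.
 */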

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger a
 * compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
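
/*
 * Worked example with illustrative values: assuming CONFIG_FRAME_WARN=1024
 * and THREAD_SIZE=16384 (a 16 KiB stack), REC_STACK_SIZE is 1024 / 2 = 512
 * bytes per frame and REC_NUM_DEFAULT is (16384 / 512) * 2 = 64 frames,
 * i.e. about twice THREAD_SIZE of stack usage, so the default recursion
 * depth comfortably runs off the end of the stack.
 */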

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (buf[..] passed as arg)
 * - function may have external effects (memzero_explicit())
 * - no tail recursion possible
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];
	volatile int ret;

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	if (!remaining)
		ret = 0;
	else
		ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
	memzero_explicit((void *)buf, sizeof(buf));
	return ret;
}

/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

static void lkdtm_PANIC(void)
{
	panic("dumptest");
}

static int panic_stop_irqoff_fn(void *arg)
{
	atomic_t *v = arg;

	/*
	 * As stop_machine() disables interrupts, all CPUs within this function
	 * have interrupts disabled and cannot take a regular IPI.
	 *
	 * The last CPU which enters here will trigger a panic, and as all CPUs
	 * cannot take a regular IPI, we'll only be able to stop secondaries if
	 * smp_send_stop() or crash_smp_send_stop() uses an NMI.
	 */
	if (atomic_inc_return(v) == num_online_cpus())
		panic("panic stop irqoff test");

	for (;;)
		cpu_relax();
}

static void lkdtm_PANIC_STOP_IRQOFF(void)
{
	atomic_t v = ATOMIC_INIT(0);
	stop_machine(panic_stop_irqoff_fn, &v, cpu_online_mask);
}

static void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

static void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

static void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

static void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

static void lkdtm_LOOP(void)
{
	for (;;)
		;
}

static void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
static noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}
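
/*
 * Note (a sketch of how -fstack-protector generally behaves, not a
 * guarantee for every arch): the compiler places a canary word between
 * "data" and the function's saved frame data, and the epilogue compares
 * that slot against the reference canary (current->stack_canary under
 * CONFIG_STACKPROTECTOR), calling __stack_chk_fail() on mismatch. The
 * 64-byte memset() above is sized to clobber that slot.
 */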

/* Same as above but will only get a canary with -fstack-protector-strong */
static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

static void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}
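
/*
 * Note (illustrative): repeated calls from the same pid print the delta
 * between the first and current stack address of "magic", which is one
 * way to observe CONFIG_RANDOMIZE_KSTACK_OFFSET at work; deltas that are
 * always 0 suggest no per-syscall stack offset randomization.
 */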

static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;

static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
	int i = 0;
	pid_t pid = task_pid_nr(current);
	unsigned long *canary = (unsigned long *)stack;
	unsigned long current_offset = 0, init_offset = 0;

	/* Do our best to find the canary in a 16 word window ... */
	for (i = 1; i < 16; i++) {
		canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
		if (*canary == current->stack_canary)
			current_offset = i;
		if (*canary == init_task.stack_canary)
			init_offset = i;
#endif
	}

	if (current_offset == 0) {
		/*
		 * If the canary doesn't match what's in the task_struct,
		 * we're either using a global canary or the stack frame
		 * layout changed.
		 */
		if (init_offset != 0) {
			pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
			       init_offset, pid);
		} else {
			pr_warn("FAIL: did not correctly locate stack canary :(\n");
			pr_expected_config(CONFIG_STACKPROTECTOR);
		}

		return;
	} else if (init_offset != 0) {
		pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
	}

	canary = (unsigned long *)stack + current_offset;
	if (stack_canary_pid == 0) {
		stack_canary = *canary;
		stack_canary_pid = pid;
		stack_canary_offset = current_offset;
		pr_info("Recorded stack canary for pid %d at offset %ld\n",
			stack_canary_pid, stack_canary_offset);
	} else if (pid == stack_canary_pid) {
		pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
	} else {
		if (current_offset != stack_canary_offset) {
			pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
				stack_canary_offset, current_offset);
			return;
		}

		if (*canary == stack_canary) {
			pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
				stack_canary_pid, pid, current_offset);
		} else {
			pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
				stack_canary_pid, pid, current_offset);
			/* Reset the test. */
			stack_canary_pid = 0;
		}
	}
}

static void lkdtm_REPORT_STACK_CANARY(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *)) = { };

	__lkdtm_REPORT_STACK_CANARY((void *)&data);
}

static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}

static void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}
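
/*
 * Note (illustrative): the two loops above target different watchdogs:
 * SOFTLOCKUP spins with preemption disabled, which the soft-lockup
 * detector (CONFIG_SOFTLOCKUP_DETECTOR) should report, while HARDLOCKUP
 * spins with interrupts disabled, which should trip the NMI-based
 * hard-lockup detector (CONFIG_HARDLOCKUP_DETECTOR) where available.
 */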

static void __lkdtm_SMP_CALL_LOCKUP(void *unused)
{
	for (;;)
		cpu_relax();
}

static void lkdtm_SMP_CALL_LOCKUP(void)
{
	unsigned int cpu, target;

	cpus_read_lock();

	cpu = get_cpu();
	target = cpumask_any_but(cpu_online_mask, cpu);

	if (target >= nr_cpu_ids) {
		pr_err("FAIL: no other online CPUs\n");
		goto out_put_cpus;
	}

	smp_call_function_single(target, __lkdtm_SMP_CALL_LOCKUP, NULL, 1);

	pr_err("FAIL: did not hang\n");

out_put_cpus:
	put_cpu();
	cpus_read_unlock();
}

static void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
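
/*
 * Note (illustrative): the first SPINLOCKUP call returns while still
 * holding lock_me_up, so a second call spins forever trying to acquire
 * it again. With CONFIG_DEBUG_SPINLOCK or lockdep enabled, this
 * self-deadlock is typically reported rather than hanging silently.
 */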

static void __noreturn lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
	BUG();
}

static volatile unsigned int huge = INT_MAX - 2;
static volatile unsigned int ignored;

static void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}


static void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}
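
/*
 * Note (illustrative): with huge = INT_MAX - 2, the "+ 1" additions stay
 * in range while the later "+ 4" carries the signed value past INT_MAX.
 * Signed overflow is undefined behavior in C, so UBSAN's signed-overflow
 * instrumentation can flag it; unsigned arithmetic wraps by definition,
 * so reporting it requires explicit wrap-around instrumentation.
 */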

/* Intentionally using unannotated flex array definition. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

static void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < 2; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
	if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

struct lkdtm_annotated {
	unsigned long flags;
	int count;
	int array[] __counted_by(count);
};
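
/*
 * Note (illustrative): __counted_by(count) tells the compiler that
 * "array" holds inst->count elements, letting CONFIG_UBSAN_BOUNDS check
 * flexible-array indexing at runtime. Below, the allocation has room for
 * fam_count + 1 elements but count is set to fam_count, so
 * inst->array[fam_count] is within the allocation yet outside the
 * annotated bounds, and should be trapped.
 */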

static volatile int fam_count = 4;

static void lkdtm_FAM_BOUNDS(void)
{
	struct lkdtm_annotated *inst;

	inst = kzalloc(struct_size(inst, array, fam_count + 1), GFP_KERNEL);
	if (!inst) {
		pr_err("FAIL: could not allocate test struct!\n");
		return;
	}

	inst->count = fam_count;
	pr_info("Array access within bounds ...\n");
	inst->array[1] = fam_count;
	ignored = inst->array[1];

	pr_info("Array access beyond bounds ...\n");
	inst->array[fam_count] = fam_count;
	ignored = inst->array[fam_count];

	kfree(inst);

	pr_err("FAIL: survived access of invalid flexible array member index!\n");

	if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
		pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
			lkdtm_kernel_info);
	else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

static void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address &bad.node, and the "where" is the address held by
	 * "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);
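	/*
	 * Note (illustrative): without CONFIG_LIST_HARDENED, __list_add()
	 * performs next->prev = new with next == redirection, writing
	 * &bad.node into target[1] (the "prev" slot of the fake node).
	 * The check below looks for exactly that stray write.
	 */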

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_LIST_HARDENED);
	}
}

static void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);
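	/*
	 * Note (illustrative): without CONFIG_LIST_HARDENED, __list_del()
	 * performs next->prev = prev with next == redirection, writing
	 * &test_head into target[1]. Again, the check below looks for
	 * that stray write.
	 */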

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_LIST_HARDENED);
	}
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
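
/*
 * Note (illustrative): with CONFIG_VMAP_STACK, task stacks live in the
 * vmalloc area, where allocations are separated by unmapped guard pages.
 * Reading one byte below or above the THREAD_SIZE stack region should
 * therefore fault immediately instead of silently reading whatever
 * happens to be adjacent.
 */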

static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	OPTIMIZER_HIDE_VAR(insn);
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero.  This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault.  The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window.  This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * The PAC is a hash computed from the input keys, the return
	 * address, and the stack pointer. Since the PAC has relatively few
	 * bits, collisions are possible, so iterate a few times to reduce
	 * the chance of one.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(PANIC),
	CRASHTYPE(PANIC_STOP_IRQOFF),
	CRASHTYPE(BUG),
	CRASHTYPE(WARNING),
	CRASHTYPE(WARNING_MESSAGE),
	CRASHTYPE(EXCEPTION),
	CRASHTYPE(LOOP),
	CRASHTYPE(EXHAUST_STACK),
	CRASHTYPE(CORRUPT_STACK),
	CRASHTYPE(CORRUPT_STACK_STRONG),
	CRASHTYPE(REPORT_STACK),
	CRASHTYPE(REPORT_STACK_CANARY),
	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
	CRASHTYPE(SOFTLOCKUP),
	CRASHTYPE(HARDLOCKUP),
	CRASHTYPE(SMP_CALL_LOCKUP),
	CRASHTYPE(SPINLOCKUP),
	CRASHTYPE(HUNG_TASK),
	CRASHTYPE(OVERFLOW_SIGNED),
	CRASHTYPE(OVERFLOW_UNSIGNED),
	CRASHTYPE(ARRAY_BOUNDS),
	CRASHTYPE(FAM_BOUNDS),
	CRASHTYPE(CORRUPT_LIST_ADD),
	CRASHTYPE(CORRUPT_LIST_DEL),
	CRASHTYPE(STACK_GUARD_PAGE_LEADING),
	CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
	CRASHTYPE(UNSET_SMEP),
	CRASHTYPE(DOUBLE_FAULT),
	CRASHTYPE(CORRUPT_PAC),
};

struct crashtype_category bugs_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};