// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
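
/*
 * Worked example (the numbers are illustrative, not fixed by this file):
 * with a 16 KiB THREAD_SIZE and CONFIG_FRAME_WARN=2048, REC_STACK_SIZE
 * is 2048 / 2 = 1024 bytes, so REC_NUM_DEFAULT is (16384 / 1024) * 2 = 32
 * recursive calls, roughly twice as many frames as the stack can hold.
 */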

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure the compiler does not optimize this function or its stack
 * frame away:
 * - the function is marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - the function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
		recur_count);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}
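
/*
 * Without the precautions above, the compiler could legally turn the
 * self tail-call into a flat loop that reuses a single stack frame,
 * e.g. something like this sketch, which would never grow the stack:
 *
 *	for (; remaining; remaining--)
 *		;
 *	return 0;
 */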

/* If the depth is negative, use the default; otherwise keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

void lkdtm_PANIC(void)
{
	panic("dumptest");
}

void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

void lkdtm_LOOP(void)
{
	for (;;)
		;
}

void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}
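
/*
 * The tests in this file are normally triggered through the LKDTM
 * debugfs interface (path as documented in provoke-crashes.rst; adjust
 * to wherever debugfs is mounted), e.g.:
 *
 *	echo EXHAUST_STACK > /sys/kernel/debug/provoke-crash/DIRECT
 */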

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

/* Same as above, but will only get a canary with -fstack-protector-strong. */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}
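
/*
 * For reference, the stack-protector instrumentation the two tests
 * above expect to fire is roughly equivalent to the compiler emitting
 * (a simplified sketch, not the actual generated code):
 *
 *	unsigned long canary = __stack_chk_guard;	// prologue
 *	...					// function body
 *	if (canary != __stack_chk_guard)	// epilogue
 *		__stack_chk_fail();
 */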

void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;
}

void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
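
/*
 * Example: the first write below returns with lock_me_up still held,
 * and the second then deadlocks on it (caught by CONFIG_DEBUG_SPINLOCK
 * or the lockup watchdogs, depending on configuration):
 *
 *	echo SPINLOCKUP > /sys/kernel/debug/provoke-crash/DIRECT
 *	echo SPINLOCKUP > /sys/kernel/debug/provoke-crash/DIRECT
 */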

void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}

volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;

void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}
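
/*
 * Notes on the two tests above: value starts at INT_MAX - 2, so the
 * first "+ 1" is fine and the second "+ 4" steps past INT_MAX. For the
 * signed case that is undefined behavior in C, which CONFIG_UBSAN's
 * signed-overflow instrumentation can report. Unsigned arithmetic is
 * defined to wrap, so any report from the unsigned test depends on the
 * optional unsigned-wrap sanitizer being enabled.
 */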

/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};
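
/*
 * For comparison, the modern C99 form, which bounds checkers can treat
 * as a proper flexible array member, would be (illustrative only, not
 * used by this test):
 *
 *	struct array_bounds_flex_array {
 *		int one;
 *		int two;
 *		char data[];
 *	};
 */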

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
}

void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write-what-where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}
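
/*
 * The corruption above is expected to be caught by CONFIG_DEBUG_LIST,
 * whose sanity check in __list_add_valid() is roughly (a simplified
 * sketch of the upstream logic):
 *
 *	if (next->prev != prev || prev->next != next)
 *		return false;	// rejected; list left unmodified
 */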

void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}

/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
	pr_info("setting bad task size limit\n");
	set_fs(KERNEL_DS);

	/* Make sure we do not keep running with a KERNEL_DS! */
	force_sig(SIGKILL);
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
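
/*
 * For reference, the vmalloc-backed stack layout the two guard-page
 * tests probe looks roughly like this: the LEADING test reads one byte
 * below the stack base, the TRAILING test one byte past THREAD_SIZE:
 *
 *	[ guard page ][ THREAD_SIZE stack ][ guard page ]
 *	            ^ LEADING read          ^ TRAILING read
 */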

void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
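
/*
 * The pinning that UNSET_SMEP exercises lives in native_write_cr4();
 * a simplified sketch of that logic (the details vary across kernel
 * versions):
 *
 *	asm volatile("mov %0,%%cr4" : : "r" (val));
 *	if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
 *		// warn, then rewrite cr4 with the pinned bits restored
 *	}
 */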

void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero.  This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault.  The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window.  This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
		/* Reset the keys of the current task. */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * The PAC is a hash computed from the input keys, the return
	 * address, and the stack pointer. Since the PAC has only a few
	 * bits, collisions are possible, so iterate a few times to
	 * reduce the collision probability.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}