// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
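/*
 * For example, with CONFIG_FRAME_WARN=1024 each recursion buffer is
 * 512 bytes, and with THREAD_SIZE=16KB the default depth is
 * (16384 / 512) * 2 = 64 calls: roughly twice what fits on the stack.
 */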

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
		recur_count);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

void lkdtm_PANIC(void)
{
	panic("dumptest");
}

void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

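/* Write through a NULL pointer to trigger a fatal page fault. */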
void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

void lkdtm_LOOP(void)
{
	for (;;)
		;
}

void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

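/*
 * The memset() is done in a separate noinline function so the compiler
 * cannot see, at the call site, that the 64-byte write is larger than
 * the caller's buffer, which could otherwise be rejected or optimized
 * away at build time.
 */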
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

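/*
 * Record the address of a stack variable on the first call (per pid)
 * and report the offset from it on later calls; useful for observing
 * kernel stack offset randomization across syscalls.
 */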
void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}

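/*
 * Do a 32-bit load and store through a pointer offset one byte into a
 * 4-byte-aligned buffer; architectures without efficient unaligned
 * access should fault here.
 */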
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}

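/* Busy-loop with preemption disabled to trip the soft lockup watchdog. */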
void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

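/* Busy-loop with IRQs disabled to trip the NMI-based hardlockup detector. */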
void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}

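/*
 * Block in TASK_UNINTERRUPTIBLE without ever waking up, so the hung
 * task detector (CONFIG_DETECT_HUNG_TASK) reports us after its
 * timeout expires.
 */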
void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}

volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;

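/*
 * "huge" is volatile so the compiler cannot evaluate the additions
 * below at build time; the overflowing additions are meant to be
 * caught by the integer overflow sanitizer when it is enabled.
 */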
void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}

/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

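/*
 * With CONFIG_UBSAN_BOUNDS, the one-past-the-end write to
 * checked->data below should trap; the 1-byte trailing array is
 * treated as a flexible array member and is deliberately left
 * uninstrumented.
 */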
void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
}

void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL) {
		pr_err("Overwrite did not happen, but no BUG?!\n");
	} else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
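	/*
	 * list_del() will then perform:
	 *	next->prev = prev
	 *	prev->next = next
	 * With "next" redirected at target[], the first of those writes
	 * lands inside the target buffer unless CONFIG_DEBUG_LIST
	 * detects the corruption first.
	 */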
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL) {
		pr_err("Overwrite did not happen, but no BUG?!\n");
	} else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}

void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

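	/*
	 * With CR4 pinning in place, native_write_cr4() is expected to
	 * notice the missing pinned bit, WARN, and write back a value
	 * with SMEP restored, which is what the read-back below checks.
	 */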
	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * PAC is a hash value computed from input keys, return address and
	 * stack pointer. Because the PAC has relatively few bits, collisions
	 * are possible, so iterate a few times to reduce the chance that a
	 * colliding hash lets a return slip through.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}

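/*
 * The fortified memcmp() (CONFIG_FORTIFY_SOURCE) checks the length
 * against the compile-time size of each object and calls
 * fortify_panic() when the runtime length exceeds it.
 */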
void lkdtm_FORTIFY_OBJECT(void)
{
	struct target {
		char a[10];
	} target[2] = {};
	int result;

	/*
	 * Using volatile prevents the compiler from determining the value of
	 * 'size' at compile time. Without that, we would get a compile error
	 * rather than a runtime error.
	 */
	volatile int size = 11;

	pr_info("trying to read past the end of a struct\n");

	result = memcmp(&target[0], &target[1], size);

	/* Print result to prevent the code from being eliminated */
	pr_err("FAIL: fortify did not catch an object overread!\n"
	       "\"%d\" was the memcmp result.\n", result);
}

void lkdtm_FORTIFY_SUBOBJECT(void)
{
	struct target {
		char a[10];
		char b[10];
	} target;
	char *src;

	src = kmalloc(20, GFP_KERNEL);
	strscpy(src, "over ten bytes", 20);

	pr_info("trying to strcpy past the end of a member of a struct\n");

	/*
	 * strncpy(target.a, src, 20); would hit a compile error because the
	 * compiler knows at build time that target.a is smaller than 20
	 * bytes. Use strcpy() to force a runtime error instead.
	 */
	strcpy(target.a, src);

	/* Use target.a to prevent the code from being eliminated */
	pr_err("FAIL: fortify did not catch a sub-object overrun!\n"
	       "\"%s\" was copied.\n", target.a);

	kfree(src);
}