/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "init.h"
#include "kern_util.h"
#include "mem.h"
#include "os.h"
#include "proc_mm.h"
#include "ptrace_user.h"
#include "registers.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "sysdep/stub.h"

int is_skas_winch(int pid, int fd, void *data)
{
	return pid == getpgrp();
}

static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

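/*
 * Wait for the stub process to stop with a signal in STUB_DONE_MASK
 * (SIGTRAP).  Stops with a signal in STUB_SIG_MASK are benign and the stub
 * is simply continued; any other outcome dumps the stub's registers and is
 * treated as fatal.
 */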
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}

extern unsigned long current_stub_stack(void);

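/*
 * Fetch the fault information for the last SIGSEGV taken by the process.
 * With PTRACE_FAULTINFO available it is read directly from the host;
 * otherwise the SIGSEGV is delivered to the stub, whose handler writes a
 * struct faultinfo to the start of the stub stack page, from which it is
 * copied out.  Running the stub clobbers the FP state, so it is saved and
 * restored around that path.
 */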
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		unsigned long fpregs[FP_SIZE];

		err = get_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "save_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

		err = put_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "put_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
	}
}

static void handle_segv(int pid, struct uml_pt_regs * regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}

/*
 * To use the same value of using_sysemu as the caller, the caller passes
 * that value in (as local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;

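/*
 * Entry point of the clone()d child which will run the userspace code.
 * It requests tracing of itself, sets up the host timer, maps the stub
 * code and data pages (unless /proc/mm will provide them), installs the
 * stub SIGSEGV handler on the stub stack when PTRACE_FAULTINFO is not
 * available, and finally stops itself with SIGSTOP so that
 * start_userspace() can take control of it via ptrace.
 */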
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
		sa.sa_sigaction = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	kill(os_getpid(), SIGSTOP);
	return 0;
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

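/*
 * Create the host process which will run the userspace code: clone()
 * userspace_tramp() on a temporary stack, wait for it to stop itself with
 * SIGSTOP, enable PTRACE_O_TRACESYSGOOD and release the temporary stack.
 * Returns the pid of the new process or a negative error code.
 */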
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : mmap failed, "
		       "errno = %d\n", errno);
		return err;
	}

	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES;
	if (proc_mm)
		flags |= CLONE_VM;
	else
		flags |= SIGCHLD;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : clone failed, "
		       "errno = %d\n", errno);
		return err;
	}

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "start_userspace : wait failed, "
			       "errno = %d\n", errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
		       "status = %d\n", status);
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : munmap failed, "
		       "errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

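/*
 * The main tracing loop: restore the process registers, let the process
 * run with the selected ptrace continuation (sysemu and/or singlestep),
 * wait for it to stop, read its registers back and dispatch on the stop
 * signal - faults, syscall traps, timer ticks and relayed signals are all
 * handled from here.
 */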
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us.*/
	int local_using_sysemu;

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register.  It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process.  However, PTRACE_SETREGS will
		 * fail.  In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		if (put_fp_registers(pid, regs->fp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "userspace - get_fp_registers failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}

static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[FP_SIZE];

static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs, thread_fp_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);

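/*
 * Fork a new userspace process by running the clone stub inside an
 * existing one: the current stub data page is loaded with the mmap
 * arguments and timer setup for the child, the parent stub reports the
 * new pid back through its data page, and the child stub reports success
 * by writing STUB_DATA into its own data page.
 */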
int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset	= MMAP_OFFSET(new_offset),
				      .fd	= new_fd,
				      .timer	= ((struct itimerval)
						   { .it_value = tv,
						     .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	err = put_fp_registers(pid, thread_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
		       "failed, pid = %d, err = %d\n", pid, err);
		return err;
	}

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait, until parent has finished its work: read child's pid from
	 * parent's stack, and check, if bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}

	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait, until child has finished too: read child's result from
	 * child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

/*
 * This is used only, if stub pages are needed, while proc_mm is
 * available. Opening /proc/mm creates a new mm_context, which lacks
 * the stub-pages. Thus, we map them using /proc/mm-fd
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	mmop = ((struct proc_mm_op) { .op	= MM_MMAP,
				      .u	=
				      { .mmap	=
					{ .addr	  = code,
					  .len	  = UM_KERN_PAGE_SIZE,
					  .prot	  = PROT_EXEC,
					  .flags  = MAP_FIXED | MAP_PRIVATE,
					  .fd	  = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op	= MM_MMAP,
				  .u	=
				  { .mmap	=
				    { .addr	= data,
				      .len	= UM_KERN_PAGE_SIZE,
				      .prot	= PROT_READ | PROT_WRITE,
				      .flags	= MAP_FIXED | MAP_SHARED,
				      .fd	= map_fd,
				      .offset	= map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}

void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

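/*
 * Run the idle thread on its own stack.  The setjmp() here marks the
 * point that halt, reboot and initial-thread callbacks long-jump back to;
 * the return value tells the caller whether to halt (0) or reboot (1).
 */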
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}

void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

void __switch_mm(struct mm_id *mm_idp)
{
	int err;

	/* FIXME: need cpu pid in __switch_mm */
	if (proc_mm) {
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if (err) {
			printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}
	}
	else userspace_pid[0] = mm_idp->u.pid;
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include <as-layout.h>
#include <init.h>
#include <kern_util.h>
#include <mem.h>
#include <os.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
#include <sysdep/stub.h>
#include <linux/threads.h>

int is_skas_winch(int pid, int fd, void *data)
{
	return pid == getpgrp();
}

static const char *ptrace_reg_name(int idx)
{
#define R(n) case HOST_##n: return #n

	switch (idx) {
#ifdef __x86_64__
	R(BX);
	R(CX);
	R(DI);
	R(SI);
	R(DX);
	R(BP);
	R(AX);
	R(R8);
	R(R9);
	R(R10);
	R(R11);
	R(R12);
	R(R13);
	R(R14);
	R(R15);
	R(ORIG_AX);
	R(CS);
	R(SS);
	R(EFLAGS);
#elif defined(__i386__)
	R(IP);
	R(SP);
	R(EFLAGS);
	R(AX);
	R(BX);
	R(CX);
	R(DX);
	R(SI);
	R(DI);
	R(BP);
	R(CS);
	R(SS);
	R(DS);
	R(FS);
	R(ES);
	R(GS);
	R(ORIG_AX);
#endif
	}
	return "";
}

static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		const char *regname = ptrace_reg_name(i);

		printk(UM_KERN_ERR "\t%s\t(%2d): %lx\n", regname, i, regs[i]);
	}

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "%s : continue failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, errno = %d\n",
		       -err);
	printk(UM_KERN_ERR "%s : failed to wait for SIGTRAP, pid = %d, n = %d, errno = %d, status = 0x%x\n",
	       __func__, pid, n, errno, status);
	fatal_sigsegv();
}

extern unsigned long current_stub_stack(void);

static void get_skas_faultinfo(int pid, struct faultinfo *fi, unsigned long *aux_fp_regs)
{
	int err;

	err = get_fp_registers(pid, aux_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "save_fp_registers returned %d\n",
		       err);
		fatal_sigsegv();
	}
	err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
	if (err) {
		printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
		       "errno = %d\n", pid, errno);
		fatal_sigsegv();
	}
	wait_stub_done(pid);

	/*
	 * faultinfo is prepared by the stub_segv_handler at start of
	 * the stub stack page. We just have to copy it.
	 */
	memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

	err = put_fp_registers(pid, aux_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "put_fp_registers returned %d\n",
		       err);
		fatal_sigsegv();
	}
}

static void handle_segv(int pid, struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo, aux_fp_regs);
	segv(regs->faultinfo, 0, 1, NULL);
}

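/*
 * A syscall trap coming from inside the stub address range is fatal;
 * any other syscall trap is handed to the generic syscall handler.
 */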
static void handle_trap(int pid, struct uml_pt_regs *regs)
{
	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	handle_syscall(regs);
}

extern char __syscall_stub_start[];

/**
 * userspace_tramp() - userspace trampoline
 * @stack:	pointer to the new userspace stack page
 *
 * The userspace trampoline is used to set up a new userspace process in
 * start_userspace() after it was clone()'ed.
 * This function runs on a temporary stack page.
 * It ptrace()'es itself, then two pages are mapped into the userspace
 * address space:
 * - STUB_CODE (with EXEC), which contains the skas stub code
 * - STUB_DATA (with R/W), which contains a data page that is used to
 *   transfer certain data between the UML userspace process and the UML
 *   kernel.
 * A SIGSEGV handler is also installed for the userspace process, to catch
 * its pagefaults.
 * Finally, the process stops itself to hand control back to the UML kernel
 * for this userspace process.
 *
 * Return: Always zero, otherwise the current userspace process is ended
 * with a non-zero exit() call.
 */
static int userspace_tramp(void *stack)
{
	struct sigaction sa;
	void *addr;
	int fd;
	unsigned long long offset;
	unsigned long segv_handler = STUB_CODE +
				     (unsigned long) stub_segv_handler -
				     (unsigned long) __syscall_stub_start;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);

	fd = phys_mapping(uml_to_phys(__syscall_stub_start), &offset);
	addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
		      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
	if (addr == MAP_FAILED) {
		os_info("mapping mmap stub at 0x%lx failed, errno = %d\n",
			STUB_CODE, errno);
		exit(1);
	}

	fd = phys_mapping(uml_to_phys(stack), &offset);
	addr = mmap((void *) STUB_DATA,
		    STUB_DATA_PAGES * UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
		    MAP_FIXED | MAP_SHARED, fd, offset);
	if (addr == MAP_FAILED) {
		os_info("mapping segfault stack at 0x%lx failed, errno = %d\n",
			STUB_DATA, errno);
		exit(1);
	}

	set_sigstack((void *) STUB_DATA, STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
	sa.sa_sigaction = (void *) segv_handler;
	sa.sa_restorer = NULL;
	if (sigaction(SIGSEGV, &sa, NULL) < 0) {
		os_info("%s - setting SIGSEGV handler failed - errno = %d\n",
			__func__, errno);
		exit(1);
	}

	kill(os_getpid(), SIGSTOP);
	return 0;
}

int userspace_pid[NR_CPUS];
int kill_userspace_mm[NR_CPUS];

/**
 * start_userspace() - prepare a new userspace process
 * @stub_stack:	pointer to the stub stack.
 *
 * Sets up a new temporary stack page that is used while userspace_tramp()
 * runs.  Clones the kernel process into a new userspace process, with FDs
 * only.
 *
 * Return: When positive: the process id of the new userspace process,
 *         when negative: an error number.
 * FIXME: can PIDs become negative?!
 */
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	/* setup a temporary stack page */
	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "%s : mmap failed, errno = %d\n",
		       __func__, errno);
		return err;
	}

	/* set stack pointer to the end of the stack page, so it can grow downwards */
	sp = (unsigned long)stack + UM_KERN_PAGE_SIZE;

	flags = CLONE_FILES | SIGCHLD;

	/* clone into new userspace process */
	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : clone failed, errno = %d\n",
		       __func__, errno);
		return err;
	}

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "%s : wait failed, errno = %d\n",
			       __func__, errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "%s : expected SIGSTOP, got status = %d\n",
		       __func__, status);
		goto out_kill;
	}

	if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : PTRACE_SETOPTIONS failed, errno = %d\n",
		       __func__, errno);
		goto out_kill;
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : munmap failed, errno = %d\n",
		       __func__, errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

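/*
 * The main tracing loop: push the saved registers into the host process,
 * continue it with PTRACE_SYSEMU (optionally single-stepping), wait for
 * the next stop, pull the registers back and dispatch on the stop signal.
 * aux_fp_regs is scratch space used when faultinfo has to be read via the
 * stub.
 */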
void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
{
	int err, status, op, pid = userspace_pid[0];
	siginfo_t si;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	while (1) {
		if (kill_userspace_mm[0])
			fatal_sigsegv();

		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register.  It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process.  However, PTRACE_SETREGS will
		 * fail.  In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "%s - ptrace set regs failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		if (put_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "%s - ptrace set fp regs failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		if (singlestepping())
			op = PTRACE_SYSEMU_SINGLESTEP;
		else
			op = PTRACE_SYSEMU;

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "%s - ptrace continue failed, op = %d, errno = %d\n",
			       __func__, op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "%s - wait failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "%s - PTRACE_GETREGS failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "%s - get_fp_registers failed, errno = %d\n",
			       __func__, errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);

			/* These signal handlers need the si argument.
			 * The SIGIO and SIGALRM handlers, which constitute
			 * the majority of invocations, do not use it.
			 */
			switch (sig) {
			case SIGSEGV:
			case SIGTRAP:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);
				break;
			}

			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo, aux_fp_regs);
					(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
							     regs);
				}
				else handle_segv(pid, regs, aux_fp_regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
				break;
			case SIGALRM:
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals_trace();
				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
				unblock_signals_trace();
				break;
			default:
				printk(UM_KERN_ERR "%s - child stopped with signal %d\n",
				       __func__, sig);
				fatal_sigsegv();
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}

static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[FP_SIZE];

static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs, thread_fp_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) __syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + STUB_DATA_PAGES * UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);

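/*
 * Fork a new userspace process by running the clone stub inside an
 * existing one.  The stub data pages carry the mmap arguments for the
 * child and the results: the parent stub reports the new pid through
 * parent_err, and the child stub signals success by writing STUB_DATA to
 * child_err.
 */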
int copy_context_skas0(unsigned long new_stack, int pid)
{
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(uml_to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) {
		.offset	= MMAP_OFFSET(new_offset),
		.fd	= new_fd,
		.parent_err = -ESRCH,
		.child_err = 0,
	});

	*child_data = ((struct stub_data) {
		.child_err = -ESRCH,
	});

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : PTRACE_SETREGS failed, pid = %d, errno = %d\n",
		       __func__, pid, -err);
		return err;
	}

	err = put_fp_registers(pid, thread_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "%s : put_fp_registers failed, pid = %d, err = %d\n",
		       __func__, pid, err);
		return err;
	}

	/*
	 * Wait, until parent has finished its work: read child's pid from
	 * parent's stack, and check, if bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, errno = %d\n",
		       pid, errno);
		return err;
	}

	wait_stub_done(pid);

	pid = data->parent_err;
	if (pid < 0) {
		printk(UM_KERN_ERR "%s - stub-parent reports error %d\n",
		       __func__, -pid);
		return pid;
	}

	/*
	 * Wait, until child has finished too: read child's result from
	 * child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->child_err != STUB_DATA) {
		printk(UM_KERN_ERR "%s - stub-child %d reports error %ld\n",
		       __func__, pid, data->child_err);
		err = data->child_err;
		goto out_kill;
	}

	if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "%s : PTRACE_SETOPTIONS failed, errno = %d\n",
		       __func__, errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in %s - %d\n",
		       __func__, n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);

	/* unreachable */
	printk(UM_KERN_ERR "impossible long jump!");
	fatal_sigsegv();
	return 0;
}

void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals_trace();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals_trace();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals_trace();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

static bool noreboot;

static int __init noreboot_cmd_param(char *str, int *add)
{
	noreboot = true;
	return 0;
}

__uml_setup("noreboot", noreboot_cmd_param,
"noreboot\n"
"    Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
"    This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
"    crashes in CI\n");

void reboot_skas(void)
{
	block_signals_trace();
	UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}

void __switch_mm(struct mm_id *mm_idp)
{
	userspace_pid[0] = mm_idp->u.pid;
	kill_userspace_mm[0] = mm_idp->kill;
}