// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/minmax.h>
#include <linux/syscall_user_dispatch.h>

#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space;
 * do not walk the page tables directly, use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = access_remote_vm(mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}

void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred());
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop. If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task. However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_task_syscall_work(child, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING. TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count))
		child->jobctl |= JOBCTL_STOP_PENDING;

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt. Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

static bool looks_like_a_spurious_pid(struct task_struct *task)
{
	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
		return false;

	if (task_pid_vnr(task) == task->ptrace_message)
		return false;
	/*
	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
	 * was not wait()'ed, most probably the debugger targets the old
	 * leader which was destroyed in de_thread().
	 */
	return true;
}

/*
 * Ensure that nothing can wake it up, even SIGKILL.
 *
 * A task is switched to this state while a ptrace operation is in progress,
 * so that the ptrace operation is uninterruptible.
 */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
	    !__fatal_signal_pending(task)) {
		task->jobctl |= JOBCTL_PTRACE_FROZEN;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	unsigned long flags;

	/*
	 * The child may be awake and may have cleared
	 * JOBCTL_PTRACE_FROZEN (see ptrace_resume). The child will
	 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
	 */
	if (lock_task_sighand(task, &flags)) {
		task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
		if (__fatal_signal_pending(task)) {
			task->jobctl &= ~JOBCTL_TRACED;
			wake_up_state(task, __TASK_TRACED);
		}
		unlock_task_sighand(task, &flags);
	}
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations. If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing. If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state &&
	    WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED|TASK_FROZEN)))
		ret = -ESRCH;

	return ret;
}

static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
	return ns_capable(ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid) &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * If a task drops privileges and becomes nondumpable (through a syscall
	 * like setresuid()) while we are trying to access it, we must ensure
	 * that the dumpability is read after the credentials; otherwise,
	 * we may be able to attach to a task that we shouldn't be able to
	 * attach to (as if the task had dropped privileges without becoming
	 * nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
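
/*
 * Illustrative sketch (not part of this file): how a hypothetical
 * kernel-side reader of another task's state would typically gate itself
 * on ptrace_may_access(), in the style of the procfs helpers. The
 * function name show_foo() and its calling context are assumptions made
 * purely for illustration.
 *
 *	static int show_foo(struct seq_file *m, struct task_struct *task)
 *	{
 *		// READ_FSCREDS: filesystem-style access check, as used for
 *		// sensitive /proc files; ATTACH_REALCREDS would be the
 *		// stricter check performed before actually attaching.
 *		if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *			return -EPERM;
 *		seq_printf(m, "...\n");
 *		return 0;
 *	}
 */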

static int check_ptrace_options(unsigned long data)
{
	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}
	return 0;
}

static inline void ptrace_set_stopped(struct task_struct *task)
{
	guard(spinlock)(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens. We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state. IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_STOPPED;
		signal_wake_up_state(task, __TASK_STOPPED);
	}
}

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	if (seize) {
		if (addr != 0)
			return -EIO;
		/*
		 * This duplicates the check in check_ptrace_options() because
		 * ptrace_attach() and ptrace_setoptions() have historically
		 * used different error codes for unknown ptrace options.
		 */
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			return -EIO;

		retval = check_ptrace_options(flags);
		if (retval)
			return retval;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	if (unlikely(task->flags & PF_KTHREAD))
		return -EPERM;
	if (same_thread_group(task, current))
		return -EPERM;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	scoped_cond_guard (mutex_intr, return -ERESTARTNOINTR,
			   &task->signal->cred_guard_mutex) {

		scoped_guard (task_lock, task) {
			retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
			if (retval)
				return retval;
		}

		scoped_guard (write_lock_irq, &tasklist_lock) {
			if (unlikely(task->exit_state))
				return -EPERM;
			if (task->ptrace)
				return -EPERM;

			task->ptrace = flags;

			ptrace_link(task, current);

			/* SEIZE doesn't trap tracee on attach */
			if (!seize)
				send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);

			ptrace_set_stopped(task);
		}
	}

	/*
	 * We do not bother to change retval or clear JOBCTL_TRAPPING
	 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
	 * not return to user-mode, it will exit and clear this bit in
	 * __ptrace_unlink() if it wasn't already cleared by the tracee;
	 * and until then nobody can ptrace this task.
	 */
	wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
	proc_ptrace_connector(task, PTRACE_ATTACH);

	return 0;
}
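
/*
 * Illustrative userspace sketch (not part of this file): the tracer-side
 * view of the attach path implemented above. PTRACE_ATTACH sends SIGSTOP
 * and the tracer must wait for the resulting stop; PTRACE_SEIZE attaches
 * without trapping the tracee. The pid and status variables are
 * assumptions for illustration.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	// Classic attach: the tracee is stopped by the SIGSTOP sent above.
 *	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *		return -1;
 *	waitpid(pid, &status, 0);	// observe the initial stop
 *
 *	// Or: seize without stopping; options can be set atomically here.
 *	// ptrace(PTRACE_SEIZE, pid, NULL, PTRACE_O_TRACESYSGOOD);
 */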

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
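
/*
 * Illustrative userspace sketch (not part of this file): the usual
 * PTRACE_TRACEME pattern in a freshly forked child, as used by debuggers
 * that launch (rather than attach to) their tracee. The program path is
 * an assumption for illustration.
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		// A successful execve() delivers SIGTRAP to the now-traced
 *		// child, handing control to the parent before user code runs.
 *		execlp("/bin/true", "true", (char *)NULL);
 *		_exit(127);
 *	}
 *	waitpid(pid, &status, 0);	// parent: wait for the exec stop
 */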

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
					  FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;
	int ret;

	ret = check_ptrace_options(data);
	if (ret)
		return ret;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
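
/*
 * Illustrative userspace sketch (not part of this file): setting ptrace
 * options on a stopped tracee. The particular option mix shown is an
 * assumption; any subset of PTRACE_O_* accepted by check_ptrace_options()
 * works the same way.
 *
 *	// Kill the tracee if the tracer exits, and tag syscall stops with
 *	// (SIGTRAP | 0x80) so they are distinguishable from real signals.
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       PTRACE_O_EXITKILL | PTRACE_O_TRACESYSGOOD);
 */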

static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
			       unsigned long addr,
			       unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
			     sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		kernel_siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
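
/*
 * Illustrative userspace sketch (not part of this file): peeking at a
 * tracee's pending signal queue without dequeueing anything. The batch
 * size of 8 is an arbitrary assumption.
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off   = 0,	// start of the queue
 *		.flags = 0,	// per-thread queue; PTRACE_PEEKSIGINFO_SHARED
 *				// would select the process-wide queue instead
 *		.nr    = 8,	// at most 8 entries
 *	};
 *	siginfo_t infos[8];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);
 *	// n > 0: number of siginfo records copied out, per the loop above
 */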

#ifdef CONFIG_RSEQ
static long ptrace_get_rseq_configuration(struct task_struct *task,
					  unsigned long size, void __user *data)
{
	struct ptrace_rseq_configuration conf = {
		.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
		.rseq_abi_size = task->rseq_len,
		.signature = task->rseq_sig,
		.flags = 0,
	};

	size = min_t(unsigned long, size, sizeof(conf));
	if (copy_to_user(data, &conf, size))
		return -EFAULT;
	return sizeof(conf);
}
#endif

#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_task_syscall_work(child, SYSCALL_TRACE);
	else
		clear_task_syscall_work(child, SYSCALL_TRACE);

#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_task_syscall_work(child, SYSCALL_EMU);
	else
		clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from the tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 */
	spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	child->jobctl &= ~JOBCTL_TRACED;
	wake_up_state(child, __TASK_TRACED);
	spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
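
/*
 * Illustrative userspace sketch (not part of this file): the canonical
 * syscall-tracing loop driven by the resume path above. Assumes the
 * tracee is already attached, stopped, and has PTRACE_O_TRACESYSGOOD
 * set, so syscall stops report as (SIGTRAP | 0x80).
 *
 *	for (;;) {
 *		if (ptrace(PTRACE_SYSCALL, pid, NULL, 0) == -1)
 *			break;		// resume until syscall entry/exit
 *		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
 *			break;
 *		if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
 *			;		// syscall stop: inspect registers here
 *	}
 */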

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
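
/*
 * Illustrative userspace sketch (not part of this file): fetching the
 * general-purpose registers through the regset interface above. The
 * kernel truncates iov_len to the regset size and writes it back, which
 * is why the iovec is passed by address.
 *
 *	#include <elf.h>		// NT_PRSTATUS
 *	#include <sys/uio.h>
 *	#include <sys/user.h>		// struct user_regs_struct
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		;	// iov.iov_len now holds the number of bytes filled
 */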

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);

static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}

static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
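
/*
 * Illustrative userspace sketch (not part of this file): querying
 * PTRACE_GET_SYSCALL_INFO at a syscall stop. addr carries the size of
 * the caller's buffer; the return value is the size the kernel wanted
 * to write, which may exceed what was actually copied if the buffer is
 * smaller than the kernel's structure.
 *
 *	struct ptrace_syscall_info info;
 *	long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
 *			 (void *)sizeof(info), &info);
 *	if (sz > 0 && info.op == PTRACE_SYSCALL_INFO_ENTRY)
 *		;	// info.entry.nr and info.entry.args[] are valid
 */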

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control. At least one trap is guaranteed to happen
		 * after this request. If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception. If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events. Tracee must be in STOP. It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2). If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again. Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now. Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

	case PTRACE_SINGLESTEP:
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
		return 0;

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

#ifdef CONFIG_RSEQ
	case PTRACE_GET_RSEQ_CONFIGURATION:
		ret = ptrace_get_rseq_configuration(child, addr, datavp);
		break;
#endif

	case PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG:
		ret = syscall_user_dispatch_set_config(child, addr, datavp);
		break;

	case PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG:
		ret = syscall_user_dispatch_get_config(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}
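
/*
 * Illustrative userspace sketch (not part of this file): word-at-a-time
 * access that lands in the helpers above. PTRACE_PEEKDATA returns the
 * word itself, so errno must be cleared beforehand to distinguish a
 * legitimate -1 word from an error. The addr value is an assumption for
 * illustration.
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno)
 *		return -1;		// real failure, not a -1 word
 *	ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)(word | 1));
 */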

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
				  FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				       FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				       FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */
1/*
2 * linux/kernel/ptrace.c
3 *
4 * (C) Copyright 1999 Linus Torvalds
5 *
6 * Common interfaces for "ptrace()" which we do not want
7 * to continually duplicate across every architecture.
8 */
9
10#include <linux/capability.h>
11#include <linux/export.h>
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/mm.h>
15#include <linux/highmem.h>
16#include <linux/pagemap.h>
17#include <linux/ptrace.h>
18#include <linux/security.h>
19#include <linux/signal.h>
20#include <linux/uio.h>
21#include <linux/audit.h>
22#include <linux/pid_namespace.h>
23#include <linux/syscalls.h>
24#include <linux/uaccess.h>
25#include <linux/regset.h>
26#include <linux/hw_breakpoint.h>
27#include <linux/cn_proc.h>
28#include <linux/compat.h>
29
30
31/*
32 * ptrace a task: make the debugger its new parent and
33 * move it to the ptrace list.
34 *
35 * Must be called with the tasklist lock write-held.
36 */
37void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
38{
39 BUG_ON(!list_empty(&child->ptrace_entry));
40 list_add(&child->ptrace_entry, &new_parent->ptraced);
41 child->parent = new_parent;
42}
43
44/**
45 * __ptrace_unlink - unlink ptracee and restore its execution state
46 * @child: ptracee to be unlinked
47 *
48 * Remove @child from the ptrace list, move it back to the original parent,
49 * and restore the execution state so that it conforms to the group stop
50 * state.
51 *
52 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
53 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
54 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
55 * If the ptracer is exiting, the ptracee can be in any state.
56 *
57 * After detach, the ptracee should be in a state which conforms to the
58 * group stop. If the group is stopped or in the process of stopping, the
59 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
60 * up from TASK_TRACED.
61 *
62 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
63 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
64 * to but in the opposite direction of what happens while attaching to a
65 * stopped task. However, in this direction, the intermediate RUNNING
66 * state is not hidden even from the current ptracer and if it immediately
67 * re-attaches and performs a WNOHANG wait(2), it may fail.
68 *
69 * CONTEXT:
70 * write_lock_irq(tasklist_lock)
71 */
72void __ptrace_unlink(struct task_struct *child)
73{
74 BUG_ON(!child->ptrace);
75
76 child->parent = child->real_parent;
77 list_del_init(&child->ptrace_entry);
78
79 spin_lock(&child->sighand->siglock);
80 child->ptrace = 0;
81 /*
82 * Clear all pending traps and TRAPPING. TRAPPING should be
83 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
84 */
85 task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
86 task_clear_jobctl_trapping(child);
87
88 /*
89 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
90 * @child isn't dead.
91 */
92 if (!(child->flags & PF_EXITING) &&
93 (child->signal->flags & SIGNAL_STOP_STOPPED ||
94 child->signal->group_stop_count)) {
95 child->jobctl |= JOBCTL_STOP_PENDING;
96
97 /*
98 * This is only possible if this thread was cloned by the
99 * traced task running in the stopped group, set the signal
100 * for the future reports.
101 * FIXME: we should change ptrace_init_task() to handle this
102 * case.
103 */
104 if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
105 child->jobctl |= SIGSTOP;
106 }
107
108 /*
109 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
110 * @child in the butt. Note that @resume should be used iff @child
111 * is in TASK_TRACED; otherwise, we might unduly disrupt
112 * TASK_KILLABLE sleeps.
113 */
114 if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
115 ptrace_signal_wake_up(child, true);
116
117 spin_unlock(&child->sighand->siglock);
118}
119
120/* Ensure that nothing can wake it up, even SIGKILL */
121static bool ptrace_freeze_traced(struct task_struct *task)
122{
123 bool ret = false;
124
125 /* Lockless, nobody but us can set this flag */
126 if (task->jobctl & JOBCTL_LISTENING)
127 return ret;
128
129 spin_lock_irq(&task->sighand->siglock);
130 if (task_is_traced(task) && !__fatal_signal_pending(task)) {
131 task->state = __TASK_TRACED;
132 ret = true;
133 }
134 spin_unlock_irq(&task->sighand->siglock);
135
136 return ret;
137}
138
139static void ptrace_unfreeze_traced(struct task_struct *task)
140{
141 if (task->state != __TASK_TRACED)
142 return;
143
144 WARN_ON(!task->ptrace || task->parent != current);
145
146 spin_lock_irq(&task->sighand->siglock);
147 if (__fatal_signal_pending(task))
148 wake_up_state(task, __TASK_TRACED);
149 else
150 task->state = TASK_TRACED;
151 spin_unlock_irq(&task->sighand->siglock);
152}
153
154/**
155 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
156 * @child: ptracee to check for
157 * @ignore_state: don't check whether @child is currently %TASK_TRACED
158 *
159 * Check whether @child is being ptraced by %current and ready for further
160 * ptrace operations. If @ignore_state is %false, @child also should be in
161 * %TASK_TRACED state and on return the child is guaranteed to be traced
162 * and not executing. If @ignore_state is %true, @child can be in any
163 * state.
164 *
165 * CONTEXT:
166 * Grabs and releases tasklist_lock and @child->sighand->siglock.
167 *
168 * RETURNS:
169 * 0 on success, -ESRCH if %child is not ready.
170 */
171static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
172{
173 int ret = -ESRCH;
174
175 /*
176 * We take the read lock around doing both checks to close a
177 * possible race where someone else was tracing our child and
178 * detached between these two checks. After this locked check,
179 * we are sure that this is our traced child and that can only
180 * be changed by us so it's not changing right after this.
181 */
182 read_lock(&tasklist_lock);
183 if (child->ptrace && child->parent == current) {
184 WARN_ON(child->state == __TASK_TRACED);
185 /*
186 * child->sighand can't be NULL, release_task()
187 * does ptrace_unlink() before __exit_signal().
188 */
189 if (ignore_state || ptrace_freeze_traced(child))
190 ret = 0;
191 }
192 read_unlock(&tasklist_lock);
193
194 if (!ret && !ignore_state) {
195 if (!wait_task_inactive(child, __TASK_TRACED)) {
196 /*
197 * This can only happen if may_ptrace_stop() fails and
198 * ptrace_stop() changes ->state back to TASK_RUNNING,
199 * so we should not worry about leaking __TASK_TRACED.
200 */
201 WARN_ON(child->state == __TASK_TRACED);
202 ret = -ESRCH;
203 }
204 }
205
206 return ret;
207}
208
209static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
210{
211 if (mode & PTRACE_MODE_NOAUDIT)
212 return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
213 else
214 return has_ns_capability(current, ns, CAP_SYS_PTRACE);
215}
216
217/* Returns 0 on success, -errno on denial. */
218static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
219{
220 const struct cred *cred = current_cred(), *tcred;
221 int dumpable = 0;
222 kuid_t caller_uid;
223 kgid_t caller_gid;
224
225 if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
226 WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
227 return -EPERM;
228 }
229
230 /* May we inspect the given task?
231 * This check is used both for attaching with ptrace
232 * and for allowing access to sensitive information in /proc.
233 *
234 * ptrace_attach denies several cases that /proc allows
235 * because setting up the necessary parent/child relationship
236 * or halting the specified task is impossible.
237 */
238
239 /* Don't let security modules deny introspection */
240 if (same_thread_group(task, current))
241 return 0;
242 rcu_read_lock();
243 if (mode & PTRACE_MODE_FSCREDS) {
244 caller_uid = cred->fsuid;
245 caller_gid = cred->fsgid;
246 } else {
247 /*
248 * Using the euid would make more sense here, but something
249 * in userland might rely on the old behavior, and this
250 * shouldn't be a security problem since
251 * PTRACE_MODE_REALCREDS implies that the caller explicitly
252 * used a syscall that requests access to another process
253 * (and not a filesystem syscall to procfs).
254 */
255 caller_uid = cred->uid;
256 caller_gid = cred->gid;
257 }
258 tcred = __task_cred(task);
259 if (uid_eq(caller_uid, tcred->euid) &&
260 uid_eq(caller_uid, tcred->suid) &&
261 uid_eq(caller_uid, tcred->uid) &&
262 gid_eq(caller_gid, tcred->egid) &&
263 gid_eq(caller_gid, tcred->sgid) &&
264 gid_eq(caller_gid, tcred->gid))
265 goto ok;
266 if (ptrace_has_cap(tcred->user_ns, mode))
267 goto ok;
268 rcu_read_unlock();
269 return -EPERM;
270ok:
271 rcu_read_unlock();
272 smp_rmb();
273 if (task->mm)
274 dumpable = get_dumpable(task->mm);
275 rcu_read_lock();
276 if (dumpable != SUID_DUMP_USER &&
277 !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
278 rcu_read_unlock();
279 return -EPERM;
280 }
281 rcu_read_unlock();
282
283 return security_ptrace_access_check(task, mode);
284}
285
286bool ptrace_may_access(struct task_struct *task, unsigned int mode)
287{
288 int err;
289 task_lock(task);
290 err = __ptrace_may_access(task, mode);
291 task_unlock(task);
292 return !err;
293}
294
295static int ptrace_attach(struct task_struct *task, long request,
296 unsigned long addr,
297 unsigned long flags)
298{
299 bool seize = (request == PTRACE_SEIZE);
300 int retval;
301
302 retval = -EIO;
303 if (seize) {
304 if (addr != 0)
305 goto out;
306 if (flags & ~(unsigned long)PTRACE_O_MASK)
307 goto out;
308 flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
309 } else {
310 flags = PT_PTRACED;
311 }
312
313 audit_ptrace(task);
314
315 retval = -EPERM;
316 if (unlikely(task->flags & PF_KTHREAD))
317 goto out;
318 if (same_thread_group(task, current))
319 goto out;
320
321 /*
322 * Protect exec's credential calculations against our interference;
323 * SUID, SGID and LSM creds get determined differently
324 * under ptrace.
325 */
326 retval = -ERESTARTNOINTR;
327 if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
328 goto out;
329
330 task_lock(task);
331 retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
332 task_unlock(task);
333 if (retval)
334 goto unlock_creds;
335
336 write_lock_irq(&tasklist_lock);
337 retval = -EPERM;
338 if (unlikely(task->exit_state))
339 goto unlock_tasklist;
340 if (task->ptrace)
341 goto unlock_tasklist;
342
343 if (seize)
344 flags |= PT_SEIZED;
345 rcu_read_lock();
346 if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
347 flags |= PT_PTRACE_CAP;
348 rcu_read_unlock();
349 task->ptrace = flags;
350
351 __ptrace_link(task, current);
352
353 /* SEIZE doesn't trap tracee on attach */
354 if (!seize)
355 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
356
357 spin_lock(&task->sighand->siglock);
358
359 /*
360 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
361 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
362 * will be cleared if the child completes the transition or any
363 * event which clears the group stop states happens. We'll wait
364 * for the transition to complete before returning from this
365 * function.
366 *
367 * This hides STOPPED -> RUNNING -> TRACED transition from the
368 * attaching thread but a different thread in the same group can
369 * still observe the transient RUNNING state. IOW, if another
370 * thread's WNOHANG wait(2) on the stopped tracee races against
371 * ATTACH, the wait(2) may fail due to the transient RUNNING.
372 *
373 * The following task_is_stopped() test is safe as both transitions
374 * in and out of STOPPED are protected by siglock.
375 */
376 if (task_is_stopped(task) &&
377 task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
378 signal_wake_up_state(task, __TASK_STOPPED);
379
380 spin_unlock(&task->sighand->siglock);
381
382 retval = 0;
383unlock_tasklist:
384 write_unlock_irq(&tasklist_lock);
385unlock_creds:
386 mutex_unlock(&task->signal->cred_guard_mutex);
387out:
388 if (!retval) {
389 /*
390 * We do not bother to change retval or clear JOBCTL_TRAPPING
391 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
392 * not return to user-mode, it will exit and clear this bit in
393 * __ptrace_unlink() if it wasn't already cleared by the tracee;
394 * and until then nobody can ptrace this task.
395 */
396 wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
397 proc_ptrace_connector(task, PTRACE_ATTACH);
398 }
399
400 return retval;
401}
402
403/**
404 * ptrace_traceme -- helper for PTRACE_TRACEME
405 *
406 * Performs checks and sets PT_PTRACED.
407 * Should be used by all ptrace implementations for PTRACE_TRACEME.
408 */
409static int ptrace_traceme(void)
410{
411 int ret = -EPERM;
412
413 write_lock_irq(&tasklist_lock);
414 /* Are we already being traced? */
415 if (!current->ptrace) {
416 ret = security_ptrace_traceme(current->parent);
417 /*
418 * Check PF_EXITING to ensure ->real_parent has not passed
419 * exit_ptrace(). Otherwise we don't report the error but
420 * pretend ->real_parent untraces us right after return.
421 */
422 if (!ret && !(current->real_parent->flags & PF_EXITING)) {
423 current->ptrace = PT_PTRACED;
424 __ptrace_link(current, current->real_parent);
425 }
426 }
427 write_unlock_irq(&tasklist_lock);
428
429 return ret;
430}
431
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in which case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(): the child can't be killed and
	 * untraced by another thread, and it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

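/*
 * Example (illustrative, user space): PTRACE_DETACH takes an optional
 * signal number in the data argument, delivered to the tracee as it
 * resumes:
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)0);
 *
 * or, to deliver SIGCONT on resume:
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGCONT);
 */
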
/*
 * Detach all tasks we were using ptrace on. Called with tasklist_lock
 * held for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

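/*
 * Example (illustrative, user space): these block helpers are used by
 * architecture code; the portable equivalent is a word-at-a-time
 * PTRACE_PEEKDATA loop, chunking much like the kernel loop above:
 *
 *	for (i = 0; i < len; i += sizeof(long)) {
 *		errno = 0;
 *		long word = ptrace(PTRACE_PEEKDATA, pid, addr + i, NULL);
 *		if (word == -1 && errno)
 *			break;
 *		memcpy(buf + i, &word, sizeof(long));
 *	}
 */
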
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

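/*
 * Example (illustrative, user space): a tracer usually sets options
 * right after the first stop, e.g. to distinguish syscall stops and to
 * make the tracee die if the tracer exits:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_EXITKILL);
 *
 * PTRACE_O_EXITKILL is what arms the PT_EXITKILL check performed by
 * exit_ptrace() above.
 */
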
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

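/*
 * Example (illustrative, user space): after waitpid() reports a
 * signal-delivery stop, the tracer can inspect or rewrite the pending
 * siginfo before deciding which signal (if any) to inject on resume:
 *
 *	siginfo_t si;
 *	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == 0 &&
 *	    si.si_signo == SIGSEGV)
 *		printf("fault address: %p\n", si.si_addr);
 */
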
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}

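/*
 * Example (illustrative, user space): peeking at a stopped tracee's
 * private signal queue without dequeuing anything. The return value is
 * the number of siginfo structures copied out:
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0, .flags = 0, .nr = 16,
 *	};
 *	siginfo_t sigs[16];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, sigs);
 */
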
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from the tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

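/*
 * Example (illustrative, user space): the classic syscall-tracing loop
 * driven by PTRACE_SYSCALL. Each resume runs the tracee to the next
 * syscall entry or exit, where it stops again:
 *
 *	for (;;) {
 *		if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
 *			break;
 *		if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
 *			break;
 *		(inspect registers here to decode the syscall)
 *	}
 */
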
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

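/*
 * Example (illustrative, user space): the regset is selected by an ELF
 * note type and the buffer is described by an iovec; since iov_len is
 * truncated to the regset size, callers may over-allocate:
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */
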
/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control. At least one trap is guaranteed to happen
		 * after this request. If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb an existing trap, with one
		 * exception: if the ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * the tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events. Tracee must be in STOP. It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2). If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again. Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means an event happened between
			 * the start of this trap and now. Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

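/*
 * Example (illustrative, user space): the PTRACE_SEIZE-based lifecycle
 * handled by the syscall above. Unlike PTRACE_ATTACH, SEIZE does not
 * stop the tracee; PTRACE_INTERRUPT traps it on demand:
 *
 *	ptrace(PTRACE_SEIZE, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD);
 *	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	(inspect the stopped tracee here)
 *	ptrace(PTRACE_DETACH, pid, NULL, NULL);
 */
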
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

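/*
 * Example (illustrative, user space): because glibc's ptrace() returns
 * the peeked word directly for PTRACE_PEEKDATA, a value of -1 is
 * ambiguous and errno must be used to distinguish errors:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 *	else
 *		ptrace(PTRACE_POKEDATA, pid, addr, (void *)(word | 1));
 */
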
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */