/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	rcu_read_lock();
	child->ptracer_cred = get_cred(__task_cred(new_parent));
	rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

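/*
 * A live ptracee normally sleeps in TASK_TRACED, which includes
 * TASK_WAKEKILL, so a fatal signal can still wake it.  The freeze below
 * narrows the state to bare __TASK_TRACED so the tracee cannot start
 * running while the tracer inspects or modifies it;
 * ptrace_unfreeze_traced() undoes this.
 */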
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

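/* Undo ptrace_freeze_traced(), re-allowing SIGKILL to wake the tracee */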
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

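/*
 * Check for CAP_SYS_PTRACE relative to @ns; PTRACE_MODE_NOAUDIT requests
 * that a denied check not be logged by the audit subsystem.
 */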
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

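/*
 * Attach to @task for PTRACE_ATTACH or PTRACE_SEIZE (@request selects
 * which).  For SEIZE, @flags carries PTRACE_O_* options to be set
 * atomically with the attach; plain ATTACH additionally sends SIGSTOP
 * to trap the tracee.
 */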
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

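/*
 * Copy @len bytes from the tracee's @src address to the tracer's @dst
 * buffer in 128-byte chunks, stopping early at the first unreadable page.
 * ptrace_writedata() below is the mirror image for writes.
 */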
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
					  FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

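/* Update the PTRACE_O_* option bits in child->ptrace for PTRACE_SETOPTIONS */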
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

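/*
 * Fetch or overwrite the siginfo of the signal that stopped the tracee,
 * for PTRACE_GETSIGINFO/PTRACE_SETSIGINFO.  Fails with -EINVAL if the
 * tracee is not currently stopped at a signal (no last_siginfo).
 */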
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

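/*
 * PTRACE_PEEKSIGINFO: copy out up to arg.nr queued siginfos starting at
 * arg.off, from either the per-thread or the shared pending queue,
 * without dequeueing them.  Returns the number of siginfos copied.
 */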
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

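/*
 * Resume the tracee for PTRACE_CONT and friends.  @request selects the
 * resume mode (plain continue, syscall trace, single-step, block-step,
 * sysemu) and @data is the signal number to deliver, 0 for none.
 */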
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

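/* Look up the regset with core note type @type in the view's table */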
static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

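/*
 * Handle the machine-independent ptrace requests; arch_ptrace() falls
 * back to this for anything it does not handle itself.
 */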
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

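/* Find the target task by its pid in the caller's namespace and take a reference */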
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

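/*
 * The ptrace(2) entry point: resolve the target, handle TRACEME and
 * ATTACH/SEIZE specially, and for all other requests freeze the tracee
 * via ptrace_check_attach() before calling into arch_ptrace().
 */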
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

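/*
 * Read one word from the tracee at @addr and store it to user address
 * @data; generic_ptrace_pokedata() below is the write counterpart.
 */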
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
				  FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

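/*
 * Compat counterpart of ptrace_request(): handles the 32-bit layouts of
 * words, siginfo and iovecs, and defers everything else to
 * ptrace_request().
 */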
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				       FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				       FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/ptrace.c
4 *
5 * (C) Copyright 1999 Linus Torvalds
6 *
7 * Common interfaces for "ptrace()" which we do not want
8 * to continually duplicate across every architecture.
9 */
10
11#include <linux/capability.h>
12#include <linux/export.h>
13#include <linux/sched.h>
14#include <linux/sched/mm.h>
15#include <linux/sched/coredump.h>
16#include <linux/sched/task.h>
17#include <linux/errno.h>
18#include <linux/mm.h>
19#include <linux/highmem.h>
20#include <linux/pagemap.h>
21#include <linux/ptrace.h>
22#include <linux/security.h>
23#include <linux/signal.h>
24#include <linux/uio.h>
25#include <linux/audit.h>
26#include <linux/pid_namespace.h>
27#include <linux/syscalls.h>
28#include <linux/uaccess.h>
29#include <linux/regset.h>
30#include <linux/hw_breakpoint.h>
31#include <linux/cn_proc.h>
32#include <linux/compat.h>
33#include <linux/sched/signal.h>
34#include <linux/minmax.h>
35
36#include <asm/syscall.h> /* for syscall_get_* */
37
38/*
39 * Access another process' address space via ptrace.
40 * Source/target buffer must be kernel space,
41 * Do not walk the page table directly, use get_user_pages
42 */
43int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
44 void *buf, int len, unsigned int gup_flags)
45{
46 struct mm_struct *mm;
47 int ret;
48
49 mm = get_task_mm(tsk);
50 if (!mm)
51 return 0;
52
53 if (!tsk->ptrace ||
54 (current != tsk->parent) ||
55 ((get_dumpable(mm) != SUID_DUMP_USER) &&
56 !ptracer_capable(tsk, mm->user_ns))) {
57 mmput(mm);
58 return 0;
59 }
60
61 ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
62 mmput(mm);
63
64 return ret;
65}
66
67
68void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
69 const struct cred *ptracer_cred)
70{
71 BUG_ON(!list_empty(&child->ptrace_entry));
72 list_add(&child->ptrace_entry, &new_parent->ptraced);
73 child->parent = new_parent;
74 child->ptracer_cred = get_cred(ptracer_cred);
75}
76
77/*
78 * ptrace a task: make the debugger its new parent and
79 * move it to the ptrace list.
80 *
81 * Must be called with the tasklist lock write-held.
82 */
83static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
84{
85 __ptrace_link(child, new_parent, current_cred());
86}
87
88/**
89 * __ptrace_unlink - unlink ptracee and restore its execution state
90 * @child: ptracee to be unlinked
91 *
92 * Remove @child from the ptrace list, move it back to the original parent,
93 * and restore the execution state so that it conforms to the group stop
94 * state.
95 *
96 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
97 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
98 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
99 * If the ptracer is exiting, the ptracee can be in any state.
100 *
101 * After detach, the ptracee should be in a state which conforms to the
102 * group stop. If the group is stopped or in the process of stopping, the
103 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
104 * up from TASK_TRACED.
105 *
106 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
107 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
108 * to but in the opposite direction of what happens while attaching to a
109 * stopped task. However, in this direction, the intermediate RUNNING
110 * state is not hidden even from the current ptracer and if it immediately
111 * re-attaches and performs a WNOHANG wait(2), it may fail.
112 *
113 * CONTEXT:
114 * write_lock_irq(tasklist_lock)
115 */
116void __ptrace_unlink(struct task_struct *child)
117{
118 const struct cred *old_cred;
119 BUG_ON(!child->ptrace);
120
121 clear_task_syscall_work(child, SYSCALL_TRACE);
122#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
123 clear_task_syscall_work(child, SYSCALL_EMU);
124#endif
125
126 child->parent = child->real_parent;
127 list_del_init(&child->ptrace_entry);
128 old_cred = child->ptracer_cred;
129 child->ptracer_cred = NULL;
130 put_cred(old_cred);
131
132 spin_lock(&child->sighand->siglock);
133 child->ptrace = 0;
134 /*
135 * Clear all pending traps and TRAPPING. TRAPPING should be
136 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
137 */
138 task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
139 task_clear_jobctl_trapping(child);
140
141 /*
142 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
143 * @child isn't dead.
144 */
145 if (!(child->flags & PF_EXITING) &&
146 (child->signal->flags & SIGNAL_STOP_STOPPED ||
147 child->signal->group_stop_count)) {
148 child->jobctl |= JOBCTL_STOP_PENDING;
149
150 /*
151 * This is only possible if this thread was cloned by the
152 * traced task running in the stopped group, set the signal
153 * for the future reports.
154 * FIXME: we should change ptrace_init_task() to handle this
155 * case.
156 */
157 if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
158 child->jobctl |= SIGSTOP;
159 }
160
161 /*
162 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
163 * @child in the butt. Note that @resume should be used iff @child
164 * is in TASK_TRACED; otherwise, we might unduly disrupt
165 * TASK_KILLABLE sleeps.
166 */
167 if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
168 ptrace_signal_wake_up(child, true);
169
170 spin_unlock(&child->sighand->siglock);
171}
172
173static bool looks_like_a_spurious_pid(struct task_struct *task)
174{
175 if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
176 return false;
177
178 if (task_pid_vnr(task) == task->ptrace_message)
179 return false;
180 /*
181 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
182 * was not wait()'ed, most probably debugger targets the old
183 * leader which was destroyed in de_thread().
184 */
185 return true;
186}
187
188/* Ensure that nothing can wake it up, even SIGKILL */
189static bool ptrace_freeze_traced(struct task_struct *task)
190{
191 bool ret = false;
192
193 /* Lockless, nobody but us can set this flag */
194 if (task->jobctl & JOBCTL_LISTENING)
195 return ret;
196
197 spin_lock_irq(&task->sighand->siglock);
198 if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
199 !__fatal_signal_pending(task)) {
200 WRITE_ONCE(task->__state, __TASK_TRACED);
201 ret = true;
202 }
203 spin_unlock_irq(&task->sighand->siglock);
204
205 return ret;
206}
207
208static void ptrace_unfreeze_traced(struct task_struct *task)
209{
210 if (READ_ONCE(task->__state) != __TASK_TRACED)
211 return;
212
213 WARN_ON(!task->ptrace || task->parent != current);
214
215 /*
216 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
217 * Recheck state under the lock to close this race.
218 */
219 spin_lock_irq(&task->sighand->siglock);
220 if (READ_ONCE(task->__state) == __TASK_TRACED) {
221 if (__fatal_signal_pending(task))
222 wake_up_state(task, __TASK_TRACED);
223 else
224 WRITE_ONCE(task->__state, TASK_TRACED);
225 }
226 spin_unlock_irq(&task->sighand->siglock);
227}
228
229/**
230 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
231 * @child: ptracee to check for
232 * @ignore_state: don't check whether @child is currently %TASK_TRACED
233 *
234 * Check whether @child is being ptraced by %current and ready for further
235 * ptrace operations. If @ignore_state is %false, @child also should be in
236 * %TASK_TRACED state and on return the child is guaranteed to be traced
237 * and not executing. If @ignore_state is %true, @child can be in any
238 * state.
239 *
240 * CONTEXT:
241 * Grabs and releases tasklist_lock and @child->sighand->siglock.
242 *
243 * RETURNS:
244 * 0 on success, -ESRCH if %child is not ready.
245 */
246static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
247{
248 int ret = -ESRCH;
249
250 /*
251 * We take the read lock around doing both checks to close a
252 * possible race where someone else was tracing our child and
253 * detached between these two checks. After this locked check,
254 * we are sure that this is our traced child and that can only
255 * be changed by us so it's not changing right after this.
256 */
257 read_lock(&tasklist_lock);
258 if (child->ptrace && child->parent == current) {
259 WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
260 /*
261 * child->sighand can't be NULL, release_task()
262 * does ptrace_unlink() before __exit_signal().
263 */
264 if (ignore_state || ptrace_freeze_traced(child))
265 ret = 0;
266 }
267 read_unlock(&tasklist_lock);
268
269 if (!ret && !ignore_state) {
270 if (!wait_task_inactive(child, __TASK_TRACED)) {
271 /*
272 * This can only happen if may_ptrace_stop() fails and
273 * ptrace_stop() changes ->state back to TASK_RUNNING,
274 * so we should not worry about leaking __TASK_TRACED.
275 */
276 WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
277 ret = -ESRCH;
278 }
279 }
280
281 return ret;
282}
283
284static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
285{
286 if (mode & PTRACE_MODE_NOAUDIT)
287 return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
288 return ns_capable(ns, CAP_SYS_PTRACE);
289}
290
291/* Returns 0 on success, -errno on denial. */
292static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
293{
294 const struct cred *cred = current_cred(), *tcred;
295 struct mm_struct *mm;
296 kuid_t caller_uid;
297 kgid_t caller_gid;
298
299 if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
300 WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
301 return -EPERM;
302 }
303
304 /* May we inspect the given task?
305 * This check is used both for attaching with ptrace
306 * and for allowing access to sensitive information in /proc.
307 *
308 * ptrace_attach denies several cases that /proc allows
309 * because setting up the necessary parent/child relationship
310 * or halting the specified task is impossible.
311 */
312
313 /* Don't let security modules deny introspection */
314 if (same_thread_group(task, current))
315 return 0;
316 rcu_read_lock();
317 if (mode & PTRACE_MODE_FSCREDS) {
318 caller_uid = cred->fsuid;
319 caller_gid = cred->fsgid;
320 } else {
321 /*
322 * Using the euid would make more sense here, but something
323 * in userland might rely on the old behavior, and this
324 * shouldn't be a security problem since
325 * PTRACE_MODE_REALCREDS implies that the caller explicitly
326 * used a syscall that requests access to another process
327 * (and not a filesystem syscall to procfs).
328 */
329 caller_uid = cred->uid;
330 caller_gid = cred->gid;
331 }
332 tcred = __task_cred(task);
333 if (uid_eq(caller_uid, tcred->euid) &&
334 uid_eq(caller_uid, tcred->suid) &&
335 uid_eq(caller_uid, tcred->uid) &&
336 gid_eq(caller_gid, tcred->egid) &&
337 gid_eq(caller_gid, tcred->sgid) &&
338 gid_eq(caller_gid, tcred->gid))
339 goto ok;
340 if (ptrace_has_cap(tcred->user_ns, mode))
341 goto ok;
342 rcu_read_unlock();
343 return -EPERM;
344ok:
345 rcu_read_unlock();
346 /*
347 * If a task drops privileges and becomes nondumpable (through a syscall
348 * like setresuid()) while we are trying to access it, we must ensure
349 * that the dumpability is read after the credentials; otherwise,
350 * we may be able to attach to a task that we shouldn't be able to
351 * attach to (as if the task had dropped privileges without becoming
352 * nondumpable).
353 * Pairs with a write barrier in commit_creds().
354 */
355 smp_rmb();
356 mm = task->mm;
357 if (mm &&
358 ((get_dumpable(mm) != SUID_DUMP_USER) &&
359 !ptrace_has_cap(mm->user_ns, mode)))
360 return -EPERM;
361
362 return security_ptrace_access_check(task, mode);
363}
364
365bool ptrace_may_access(struct task_struct *task, unsigned int mode)
366{
367 int err;
368 task_lock(task);
369 err = __ptrace_may_access(task, mode);
370 task_unlock(task);
371 return !err;
372}
373
374static int ptrace_attach(struct task_struct *task, long request,
375 unsigned long addr,
376 unsigned long flags)
377{
378 bool seize = (request == PTRACE_SEIZE);
379 int retval;
380
381 retval = -EIO;
382 if (seize) {
383 if (addr != 0)
384 goto out;
385 if (flags & ~(unsigned long)PTRACE_O_MASK)
386 goto out;
387 flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
388 } else {
389 flags = PT_PTRACED;
390 }
391
392 audit_ptrace(task);
393
394 retval = -EPERM;
395 if (unlikely(task->flags & PF_KTHREAD))
396 goto out;
397 if (same_thread_group(task, current))
398 goto out;
399
400 /*
401 * Protect exec's credential calculations against our interference;
402 * SUID, SGID and LSM creds get determined differently
403 * under ptrace.
404 */
405 retval = -ERESTARTNOINTR;
406 if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
407 goto out;
408
409 task_lock(task);
410 retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
411 task_unlock(task);
412 if (retval)
413 goto unlock_creds;
414
415 write_lock_irq(&tasklist_lock);
416 retval = -EPERM;
417 if (unlikely(task->exit_state))
418 goto unlock_tasklist;
419 if (task->ptrace)
420 goto unlock_tasklist;
421
422 if (seize)
423 flags |= PT_SEIZED;
424 task->ptrace = flags;
425
426 ptrace_link(task, current);
427
428 /* SEIZE doesn't trap tracee on attach */
429 if (!seize)
430 send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
431
432 spin_lock(&task->sighand->siglock);
433
434 /*
435 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
436 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
437 * will be cleared if the child completes the transition or any
438 * event which clears the group stop states happens. We'll wait
439 * for the transition to complete before returning from this
440 * function.
441 *
442 * This hides STOPPED -> RUNNING -> TRACED transition from the
443 * attaching thread but a different thread in the same group can
444 * still observe the transient RUNNING state. IOW, if another
445 * thread's WNOHANG wait(2) on the stopped tracee races against
446 * ATTACH, the wait(2) may fail due to the transient RUNNING.
447 *
448 * The following task_is_stopped() test is safe as both transitions
449 * in and out of STOPPED are protected by siglock.
450 */
451 if (task_is_stopped(task) &&
452 task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
453 signal_wake_up_state(task, __TASK_STOPPED);
454
455 spin_unlock(&task->sighand->siglock);
456
457 retval = 0;
458unlock_tasklist:
459 write_unlock_irq(&tasklist_lock);
460unlock_creds:
461 mutex_unlock(&task->signal->cred_guard_mutex);
462out:
463 if (!retval) {
464 /*
465 * We do not bother to change retval or clear JOBCTL_TRAPPING
466 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
467 * not return to user-mode, it will exit and clear this bit in
468 * __ptrace_unlink() if it wasn't already cleared by the tracee;
469 * and until then nobody can ptrace this task.
470 */
471 wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
472 proc_ptrace_connector(task, PTRACE_ATTACH);
473 }
474
475 return retval;
476}
477
478/**
479 * ptrace_traceme -- helper for PTRACE_TRACEME
480 *
481 * Performs checks and sets PT_PTRACED.
482 * Should be used by all ptrace implementations for PTRACE_TRACEME.
483 */
484static int ptrace_traceme(void)
485{
486 int ret = -EPERM;
487
488 write_lock_irq(&tasklist_lock);
489 /* Are we already being traced? */
490 if (!current->ptrace) {
491 ret = security_ptrace_traceme(current->parent);
492 /*
493 * Check PF_EXITING to ensure ->real_parent has not passed
494 * exit_ptrace(). Otherwise we don't report the error but
495 * pretend ->real_parent untraces us right after return.
496 */
497 if (!ret && !(current->real_parent->flags & PF_EXITING)) {
498 current->ptrace = PT_PTRACED;
499 ptrace_link(current, current->real_parent);
500 }
501 }
502 write_unlock_irq(&tasklist_lock);
503
504 return ret;
505}
506
507/*
508 * Called with irqs disabled, returns true if childs should reap themselves.
509 */
510static int ignoring_children(struct sighand_struct *sigh)
511{
512 int ret;
513 spin_lock(&sigh->siglock);
514 ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
515 (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
516 spin_unlock(&sigh->siglock);
517 return ret;
518}
519
520/*
521 * Called with tasklist_lock held for writing.
522 * Unlink a traced task, and clean it up if it was a traced zombie.
523 * Return true if it needs to be reaped with release_task().
524 * (We can't call release_task() here because we already hold tasklist_lock.)
525 *
526 * If it's a zombie, our attachedness prevented normal parent notification
527 * or self-reaping. Do notification now if it would have happened earlier.
528 * If it should reap itself, return true.
529 *
530 * If it's our own child, there is no notification to do. But if our normal
531 * children self-reap, then this child was prevented by ptrace and we must
532 * reap it now, in that case we must also wake up sub-threads sleeping in
533 * do_wait().
534 */
535static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
536{
537 bool dead;
538
539 __ptrace_unlink(p);
540
541 if (p->exit_state != EXIT_ZOMBIE)
542 return false;
543
544 dead = !thread_group_leader(p);
545
546 if (!dead && thread_group_empty(p)) {
547 if (!same_thread_group(p->real_parent, tracer))
548 dead = do_notify_parent(p, p->exit_signal);
549 else if (ignoring_children(tracer->sighand)) {
550 __wake_up_parent(p, tracer);
551 dead = true;
552 }
553 }
554 /* Mark it as in the process of being reaped. */
555 if (dead)
556 p->exit_state = EXIT_DEAD;
557 return dead;
558}
559
560static int ptrace_detach(struct task_struct *child, unsigned int data)
561{
562 if (!valid_signal(data))
563 return -EIO;
564
565 /* Architecture-specific hardware disable .. */
566 ptrace_disable(child);
567
568 write_lock_irq(&tasklist_lock);
569 /*
570 * We rely on ptrace_freeze_traced(). It can't be killed and
571 * untraced by another thread, it can't be a zombie.
572 */
573 WARN_ON(!child->ptrace || child->exit_state);
574 /*
575 * tasklist_lock avoids the race with wait_task_stopped(), see
576 * the comment in ptrace_resume().
577 */
578 child->exit_code = data;
579 __ptrace_detach(current, child);
580 write_unlock_irq(&tasklist_lock);
581
582 proc_ptrace_connector(child, PTRACE_DETACH);
583
584 return 0;
585}
586
587/*
588 * Detach all tasks we were using ptrace on. Called with tasklist held
589 * for writing.
590 */
591void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
592{
593 struct task_struct *p, *n;
594
595 list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
596 if (unlikely(p->ptrace & PT_EXITKILL))
597 send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
598
599 if (__ptrace_detach(tracer, p))
600 list_add(&p->ptrace_entry, dead);
601 }
602}
603
604int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
605{
606 int copied = 0;
607
608 while (len > 0) {
609 char buf[128];
610 int this_len, retval;
611
612 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
613 retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
614
615 if (!retval) {
616 if (copied)
617 break;
618 return -EIO;
619 }
620 if (copy_to_user(dst, buf, retval))
621 return -EFAULT;
622 copied += retval;
623 src += retval;
624 dst += retval;
625 len -= retval;
626 }
627 return copied;
628}
629
630int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
631{
632 int copied = 0;
633
634 while (len > 0) {
635 char buf[128];
636 int this_len, retval;
637
638 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
639 if (copy_from_user(buf, src, this_len))
640 return -EFAULT;
641 retval = ptrace_access_vm(tsk, dst, buf, this_len,
642 FOLL_FORCE | FOLL_WRITE);
643 if (!retval) {
644 if (copied)
645 break;
646 return -EIO;
647 }
648 copied += retval;
649 src += retval;
650 dst += retval;
651 len -= retval;
652 }
653 return copied;
654}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
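
/*
 * Illustrative only: a tracer usually sets its options right after the
 * initial stop, e.g.
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXIT);
 *
 * PTRACE_O_TRACESYSGOOD is what makes syscall stops report SIGTRAP | 0x80,
 * the si_code value that ptrace_get_syscall_info() matches on below.
 */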

static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
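
/*
 * Illustrative only: GETSIGINFO/SETSIGINFO let a tracer inspect and, if
 * it wants, rewrite the siginfo that stopped the tracee before the signal
 * is forwarded with a resume request; the edit below is arbitrary, e.g.
 *
 *	siginfo_t si;
 *
 *	ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
 *	si.si_code = SI_USER;
 *	ptrace(PTRACE_SETSIGINFO, pid, 0, &si);
 *	ptrace(PTRACE_CONT, pid, 0, si.si_signo);
 */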

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL;	/* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		kernel_siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
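
/*
 * Illustrative only: userspace drains pending siginfo in batches using
 * struct ptrace_peeksiginfo_args from <linux/ptrace.h>, e.g.
 *
 *	struct ptrace_peeksiginfo_args arg = { .off = 0, .flags = 0, .nr = 16 };
 *	siginfo_t buf[16];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &arg, buf);
 *
 * Passing PTRACE_PEEKSIGINFO_SHARED in .flags selects the thread-group-wide
 * queue instead of the per-thread one; the return value is the number of
 * entries copied, mirroring the "return i if any were copied" logic above.
 */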

#ifdef CONFIG_RSEQ
static long ptrace_get_rseq_configuration(struct task_struct *task,
					  unsigned long size, void __user *data)
{
	struct ptrace_rseq_configuration conf = {
		.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
		.rseq_abi_size = sizeof(*task->rseq),
		.signature = task->rseq_sig,
		.flags = 0,
	};

	size = min_t(unsigned long, size, sizeof(conf));
	if (copy_to_user(data, &conf, size))
		return -EFAULT;
	return sizeof(conf);
}
#endif
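
/*
 * Illustrative only: for PTRACE_GET_RSEQ_CONFIGURATION the addr argument
 * carries the size of the user buffer, e.g.
 *
 *	struct ptrace_rseq_configuration conf;
 *	long sz = ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid,
 *			 sizeof(conf), &conf);
 *
 * The full structure size is returned regardless of how much was copied,
 * so callers can detect a truncated copy.
 */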

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_task_syscall_work(child, SYSCALL_TRACE);
	else
		clear_task_syscall_work(child, SYSCALL_TRACE);

#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_task_syscall_work(child, SYSCALL_EMU);
	else
		clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
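
/*
 * Illustrative only: every resuming request takes an optional signal in
 * data, so a tracer forwarding a signal-delivery stop typically does
 *
 *	ptrace(PTRACE_CONT, pid, 0, WSTOPSIG(status));
 *
 * with data == 0 meaning "resume without delivering a signal".
 */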

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
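
/*
 * Illustrative only: the regset API is addressed by ELF note type, with an
 * iovec describing the user buffer. E.g. to fetch the general-purpose
 * registers on x86:
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * On return iov.iov_len holds the amount actually copied, written back by
 * the PTRACE_GETREGSET case in ptrace_request() below.
 */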

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);

static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}

static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
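
/*
 * Illustrative only: as with the rseq request, addr carries the size of
 * the user buffer, e.g.
 *
 *	struct ptrace_syscall_info info;
 *	long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid, sizeof(info), &info);
 *
 * sz is the size the kernel would have liked to write, and info.op tells
 * the caller whether this stop was a syscall entry, a syscall exit, a
 * seccomp stop, or none of those.
 */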
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

#ifdef CONFIG_RSEQ
	case PTRACE_GET_RSEQ_CONFIGURATION:
		ret = ptrace_get_rseq_configuration(child, addr, datavp);
		break;
#endif

	default:
		break;
	}

	return ret;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
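
/*
 * Illustrative only: the canonical attach sequence seen from userspace,
 * which lands in the PTRACE_ATTACH branch above:
 *
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	...
 *	ptrace(PTRACE_DETACH, pid, 0, 0);
 *
 * PTRACE_SEIZE takes the same path but leaves the tracee running and
 * enables the PTRACE_INTERRUPT/PTRACE_LISTEN requests handled in
 * ptrace_request().
 */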

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
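
/*
 * Illustrative only: these implement the classic word-at-a-time interface.
 * At the syscall level the peeked word is stored through the data pointer;
 * the glibc wrapper hides this and returns the word instead, so a valid
 * word of all-ones must be distinguished from an error via errno:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
 *	if (word == -1 && errno)
 *		return -1;
 *	ptrace(PTRACE_POKEDATA, pid, addr, word);
 */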

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */