/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>


static int ptrace_trapping_sleep_fn(void *flags)
{
        schedule();
        return 0;
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop. If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task. However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        spin_lock(&child->sighand->siglock);

        /*
         * Clear all pending traps and TRAPPING. TRAPPING should be
         * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
         */
        task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
        task_clear_jobctl_trapping(child);

        /*
         * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
         * @child isn't dead.
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
             child->signal->group_stop_count)) {
                child->jobctl |= JOBCTL_STOP_PENDING;

                /*
                 * This is only possible if this thread was cloned by the
                 * traced task running in the stopped group, set the signal
                 * for the future reports.
                 * FIXME: we should change ptrace_init_task() to handle this
                 * case.
                 */
                if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
                        child->jobctl |= SIGSTOP;
        }

        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt. Note that @resume should be used iff @child
         * is in TASK_TRACED; otherwise, we might unduly disrupt
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
                signal_wake_up(child, task_is_traced(child));

        spin_unlock(&child->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations. If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing. If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks. After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
                WARN_ON_ONCE(task_is_stopped(child));
                if (ignore_state || (task_is_traced(child) &&
                                     !(child->jobctl & JOBCTL_LISTENING)))
                        ret = 0;
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !ignore_state)
                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

        /* All systems go.. */
        return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
        if (mode & PTRACE_MODE_NOAUDIT)
                return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
        else
                return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        rcu_read_lock();
        tcred = __task_cred(task);
        if (uid_eq(cred->uid, tcred->euid) &&
            uid_eq(cred->uid, tcred->suid) &&
            uid_eq(cred->uid, tcred->uid) &&
            gid_eq(cred->gid, tcred->egid) &&
            gid_eq(cred->gid, tcred->sgid) &&
            gid_eq(cred->gid, tcred->gid))
                goto ok;
        if (ptrace_has_cap(tcred->user_ns, mode))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
ok:
        rcu_read_unlock();
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        if (!dumpable && !ptrace_has_cap(task_user_ns(task), mode))
                return -EPERM;

        return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}
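
/*
 * For illustration only: read-side users such as /proc handlers are
 * expected to consult this check before exposing another task's state.
 * A hypothetical caller (proc_example_allowed is not part of this file)
 * might look like:
 *
 *	static int proc_example_allowed(struct task_struct *task)
 *	{
 *		// PTRACE_MODE_READ: read-only introspection, a weaker
 *		// test than the PTRACE_MODE_ATTACH check used on attach.
 *		if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *			return -EPERM;
 *		return 0;
 *	}
 */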

static int ptrace_attach(struct task_struct *task, long request,
                         unsigned long addr,
                         unsigned long flags)
{
        bool seize = (request == PTRACE_SEIZE);
        int retval;

        retval = -EIO;
        if (seize) {
                if (addr != 0)
                        goto out;
                if (flags & ~(unsigned long)PTRACE_O_MASK)
                        goto out;
                flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
        } else {
                flags = PT_PTRACED;
        }

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our interference;
         * SUID, SGID and LSM creds get determined differently
         * under ptrace.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        if (seize)
                flags |= PT_SEIZED;
        if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
                flags |= PT_PTRACE_CAP;
        task->ptrace = flags;

        __ptrace_link(task, current);

        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
                send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

        spin_lock(&task->sighand->siglock);

        /*
         * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
         * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
         * will be cleared if the child completes the transition or any
         * event which clears the group stop states happens. We'll wait
         * for the transition to complete before returning from this
         * function.
         *
         * This hides STOPPED -> RUNNING -> TRACED transition from the
         * attaching thread but a different thread in the same group can
         * still observe the transient RUNNING state. IOW, if another
         * thread's WNOHANG wait(2) on the stopped tracee races against
         * ATTACH, the wait(2) may fail due to the transient RUNNING.
         *
         * The following task_is_stopped() test is safe as both transitions
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
                signal_wake_up(task, 1);

        spin_unlock(&task->sighand->siglock);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        if (!retval) {
                wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
                            ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }

        return retval;
}
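
/*
 * From userspace the two attach flavors look roughly like this
 * (illustrative sketch only; error handling omitted):
 *
 *	// Classic attach: tracee reports a SIGSTOP stop we can wait for.
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *
 *	// SEIZE: no trap on attach; stop the tracee explicitly if needed.
 *	ptrace(PTRACE_SEIZE, pid, 0, PTRACE_O_TRACEEXIT);
 *	ptrace(PTRACE_INTERRUPT, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 */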

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}
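
/*
 * The canonical userspace pattern served by this helper is a debugger
 * spawning its own tracee (illustrative sketch; error handling omitted):
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, 0, 0);
 *		execvp(argv[1], &argv[1]);	// parent observes a trap at exec
 *		_exit(127);
 *	}
 *	waitpid(pid, &status, 0);
 */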

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;
        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);
        return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        bool dead;

        __ptrace_unlink(p);

        if (p->exit_state != EXIT_ZOMBIE)
                return false;

        dead = !thread_group_leader(p);

        if (!dead && thread_group_empty(p)) {
                if (!same_thread_group(p->real_parent, tracer))
                        dead = do_notify_parent(p, p->exit_signal);
                else if (ignoring_children(tracer->sighand)) {
                        __wake_up_parent(p, tracer);
                        dead = true;
                }
        }
        /* Mark it as in the process of being reaped. */
        if (dead)
                p->exit_state = EXIT_DEAD;
        return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        bool dead = false;

        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /*
         * This child can be already killed. Make sure de_thread() or
         * our sub-thread doing do_wait() didn't do release_task() yet.
         */
        if (child->ptrace) {
                child->exit_code = data;
                dead = __ptrace_detach(current, child);
        }
        write_unlock_irq(&tasklist_lock);

        proc_ptrace_connector(child, PTRACE_DETACH);
        if (unlikely(dead))
                release_task(child);

        return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);

        if (likely(list_empty(&tracer->ptraced)))
                return;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, &ptrace_dead);
        }

        write_unlock_irq(&tasklist_lock);
        BUG_ON(!list_empty(&tracer->ptraced));

        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }

        write_lock_irq(&tasklist_lock);
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}
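
/*
 * These two helpers copy in 128-byte chunks and stop at the first
 * partial failure once something was transferred, so callers see either
 * a short positive byte count or an error. A hypothetical caller sketch:
 *
 *	// Copy 'len' bytes of tracee memory out to the tracer's buffer.
 *	int n = ptrace_readdata(child, addr, (char __user *)data, len);
 *	if (n < 0)
 *		return n;	// -EIO: nothing readable at addr
 */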

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        unsigned flags;

        if (data & ~(unsigned long)PTRACE_O_MASK)
                return -EINVAL;

        /* Avoid intermediate state when all opts are cleared */
        flags = child->ptrace;
        flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
        flags |= (data << PT_OPT_FLAG_SHIFT);
        child->ptrace = flags;

        return 0;
}
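
/*
 * From the tracer's side, options are set on a stopped tracee, e.g.
 * (illustrative sketch):
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXIT);
 *
 * PTRACE_O_TRACESYSGOOD makes syscall stops report as (SIGTRAP | 0x80)
 * so they can be told apart from genuine SIGTRAPs.
 */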

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif

static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
{
        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        child->exit_code = data;
        wake_up_state(child, __TASK_TRACED);

        return 0;
}
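
/*
 * A minimal syscall-tracing loop built on this resume path might look
 * like the following in the tracer (sketch; assumes the tracee is
 * already attached and stopped):
 *
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);	// run to next syscall stop
 *		if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
 *			break;
 *		// inspect registers here, then continue the loop
 *	}
 */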

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
        const struct user_regset *regset;
        int n;

        for (n = 0; n < view->n; ++n) {
                regset = view->regsets + n;
                if (regset->core_note_type == type)
                        return regset;
        }

        return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                         struct iovec *kiov)
{
        const struct user_regset_view *view = task_user_regset_view(task);
        const struct user_regset *regset = find_regset(view, type);
        int regset_no;

        if (!regset || (kiov->iov_len % regset->size) != 0)
                return -EINVAL;

        regset_no = regset - view->regsets;
        kiov->iov_len = min(kiov->iov_len,
                            (__kernel_size_t) (regset->n * regset->size));

        if (req == PTRACE_GETREGSET)
                return copy_regset_to_user(task, view, regset_no, 0,
                                           kiov->iov_len, kiov->iov_base);
        else
                return copy_regset_from_user(task, view, regset_no, 0,
                                             kiov->iov_len, kiov->iov_base);
}
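
/*
 * Userspace selects a regset by its ELF core note type and passes an
 * iovec that the kernel shrinks to what it actually copied, e.g.
 * (illustrative sketch, x86 register layout assumed):
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	// iov.iov_len now holds the number of bytes filled in
 */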

#endif

int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
{
        bool seized = child->ptrace & PT_SEIZED;
        int ret = -EIO;
        siginfo_t siginfo, *si;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = datavp;
        unsigned long flags;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, datalp);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user(datavp, &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, datavp, sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_INTERRUPT:
                /*
                 * Stop tracee without any side-effect on signal or job
                 * control. At least one trap is guaranteed to happen
                 * after this request. If @child is already trapped, the
                 * current trap is not disturbed and another trap will
                 * happen after the current trap is ended with PTRACE_CONT.
                 *
                 * The actual trap might not be PTRACE_EVENT_STOP trap but
                 * the pending condition is cleared regardless.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                /*
                 * INTERRUPT doesn't disturb existing trap sans one
                 * exception. If ptracer issued LISTEN for the current
                 * STOP, this INTERRUPT should clear LISTEN and re-trap
                 * tracee into STOP.
                 */
                if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
                        signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

                unlock_task_sighand(child, &flags);
                ret = 0;
                break;

        case PTRACE_LISTEN:
                /*
                 * Listen for events. Tracee must be in STOP. It's not
                 * resumed per se but is not considered to be in TRACED by
                 * wait(2) or ptrace(2). If an async event (e.g. group
                 * stop state change) happens, tracee will enter STOP trap
                 * again. Alternatively, ptracer can issue INTERRUPT to
                 * finish listening and re-trap tracee into STOP.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                si = child->last_siginfo;
                if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
                        child->jobctl |= JOBCTL_LISTENING;
                        /*
                         * If NOTIFY is set, it means event happened between
                         * start of this trap and now. Trigger re-trap.
                         */
                        if (child->jobctl & JOBCTL_TRAP_NOTIFY)
                                signal_wake_up(child, true);
                        ret = 0;
                }
                unlock_task_sighand(child, &flags);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                struct mm_struct *mm = get_task_mm(child);
                unsigned long tmp = 0;

                ret = -ESRCH;
                if (!mm)
                        break;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }
                mmput(mm);

                ret = put_user(tmp, datalp);
                break;
        }
#endif

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state)  /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct iovec __user *uiov = datavp;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(kiov.iov_base, &uiov->iov_base) ||
                    __get_user(kiov.iov_len, &uiov->iov_len))
                        return -EFAULT;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif
        default:
                break;
        }

        return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        rcu_read_lock();
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);
        rcu_read_unlock();

        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}
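
/*
 * At the raw syscall level the peeked word is stored through the data
 * pointer rather than returned; the glibc ptrace() wrapper returns it
 * instead, so userspace uses the errno convention to tell a legitimate
 * -1 word from an error (illustrative sketch):
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
 *	if (word == -1 && errno != 0)
 *		perror("ptrace");
 */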

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct compat_iovec __user *uiov =
                        (struct compat_iovec __user *) datap;
                compat_uptr_t ptr;
                compat_size_t len;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(ptr, &uiov->iov_base) ||
                    __get_user(len, &uiov->iov_len))
                        return -EFAULT;

                kiov.iov_base = compat_ptr(ptr);
                kiov.iov_len = len;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (!ret)
                ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}
#endif  /* CONFIG_COMPAT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ptrace_get_breakpoints(struct task_struct *tsk)
{
        if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
                return 0;

        return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
        if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
                flush_ptrace_hw_breakpoint(tsk);
}
#endif  /* CONFIG_HAVE_HW_BREAKPOINT */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

#include <asm/syscall.h>        /* for syscall_get_* */

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
                     void *buf, int len, unsigned int gup_flags)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        if (!tsk->ptrace ||
            (current != tsk->parent) ||
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptracer_capable(tsk, mm->user_ns))) {
                mmput(mm);
                return 0;
        }

        ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
        mmput(mm);

        return ret;
}
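
/*
 * Unlike plain access_process_vm(), this gateway re-checks at access
 * time that the caller is the attached tracer and that the mm is either
 * dumpable or the tracer holds the needed capability over it, mirroring
 * the attach-time checks. Hypothetical caller sketch (tmp is assumed):
 *
 *	// Read one word of the tracee, forcing access like ptrace does.
 *	ptrace_access_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 */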

void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
                   const struct cred *ptracer_cred)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
        child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        __ptrace_link(child, new_parent, current_cred());
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop. If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task. However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
        const struct cred *old_cred;
        BUG_ON(!child->ptrace);

        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);
        old_cred = child->ptracer_cred;
        child->ptracer_cred = NULL;
        put_cred(old_cred);

        spin_lock(&child->sighand->siglock);
        child->ptrace = 0;
        /*
         * Clear all pending traps and TRAPPING. TRAPPING should be
         * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
         */
        task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
        task_clear_jobctl_trapping(child);

        /*
         * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
         * @child isn't dead.
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
             child->signal->group_stop_count)) {
                child->jobctl |= JOBCTL_STOP_PENDING;

                /*
                 * This is only possible if this thread was cloned by the
                 * traced task running in the stopped group, set the signal
                 * for the future reports.
                 * FIXME: we should change ptrace_init_task() to handle this
                 * case.
                 */
                if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
                        child->jobctl |= SIGSTOP;
        }

        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt. Note that @resume should be used iff @child
         * is in TASK_TRACED; otherwise, we might unduly disrupt
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
                ptrace_signal_wake_up(child, true);

        spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
        bool ret = false;

        /* Lockless, nobody but us can set this flag */
        if (task->jobctl & JOBCTL_LISTENING)
                return ret;

        spin_lock_irq(&task->sighand->siglock);
        if (task_is_traced(task) && !__fatal_signal_pending(task)) {
                task->state = __TASK_TRACED;
                ret = true;
        }
        spin_unlock_irq(&task->sighand->siglock);

        return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
        if (task->state != __TASK_TRACED)
                return;

        WARN_ON(!task->ptrace || task->parent != current);

        /*
         * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
         * Recheck state under the lock to close this race.
         */
        spin_lock_irq(&task->sighand->siglock);
        if (task->state == __TASK_TRACED) {
                if (__fatal_signal_pending(task))
                        wake_up_state(task, __TASK_TRACED);
                else
                        task->state = TASK_TRACED;
        }
        spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations. If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing. If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks. After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if (child->ptrace && child->parent == current) {
                WARN_ON(child->state == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                if (ignore_state || ptrace_freeze_traced(child))
                        ret = 0;
        }
        read_unlock(&tasklist_lock);

        if (!ret && !ignore_state) {
                if (!wait_task_inactive(child, __TASK_TRACED)) {
                        /*
                         * This can only happen if may_ptrace_stop() fails and
                         * ptrace_stop() changes ->state back to TASK_RUNNING,
                         * so we should not worry about leaking __TASK_TRACED.
                         */
                        WARN_ON(child->state == __TASK_TRACED);
                        ret = -ESRCH;
                }
        }

        return ret;
}
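
/*
 * Callers that get 0 back with @ignore_state false own a frozen
 * (__TASK_TRACED) tracee and are expected to pair the operation with
 * ptrace_unfreeze_traced(), roughly like this (sketch of the expected
 * pattern; the actual syscall entry points sit further down this file):
 *
 *	ret = ptrace_check_attach(child, false);
 *	if (!ret) {
 *		ret = arch_ptrace(child, request, addr, data);
 *		ptrace_unfreeze_traced(child);
 *	}
 */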

static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
                           unsigned int mode)
{
        int ret;

        if (mode & PTRACE_MODE_NOAUDIT)
                ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
        else
                ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);

        return ret == 0;
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;
        struct mm_struct *mm;
        kuid_t caller_uid;
        kgid_t caller_gid;

        if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
                WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
                return -EPERM;
        }

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */

        /* Don't let security modules deny introspection */
        if (same_thread_group(task, current))
                return 0;
        rcu_read_lock();
        if (mode & PTRACE_MODE_FSCREDS) {
                caller_uid = cred->fsuid;
                caller_gid = cred->fsgid;
        } else {
                /*
                 * Using the euid would make more sense here, but something
                 * in userland might rely on the old behavior, and this
                 * shouldn't be a security problem since
                 * PTRACE_MODE_REALCREDS implies that the caller explicitly
                 * used a syscall that requests access to another process
                 * (and not a filesystem syscall to procfs).
                 */
                caller_uid = cred->uid;
                caller_gid = cred->gid;
        }
        tcred = __task_cred(task);
        if (uid_eq(caller_uid, tcred->euid) &&
            uid_eq(caller_uid, tcred->suid) &&
            uid_eq(caller_uid, tcred->uid) &&
            gid_eq(caller_gid, tcred->egid) &&
            gid_eq(caller_gid, tcred->sgid) &&
            gid_eq(caller_gid, tcred->gid))
                goto ok;
        if (ptrace_has_cap(cred, tcred->user_ns, mode))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
ok:
        rcu_read_unlock();
        /*
         * If a task drops privileges and becomes nondumpable (through a syscall
         * like setresuid()) while we are trying to access it, we must ensure
         * that the dumpability is read after the credentials; otherwise,
         * we may be able to attach to a task that we shouldn't be able to
         * attach to (as if the task had dropped privileges without becoming
         * nondumpable).
         * Pairs with a write barrier in commit_creds().
         */
        smp_rmb();
        mm = task->mm;
        if (mm &&
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptrace_has_cap(cred, mm->user_ns, mode)))
                return -EPERM;

        return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}
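
/*
 * Every caller must now say which credentials apply: filesystem-style
 * accesses (e.g. procfs readers) pass PTRACE_MODE_*_FSCREDS, while
 * direct syscalls such as attach pass PTRACE_MODE_*_REALCREDS, e.g.
 * (illustrative):
 *
 *	ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);	// /proc read
 *	ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);	// attach path
 */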

static int ptrace_attach(struct task_struct *task, long request,
                         unsigned long addr,
                         unsigned long flags)
{
        bool seize = (request == PTRACE_SEIZE);
        int retval;

        retval = -EIO;
        if (seize) {
                if (addr != 0)
                        goto out;
                if (flags & ~(unsigned long)PTRACE_O_MASK)
                        goto out;
                flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
        } else {
                flags = PT_PTRACED;
        }

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our interference;
         * SUID, SGID and LSM creds get determined differently
         * under ptrace.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        if (seize)
                flags |= PT_SEIZED;
        task->ptrace = flags;

        ptrace_link(task, current);

        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
                send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);

        spin_lock(&task->sighand->siglock);

        /*
         * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
         * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
         * will be cleared if the child completes the transition or any
         * event which clears the group stop states happens. We'll wait
         * for the transition to complete before returning from this
         * function.
         *
         * This hides STOPPED -> RUNNING -> TRACED transition from the
         * attaching thread but a different thread in the same group can
         * still observe the transient RUNNING state. IOW, if another
         * thread's WNOHANG wait(2) on the stopped tracee races against
         * ATTACH, the wait(2) may fail due to the transient RUNNING.
         *
         * The following task_is_stopped() test is safe as both transitions
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
                signal_wake_up_state(task, __TASK_STOPPED);

        spin_unlock(&task->sighand->siglock);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        if (!retval) {
                /*
                 * We do not bother to change retval or clear JOBCTL_TRAPPING
                 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
                 * not return to user-mode, it will exit and clear this bit in
                 * __ptrace_unlink() if it wasn't already cleared by the tracee;
                 * and until then nobody can ptrace this task.
                 */
                wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }

        return retval;
}

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;
        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);
        return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        bool dead;

        __ptrace_unlink(p);

        if (p->exit_state != EXIT_ZOMBIE)
                return false;

        dead = !thread_group_leader(p);

        if (!dead && thread_group_empty(p)) {
                if (!same_thread_group(p->real_parent, tracer))
                        dead = do_notify_parent(p, p->exit_signal);
                else if (ignoring_children(tracer->sighand)) {
                        __wake_up_parent(p, tracer);
                        dead = true;
                }
        }
        /* Mark it as in the process of being reaped. */
        if (dead)
                p->exit_state = EXIT_DEAD;
        return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);

        write_lock_irq(&tasklist_lock);
        /*
         * We rely on ptrace_freeze_traced(). It can't be killed and
         * untraced by another thread, it can't be a zombie.
         */
        WARN_ON(!child->ptrace || child->exit_state);
        /*
         * tasklist_lock avoids the race with wait_task_stopped(), see
         * the comment in ptrace_resume().
         */
        child->exit_code = data;
        __ptrace_detach(current, child);
        write_unlock_irq(&tasklist_lock);

        proc_ptrace_connector(child, PTRACE_DETACH);

        return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
        struct task_struct *p, *n;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (unlikely(p->ptrace & PT_EXITKILL))
                        send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, dead);
        }
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = ptrace_access_vm(tsk, dst, buf, this_len,
                                          FOLL_FORCE | FOLL_WRITE);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        unsigned flags;

        if (data & ~(unsigned long)PTRACE_O_MASK)
                return -EINVAL;

        if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
                if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
                    !IS_ENABLED(CONFIG_SECCOMP))
                        return -EINVAL;

                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
                    current->ptrace & PT_SUSPEND_SECCOMP)
                        return -EPERM;
        }

        /* Avoid intermediate state when all opts are cleared */
        flags = child->ptrace;
        flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
        flags |= (data << PT_OPT_FLAG_SHIFT);
        child->ptrace = flags;

        return 0;
}

static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        copy_siginfo(info, child->last_siginfo);
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        copy_siginfo(child->last_siginfo, info);
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
                               unsigned long addr,
                               unsigned long data)
{
        struct ptrace_peeksiginfo_args arg;
        struct sigpending *pending;
        struct sigqueue *q;
        int ret, i;

        ret = copy_from_user(&arg, (void __user *) addr,
                             sizeof(struct ptrace_peeksiginfo_args));
        if (ret)
                return -EFAULT;

        if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
                return -EINVAL; /* unknown flags */

        if (arg.nr < 0)
                return -EINVAL;

        /* Ensure arg.off fits in an unsigned long */
        if (arg.off > ULONG_MAX)
                return 0;

        if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
                pending = &child->signal->shared_pending;
        else
                pending = &child->pending;

        for (i = 0; i < arg.nr; ) {
                kernel_siginfo_t info;
                unsigned long off = arg.off + i;
                bool found = false;

                spin_lock_irq(&child->sighand->siglock);
                list_for_each_entry(q, &pending->list, list) {
                        if (!off--) {
                                found = true;
                                copy_siginfo(&info, &q->info);
                                break;
                        }
                }
                spin_unlock_irq(&child->sighand->siglock);

                if (!found) /* beyond the end of the list */
                        break;

#ifdef CONFIG_COMPAT
                if (unlikely(in_compat_syscall())) {
                        compat_siginfo_t __user *uinfo = compat_ptr(data);

                        if (copy_siginfo_to_user32(uinfo, &info)) {
                                ret = -EFAULT;
                                break;
                        }

                } else
#endif
                {
                        siginfo_t __user *uinfo = (siginfo_t __user *) data;

                        if (copy_siginfo_to_user(uinfo, &info)) {
                                ret = -EFAULT;
                                break;
                        }
                }

                data += sizeof(siginfo_t);
                i++;

                if (signal_pending(current))
                        break;

                cond_resched();
        }

        if (i > 0)
                return i;

        return ret;
}
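
/*
 * Userspace walks the pending queue in slices, e.g. (illustrative
 * sketch):
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0, .flags = 0, .nr = 16,
 *	};
 *	siginfo_t infos[16];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);
 *	// n > 0: number of siginfo records copied out
 */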
787
788#ifdef PTRACE_SINGLESTEP
789#define is_singlestep(request) ((request) == PTRACE_SINGLESTEP)
790#else
791#define is_singlestep(request) 0
792#endif
793
794#ifdef PTRACE_SINGLEBLOCK
795#define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK)
796#else
797#define is_singleblock(request) 0
798#endif
799
800#ifdef PTRACE_SYSEMU
801#define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP)
802#else
803#define is_sysemu_singlestep(request) 0
804#endif
805
806static int ptrace_resume(struct task_struct *child, long request,
807 unsigned long data)
808{
809 bool need_siglock;
810
811 if (!valid_signal(data))
812 return -EIO;
813
814 if (request == PTRACE_SYSCALL)
815 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
816 else
817 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
818
819#ifdef TIF_SYSCALL_EMU
820 if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
821 set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
822 else
823 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
824#endif
825
826 if (is_singleblock(request)) {
827 if (unlikely(!arch_has_block_step()))
828 return -EIO;
829 user_enable_block_step(child);
830 } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
831 if (unlikely(!arch_has_single_step()))
832 return -EIO;
833 user_enable_single_step(child);
834 } else {
835 user_disable_single_step(child);
836 }
837
838 /*
839 * Change ->exit_code and ->state under siglock to avoid the race
840 * with wait_task_stopped() in between; a non-zero ->exit_code will
841 * wrongly look like another report from tracee.
842 *
843 * Note that we need siglock even if ->exit_code == data and/or this
844 * status was not reported yet, the new status must not be cleared by
845 * wait_task_stopped() after resume.
846 *
847 * If data == 0 we do not care if wait_task_stopped() reports the old
848 * status and clears the code too; this can't race with the tracee, it
849 * takes siglock after resume.
850 */
851 need_siglock = data && !thread_group_empty(current);
852 if (need_siglock)
853 spin_lock_irq(&child->sighand->siglock);
854 child->exit_code = data;
855 wake_up_state(child, __TASK_TRACED);
856 if (need_siglock)
857 spin_unlock_irq(&child->sighand->siglock);
858
859 return 0;
860}
861
862#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
863
864static const struct user_regset *
865find_regset(const struct user_regset_view *view, unsigned int type)
866{
867 const struct user_regset *regset;
868 int n;
869
870 for (n = 0; n < view->n; ++n) {
871 regset = view->regsets + n;
872 if (regset->core_note_type == type)
873 return regset;
874 }
875
876 return NULL;
877}
878
879static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
880 struct iovec *kiov)
881{
882 const struct user_regset_view *view = task_user_regset_view(task);
883 const struct user_regset *regset = find_regset(view, type);
884 int regset_no;
885
886 if (!regset || (kiov->iov_len % regset->size) != 0)
887 return -EINVAL;
888
889 regset_no = regset - view->regsets;
890 kiov->iov_len = min(kiov->iov_len,
891 (__kernel_size_t) (regset->n * regset->size));
892
893 if (req == PTRACE_GETREGSET)
894 return copy_regset_to_user(task, view, regset_no, 0,
895 kiov->iov_len, kiov->iov_base);
896 else
897 return copy_regset_from_user(task, view, regset_no, 0,
898 kiov->iov_len, kiov->iov_base);
899}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);

static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}

static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
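
/*
 * Userspace view of the above (a minimal sketch, not part of this file;
 * assumes the tracee is in a syscall-stop, and handle_entry() is a
 * hypothetical handler):
 *
 *	#include <linux/ptrace.h>	// struct ptrace_syscall_info
 *	#include <sys/ptrace.h>
 *
 *	struct ptrace_syscall_info info;
 *	long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid, sizeof(info), &info);
 *
 *	// sz is the size the kernel wanted to write (actual_size above);
 *	// info.op tells which union member is valid.
 *	if (sz > 0 && info.op == PTRACE_SYSCALL_INFO_ENTRY)
 *		handle_entry(info.entry.nr, info.entry.args);
 */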
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}
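
	/*
	 * Userspace side of the two cases above (a hedged sketch; addr
	 * carries the sigset size, data the sigset pointer):
	 *
	 *	sigset_t set;
	 *	ptrace(PTRACE_GETSIGMASK, pid, sizeof(set), &set);
	 *	sigaddset(&set, SIGUSR1);
	 *	// SIGKILL/SIGSTOP are silently dropped from the new mask.
	 *	ptrace(PTRACE_SETSIGMASK, pid, sizeof(set), &set);
	 */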

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control. At least one trap is guaranteed to happen
		 * after this request. If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception. If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;
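
	/*
	 * Typical INTERRUPT usage from the tracer's side (a minimal sketch;
	 * INTERRUPT is only valid on PTRACE_SEIZE'd tracees):
	 *
	 *	ptrace(PTRACE_SEIZE, pid, 0, 0);	// attach without stopping
	 *	ptrace(PTRACE_INTERRUPT, pid, 0, 0);	// request a stop
	 *	waitpid(pid, &status, 0);		// reap the trap
	 */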

	case PTRACE_LISTEN:
		/*
		 * Listen for events. Tracee must be in STOP. It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2). If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again. Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now. Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;
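
	/*
	 * LISTEN lets a tracer observe group-stops without keeping the
	 * tracee frozen (a hedged sketch of the documented pattern):
	 *
	 *	// Tracee stopped in PTRACE_EVENT_STOP for a group-stop:
	 *	ptrace(PTRACE_LISTEN, pid, 0, 0);
	 *	// Tracee now behaves as stopped for job control, but an
	 *	// async event (e.g. SIGCONT) or PTRACE_INTERRUPT re-traps
	 *	// it so the tracer can look again.
	 *	waitpid(pid, &status, 0);
	 */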

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
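
/*
 * The classic attach sequence driven through the syscall above (a minimal
 * userspace sketch, not part of this file):
 *
 *	int status;
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, 0, 0);	// child volunteers
 *		execlp("ls", "ls", (char *)NULL);	// stops with SIGTRAP
 *	}
 *	waitpid(pid, &status, 0);			// reap the exec stop
 *	ptrace(PTRACE_CONT, pid, 0, 0);			// let it run
 */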

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
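
/*
 * Word-at-a-time access as seen from the tracer (a hedged sketch; note
 * that PTRACE_PEEKDATA returns the word itself, so errno must be used
 * to distinguish a legitimate -1 word from an error):
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 *	ptrace(PTRACE_POKEDATA, pid, addr, word | 1);	// write it back
 */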

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
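
/*
 * Why GETREGSET/SETREGSET is re-implemented above rather than falling
 * through to ptrace_request(): a 32-bit tracer hands in a compat_iovec
 * (32-bit pointer and length), which must be widened into a native
 * struct iovec before ptrace_regset() can use it.
 */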

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */