v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/ptrace.c
   4 *
   5 * (C) Copyright 1999 Linus Torvalds
   6 *
   7 * Common interfaces for "ptrace()" which we do not want
   8 * to continually duplicate across every architecture.
   9 */
  10
  11#include <linux/capability.h>
  12#include <linux/export.h>
  13#include <linux/sched.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/coredump.h>
  16#include <linux/sched/task.h>
  17#include <linux/errno.h>
  18#include <linux/mm.h>
  19#include <linux/highmem.h>
  20#include <linux/pagemap.h>
  21#include <linux/ptrace.h>
  22#include <linux/security.h>
  23#include <linux/signal.h>
  24#include <linux/uio.h>
  25#include <linux/audit.h>
  26#include <linux/pid_namespace.h>
  27#include <linux/syscalls.h>
  28#include <linux/uaccess.h>
  29#include <linux/regset.h>
  30#include <linux/hw_breakpoint.h>
  31#include <linux/cn_proc.h>
  32#include <linux/compat.h>
  33#include <linux/sched/signal.h>
  34#include <linux/minmax.h>
  35
  36#include <asm/syscall.h>	/* for syscall_get_* */
  37
  38/*
  39 * Access another process' address space via ptrace.
   40 * The source/target buffer must be in kernel space.
   41 * Do not walk the page tables directly; use get_user_pages().
  42 */
  43int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
  44		     void *buf, int len, unsigned int gup_flags)
  45{
  46	struct mm_struct *mm;
  47	int ret;
  48
  49	mm = get_task_mm(tsk);
  50	if (!mm)
  51		return 0;
  52
  53	if (!tsk->ptrace ||
  54	    (current != tsk->parent) ||
  55	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
  56	     !ptracer_capable(tsk, mm->user_ns))) {
  57		mmput(mm);
  58		return 0;
  59	}
  60
  61	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
  62	mmput(mm);
  63
  64	return ret;
  65}
  66
  67
  68void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
  69		   const struct cred *ptracer_cred)
  70{
  71	BUG_ON(!list_empty(&child->ptrace_entry));
  72	list_add(&child->ptrace_entry, &new_parent->ptraced);
  73	child->parent = new_parent;
  74	child->ptracer_cred = get_cred(ptracer_cred);
  75}
  76
  77/*
  78 * ptrace a task: make the debugger its new parent and
  79 * move it to the ptrace list.
  80 *
  81 * Must be called with the tasklist lock write-held.
  82 */
  83static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
  84{
  85	__ptrace_link(child, new_parent, current_cred());
  86}
  87
  88/**
  89 * __ptrace_unlink - unlink ptracee and restore its execution state
  90 * @child: ptracee to be unlinked
  91 *
  92 * Remove @child from the ptrace list, move it back to the original parent,
  93 * and restore the execution state so that it conforms to the group stop
  94 * state.
  95 *
  96 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
  97 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
  98 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
  99 * If the ptracer is exiting, the ptracee can be in any state.
 100 *
 101 * After detach, the ptracee should be in a state which conforms to the
 102 * group stop.  If the group is stopped or in the process of stopping, the
 103 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 104 * up from TASK_TRACED.
 105 *
 106 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 107 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 108 * to but in the opposite direction of what happens while attaching to a
 109 * stopped task.  However, in this direction, the intermediate RUNNING
 110 * state is not hidden even from the current ptracer and if it immediately
 111 * re-attaches and performs a WNOHANG wait(2), it may fail.
 112 *
 113 * CONTEXT:
 114 * write_lock_irq(tasklist_lock)
 115 */
 116void __ptrace_unlink(struct task_struct *child)
 117{
 118	const struct cred *old_cred;
 119	BUG_ON(!child->ptrace);
 120
 121	clear_task_syscall_work(child, SYSCALL_TRACE);
 122#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
 123	clear_task_syscall_work(child, SYSCALL_EMU);
 124#endif
 125
 126	child->parent = child->real_parent;
 127	list_del_init(&child->ptrace_entry);
 128	old_cred = child->ptracer_cred;
 129	child->ptracer_cred = NULL;
 130	put_cred(old_cred);
 131
 132	spin_lock(&child->sighand->siglock);
 133	child->ptrace = 0;
 134	/*
 135	 * Clear all pending traps and TRAPPING.  TRAPPING should be
 136	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
 137	 */
 138	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
 139	task_clear_jobctl_trapping(child);
 140
 141	/*
 142	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
 143	 * @child isn't dead.
 144	 */
 145	if (!(child->flags & PF_EXITING) &&
 146	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
 147	     child->signal->group_stop_count)) {
 148		child->jobctl |= JOBCTL_STOP_PENDING;
 149
 150		/*
 151		 * This is only possible if this thread was cloned by the
  152		 * traced task running in the stopped group; set the signal
  153		 * for future reports.
 154		 * FIXME: we should change ptrace_init_task() to handle this
 155		 * case.
 156		 */
 157		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
 158			child->jobctl |= SIGSTOP;
 159	}
 160
 161	/*
 162	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 163	 * @child in the butt.  Note that @resume should be used iff @child
 164	 * is in TASK_TRACED; otherwise, we might unduly disrupt
 165	 * TASK_KILLABLE sleeps.
 166	 */
 167	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
 168		ptrace_signal_wake_up(child, true);
 169
 170	spin_unlock(&child->sighand->siglock);
 171}
 172
 173static bool looks_like_a_spurious_pid(struct task_struct *task)
 174{
 175	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
 176		return false;
 177
 178	if (task_pid_vnr(task) == task->ptrace_message)
 179		return false;
 180	/*
 181	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
 182	 * was not wait()'ed, most probably debugger targets the old
 183	 * leader which was destroyed in de_thread().
 184	 */
 185	return true;
 186}
 187
 188/*
 189 * Ensure that nothing can wake it up, even SIGKILL
 190 *
 191 * A task is switched to this state while a ptrace operation is in progress;
 192 * such that the ptrace operation is uninterruptible.
 193 */
 194static bool ptrace_freeze_traced(struct task_struct *task)
 195{
 196	bool ret = false;
 197
 198	/* Lockless, nobody but us can set this flag */
 199	if (task->jobctl & JOBCTL_LISTENING)
 200		return ret;
 201
 202	spin_lock_irq(&task->sighand->siglock);
 203	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
 204	    !__fatal_signal_pending(task)) {
 205		task->jobctl |= JOBCTL_PTRACE_FROZEN;
 206		ret = true;
 207	}
 208	spin_unlock_irq(&task->sighand->siglock);
 209
 210	return ret;
 211}
 212
 213static void ptrace_unfreeze_traced(struct task_struct *task)
 214{
 215	unsigned long flags;
 216
 217	/*
 218	 * The child may be awake and may have cleared
 219	 * JOBCTL_PTRACE_FROZEN (see ptrace_resume).  The child will
 220	 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
 221	 */
 222	if (lock_task_sighand(task, &flags)) {
 223		task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
 224		if (__fatal_signal_pending(task)) {
 225			task->jobctl &= ~JOBCTL_TRACED;
 226			wake_up_state(task, __TASK_TRACED);
 227		}
 228		unlock_task_sighand(task, &flags);
 229	}
 230}
 231
 232/**
 233 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 234 * @child: ptracee to check for
 235 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 236 *
 237 * Check whether @child is being ptraced by %current and ready for further
 238 * ptrace operations.  If @ignore_state is %false, @child also should be in
 239 * %TASK_TRACED state and on return the child is guaranteed to be traced
 240 * and not executing.  If @ignore_state is %true, @child can be in any
 241 * state.
 242 *
 243 * CONTEXT:
 244 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 245 *
 246 * RETURNS:
 247 * 0 on success, -ESRCH if %child is not ready.
 248 */
 249static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 250{
 251	int ret = -ESRCH;
 252
 253	/*
 254	 * We take the read lock around doing both checks to close a
 255	 * possible race where someone else was tracing our child and
 256	 * detached between these two checks.  After this locked check,
 257	 * we are sure that this is our traced child and that can only
 258	 * be changed by us so it's not changing right after this.
 259	 */
 260	read_lock(&tasklist_lock);
 261	if (child->ptrace && child->parent == current) {
 262		/*
 263		 * child->sighand can't be NULL, release_task()
 264		 * does ptrace_unlink() before __exit_signal().
 265		 */
 266		if (ignore_state || ptrace_freeze_traced(child))
 267			ret = 0;
 268	}
 269	read_unlock(&tasklist_lock);
 270
 271	if (!ret && !ignore_state &&
 272	    WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED|TASK_FROZEN)))
 273		ret = -ESRCH;
 274
 275	return ret;
 276}
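
A usage note: ptrace_check_attach() is why a tracer must wait for the tracee to actually enter ptrace-stop before issuing most requests. A minimal userspace sketch of the required ordering (pid and addr are hypothetical; error handling elided):

	#include <errno.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static long attach_and_peek(pid_t pid, void *addr)
	{
		int status;

		ptrace(PTRACE_ATTACH, pid, 0, 0);
		/* a request issued before the stop completes can fail with
		 * ESRCH, i.e. the -ESRCH path in ptrace_check_attach() above */
		waitpid(pid, &status, 0);	/* tracee now in ptrace-stop */
		errno = 0;
		return ptrace(PTRACE_PEEKDATA, pid, addr, 0);
	}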
 277
 278static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 279{
 280	if (mode & PTRACE_MODE_NOAUDIT)
 281		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
 282	return ns_capable(ns, CAP_SYS_PTRACE);
 283}
 284
 285/* Returns 0 on success, -errno on denial. */
 286static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 287{
 288	const struct cred *cred = current_cred(), *tcred;
 289	struct mm_struct *mm;
 290	kuid_t caller_uid;
 291	kgid_t caller_gid;
 292
 293	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
 294		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
 295		return -EPERM;
 296	}
 297
 298	/* May we inspect the given task?
 299	 * This check is used both for attaching with ptrace
 300	 * and for allowing access to sensitive information in /proc.
 301	 *
 302	 * ptrace_attach denies several cases that /proc allows
 303	 * because setting up the necessary parent/child relationship
 304	 * or halting the specified task is impossible.
 305	 */
 306
 307	/* Don't let security modules deny introspection */
 308	if (same_thread_group(task, current))
 309		return 0;
 310	rcu_read_lock();
 311	if (mode & PTRACE_MODE_FSCREDS) {
 312		caller_uid = cred->fsuid;
 313		caller_gid = cred->fsgid;
 314	} else {
 315		/*
 316		 * Using the euid would make more sense here, but something
 317		 * in userland might rely on the old behavior, and this
 318		 * shouldn't be a security problem since
 319		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
 320		 * used a syscall that requests access to another process
 321		 * (and not a filesystem syscall to procfs).
 322		 */
 323		caller_uid = cred->uid;
 324		caller_gid = cred->gid;
 325	}
 326	tcred = __task_cred(task);
 327	if (uid_eq(caller_uid, tcred->euid) &&
 328	    uid_eq(caller_uid, tcred->suid) &&
 329	    uid_eq(caller_uid, tcred->uid)  &&
 330	    gid_eq(caller_gid, tcred->egid) &&
 331	    gid_eq(caller_gid, tcred->sgid) &&
 332	    gid_eq(caller_gid, tcred->gid))
 333		goto ok;
 334	if (ptrace_has_cap(tcred->user_ns, mode))
 335		goto ok;
 336	rcu_read_unlock();
 337	return -EPERM;
 338ok:
 339	rcu_read_unlock();
 340	/*
 341	 * If a task drops privileges and becomes nondumpable (through a syscall
 342	 * like setresuid()) while we are trying to access it, we must ensure
 343	 * that the dumpability is read after the credentials; otherwise,
 344	 * we may be able to attach to a task that we shouldn't be able to
 345	 * attach to (as if the task had dropped privileges without becoming
 346	 * nondumpable).
 347	 * Pairs with a write barrier in commit_creds().
 348	 */
 349	smp_rmb();
 350	mm = task->mm;
 351	if (mm &&
 352	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
 353	     !ptrace_has_cap(mm->user_ns, mode)))
 354	    return -EPERM;
 355
 356	return security_ptrace_access_check(task, mode);
 357}
 358
 359bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 360{
 361	int err;
 362	task_lock(task);
 363	err = __ptrace_may_access(task, mode);
 364	task_unlock(task);
 365	return !err;
 366}
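
Beyond attach, this check also gates /proc-style reads of sensitive per-task data elsewhere in the kernel. A hedged sketch of the calling pattern; show_task_secret() is invented for illustration and is not a real kernel function:

	#include <linux/errno.h>
	#include <linux/ptrace.h>
	#include <linux/sched.h>
	#include <linux/seq_file.h>

	/* hypothetical /proc-style reader of per-task state */
	static int show_task_secret(struct seq_file *m, struct task_struct *task)
	{
		/* filesystem paths pass an FSCREDS mode, per the comment above */
		if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
			return -EACCES;

		seq_printf(m, "%d\n", task_pid_nr(task));	/* placeholder payload */
		return 0;
	}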
 367
 368static int check_ptrace_options(unsigned long data)
 369{
 370	if (data & ~(unsigned long)PTRACE_O_MASK)
 371		return -EINVAL;
 372
 373	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
 374		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
 375		    !IS_ENABLED(CONFIG_SECCOMP))
 376			return -EINVAL;
 377
 378		if (!capable(CAP_SYS_ADMIN))
 379			return -EPERM;
 380
 381		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
 382		    current->ptrace & PT_SUSPEND_SECCOMP)
 383			return -EPERM;
 384	}
 385	return 0;
 386}
 387
 388static int ptrace_attach(struct task_struct *task, long request,
 389			 unsigned long addr,
 390			 unsigned long flags)
 391{
 392	bool seize = (request == PTRACE_SEIZE);
 393	int retval;
 394
 395	retval = -EIO;
 396	if (seize) {
 397		if (addr != 0)
 398			goto out;
 399		/*
 400		 * This duplicates the check in check_ptrace_options() because
 401		 * ptrace_attach() and ptrace_setoptions() have historically
 402		 * used different error codes for unknown ptrace options.
 403		 */
 404		if (flags & ~(unsigned long)PTRACE_O_MASK)
 405			goto out;
 406		retval = check_ptrace_options(flags);
 407		if (retval)
 408			return retval;
 409		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
 410	} else {
 411		flags = PT_PTRACED;
 412	}
 413
 414	audit_ptrace(task);
 415
 416	retval = -EPERM;
 417	if (unlikely(task->flags & PF_KTHREAD))
 418		goto out;
 419	if (same_thread_group(task, current))
 420		goto out;
 421
 422	/*
 423	 * Protect exec's credential calculations against our interference;
 424	 * SUID, SGID and LSM creds get determined differently
 425	 * under ptrace.
 426	 */
 427	retval = -ERESTARTNOINTR;
 428	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
 429		goto out;
 430
 431	task_lock(task);
 432	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
 433	task_unlock(task);
 434	if (retval)
 435		goto unlock_creds;
 436
 437	write_lock_irq(&tasklist_lock);
 438	retval = -EPERM;
 439	if (unlikely(task->exit_state))
 440		goto unlock_tasklist;
 441	if (task->ptrace)
 442		goto unlock_tasklist;
 443
 444	task->ptrace = flags;
 445
 446	ptrace_link(task, current);
 447
 448	/* SEIZE doesn't trap tracee on attach */
 449	if (!seize)
 450		send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
 451
 452	spin_lock(&task->sighand->siglock);
 453
 454	/*
 455	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
 456	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
 457	 * will be cleared if the child completes the transition or any
 458	 * event which clears the group stop states happens.  We'll wait
 459	 * for the transition to complete before returning from this
 460	 * function.
 461	 *
 462	 * This hides STOPPED -> RUNNING -> TRACED transition from the
 463	 * attaching thread but a different thread in the same group can
 464	 * still observe the transient RUNNING state.  IOW, if another
 465	 * thread's WNOHANG wait(2) on the stopped tracee races against
 466	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
 467	 *
 468	 * The following task_is_stopped() test is safe as both transitions
 469	 * in and out of STOPPED are protected by siglock.
 470	 */
 471	if (task_is_stopped(task) &&
 472	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
 473		task->jobctl &= ~JOBCTL_STOPPED;
 474		signal_wake_up_state(task, __TASK_STOPPED);
 475	}
 476
 477	spin_unlock(&task->sighand->siglock);
 478
 479	retval = 0;
 480unlock_tasklist:
 481	write_unlock_irq(&tasklist_lock);
 482unlock_creds:
 483	mutex_unlock(&task->signal->cred_guard_mutex);
 484out:
 485	if (!retval) {
 486		/*
 487		 * We do not bother to change retval or clear JOBCTL_TRAPPING
 488		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
 489		 * not return to user-mode, it will exit and clear this bit in
 490		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
 491		 * and until then nobody can ptrace this task.
 492		 */
 493		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
 494		proc_ptrace_connector(task, PTRACE_ATTACH);
 495	}
 496
 497	return retval;
 498}
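
The two attach flavours behave differently from userspace: PTRACE_ATTACH sends SIGSTOP and traps the tracee, while PTRACE_SEIZE (addr must be 0, flags carry PTRACE_O_* options) leaves it running. A minimal sketch, error handling elided:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static void attach_classic(pid_t pid)
	{
		int status;

		ptrace(PTRACE_ATTACH, pid, 0, 0);	/* SIGSTOP is queued */
		waitpid(pid, &status, 0);		/* wait for the trap */
	}

	static void attach_seize(pid_t pid)
	{
		/* no trap on attach; options are installed atomically */
		ptrace(PTRACE_SEIZE, pid, 0, (void *)(long)PTRACE_O_TRACEEXEC);
	}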
 499
 500/**
 501 * ptrace_traceme  --  helper for PTRACE_TRACEME
 502 *
 503 * Performs checks and sets PT_PTRACED.
 504 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 505 */
 506static int ptrace_traceme(void)
 507{
 508	int ret = -EPERM;
 509
 510	write_lock_irq(&tasklist_lock);
 511	/* Are we already being traced? */
 512	if (!current->ptrace) {
 513		ret = security_ptrace_traceme(current->parent);
 514		/*
 515		 * Check PF_EXITING to ensure ->real_parent has not passed
 516		 * exit_ptrace(). Otherwise we don't report the error but
 517		 * pretend ->real_parent untraces us right after return.
 518		 */
 519		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 520			current->ptrace = PT_PTRACED;
 521			ptrace_link(current, current->real_parent);
 522		}
 523	}
 524	write_unlock_irq(&tasklist_lock);
 525
 526	return ret;
 527}
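
ptrace_traceme() backs the classic debugger launch sequence: the child asks to be traced and then execs the target, which stops it with SIGTRAP before the new program runs a single instruction. A minimal, runnable sketch:

	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			ptrace(PTRACE_TRACEME, 0, 0, 0);   /* parent becomes tracer */
			execlp("true", "true", (char *)0); /* traps after execve */
			_exit(127);
		}

		int status;
		waitpid(pid, &status, 0);	/* post-execve SIGTRAP stop */
		ptrace(PTRACE_CONT, pid, 0, 0);
		waitpid(pid, &status, 0);	/* normal exit */
		return 0;
	}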
 528
 529/*
  530 * Called with irqs disabled, returns true if children should reap themselves.
 531 */
 532static int ignoring_children(struct sighand_struct *sigh)
 533{
 534	int ret;
 535	spin_lock(&sigh->siglock);
 536	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
 537	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
 538	spin_unlock(&sigh->siglock);
 539	return ret;
 540}
 541
 542/*
 543 * Called with tasklist_lock held for writing.
 544 * Unlink a traced task, and clean it up if it was a traced zombie.
 545 * Return true if it needs to be reaped with release_task().
 546 * (We can't call release_task() here because we already hold tasklist_lock.)
 547 *
 548 * If it's a zombie, our attachedness prevented normal parent notification
 549 * or self-reaping.  Do notification now if it would have happened earlier.
 550 * If it should reap itself, return true.
 551 *
 552 * If it's our own child, there is no notification to do. But if our normal
 553 * children self-reap, then this child was prevented by ptrace and we must
 554 * reap it now, in that case we must also wake up sub-threads sleeping in
 555 * do_wait().
 556 */
 557static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 558{
 559	bool dead;
 560
 561	__ptrace_unlink(p);
 562
 563	if (p->exit_state != EXIT_ZOMBIE)
 564		return false;
 565
 566	dead = !thread_group_leader(p);
 567
 568	if (!dead && thread_group_empty(p)) {
 569		if (!same_thread_group(p->real_parent, tracer))
 570			dead = do_notify_parent(p, p->exit_signal);
 571		else if (ignoring_children(tracer->sighand)) {
 572			__wake_up_parent(p, tracer);
 573			dead = true;
 574		}
 575	}
 576	/* Mark it as in the process of being reaped. */
 577	if (dead)
 578		p->exit_state = EXIT_DEAD;
 579	return dead;
 580}
 581
 582static int ptrace_detach(struct task_struct *child, unsigned int data)
 583{
 584	if (!valid_signal(data))
 585		return -EIO;
 586
 587	/* Architecture-specific hardware disable .. */
 588	ptrace_disable(child);
 589
 590	write_lock_irq(&tasklist_lock);
 591	/*
 592	 * We rely on ptrace_freeze_traced(). It can't be killed and
 593	 * untraced by another thread, it can't be a zombie.
 594	 */
 595	WARN_ON(!child->ptrace || child->exit_state);
 596	/*
 597	 * tasklist_lock avoids the race with wait_task_stopped(), see
 598	 * the comment in ptrace_resume().
 599	 */
 600	child->exit_code = data;
 601	__ptrace_detach(current, child);
 602	write_unlock_irq(&tasklist_lock);
 603
 604	proc_ptrace_connector(child, PTRACE_DETACH);
 605
 606	return 0;
 607}
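
Seen from the tracer, the data argument to PTRACE_DETACH is the signal checked by valid_signal() above; it is delivered to the tracee as it resumes. A sketch (pid hypothetical):

	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	static void detach_with_signal(pid_t pid)
	{
		/* detach the stopped tracee; it resumes with SIGCONT pending */
		ptrace(PTRACE_DETACH, pid, 0, (void *)(long)SIGCONT);
	}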
 608
 609/*
 610 * Detach all tasks we were using ptrace on. Called with tasklist held
 611 * for writing.
 612 */
 613void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
 614{
 615	struct task_struct *p, *n;
 616
 617	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 618		if (unlikely(p->ptrace & PT_EXITKILL))
 619			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
 620
 621		if (__ptrace_detach(tracer, p))
 622			list_add(&p->ptrace_entry, dead);
 623	}
 624}
 625
 626int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 627{
 628	int copied = 0;
 629
 630	while (len > 0) {
 631		char buf[128];
 632		int this_len, retval;
 633
 634		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 635		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
 636
 637		if (!retval) {
 638			if (copied)
 639				break;
 640			return -EIO;
 641		}
 642		if (copy_to_user(dst, buf, retval))
 643			return -EFAULT;
 644		copied += retval;
 645		src += retval;
 646		dst += retval;
 647		len -= retval;
 648	}
 649	return copied;
 650}
 651
 652int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 653{
 654	int copied = 0;
 655
 656	while (len > 0) {
 657		char buf[128];
 658		int this_len, retval;
 659
 660		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 661		if (copy_from_user(buf, src, this_len))
 662			return -EFAULT;
 663		retval = ptrace_access_vm(tsk, dst, buf, this_len,
 664				FOLL_FORCE | FOLL_WRITE);
 665		if (!retval) {
 666			if (copied)
 667				break;
 668			return -EIO;
 669		}
 670		copied += retval;
 671		src += retval;
 672		dst += retval;
 673		len -= retval;
 674	}
 675	return copied;
 676}
 677
 678static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 679{
 680	unsigned flags;
 681	int ret;
 682
 683	ret = check_ptrace_options(data);
 684	if (ret)
 685		return ret;
 686
 687	/* Avoid intermediate state when all opts are cleared */
 688	flags = child->ptrace;
 689	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
 690	flags |= (data << PT_OPT_FLAG_SHIFT);
 691	child->ptrace = flags;
 692
 693	return 0;
 694}
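
The options validated by check_ptrace_options() above are installed with PTRACE_SETOPTIONS while the tracee is in a ptrace-stop. A sketch enabling a few common ones (pid hypothetical):

	#include <sys/ptrace.h>
	#include <sys/types.h>

	static void set_common_options(pid_t pid)
	{
		long opts = PTRACE_O_TRACESYSGOOD |	/* SIGTRAP|0x80 on syscall stops */
			    PTRACE_O_TRACEEXEC |
			    PTRACE_O_TRACEEXIT;

		ptrace(PTRACE_SETOPTIONS, pid, 0, (void *)opts);
	}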
 695
 696static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
 697{
 698	unsigned long flags;
 699	int error = -ESRCH;
 700
 701	if (lock_task_sighand(child, &flags)) {
 702		error = -EINVAL;
 703		if (likely(child->last_siginfo != NULL)) {
 704			copy_siginfo(info, child->last_siginfo);
 705			error = 0;
 706		}
 707		unlock_task_sighand(child, &flags);
 708	}
 709	return error;
 710}
 711
 712static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
 713{
 714	unsigned long flags;
 715	int error = -ESRCH;
 716
 717	if (lock_task_sighand(child, &flags)) {
 718		error = -EINVAL;
 719		if (likely(child->last_siginfo != NULL)) {
 720			copy_siginfo(child->last_siginfo, info);
 721			error = 0;
 722		}
 723		unlock_task_sighand(child, &flags);
 724	}
 725	return error;
 726}
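
These two helpers back PTRACE_GETSIGINFO and PTRACE_SETSIGINFO, which let the tracer inspect and rewrite the siginfo of the signal that caused the current stop. A sketch (pid hypothetical):

	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	static void rewrite_stop_siginfo(pid_t pid)
	{
		siginfo_t si;

		ptrace(PTRACE_GETSIGINFO, pid, 0, &si);	/* copy of last_siginfo */
		si.si_code = SI_USER;			/* e.g. adjust before re-injection */
		ptrace(PTRACE_SETSIGINFO, pid, 0, &si);
	}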
 727
 728static int ptrace_peek_siginfo(struct task_struct *child,
 729				unsigned long addr,
 730				unsigned long data)
 731{
 732	struct ptrace_peeksiginfo_args arg;
 733	struct sigpending *pending;
 734	struct sigqueue *q;
 735	int ret, i;
 736
 737	ret = copy_from_user(&arg, (void __user *) addr,
 738				sizeof(struct ptrace_peeksiginfo_args));
 739	if (ret)
 740		return -EFAULT;
 741
 742	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
 743		return -EINVAL; /* unknown flags */
 744
 745	if (arg.nr < 0)
 746		return -EINVAL;
 747
 748	/* Ensure arg.off fits in an unsigned long */
 749	if (arg.off > ULONG_MAX)
 750		return 0;
 751
 752	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
 753		pending = &child->signal->shared_pending;
 754	else
 755		pending = &child->pending;
 756
 757	for (i = 0; i < arg.nr; ) {
 758		kernel_siginfo_t info;
 759		unsigned long off = arg.off + i;
 760		bool found = false;
 761
 762		spin_lock_irq(&child->sighand->siglock);
 763		list_for_each_entry(q, &pending->list, list) {
 764			if (!off--) {
 765				found = true;
 766				copy_siginfo(&info, &q->info);
 767				break;
 768			}
 769		}
 770		spin_unlock_irq(&child->sighand->siglock);
 771
 772		if (!found) /* beyond the end of the list */
 773			break;
 774
 775#ifdef CONFIG_COMPAT
 776		if (unlikely(in_compat_syscall())) {
 777			compat_siginfo_t __user *uinfo = compat_ptr(data);
 778
 779			if (copy_siginfo_to_user32(uinfo, &info)) {
 780				ret = -EFAULT;
 781				break;
 782			}
 783
 784		} else
 785#endif
 786		{
 787			siginfo_t __user *uinfo = (siginfo_t __user *) data;
 788
 789			if (copy_siginfo_to_user(uinfo, &info)) {
 790				ret = -EFAULT;
 791				break;
 792			}
 793		}
 794
 795		data += sizeof(siginfo_t);
 796		i++;
 797
 798		if (signal_pending(current))
 799			break;
 800
 801		cond_resched();
 802	}
 803
 804	if (i > 0)
 805		return i;
 806
 807	return ret;
 808}
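
PTRACE_PEEKSIGINFO reads queued-but-undelivered signals without dequeueing them; addr points at the argument struct consumed above and data at the output array. A sketch using glibc's spelling of the struct (struct __ptrace_peeksiginfo_args; the UAPI name in <linux/ptrace.h> may clash with <sys/ptrace.h> depending on libc):

	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	static long peek_pending(pid_t pid, siginfo_t *infos, int nr)
	{
		struct __ptrace_peeksiginfo_args args = {
			.off   = 0,	/* start of the queue */
			.flags = 0,	/* or PTRACE_PEEKSIGINFO_SHARED */
			.nr    = nr,
		};

		/* returns the number of records copied, the 'i > 0' path above */
		return ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);
	}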
 809
 810#ifdef CONFIG_RSEQ
 811static long ptrace_get_rseq_configuration(struct task_struct *task,
 812					  unsigned long size, void __user *data)
 813{
 814	struct ptrace_rseq_configuration conf = {
 815		.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
 816		.rseq_abi_size = sizeof(*task->rseq),
 817		.signature = task->rseq_sig,
 818		.flags = 0,
 819	};
 820
 821	size = min_t(unsigned long, size, sizeof(conf));
 822	if (copy_to_user(data, &conf, size))
 823		return -EFAULT;
 824	return sizeof(conf);
 825}
 826#endif
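
For PTRACE_GET_RSEQ_CONFIGURATION, addr carries the user buffer size and data the buffer itself; the full structure size is returned even for short reads. A sketch, assuming a libc whose <sys/ptrace.h> coexists with the UAPI <linux/ptrace.h>:

	#include <linux/ptrace.h>	/* struct ptrace_rseq_configuration */
	#include <sys/ptrace.h>
	#include <sys/types.h>

	static long get_rseq_config(pid_t pid, struct ptrace_rseq_configuration *conf)
	{
		/* conf->rseq_abi_pointer is 0 if the tracee never registered rseq */
		return ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid,
			      (void *)sizeof(*conf), conf);
	}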
 827
 828#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
 829
 830#ifdef PTRACE_SINGLEBLOCK
 831#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
 832#else
 833#define is_singleblock(request)		0
 834#endif
 835
 836#ifdef PTRACE_SYSEMU
 837#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
 838#else
 839#define is_sysemu_singlestep(request)	0
 840#endif
 841
 842static int ptrace_resume(struct task_struct *child, long request,
 843			 unsigned long data)
 844{
 845	if (!valid_signal(data))
 846		return -EIO;
 847
 848	if (request == PTRACE_SYSCALL)
 849		set_task_syscall_work(child, SYSCALL_TRACE);
 850	else
 851		clear_task_syscall_work(child, SYSCALL_TRACE);
 852
 853#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
 854	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
 855		set_task_syscall_work(child, SYSCALL_EMU);
 856	else
 857		clear_task_syscall_work(child, SYSCALL_EMU);
 858#endif
 859
 860	if (is_singleblock(request)) {
 861		if (unlikely(!arch_has_block_step()))
 862			return -EIO;
 863		user_enable_block_step(child);
 864	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 865		if (unlikely(!arch_has_single_step()))
 866			return -EIO;
 867		user_enable_single_step(child);
 868	} else {
 869		user_disable_single_step(child);
 870	}
 871
 872	/*
 873	 * Change ->exit_code and ->state under siglock to avoid the race
 874	 * with wait_task_stopped() in between; a non-zero ->exit_code will
 875	 * wrongly look like another report from tracee.
 876	 *
 877	 * Note that we need siglock even if ->exit_code == data and/or this
 878	 * status was not reported yet, the new status must not be cleared by
 879	 * wait_task_stopped() after resume.
 880	 */
 881	spin_lock_irq(&child->sighand->siglock);
 882	child->exit_code = data;
 883	child->jobctl &= ~JOBCTL_TRACED;
 884	wake_up_state(child, __TASK_TRACED);
 885	spin_unlock_irq(&child->sighand->siglock);
 886
 887	return 0;
 888}
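
ptrace_resume() is what makes PTRACE_SYSCALL alternate between syscall-entry and syscall-exit stops. A minimal tracing loop, assuming the tracee is already attached, stopped, and has PTRACE_O_TRACESYSGOOD set:

	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static void trace_syscalls(pid_t pid)
	{
		int status;

		for (;;) {
			if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
				break;			/* tracee gone */
			if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
				break;
			if (WIFSTOPPED(status) &&
			    WSTOPSIG(status) == (SIGTRAP | 0x80)) {
				/* syscall-entry or -exit stop: inspect it with
				 * PTRACE_GET_SYSCALL_INFO or PTRACE_GETREGSET */
			}
		}
	}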
 889
 890#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 891
 892static const struct user_regset *
 893find_regset(const struct user_regset_view *view, unsigned int type)
 894{
 895	const struct user_regset *regset;
 896	int n;
 897
 898	for (n = 0; n < view->n; ++n) {
 899		regset = view->regsets + n;
 900		if (regset->core_note_type == type)
 901			return regset;
 902	}
 903
 904	return NULL;
 905}
 906
 907static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 908			 struct iovec *kiov)
 909{
 910	const struct user_regset_view *view = task_user_regset_view(task);
 911	const struct user_regset *regset = find_regset(view, type);
 912	int regset_no;
 913
 914	if (!regset || (kiov->iov_len % regset->size) != 0)
 915		return -EINVAL;
 916
 917	regset_no = regset - view->regsets;
 918	kiov->iov_len = min(kiov->iov_len,
 919			    (__kernel_size_t) (regset->n * regset->size));
 920
 921	if (req == PTRACE_GETREGSET)
 922		return copy_regset_to_user(task, view, regset_no, 0,
 923					   kiov->iov_len, kiov->iov_base);
 924	else
 925		return copy_regset_from_user(task, view, regset_no, 0,
 926					     kiov->iov_len, kiov->iov_base);
 927}
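
ptrace_regset() is reached via PTRACE_GETREGSET/PTRACE_SETREGSET: addr selects the regset by its core-dump note type, and data points at an iovec whose length the kernel trims to what it actually transferred. An x86-64 sketch:

	#include <elf.h>	/* NT_PRSTATUS */
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>	/* struct iovec */
	#include <sys/user.h>	/* struct user_regs_struct */

	static void dump_gprs(pid_t pid)
	{
		struct user_regs_struct regs;
		struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

		if (ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRSTATUS, &iov) == 0) {
			/* iov.iov_len now holds the number of bytes written */
		}
	}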
 928
 929/*
 930 * This is declared in linux/regset.h and defined in machine-dependent
 931 * code.  We put the export here, near the primary machine-neutral use,
 932 * to ensure no machine forgets it.
 933 */
 934EXPORT_SYMBOL_GPL(task_user_regset_view);
 935
 936static unsigned long
 937ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
 938			      struct ptrace_syscall_info *info)
 939{
 940	unsigned long args[ARRAY_SIZE(info->entry.args)];
 941	int i;
 942
 943	info->op = PTRACE_SYSCALL_INFO_ENTRY;
 944	info->entry.nr = syscall_get_nr(child, regs);
 945	syscall_get_arguments(child, regs, args);
 946	for (i = 0; i < ARRAY_SIZE(args); i++)
 947		info->entry.args[i] = args[i];
 948
 949	/* args is the last field in struct ptrace_syscall_info.entry */
 950	return offsetofend(struct ptrace_syscall_info, entry.args);
 951}
 952
 953static unsigned long
 954ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
 955				struct ptrace_syscall_info *info)
 956{
 957	/*
 958	 * As struct ptrace_syscall_info.entry is currently a subset
 959	 * of struct ptrace_syscall_info.seccomp, it makes sense to
 960	 * initialize that subset using ptrace_get_syscall_info_entry().
 961	 * This can be reconsidered in the future if these structures
 962	 * diverge significantly enough.
 963	 */
 964	ptrace_get_syscall_info_entry(child, regs, info);
 965	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
 966	info->seccomp.ret_data = child->ptrace_message;
 967
 968	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
 969	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
 970}
 971
 972static unsigned long
 973ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
 974			     struct ptrace_syscall_info *info)
 975{
 976	info->op = PTRACE_SYSCALL_INFO_EXIT;
 977	info->exit.rval = syscall_get_error(child, regs);
 978	info->exit.is_error = !!info->exit.rval;
 979	if (!info->exit.is_error)
 980		info->exit.rval = syscall_get_return_value(child, regs);
 981
 982	/* is_error is the last field in struct ptrace_syscall_info.exit */
 983	return offsetofend(struct ptrace_syscall_info, exit.is_error);
 984}
 985
 986static int
 987ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
 988			void __user *datavp)
 989{
 990	struct pt_regs *regs = task_pt_regs(child);
 991	struct ptrace_syscall_info info = {
 992		.op = PTRACE_SYSCALL_INFO_NONE,
 993		.arch = syscall_get_arch(child),
 994		.instruction_pointer = instruction_pointer(regs),
 995		.stack_pointer = user_stack_pointer(regs),
 996	};
 997	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
 998	unsigned long write_size;
 999
1000	/*
1001	 * This does not need lock_task_sighand() to access
1002	 * child->last_siginfo because ptrace_freeze_traced()
1003	 * called earlier by ptrace_check_attach() ensures that
1004	 * the tracee cannot go away and clear its last_siginfo.
1005	 */
1006	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
1007	case SIGTRAP | 0x80:
1008		switch (child->ptrace_message) {
1009		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
1010			actual_size = ptrace_get_syscall_info_entry(child, regs,
1011								    &info);
1012			break;
1013		case PTRACE_EVENTMSG_SYSCALL_EXIT:
1014			actual_size = ptrace_get_syscall_info_exit(child, regs,
1015								   &info);
1016			break;
1017		}
1018		break;
1019	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
1020		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
1021							      &info);
1022		break;
1023	}
1024
1025	write_size = min(actual_size, user_size);
1026	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
1027}
1028#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
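
From userspace, PTRACE_GET_SYSCALL_INFO mirrors the sizing logic above: addr is the caller's buffer size and the return value is actual_size, the number of bytes the kernel wanted to write, which may exceed what was copied. A sketch with the same header caveat as above:

	#include <linux/ptrace.h>	/* struct ptrace_syscall_info */
	#include <sys/ptrace.h>
	#include <sys/types.h>

	static void show_syscall(pid_t pid)
	{
		struct ptrace_syscall_info info;
		long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
				 (void *)sizeof(info), &info);

		if (sz > 0 && info.op == PTRACE_SYSCALL_INFO_ENTRY) {
			/* info.entry.nr and info.entry.args[] are valid here */
		}
	}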
1029
1030int ptrace_request(struct task_struct *child, long request,
1031		   unsigned long addr, unsigned long data)
1032{
1033	bool seized = child->ptrace & PT_SEIZED;
1034	int ret = -EIO;
1035	kernel_siginfo_t siginfo, *si;
1036	void __user *datavp = (void __user *) data;
1037	unsigned long __user *datalp = datavp;
1038	unsigned long flags;
1039
1040	switch (request) {
1041	case PTRACE_PEEKTEXT:
1042	case PTRACE_PEEKDATA:
1043		return generic_ptrace_peekdata(child, addr, data);
1044	case PTRACE_POKETEXT:
1045	case PTRACE_POKEDATA:
1046		return generic_ptrace_pokedata(child, addr, data);
1047
1048#ifdef PTRACE_OLDSETOPTIONS
1049	case PTRACE_OLDSETOPTIONS:
1050#endif
1051	case PTRACE_SETOPTIONS:
1052		ret = ptrace_setoptions(child, data);
1053		break;
1054	case PTRACE_GETEVENTMSG:
1055		ret = put_user(child->ptrace_message, datalp);
1056		break;
1057
1058	case PTRACE_PEEKSIGINFO:
1059		ret = ptrace_peek_siginfo(child, addr, data);
1060		break;
1061
1062	case PTRACE_GETSIGINFO:
1063		ret = ptrace_getsiginfo(child, &siginfo);
1064		if (!ret)
1065			ret = copy_siginfo_to_user(datavp, &siginfo);
1066		break;
1067
1068	case PTRACE_SETSIGINFO:
1069		ret = copy_siginfo_from_user(&siginfo, datavp);
1070		if (!ret)
1071			ret = ptrace_setsiginfo(child, &siginfo);
1072		break;
1073
1074	case PTRACE_GETSIGMASK: {
1075		sigset_t *mask;
1076
1077		if (addr != sizeof(sigset_t)) {
1078			ret = -EINVAL;
1079			break;
1080		}
1081
1082		if (test_tsk_restore_sigmask(child))
1083			mask = &child->saved_sigmask;
1084		else
1085			mask = &child->blocked;
1086
1087		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
1088			ret = -EFAULT;
1089		else
1090			ret = 0;
1091
1092		break;
1093	}
1094
1095	case PTRACE_SETSIGMASK: {
1096		sigset_t new_set;
1097
1098		if (addr != sizeof(sigset_t)) {
1099			ret = -EINVAL;
1100			break;
1101		}
1102
1103		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
1104			ret = -EFAULT;
1105			break;
1106		}
1107
1108		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1109
1110		/*
1111		 * Every thread does recalc_sigpending() after resume, so
1112		 * retarget_shared_pending() and recalc_sigpending() are not
1113		 * called here.
1114		 */
1115		spin_lock_irq(&child->sighand->siglock);
1116		child->blocked = new_set;
1117		spin_unlock_irq(&child->sighand->siglock);
1118
1119		clear_tsk_restore_sigmask(child);
1120
1121		ret = 0;
1122		break;
1123	}
1124
1125	case PTRACE_INTERRUPT:
1126		/*
1127		 * Stop tracee without any side-effect on signal or job
1128		 * control.  At least one trap is guaranteed to happen
1129		 * after this request.  If @child is already trapped, the
1130		 * current trap is not disturbed and another trap will
1131		 * happen after the current trap is ended with PTRACE_CONT.
1132		 *
1133		 * The actual trap might not be PTRACE_EVENT_STOP trap but
1134		 * the pending condition is cleared regardless.
1135		 */
1136		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1137			break;
1138
1139		/*
1140		 * INTERRUPT doesn't disturb existing trap sans one
1141		 * exception.  If ptracer issued LISTEN for the current
1142		 * STOP, this INTERRUPT should clear LISTEN and re-trap
1143		 * tracee into STOP.
1144		 */
1145		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
1146			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
1147
1148		unlock_task_sighand(child, &flags);
1149		ret = 0;
1150		break;
1151
1152	case PTRACE_LISTEN:
1153		/*
1154		 * Listen for events.  Tracee must be in STOP.  It's not
 1155	 * resumed per se but is not considered to be in TRACED by
1156		 * wait(2) or ptrace(2).  If an async event (e.g. group
1157		 * stop state change) happens, tracee will enter STOP trap
1158		 * again.  Alternatively, ptracer can issue INTERRUPT to
1159		 * finish listening and re-trap tracee into STOP.
1160		 */
1161		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1162			break;
1163
1164		si = child->last_siginfo;
1165		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
1166			child->jobctl |= JOBCTL_LISTENING;
1167			/*
1168			 * If NOTIFY is set, it means event happened between
1169			 * start of this trap and now.  Trigger re-trap.
1170			 */
1171			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
1172				ptrace_signal_wake_up(child, true);
1173			ret = 0;
1174		}
1175		unlock_task_sighand(child, &flags);
1176		break;
1177
1178	case PTRACE_DETACH:	 /* detach a process that was attached. */
1179		ret = ptrace_detach(child, data);
1180		break;
1181
1182#ifdef CONFIG_BINFMT_ELF_FDPIC
1183	case PTRACE_GETFDPIC: {
1184		struct mm_struct *mm = get_task_mm(child);
1185		unsigned long tmp = 0;
1186
1187		ret = -ESRCH;
1188		if (!mm)
1189			break;
1190
1191		switch (addr) {
1192		case PTRACE_GETFDPIC_EXEC:
1193			tmp = mm->context.exec_fdpic_loadmap;
1194			break;
1195		case PTRACE_GETFDPIC_INTERP:
1196			tmp = mm->context.interp_fdpic_loadmap;
1197			break;
1198		default:
1199			break;
1200		}
1201		mmput(mm);
1202
1203		ret = put_user(tmp, datalp);
1204		break;
1205	}
1206#endif
1207
1208	case PTRACE_SINGLESTEP:
1209#ifdef PTRACE_SINGLEBLOCK
1210	case PTRACE_SINGLEBLOCK:
1211#endif
1212#ifdef PTRACE_SYSEMU
1213	case PTRACE_SYSEMU:
1214	case PTRACE_SYSEMU_SINGLESTEP:
1215#endif
1216	case PTRACE_SYSCALL:
1217	case PTRACE_CONT:
1218		return ptrace_resume(child, request, data);
1219
1220	case PTRACE_KILL:
1221		send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
1222		return 0;
1223
1224#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1225	case PTRACE_GETREGSET:
1226	case PTRACE_SETREGSET: {
1227		struct iovec kiov;
1228		struct iovec __user *uiov = datavp;
1229
1230		if (!access_ok(uiov, sizeof(*uiov)))
1231			return -EFAULT;
1232
1233		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
1234		    __get_user(kiov.iov_len, &uiov->iov_len))
1235			return -EFAULT;
1236
1237		ret = ptrace_regset(child, request, addr, &kiov);
1238		if (!ret)
1239			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1240		break;
1241	}
1242
1243	case PTRACE_GET_SYSCALL_INFO:
1244		ret = ptrace_get_syscall_info(child, addr, datavp);
1245		break;
1246#endif
1247
1248	case PTRACE_SECCOMP_GET_FILTER:
1249		ret = seccomp_get_filter(child, addr, datavp);
1250		break;
1251
1252	case PTRACE_SECCOMP_GET_METADATA:
1253		ret = seccomp_get_metadata(child, addr, datavp);
1254		break;
1255
1256#ifdef CONFIG_RSEQ
1257	case PTRACE_GET_RSEQ_CONFIGURATION:
1258		ret = ptrace_get_rseq_configuration(child, addr, datavp);
1259		break;
1260#endif
1261
1262	default:
1263		break;
1264	}
1265
1266	return ret;
1267}
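
PTRACE_INTERRUPT and PTRACE_LISTEN, handled above, require a SEIZE'd tracee. A sketch of stopping a running tracee without signal side effects and resuming it:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static void interrupt_and_resume(pid_t pid)
	{
		int status;

		ptrace(PTRACE_SEIZE, pid, 0, 0);
		ptrace(PTRACE_INTERRUPT, pid, 0, 0);	/* queues JOBCTL_TRAP_STOP */
		waitpid(pid, &status, 0);		/* PTRACE_EVENT_STOP trap */
		/* ... inspect the tracee ... */
		ptrace(PTRACE_CONT, pid, 0, 0);
	}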
1268
1269SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
1270		unsigned long, data)
1271{
1272	struct task_struct *child;
1273	long ret;
1274
1275	if (request == PTRACE_TRACEME) {
1276		ret = ptrace_traceme();
1277		goto out;
1278	}
1279
1280	child = find_get_task_by_vpid(pid);
1281	if (!child) {
1282		ret = -ESRCH;
1283		goto out;
1284	}
1285
1286	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1287		ret = ptrace_attach(child, request, addr, data);
1288		goto out_put_task_struct;
1289	}
1290
1291	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1292				  request == PTRACE_INTERRUPT);
1293	if (ret < 0)
1294		goto out_put_task_struct;
1295
1296	ret = arch_ptrace(child, request, addr, data);
1297	if (ret || request != PTRACE_DETACH)
1298		ptrace_unfreeze_traced(child);
1299
1300 out_put_task_struct:
1301	put_task_struct(child);
1302 out:
1303	return ret;
1304}
1305
1306int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1307			    unsigned long data)
1308{
1309	unsigned long tmp;
1310	int copied;
1311
1312	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1313	if (copied != sizeof(tmp))
1314		return -EIO;
1315	return put_user(tmp, (unsigned long __user *)data);
1316}
1317
1318int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1319			    unsigned long data)
1320{
1321	int copied;
1322
1323	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
1324			FOLL_FORCE | FOLL_WRITE);
1325	return (copied == sizeof(data)) ? 0 : -EIO;
1326}
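
These word-sized helpers back the classic PEEK/POKE interface. A userspace sketch reading and rewriting one word (addr is a hypothetical tracee address; since -1 is a valid word value, errno must be checked):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	static void patch_word(pid_t pid, void *addr)
	{
		errno = 0;
		long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);

		if (word == -1 && errno) {
			perror("PTRACE_PEEKDATA");
			return;
		}
		ptrace(PTRACE_POKEDATA, pid, addr, (void *)word);
	}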
1327
1328#if defined CONFIG_COMPAT
1329
1330int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1331			  compat_ulong_t addr, compat_ulong_t data)
1332{
1333	compat_ulong_t __user *datap = compat_ptr(data);
1334	compat_ulong_t word;
1335	kernel_siginfo_t siginfo;
1336	int ret;
1337
1338	switch (request) {
1339	case PTRACE_PEEKTEXT:
1340	case PTRACE_PEEKDATA:
1341		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
1342				FOLL_FORCE);
1343		if (ret != sizeof(word))
1344			ret = -EIO;
1345		else
1346			ret = put_user(word, datap);
1347		break;
1348
1349	case PTRACE_POKETEXT:
1350	case PTRACE_POKEDATA:
1351		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
1352				FOLL_FORCE | FOLL_WRITE);
1353		ret = (ret != sizeof(data) ? -EIO : 0);
1354		break;
1355
1356	case PTRACE_GETEVENTMSG:
1357		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
1358		break;
1359
1360	case PTRACE_GETSIGINFO:
1361		ret = ptrace_getsiginfo(child, &siginfo);
1362		if (!ret)
1363			ret = copy_siginfo_to_user32(
1364				(struct compat_siginfo __user *) datap,
1365				&siginfo);
1366		break;
1367
1368	case PTRACE_SETSIGINFO:
1369		ret = copy_siginfo_from_user32(
1370			&siginfo, (struct compat_siginfo __user *) datap);
1371		if (!ret)
1372			ret = ptrace_setsiginfo(child, &siginfo);
1373		break;
1374#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1375	case PTRACE_GETREGSET:
1376	case PTRACE_SETREGSET:
1377	{
1378		struct iovec kiov;
1379		struct compat_iovec __user *uiov =
1380			(struct compat_iovec __user *) datap;
1381		compat_uptr_t ptr;
1382		compat_size_t len;
1383
1384		if (!access_ok(uiov, sizeof(*uiov)))
1385			return -EFAULT;
1386
1387		if (__get_user(ptr, &uiov->iov_base) ||
1388		    __get_user(len, &uiov->iov_len))
1389			return -EFAULT;
1390
1391		kiov.iov_base = compat_ptr(ptr);
1392		kiov.iov_len = len;
1393
1394		ret = ptrace_regset(child, request, addr, &kiov);
1395		if (!ret)
1396			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1397		break;
1398	}
1399#endif
1400
1401	default:
1402		ret = ptrace_request(child, request, addr, data);
1403	}
1404
1405	return ret;
1406}
1407
1408COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
1409		       compat_long_t, addr, compat_long_t, data)
1410{
1411	struct task_struct *child;
1412	long ret;
1413
1414	if (request == PTRACE_TRACEME) {
1415		ret = ptrace_traceme();
1416		goto out;
1417	}
1418
1419	child = find_get_task_by_vpid(pid);
1420	if (!child) {
1421		ret = -ESRCH;
1422		goto out;
1423	}
1424
1425	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1426		ret = ptrace_attach(child, request, addr, data);
 
 
 
 
 
 
1427		goto out_put_task_struct;
1428	}
1429
1430	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1431				  request == PTRACE_INTERRUPT);
1432	if (!ret) {
1433		ret = compat_arch_ptrace(child, request, addr, data);
1434		if (ret || request != PTRACE_DETACH)
1435			ptrace_unfreeze_traced(child);
1436	}
1437
1438 out_put_task_struct:
1439	put_task_struct(child);
1440 out:
1441	return ret;
1442}
1443#endif	/* CONFIG_COMPAT */
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/ptrace.c
   4 *
   5 * (C) Copyright 1999 Linus Torvalds
   6 *
   7 * Common interfaces for "ptrace()" which we do not want
   8 * to continually duplicate across every architecture.
   9 */
  10
  11#include <linux/capability.h>
  12#include <linux/export.h>
  13#include <linux/sched.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/coredump.h>
  16#include <linux/sched/task.h>
  17#include <linux/errno.h>
  18#include <linux/mm.h>
  19#include <linux/highmem.h>
  20#include <linux/pagemap.h>
  21#include <linux/ptrace.h>
  22#include <linux/security.h>
  23#include <linux/signal.h>
  24#include <linux/uio.h>
  25#include <linux/audit.h>
  26#include <linux/pid_namespace.h>
  27#include <linux/syscalls.h>
  28#include <linux/uaccess.h>
  29#include <linux/regset.h>
  30#include <linux/hw_breakpoint.h>
  31#include <linux/cn_proc.h>
  32#include <linux/compat.h>
  33#include <linux/sched/signal.h>
 
  34
  35#include <asm/syscall.h>	/* for syscall_get_* */
  36
  37/*
  38 * Access another process' address space via ptrace.
  39 * Source/target buffer must be kernel space,
  40 * Do not walk the page table directly, use get_user_pages
  41 */
  42int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
  43		     void *buf, int len, unsigned int gup_flags)
  44{
  45	struct mm_struct *mm;
  46	int ret;
  47
  48	mm = get_task_mm(tsk);
  49	if (!mm)
  50		return 0;
  51
  52	if (!tsk->ptrace ||
  53	    (current != tsk->parent) ||
  54	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
  55	     !ptracer_capable(tsk, mm->user_ns))) {
  56		mmput(mm);
  57		return 0;
  58	}
  59
  60	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
  61	mmput(mm);
  62
  63	return ret;
  64}
  65
  66
  67void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
  68		   const struct cred *ptracer_cred)
  69{
  70	BUG_ON(!list_empty(&child->ptrace_entry));
  71	list_add(&child->ptrace_entry, &new_parent->ptraced);
  72	child->parent = new_parent;
  73	child->ptracer_cred = get_cred(ptracer_cred);
  74}
  75
  76/*
  77 * ptrace a task: make the debugger its new parent and
  78 * move it to the ptrace list.
  79 *
  80 * Must be called with the tasklist lock write-held.
  81 */
  82static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
  83{
  84	__ptrace_link(child, new_parent, current_cred());
  85}
  86
  87/**
  88 * __ptrace_unlink - unlink ptracee and restore its execution state
  89 * @child: ptracee to be unlinked
  90 *
  91 * Remove @child from the ptrace list, move it back to the original parent,
  92 * and restore the execution state so that it conforms to the group stop
  93 * state.
  94 *
  95 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
  96 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
  97 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
  98 * If the ptracer is exiting, the ptracee can be in any state.
  99 *
 100 * After detach, the ptracee should be in a state which conforms to the
 101 * group stop.  If the group is stopped or in the process of stopping, the
 102 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 103 * up from TASK_TRACED.
 104 *
 105 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 106 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 107 * to but in the opposite direction of what happens while attaching to a
 108 * stopped task.  However, in this direction, the intermediate RUNNING
 109 * state is not hidden even from the current ptracer and if it immediately
 110 * re-attaches and performs a WNOHANG wait(2), it may fail.
 111 *
 112 * CONTEXT:
 113 * write_lock_irq(tasklist_lock)
 114 */
 115void __ptrace_unlink(struct task_struct *child)
 116{
 117	const struct cred *old_cred;
 118	BUG_ON(!child->ptrace);
 119
 120	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 121#ifdef TIF_SYSCALL_EMU
 122	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 123#endif
 124
 125	child->parent = child->real_parent;
 126	list_del_init(&child->ptrace_entry);
 127	old_cred = child->ptracer_cred;
 128	child->ptracer_cred = NULL;
 129	put_cred(old_cred);
 130
 131	spin_lock(&child->sighand->siglock);
 132	child->ptrace = 0;
 133	/*
 134	 * Clear all pending traps and TRAPPING.  TRAPPING should be
 135	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
 136	 */
 137	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
 138	task_clear_jobctl_trapping(child);
 139
 140	/*
 141	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
 142	 * @child isn't dead.
 143	 */
 144	if (!(child->flags & PF_EXITING) &&
 145	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
 146	     child->signal->group_stop_count)) {
 147		child->jobctl |= JOBCTL_STOP_PENDING;
 148
 149		/*
 150		 * This is only possible if this thread was cloned by the
 151		 * traced task running in the stopped group, set the signal
 152		 * for the future reports.
 153		 * FIXME: we should change ptrace_init_task() to handle this
 154		 * case.
 155		 */
 156		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
 157			child->jobctl |= SIGSTOP;
 158	}
 159
 160	/*
 161	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 162	 * @child in the butt.  Note that @resume should be used iff @child
 163	 * is in TASK_TRACED; otherwise, we might unduly disrupt
 164	 * TASK_KILLABLE sleeps.
 165	 */
 166	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
 167		ptrace_signal_wake_up(child, true);
 168
 169	spin_unlock(&child->sighand->siglock);
 170}
 171
 172/* Ensure that nothing can wake it up, even SIGKILL */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 173static bool ptrace_freeze_traced(struct task_struct *task)
 174{
 175	bool ret = false;
 176
 177	/* Lockless, nobody but us can set this flag */
 178	if (task->jobctl & JOBCTL_LISTENING)
 179		return ret;
 180
 181	spin_lock_irq(&task->sighand->siglock);
 182	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
 183		task->state = __TASK_TRACED;
 
 184		ret = true;
 185	}
 186	spin_unlock_irq(&task->sighand->siglock);
 187
 188	return ret;
 189}
 190
 191static void ptrace_unfreeze_traced(struct task_struct *task)
 192{
 193	if (task->state != __TASK_TRACED)
 194		return;
 195
 196	WARN_ON(!task->ptrace || task->parent != current);
 197
 198	/*
 199	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
 200	 * Recheck state under the lock to close this race.
 
 201	 */
 202	spin_lock_irq(&task->sighand->siglock);
 203	if (task->state == __TASK_TRACED) {
 204		if (__fatal_signal_pending(task))
 
 205			wake_up_state(task, __TASK_TRACED);
 206		else
 207			task->state = TASK_TRACED;
 208	}
 209	spin_unlock_irq(&task->sighand->siglock);
 210}
 211
 212/**
 213 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 214 * @child: ptracee to check for
 215 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 216 *
 217 * Check whether @child is being ptraced by %current and ready for further
 218 * ptrace operations.  If @ignore_state is %false, @child also should be in
 219 * %TASK_TRACED state and on return the child is guaranteed to be traced
 220 * and not executing.  If @ignore_state is %true, @child can be in any
 221 * state.
 222 *
 223 * CONTEXT:
 224 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 225 *
 226 * RETURNS:
 227 * 0 on success, -ESRCH if %child is not ready.
 228 */
 229static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 230{
 231	int ret = -ESRCH;
 232
 233	/*
 234	 * We take the read lock around doing both checks to close a
 235	 * possible race where someone else was tracing our child and
 236	 * detached between these two checks.  After this locked check,
 237	 * we are sure that this is our traced child and that can only
 238	 * be changed by us so it's not changing right after this.
 239	 */
 240	read_lock(&tasklist_lock);
 241	if (child->ptrace && child->parent == current) {
 242		WARN_ON(child->state == __TASK_TRACED);
 243		/*
 244		 * child->sighand can't be NULL, release_task()
 245		 * does ptrace_unlink() before __exit_signal().
 246		 */
 247		if (ignore_state || ptrace_freeze_traced(child))
 248			ret = 0;
 249	}
 250	read_unlock(&tasklist_lock);
 251
 252	if (!ret && !ignore_state) {
 253		if (!wait_task_inactive(child, __TASK_TRACED)) {
 254			/*
 255			 * This can only happen if may_ptrace_stop() fails and
 256			 * ptrace_stop() changes ->state back to TASK_RUNNING,
 257			 * so we should not worry about leaking __TASK_TRACED.
 258			 */
 259			WARN_ON(child->state == __TASK_TRACED);
 260			ret = -ESRCH;
 261		}
 262	}
 263
 264	return ret;
 265}
 266
 267static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 268{
 269	if (mode & PTRACE_MODE_NOAUDIT)
 270		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
 271	else
 272		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
 273}
 274
 275/* Returns 0 on success, -errno on denial. */
 276static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 277{
 278	const struct cred *cred = current_cred(), *tcred;
 279	struct mm_struct *mm;
 280	kuid_t caller_uid;
 281	kgid_t caller_gid;
 282
 283	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
 284		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
 285		return -EPERM;
 286	}
 287
 288	/* May we inspect the given task?
 289	 * This check is used both for attaching with ptrace
 290	 * and for allowing access to sensitive information in /proc.
 291	 *
 292	 * ptrace_attach denies several cases that /proc allows
 293	 * because setting up the necessary parent/child relationship
 294	 * or halting the specified task is impossible.
 295	 */
 296
 297	/* Don't let security modules deny introspection */
 298	if (same_thread_group(task, current))
 299		return 0;
 300	rcu_read_lock();
 301	if (mode & PTRACE_MODE_FSCREDS) {
 302		caller_uid = cred->fsuid;
 303		caller_gid = cred->fsgid;
 304	} else {
 305		/*
 306		 * Using the euid would make more sense here, but something
 307		 * in userland might rely on the old behavior, and this
 308		 * shouldn't be a security problem since
 309		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
 310		 * used a syscall that requests access to another process
 311		 * (and not a filesystem syscall to procfs).
 312		 */
 313		caller_uid = cred->uid;
 314		caller_gid = cred->gid;
 315	}
 316	tcred = __task_cred(task);
 317	if (uid_eq(caller_uid, tcred->euid) &&
 318	    uid_eq(caller_uid, tcred->suid) &&
 319	    uid_eq(caller_uid, tcred->uid)  &&
 320	    gid_eq(caller_gid, tcred->egid) &&
 321	    gid_eq(caller_gid, tcred->sgid) &&
 322	    gid_eq(caller_gid, tcred->gid))
 323		goto ok;
 324	if (ptrace_has_cap(tcred->user_ns, mode))
 325		goto ok;
 326	rcu_read_unlock();
 327	return -EPERM;
 328ok:
 329	rcu_read_unlock();
 330	/*
 331	 * If a task drops privileges and becomes nondumpable (through a syscall
 332	 * like setresuid()) while we are trying to access it, we must ensure
 333	 * that the dumpability is read after the credentials; otherwise,
 334	 * we may be able to attach to a task that we shouldn't be able to
 335	 * attach to (as if the task had dropped privileges without becoming
 336	 * nondumpable).
 337	 * Pairs with a write barrier in commit_creds().
 338	 */
 339	smp_rmb();
 340	mm = task->mm;
 341	if (mm &&
 342	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
 343	     !ptrace_has_cap(mm->user_ns, mode)))
 344	    return -EPERM;
 345
 346	return security_ptrace_access_check(task, mode);
 347}
 348
 349bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 350{
 351	int err;
 352	task_lock(task);
 353	err = __ptrace_may_access(task, mode);
 354	task_unlock(task);
 355	return !err;
 356}
 357
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 358static int ptrace_attach(struct task_struct *task, long request,
 359			 unsigned long addr,
 360			 unsigned long flags)
 361{
 362	bool seize = (request == PTRACE_SEIZE);
 363	int retval;
 364
 365	retval = -EIO;
 366	if (seize) {
 367		if (addr != 0)
 368			goto out;
 
 
 
 
 
 369		if (flags & ~(unsigned long)PTRACE_O_MASK)
 370			goto out;
 
 
 
 371		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
 372	} else {
 373		flags = PT_PTRACED;
 374	}
 375
 376	audit_ptrace(task);
 377
 378	retval = -EPERM;
 379	if (unlikely(task->flags & PF_KTHREAD))
 380		goto out;
 381	if (same_thread_group(task, current))
 382		goto out;
 383
 384	/*
 385	 * Protect exec's credential calculations against our interference;
 386	 * SUID, SGID and LSM creds get determined differently
 387	 * under ptrace.
 388	 */
 389	retval = -ERESTARTNOINTR;
 390	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
 391		goto out;
 392
 393	task_lock(task);
 394	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
 395	task_unlock(task);
 396	if (retval)
 397		goto unlock_creds;
 398
 399	write_lock_irq(&tasklist_lock);
 400	retval = -EPERM;
 401	if (unlikely(task->exit_state))
 402		goto unlock_tasklist;
 403	if (task->ptrace)
 404		goto unlock_tasklist;
 405
 408	task->ptrace = flags;
 409
 410	ptrace_link(task, current);
 411
 412	/* SEIZE doesn't trap tracee on attach */
 413	if (!seize)
 414		send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
 415
 416	spin_lock(&task->sighand->siglock);
 417
 418	/*
 419	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
 420	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
 421	 * will be cleared if the child completes the transition or any
 422	 * event which clears the group stop states happens.  We'll wait
 423	 * for the transition to complete before returning from this
 424	 * function.
 425	 *
 426	 * This hides STOPPED -> RUNNING -> TRACED transition from the
 427	 * attaching thread but a different thread in the same group can
 428	 * still observe the transient RUNNING state.  IOW, if another
 429	 * thread's WNOHANG wait(2) on the stopped tracee races against
 430	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
 431	 *
 432	 * The following task_is_stopped() test is safe as both transitions
 433	 * in and out of STOPPED are protected by siglock.
 434	 */
 435	if (task_is_stopped(task) &&
 436	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
 437		signal_wake_up_state(task, __TASK_STOPPED);
 438
 439	spin_unlock(&task->sighand->siglock);
 440
 441	retval = 0;
 442unlock_tasklist:
 443	write_unlock_irq(&tasklist_lock);
 444unlock_creds:
 445	mutex_unlock(&task->signal->cred_guard_mutex);
 446out:
 447	if (!retval) {
 448		/*
 449		 * We do not bother to change retval or clear JOBCTL_TRAPPING
 450		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
 451		 * not return to user-mode, it will exit and clear this bit in
 452		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
 453		 * and until then nobody can ptrace this task.
 454		 */
 455		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
 456		proc_ptrace_connector(task, PTRACE_ATTACH);
 457	}
 458
 459	return retval;
 460}
 461
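/*
 * Userspace sketch of the two attach flavours handled by ptrace_attach()
 * above (illustration only): PTRACE_ATTACH sends SIGSTOP, so the tracer
 * must consume the initial signal-stop; PTRACE_SEIZE leaves the tracee
 * running until an explicit PTRACE_INTERRUPT.  Error handling omitted.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void attach_classic(pid_t pid)
{
	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
	waitpid(pid, NULL, 0);			/* initial SIGSTOP stop */
}

static void attach_seize(pid_t pid)
{
	ptrace(PTRACE_SEIZE, pid, NULL, NULL);	/* tracee keeps running */
	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);			/* PTRACE_EVENT_STOP trap */
}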
 462/**
 463 * ptrace_traceme  --  helper for PTRACE_TRACEME
 464 *
 465 * Performs checks and sets PT_PTRACED.
 466 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 467 */
 468static int ptrace_traceme(void)
 469{
 470	int ret = -EPERM;
 471
 472	write_lock_irq(&tasklist_lock);
 473	/* Are we already being traced? */
 474	if (!current->ptrace) {
 475		ret = security_ptrace_traceme(current->parent);
 476		/*
 477		 * Check PF_EXITING to ensure ->real_parent has not passed
 478		 * exit_ptrace(). Otherwise we don't report the error but
 479		 * pretend ->real_parent untraces us right after return.
 480		 */
 481		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 482			current->ptrace = PT_PTRACED;
 483			ptrace_link(current, current->real_parent);
 484		}
 485	}
 486	write_unlock_irq(&tasklist_lock);
 487
 488	return ret;
 489}
 490
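/*
 * The canonical userspace pairing for ptrace_traceme() above (sketch, not
 * part of this file): the child requests tracing, then execs; the exec
 * traps with SIGTRAP, which the parent consumes before driving the tracee.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t spawn_traced(char **argv)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execvp(argv[0], argv);	/* stops with SIGTRAP on success */
		_exit(127);
	}
	if (pid > 0)
		waitpid(pid, NULL, 0);	/* child stopped at exec */
	return pid;
}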
 491/*
  492 * Called with irqs disabled, returns true if children should reap themselves.
 493 */
 494static int ignoring_children(struct sighand_struct *sigh)
 495{
 496	int ret;
 497	spin_lock(&sigh->siglock);
 498	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
 499	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
 500	spin_unlock(&sigh->siglock);
 501	return ret;
 502}
 503
 504/*
 505 * Called with tasklist_lock held for writing.
 506 * Unlink a traced task, and clean it up if it was a traced zombie.
 507 * Return true if it needs to be reaped with release_task().
 508 * (We can't call release_task() here because we already hold tasklist_lock.)
 509 *
 510 * If it's a zombie, our attachedness prevented normal parent notification
 511 * or self-reaping.  Do notification now if it would have happened earlier.
 512 * If it should reap itself, return true.
 513 *
 514 * If it's our own child, there is no notification to do. But if our normal
 515 * children self-reap, then this child was prevented by ptrace and we must
 516 * reap it now, in that case we must also wake up sub-threads sleeping in
 517 * do_wait().
 518 */
 519static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 520{
 521	bool dead;
 522
 523	__ptrace_unlink(p);
 524
 525	if (p->exit_state != EXIT_ZOMBIE)
 526		return false;
 527
 528	dead = !thread_group_leader(p);
 529
 530	if (!dead && thread_group_empty(p)) {
 531		if (!same_thread_group(p->real_parent, tracer))
 532			dead = do_notify_parent(p, p->exit_signal);
 533		else if (ignoring_children(tracer->sighand)) {
 534			__wake_up_parent(p, tracer);
 535			dead = true;
 536		}
 537	}
 538	/* Mark it as in the process of being reaped. */
 539	if (dead)
 540		p->exit_state = EXIT_DEAD;
 541	return dead;
 542}
 543
 544static int ptrace_detach(struct task_struct *child, unsigned int data)
 545{
 546	if (!valid_signal(data))
 547		return -EIO;
 548
 549	/* Architecture-specific hardware disable .. */
 550	ptrace_disable(child);
 551
 552	write_lock_irq(&tasklist_lock);
 553	/*
 554	 * We rely on ptrace_freeze_traced(). It can't be killed and
  555	 * untraced by another thread, and it can't be a zombie.
 556	 */
 557	WARN_ON(!child->ptrace || child->exit_state);
 558	/*
 559	 * tasklist_lock avoids the race with wait_task_stopped(), see
 560	 * the comment in ptrace_resume().
 561	 */
 562	child->exit_code = data;
 563	__ptrace_detach(current, child);
 564	write_unlock_irq(&tasklist_lock);
 565
 566	proc_ptrace_connector(child, PTRACE_DETACH);
 567
 568	return 0;
 569}
 570
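/*
 * Userspace view of ptrace_detach() above (sketch): 'data' carries an
 * optional signal to deliver on detach, checked by valid_signal(); zero
 * means detach without injecting anything.
 */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long detach_with_signal(pid_t pid, int sig)
{
	/* e.g. detach_with_signal(pid, SIGCONT) to detach and wake the task */
	return ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)sig);
}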
 571/*
  572 * Detach all tasks we were using ptrace on. Called with tasklist_lock held
 573 * for writing.
 574 */
 575void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
 576{
 577	struct task_struct *p, *n;
 578
 579	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 580		if (unlikely(p->ptrace & PT_EXITKILL))
 581			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
 582
 583		if (__ptrace_detach(tracer, p))
 584			list_add(&p->ptrace_entry, dead);
 585	}
 586}
 587
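/*
 * The PT_EXITKILL branch above corresponds to the PTRACE_O_EXITKILL option;
 * a tracer that must not leave half-traced tasks behind when it dies can
 * request it at seize time.  Sketch only.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static long seize_exitkill(pid_t pid)
{
	/* the data argument of PTRACE_SEIZE is the initial option mask */
	return ptrace(PTRACE_SEIZE, pid, NULL, (void *)(long)PTRACE_O_EXITKILL);
}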
 588int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 589{
 590	int copied = 0;
 591
 592	while (len > 0) {
 593		char buf[128];
 594		int this_len, retval;
 595
 596		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 597		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
 598
 599		if (!retval) {
 600			if (copied)
 601				break;
 602			return -EIO;
 603		}
 604		if (copy_to_user(dst, buf, retval))
 605			return -EFAULT;
 606		copied += retval;
 607		src += retval;
 608		dst += retval;
 609		len -= retval;
 610	}
 611	return copied;
 612}
 613
 614int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 615{
 616	int copied = 0;
 617
 618	while (len > 0) {
 619		char buf[128];
 620		int this_len, retval;
 621
 622		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 623		if (copy_from_user(buf, src, this_len))
 624			return -EFAULT;
 625		retval = ptrace_access_vm(tsk, dst, buf, this_len,
 626				FOLL_FORCE | FOLL_WRITE);
 627		if (!retval) {
 628			if (copied)
 629				break;
 630			return -EIO;
 631		}
 632		copied += retval;
 633		src += retval;
 634		dst += retval;
 635		len -= retval;
 636	}
 637	return copied;
 638}
 639
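/*
 * ptrace_readdata()/ptrace_writedata() above chunk through a 128-byte
 * kernel bounce buffer; a plain userspace tracer doing the same job with
 * PTRACE_PEEKDATA moves one word per call instead.  Sketch assuming the
 * tracee is already ptrace-stopped.
 */
#include <errno.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int peek_buf(pid_t pid, unsigned long src, void *dst, size_t len)
{
	size_t i, n;
	long word;

	for (i = 0; i < len; i += sizeof(long)) {
		errno = 0;
		word = ptrace(PTRACE_PEEKDATA, pid, (void *)(src + i), NULL);
		if (word == -1 && errno)
			return -1;	/* e.g. EIO on an unmapped page */
		n = len - i < sizeof(long) ? len - i : sizeof(long);
		memcpy((char *)dst + i, &word, n);
	}
	return 0;
}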
 640static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 641{
 642	unsigned flags;
 643
 644	if (data & ~(unsigned long)PTRACE_O_MASK)
 645		return -EINVAL;
 646
 647	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
 648		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
 649		    !IS_ENABLED(CONFIG_SECCOMP))
 650			return -EINVAL;
 651
 652		if (!capable(CAP_SYS_ADMIN))
 653			return -EPERM;
 654
 655		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
 656		    current->ptrace & PT_SUSPEND_SECCOMP)
 657			return -EPERM;
 658	}
 659
 660	/* Avoid intermediate state when all opts are cleared */
 661	flags = child->ptrace;
 662	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
 663	flags |= (data << PT_OPT_FLAG_SHIFT);
 664	child->ptrace = flags;
 665
 666	return 0;
 667}
 668
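/*
 * Userspace counterpart of ptrace_setoptions() above: the PTRACE_O_* bits
 * passed here end up in child->ptrace shifted by PT_OPT_FLAG_SHIFT.  A
 * typical tracer enables syscall-stop disambiguation and fork following in
 * one call.  Sketch only.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static long set_common_options(pid_t pid)
{
	long opts = PTRACE_O_TRACESYSGOOD |	/* syscall stops report SIGTRAP|0x80 */
		    PTRACE_O_TRACEFORK |	/* auto-attach to forked children */
		    PTRACE_O_EXITKILL;		/* kill tracee if tracer dies */

	return ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts);
}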
 669static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
 670{
 671	unsigned long flags;
 672	int error = -ESRCH;
 673
 674	if (lock_task_sighand(child, &flags)) {
 675		error = -EINVAL;
 676		if (likely(child->last_siginfo != NULL)) {
 677			copy_siginfo(info, child->last_siginfo);
 678			error = 0;
 679		}
 680		unlock_task_sighand(child, &flags);
 681	}
 682	return error;
 683}
 684
 685static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
 686{
 687	unsigned long flags;
 688	int error = -ESRCH;
 689
 690	if (lock_task_sighand(child, &flags)) {
 691		error = -EINVAL;
 692		if (likely(child->last_siginfo != NULL)) {
 693			copy_siginfo(child->last_siginfo, info);
 694			error = 0;
 695		}
 696		unlock_task_sighand(child, &flags);
 697	}
 698	return error;
 699}
 700
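/*
 * Sketch of the GETSIGINFO/SETSIGINFO pair implemented above: while the
 * tracee sits in a signal-delivery stop, last_siginfo is live and the
 * tracer may read or rewrite it before resuming.  Fails with EINVAL when
 * the tracee is not stopped at a signal.
 */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int stop_signal(pid_t pid)
{
	siginfo_t si;

	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == -1)
		return -1;
	return si.si_signo;	/* which signal caused this stop */
}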
 701static int ptrace_peek_siginfo(struct task_struct *child,
 702				unsigned long addr,
 703				unsigned long data)
 704{
 705	struct ptrace_peeksiginfo_args arg;
 706	struct sigpending *pending;
 707	struct sigqueue *q;
 708	int ret, i;
 709
 710	ret = copy_from_user(&arg, (void __user *) addr,
 711				sizeof(struct ptrace_peeksiginfo_args));
 712	if (ret)
 713		return -EFAULT;
 714
 715	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
 716		return -EINVAL; /* unknown flags */
 717
 718	if (arg.nr < 0)
 719		return -EINVAL;
 720
 721	/* Ensure arg.off fits in an unsigned long */
 722	if (arg.off > ULONG_MAX)
 723		return 0;
 724
 725	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
 726		pending = &child->signal->shared_pending;
 727	else
 728		pending = &child->pending;
 729
 730	for (i = 0; i < arg.nr; ) {
 731		kernel_siginfo_t info;
 732		unsigned long off = arg.off + i;
 733		bool found = false;
 734
 735		spin_lock_irq(&child->sighand->siglock);
 736		list_for_each_entry(q, &pending->list, list) {
 737			if (!off--) {
 738				found = true;
 739				copy_siginfo(&info, &q->info);
 740				break;
 741			}
 742		}
 743		spin_unlock_irq(&child->sighand->siglock);
 744
 745		if (!found) /* beyond the end of the list */
 746			break;
 747
 748#ifdef CONFIG_COMPAT
 749		if (unlikely(in_compat_syscall())) {
 750			compat_siginfo_t __user *uinfo = compat_ptr(data);
 751
 752			if (copy_siginfo_to_user32(uinfo, &info)) {
 753				ret = -EFAULT;
 754				break;
 755			}
 756
 757		} else
 758#endif
 759		{
 760			siginfo_t __user *uinfo = (siginfo_t __user *) data;
 761
 762			if (copy_siginfo_to_user(uinfo, &info)) {
 763				ret = -EFAULT;
 764				break;
 765			}
 766		}
 767
 768		data += sizeof(siginfo_t);
 769		i++;
 770
 771		if (signal_pending(current))
 772			break;
 773
 774		cond_resched();
 775	}
 776
 777	if (i > 0)
 778		return i;
 779
 780	return ret;
 781}
 782
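/*
 * Userspace view of ptrace_peek_siginfo() above, using the glibc-provided
 * argument struct (struct __ptrace_peeksiginfo_args; availability depends
 * on the libc version).  The return value is the number of siginfo records
 * copied into out[].  Sketch only.
 */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long peek_pending(pid_t pid, siginfo_t *out, int nr)
{
	struct __ptrace_peeksiginfo_args args = {
		.off = 0,	/* start of the queue */
		.flags = 0,	/* per-thread queue; or PTRACE_PEEKSIGINFO_SHARED */
		.nr = nr,
	};

	return ptrace(PTRACE_PEEKSIGINFO, pid, &args, out);
}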
 783#ifdef PTRACE_SINGLESTEP
 784#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
 785#else
 786#define is_singlestep(request)		0
 787#endif
 788
 789#ifdef PTRACE_SINGLEBLOCK
 790#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
 791#else
 792#define is_singleblock(request)		0
 793#endif
 794
 795#ifdef PTRACE_SYSEMU
 796#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
 797#else
 798#define is_sysemu_singlestep(request)	0
 799#endif
 800
 801static int ptrace_resume(struct task_struct *child, long request,
 802			 unsigned long data)
 803{
 804	bool need_siglock;
 805
 806	if (!valid_signal(data))
 807		return -EIO;
 808
 809	if (request == PTRACE_SYSCALL)
 810		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 811	else
 812		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 813
 814#ifdef TIF_SYSCALL_EMU
 815	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
 816		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 817	else
 818		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 819#endif
 820
 821	if (is_singleblock(request)) {
 822		if (unlikely(!arch_has_block_step()))
 823			return -EIO;
 824		user_enable_block_step(child);
 825	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 826		if (unlikely(!arch_has_single_step()))
 827			return -EIO;
 828		user_enable_single_step(child);
 829	} else {
 830		user_disable_single_step(child);
 831	}
 832
 833	/*
 834	 * Change ->exit_code and ->state under siglock to avoid the race
 835	 * with wait_task_stopped() in between; a non-zero ->exit_code will
 836	 * wrongly look like another report from tracee.
 837	 *
 838	 * Note that we need siglock even if ->exit_code == data and/or this
 839	 * status was not reported yet, the new status must not be cleared by
 840	 * wait_task_stopped() after resume.
 841	 *
 842	 * If data == 0 we do not care if wait_task_stopped() reports the old
 843	 * status and clears the code too; this can't race with the tracee, it
 844	 * takes siglock after resume.
 845	 */
 846	need_siglock = data && !thread_group_empty(current);
 847	if (need_siglock)
 848		spin_lock_irq(&child->sighand->siglock);
 849	child->exit_code = data;
 850	wake_up_state(child, __TASK_TRACED);
 851	if (need_siglock)
 852		spin_unlock_irq(&child->sighand->siglock);
 853
 854	return 0;
 855}
 856
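/*
 * ptrace_resume() above backs PTRACE_CONT, PTRACE_SYSCALL and the
 * single-step requests.  The classic tracer loop resumes with
 * PTRACE_SYSCALL and sees stops in entry/exit pairs; with
 * PTRACE_O_TRACESYSGOOD set they report SIGTRAP|0x80.  Sketch only.
 */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void syscall_loop(pid_t pid)
{
	int status;

	for (;;) {
		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
			break;
		if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80)) {
			/* alternately a syscall-entry or syscall-exit stop */
		}
	}
}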
 857#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 858
 859static const struct user_regset *
 860find_regset(const struct user_regset_view *view, unsigned int type)
 861{
 862	const struct user_regset *regset;
 863	int n;
 864
 865	for (n = 0; n < view->n; ++n) {
 866		regset = view->regsets + n;
 867		if (regset->core_note_type == type)
 868			return regset;
 869	}
 870
 871	return NULL;
 872}
 873
 874static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 875			 struct iovec *kiov)
 876{
 877	const struct user_regset_view *view = task_user_regset_view(task);
 878	const struct user_regset *regset = find_regset(view, type);
 879	int regset_no;
 880
 881	if (!regset || (kiov->iov_len % regset->size) != 0)
 882		return -EINVAL;
 883
 884	regset_no = regset - view->regsets;
 885	kiov->iov_len = min(kiov->iov_len,
 886			    (__kernel_size_t) (regset->n * regset->size));
 887
 888	if (req == PTRACE_GETREGSET)
 889		return copy_regset_to_user(task, view, regset_no, 0,
 890					   kiov->iov_len, kiov->iov_base);
 891	else
 892		return copy_regset_from_user(task, view, regset_no, 0,
 893					     kiov->iov_len, kiov->iov_base);
 894}
 895
 896/*
 897 * This is declared in linux/regset.h and defined in machine-dependent
 898 * code.  We put the export here, near the primary machine-neutral use,
 899 * to ensure no machine forgets it.
 900 */
 901EXPORT_SYMBOL_GPL(task_user_regset_view);
 902
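/*
 * Userspace side of ptrace_regset() above: GETREGSET takes the regset type
 * (an NT_* note number) in 'addr' and an iovec in 'data'; the kernel clamps
 * iov_len to the regset size and writes back the length it filled.  Sketch
 * for the general-purpose registers; struct layout is per-architecture.
 */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static int get_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len = sizeof(*regs),
	};

	/* on success iov.iov_len holds the number of bytes actually filled */
	return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRSTATUS, &iov);
}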
 903static unsigned long
 904ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
 905			      struct ptrace_syscall_info *info)
 906{
 907	unsigned long args[ARRAY_SIZE(info->entry.args)];
 908	int i;
 909
 910	info->op = PTRACE_SYSCALL_INFO_ENTRY;
 911	info->entry.nr = syscall_get_nr(child, regs);
 912	syscall_get_arguments(child, regs, args);
 913	for (i = 0; i < ARRAY_SIZE(args); i++)
 914		info->entry.args[i] = args[i];
 915
 916	/* args is the last field in struct ptrace_syscall_info.entry */
 917	return offsetofend(struct ptrace_syscall_info, entry.args);
 918}
 919
 920static unsigned long
 921ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
 922				struct ptrace_syscall_info *info)
 923{
 924	/*
 925	 * As struct ptrace_syscall_info.entry is currently a subset
 926	 * of struct ptrace_syscall_info.seccomp, it makes sense to
 927	 * initialize that subset using ptrace_get_syscall_info_entry().
 928	 * This can be reconsidered in the future if these structures
 929	 * diverge significantly enough.
 930	 */
 931	ptrace_get_syscall_info_entry(child, regs, info);
 932	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
 933	info->seccomp.ret_data = child->ptrace_message;
 934
 935	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
 936	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
 937}
 938
 939static unsigned long
 940ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
 941			     struct ptrace_syscall_info *info)
 942{
 943	info->op = PTRACE_SYSCALL_INFO_EXIT;
 944	info->exit.rval = syscall_get_error(child, regs);
 945	info->exit.is_error = !!info->exit.rval;
 946	if (!info->exit.is_error)
 947		info->exit.rval = syscall_get_return_value(child, regs);
 948
 949	/* is_error is the last field in struct ptrace_syscall_info.exit */
 950	return offsetofend(struct ptrace_syscall_info, exit.is_error);
 951}
 952
 953static int
 954ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
 955			void __user *datavp)
 956{
 957	struct pt_regs *regs = task_pt_regs(child);
 958	struct ptrace_syscall_info info = {
 959		.op = PTRACE_SYSCALL_INFO_NONE,
 960		.arch = syscall_get_arch(child),
 961		.instruction_pointer = instruction_pointer(regs),
 962		.stack_pointer = user_stack_pointer(regs),
 963	};
 964	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
 965	unsigned long write_size;
 966
 967	/*
 968	 * This does not need lock_task_sighand() to access
 969	 * child->last_siginfo because ptrace_freeze_traced()
 970	 * called earlier by ptrace_check_attach() ensures that
 971	 * the tracee cannot go away and clear its last_siginfo.
 972	 */
 973	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
 974	case SIGTRAP | 0x80:
 975		switch (child->ptrace_message) {
 976		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
 977			actual_size = ptrace_get_syscall_info_entry(child, regs,
 978								    &info);
 979			break;
 980		case PTRACE_EVENTMSG_SYSCALL_EXIT:
 981			actual_size = ptrace_get_syscall_info_exit(child, regs,
 982								   &info);
 983			break;
 984		}
 985		break;
 986	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
 987		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
 988							      &info);
 989		break;
 990	}
 991
 992	write_size = min(actual_size, user_size);
 993	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
 994}
 995#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
 996
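/*
 * Userspace sketch of PTRACE_GET_SYSCALL_INFO as implemented above, using
 * the glibc struct __ptrace_syscall_info (present in newer libcs; the UAPI
 * struct lives in <linux/ptrace.h>).  'addr' is the user buffer size, the
 * return value is the size the kernel wanted to write, and info.op selects
 * the valid union member.
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void show_syscall_stop(pid_t pid)
{
	struct __ptrace_syscall_info info;
	long ret;

	ret = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
		     (void *)sizeof(info), &info);
	if (ret <= 0)
		return;
	if (info.op == PTRACE_SYSCALL_INFO_ENTRY)
		printf("entry: nr=%llu\n", (unsigned long long)info.entry.nr);
	else if (info.op == PTRACE_SYSCALL_INFO_EXIT)
		printf("exit: rval=%lld%s\n", (long long)info.exit.rval,
		       info.exit.is_error ? " (error)" : "");
}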
 997int ptrace_request(struct task_struct *child, long request,
 998		   unsigned long addr, unsigned long data)
 999{
1000	bool seized = child->ptrace & PT_SEIZED;
1001	int ret = -EIO;
1002	kernel_siginfo_t siginfo, *si;
1003	void __user *datavp = (void __user *) data;
1004	unsigned long __user *datalp = datavp;
1005	unsigned long flags;
1006
1007	switch (request) {
1008	case PTRACE_PEEKTEXT:
1009	case PTRACE_PEEKDATA:
1010		return generic_ptrace_peekdata(child, addr, data);
1011	case PTRACE_POKETEXT:
1012	case PTRACE_POKEDATA:
1013		return generic_ptrace_pokedata(child, addr, data);
1014
1015#ifdef PTRACE_OLDSETOPTIONS
1016	case PTRACE_OLDSETOPTIONS:
1017#endif
1018	case PTRACE_SETOPTIONS:
1019		ret = ptrace_setoptions(child, data);
1020		break;
1021	case PTRACE_GETEVENTMSG:
1022		ret = put_user(child->ptrace_message, datalp);
1023		break;
1024
1025	case PTRACE_PEEKSIGINFO:
1026		ret = ptrace_peek_siginfo(child, addr, data);
1027		break;
1028
1029	case PTRACE_GETSIGINFO:
1030		ret = ptrace_getsiginfo(child, &siginfo);
1031		if (!ret)
1032			ret = copy_siginfo_to_user(datavp, &siginfo);
1033		break;
1034
1035	case PTRACE_SETSIGINFO:
1036		ret = copy_siginfo_from_user(&siginfo, datavp);
1037		if (!ret)
1038			ret = ptrace_setsiginfo(child, &siginfo);
1039		break;
1040
1041	case PTRACE_GETSIGMASK: {
1042		sigset_t *mask;
1043
1044		if (addr != sizeof(sigset_t)) {
1045			ret = -EINVAL;
1046			break;
1047		}
1048
1049		if (test_tsk_restore_sigmask(child))
1050			mask = &child->saved_sigmask;
1051		else
1052			mask = &child->blocked;
1053
1054		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
1055			ret = -EFAULT;
1056		else
1057			ret = 0;
1058
1059		break;
1060	}
1061
1062	case PTRACE_SETSIGMASK: {
1063		sigset_t new_set;
1064
1065		if (addr != sizeof(sigset_t)) {
1066			ret = -EINVAL;
1067			break;
1068		}
1069
1070		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
1071			ret = -EFAULT;
1072			break;
1073		}
1074
1075		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1076
1077		/*
1078		 * Every thread does recalc_sigpending() after resume, so
1079		 * retarget_shared_pending() and recalc_sigpending() are not
1080		 * called here.
1081		 */
1082		spin_lock_irq(&child->sighand->siglock);
1083		child->blocked = new_set;
1084		spin_unlock_irq(&child->sighand->siglock);
1085
1086		clear_tsk_restore_sigmask(child);
1087
1088		ret = 0;
1089		break;
1090	}
1091
1092	case PTRACE_INTERRUPT:
1093		/*
1094		 * Stop tracee without any side-effect on signal or job
1095		 * control.  At least one trap is guaranteed to happen
1096		 * after this request.  If @child is already trapped, the
1097		 * current trap is not disturbed and another trap will
1098		 * happen after the current trap is ended with PTRACE_CONT.
1099		 *
1100		 * The actual trap might not be PTRACE_EVENT_STOP trap but
1101		 * the pending condition is cleared regardless.
1102		 */
1103		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1104			break;
1105
1106		/*
 1107		 * INTERRUPT doesn't disturb an existing trap, with one
 1108		 * exception: if the ptracer issued LISTEN for the current
1109		 * STOP, this INTERRUPT should clear LISTEN and re-trap
1110		 * tracee into STOP.
1111		 */
1112		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
1113			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
1114
1115		unlock_task_sighand(child, &flags);
1116		ret = 0;
1117		break;
1118
1119	case PTRACE_LISTEN:
1120		/*
1121		 * Listen for events.  Tracee must be in STOP.  It's not
 1122		 * resumed per se but is not considered to be in TRACED by
1123		 * wait(2) or ptrace(2).  If an async event (e.g. group
1124		 * stop state change) happens, tracee will enter STOP trap
1125		 * again.  Alternatively, ptracer can issue INTERRUPT to
1126		 * finish listening and re-trap tracee into STOP.
1127		 */
1128		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1129			break;
1130
1131		si = child->last_siginfo;
1132		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
1133			child->jobctl |= JOBCTL_LISTENING;
1134			/*
1135			 * If NOTIFY is set, it means event happened between
1136			 * start of this trap and now.  Trigger re-trap.
1137			 */
1138			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
1139				ptrace_signal_wake_up(child, true);
1140			ret = 0;
1141		}
1142		unlock_task_sighand(child, &flags);
1143		break;
1144
1145	case PTRACE_DETACH:	 /* detach a process that was attached. */
1146		ret = ptrace_detach(child, data);
1147		break;
1148
1149#ifdef CONFIG_BINFMT_ELF_FDPIC
1150	case PTRACE_GETFDPIC: {
1151		struct mm_struct *mm = get_task_mm(child);
1152		unsigned long tmp = 0;
1153
1154		ret = -ESRCH;
1155		if (!mm)
1156			break;
1157
1158		switch (addr) {
1159		case PTRACE_GETFDPIC_EXEC:
1160			tmp = mm->context.exec_fdpic_loadmap;
1161			break;
1162		case PTRACE_GETFDPIC_INTERP:
1163			tmp = mm->context.interp_fdpic_loadmap;
1164			break;
1165		default:
1166			break;
1167		}
1168		mmput(mm);
1169
1170		ret = put_user(tmp, datalp);
1171		break;
1172	}
1173#endif
1174
1175#ifdef PTRACE_SINGLESTEP
1176	case PTRACE_SINGLESTEP:
1177#endif
1178#ifdef PTRACE_SINGLEBLOCK
1179	case PTRACE_SINGLEBLOCK:
1180#endif
1181#ifdef PTRACE_SYSEMU
1182	case PTRACE_SYSEMU:
1183	case PTRACE_SYSEMU_SINGLESTEP:
1184#endif
1185	case PTRACE_SYSCALL:
1186	case PTRACE_CONT:
1187		return ptrace_resume(child, request, data);
1188
1189	case PTRACE_KILL:
1190		if (child->exit_state)	/* already dead */
1191			return 0;
1192		return ptrace_resume(child, request, SIGKILL);
1193
1194#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1195	case PTRACE_GETREGSET:
1196	case PTRACE_SETREGSET: {
1197		struct iovec kiov;
1198		struct iovec __user *uiov = datavp;
1199
1200		if (!access_ok(uiov, sizeof(*uiov)))
1201			return -EFAULT;
1202
1203		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
1204		    __get_user(kiov.iov_len, &uiov->iov_len))
1205			return -EFAULT;
1206
1207		ret = ptrace_regset(child, request, addr, &kiov);
1208		if (!ret)
1209			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1210		break;
1211	}
1212
1213	case PTRACE_GET_SYSCALL_INFO:
1214		ret = ptrace_get_syscall_info(child, addr, datavp);
1215		break;
1216#endif
1217
1218	case PTRACE_SECCOMP_GET_FILTER:
1219		ret = seccomp_get_filter(child, addr, datavp);
1220		break;
1221
1222	case PTRACE_SECCOMP_GET_METADATA:
1223		ret = seccomp_get_metadata(child, addr, datavp);
1224		break;
1225
1226	default:
1227		break;
1228	}
1229
1230	return ret;
1231}
1232
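/*
 * The INTERRUPT/LISTEN pair handled in ptrace_request() above lets a
 * SEIZE-based tracer observe group-stops without pinning the tracee in
 * ptrace-stop: after the group-stop trap it issues PTRACE_LISTEN, then
 * re-traps the tracee with PTRACE_INTERRUPT when it needs to talk to it
 * again.  Sketch, error handling omitted.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void observe_group_stop(pid_t pid)
{
	int status;

	waitpid(pid, &status, 0);		/* group-stop trap */
	ptrace(PTRACE_LISTEN, pid, NULL, NULL);	/* tracee acts stopped, not TRACED */
	/* ... later, when the tracer wants the tracee back in STOP ... */
	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
	waitpid(pid, &status, 0);
}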
1233#ifndef arch_ptrace_attach
1234#define arch_ptrace_attach(child)	do { } while (0)
1235#endif
1236
1237SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
1238		unsigned long, data)
1239{
1240	struct task_struct *child;
1241	long ret;
1242
1243	if (request == PTRACE_TRACEME) {
1244		ret = ptrace_traceme();
1245		if (!ret)
1246			arch_ptrace_attach(current);
1247		goto out;
1248	}
1249
1250	child = find_get_task_by_vpid(pid);
1251	if (!child) {
1252		ret = -ESRCH;
1253		goto out;
1254	}
1255
1256	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1257		ret = ptrace_attach(child, request, addr, data);
1258		/*
1259		 * Some architectures need to do book-keeping after
1260		 * a ptrace attach.
1261		 */
1262		if (!ret)
1263			arch_ptrace_attach(child);
1264		goto out_put_task_struct;
1265	}
1266
1267	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1268				  request == PTRACE_INTERRUPT);
1269	if (ret < 0)
1270		goto out_put_task_struct;
1271
1272	ret = arch_ptrace(child, request, addr, data);
1273	if (ret || request != PTRACE_DETACH)
1274		ptrace_unfreeze_traced(child);
1275
1276 out_put_task_struct:
1277	put_task_struct(child);
1278 out:
1279	return ret;
1280}
1281
1282int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1283			    unsigned long data)
1284{
1285	unsigned long tmp;
1286	int copied;
1287
1288	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1289	if (copied != sizeof(tmp))
1290		return -EIO;
1291	return put_user(tmp, (unsigned long __user *)data);
1292}
1293
1294int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1295			    unsigned long data)
1296{
1297	int copied;
1298
1299	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
1300			FOLL_FORCE | FOLL_WRITE);
1301	return (copied == sizeof(data)) ? 0 : -EIO;
1302}
1303
1304#if defined CONFIG_COMPAT
1305
1306int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1307			  compat_ulong_t addr, compat_ulong_t data)
1308{
1309	compat_ulong_t __user *datap = compat_ptr(data);
1310	compat_ulong_t word;
1311	kernel_siginfo_t siginfo;
1312	int ret;
1313
1314	switch (request) {
1315	case PTRACE_PEEKTEXT:
1316	case PTRACE_PEEKDATA:
1317		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
1318				FOLL_FORCE);
1319		if (ret != sizeof(word))
1320			ret = -EIO;
1321		else
1322			ret = put_user(word, datap);
1323		break;
1324
1325	case PTRACE_POKETEXT:
1326	case PTRACE_POKEDATA:
1327		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
1328				FOLL_FORCE | FOLL_WRITE);
1329		ret = (ret != sizeof(data) ? -EIO : 0);
1330		break;
1331
1332	case PTRACE_GETEVENTMSG:
1333		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
1334		break;
1335
1336	case PTRACE_GETSIGINFO:
1337		ret = ptrace_getsiginfo(child, &siginfo);
1338		if (!ret)
1339			ret = copy_siginfo_to_user32(
1340				(struct compat_siginfo __user *) datap,
1341				&siginfo);
1342		break;
1343
1344	case PTRACE_SETSIGINFO:
1345		ret = copy_siginfo_from_user32(
1346			&siginfo, (struct compat_siginfo __user *) datap);
1347		if (!ret)
1348			ret = ptrace_setsiginfo(child, &siginfo);
1349		break;
1350#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1351	case PTRACE_GETREGSET:
1352	case PTRACE_SETREGSET:
1353	{
1354		struct iovec kiov;
1355		struct compat_iovec __user *uiov =
1356			(struct compat_iovec __user *) datap;
1357		compat_uptr_t ptr;
1358		compat_size_t len;
1359
1360		if (!access_ok(uiov, sizeof(*uiov)))
1361			return -EFAULT;
1362
1363		if (__get_user(ptr, &uiov->iov_base) ||
1364		    __get_user(len, &uiov->iov_len))
1365			return -EFAULT;
1366
1367		kiov.iov_base = compat_ptr(ptr);
1368		kiov.iov_len = len;
1369
1370		ret = ptrace_regset(child, request, addr, &kiov);
1371		if (!ret)
1372			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1373		break;
1374	}
1375#endif
1376
1377	default:
1378		ret = ptrace_request(child, request, addr, data);
1379	}
1380
1381	return ret;
1382}
1383
1384COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
1385		       compat_long_t, addr, compat_long_t, data)
1386{
1387	struct task_struct *child;
1388	long ret;
1389
1390	if (request == PTRACE_TRACEME) {
1391		ret = ptrace_traceme();
1392		goto out;
1393	}
1394
1395	child = find_get_task_by_vpid(pid);
1396	if (!child) {
1397		ret = -ESRCH;
1398		goto out;
1399	}
1400
1401	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1402		ret = ptrace_attach(child, request, addr, data);
1403		/*
1404		 * Some architectures need to do book-keeping after
1405		 * a ptrace attach.
1406		 */
1407		if (!ret)
1408			arch_ptrace_attach(child);
1409		goto out_put_task_struct;
1410	}
1411
1412	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1413				  request == PTRACE_INTERRUPT);
1414	if (!ret) {
1415		ret = compat_arch_ptrace(child, request, addr, data);
1416		if (ret || request != PTRACE_DETACH)
1417			ptrace_unfreeze_traced(child);
1418	}
1419
1420 out_put_task_struct:
1421	put_task_struct(child);
1422 out:
1423	return ret;
1424}
1425#endif	/* CONFIG_COMPAT */