v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/ptrace.c
   4 *
   5 * (C) Copyright 1999 Linus Torvalds
   6 *
   7 * Common interfaces for "ptrace()" which we do not want
   8 * to continually duplicate across every architecture.
   9 */
  10
  11#include <linux/capability.h>
  12#include <linux/export.h>
  13#include <linux/sched.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/coredump.h>
  16#include <linux/sched/task.h>
  17#include <linux/errno.h>
  18#include <linux/mm.h>
  19#include <linux/highmem.h>
  20#include <linux/pagemap.h>
  21#include <linux/ptrace.h>
  22#include <linux/security.h>
  23#include <linux/signal.h>
  24#include <linux/uio.h>
  25#include <linux/audit.h>
  26#include <linux/pid_namespace.h>
  27#include <linux/syscalls.h>
  28#include <linux/uaccess.h>
  29#include <linux/regset.h>
  30#include <linux/hw_breakpoint.h>
  31#include <linux/cn_proc.h>
  32#include <linux/compat.h>
  33#include <linux/sched/signal.h>
  34
  35#include <asm/syscall.h>	/* for syscall_get_* */
  36
  37/*
  38 * Access another process' address space via ptrace.
   39 * Source/target buffer must be in kernel space;
   40 * do not walk the page table directly, use get_user_pages.
  41 */
  42int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
  43		     void *buf, int len, unsigned int gup_flags)
  44{
  45	struct mm_struct *mm;
  46	int ret;
  47
  48	mm = get_task_mm(tsk);
  49	if (!mm)
  50		return 0;
  51
  52	if (!tsk->ptrace ||
  53	    (current != tsk->parent) ||
  54	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
  55	     !ptracer_capable(tsk, mm->user_ns))) {
  56		mmput(mm);
  57		return 0;
  58	}
  59
  60	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
  61	mmput(mm);
  62
  63	return ret;
  64}
  65
  66
  67void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
  68		   const struct cred *ptracer_cred)
  69{
  70	BUG_ON(!list_empty(&child->ptrace_entry));
  71	list_add(&child->ptrace_entry, &new_parent->ptraced);
  72	child->parent = new_parent;
  73	child->ptracer_cred = get_cred(ptracer_cred);
  74}
  75
  76/*
  77 * ptrace a task: make the debugger its new parent and
  78 * move it to the ptrace list.
  79 *
  80 * Must be called with the tasklist lock write-held.
  81 */
  82static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
  83{
  84	__ptrace_link(child, new_parent, current_cred());
  85}
  86
  87/**
  88 * __ptrace_unlink - unlink ptracee and restore its execution state
  89 * @child: ptracee to be unlinked
  90 *
  91 * Remove @child from the ptrace list, move it back to the original parent,
  92 * and restore the execution state so that it conforms to the group stop
  93 * state.
  94 *
  95 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
  96 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
  97 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
  98 * If the ptracer is exiting, the ptracee can be in any state.
  99 *
 100 * After detach, the ptracee should be in a state which conforms to the
 101 * group stop.  If the group is stopped or in the process of stopping, the
 102 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 103 * up from TASK_TRACED.
 104 *
 105 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 106 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 107 * to but in the opposite direction of what happens while attaching to a
 108 * stopped task.  However, in this direction, the intermediate RUNNING
 109 * state is not hidden even from the current ptracer and if it immediately
 110 * re-attaches and performs a WNOHANG wait(2), it may fail.
 111 *
 112 * CONTEXT:
 113 * write_lock_irq(tasklist_lock)
 114 */
 115void __ptrace_unlink(struct task_struct *child)
 116{
 117	const struct cred *old_cred;
 118	BUG_ON(!child->ptrace);
 119
 120	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 121#ifdef TIF_SYSCALL_EMU
 122	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 123#endif
 124
 125	child->parent = child->real_parent;
 126	list_del_init(&child->ptrace_entry);
 127	old_cred = child->ptracer_cred;
 128	child->ptracer_cred = NULL;
 129	put_cred(old_cred);
 130
 131	spin_lock(&child->sighand->siglock);
 132	child->ptrace = 0;
 133	/*
 134	 * Clear all pending traps and TRAPPING.  TRAPPING should be
 135	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
 136	 */
 137	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
 138	task_clear_jobctl_trapping(child);
 139
 140	/*
 141	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
 142	 * @child isn't dead.
 143	 */
 144	if (!(child->flags & PF_EXITING) &&
 145	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
 146	     child->signal->group_stop_count)) {
 147		child->jobctl |= JOBCTL_STOP_PENDING;
 148
 149		/*
 150		 * This is only possible if this thread was cloned by the
 151		 * traced task running in the stopped group, set the signal
 152		 * for the future reports.
 153		 * FIXME: we should change ptrace_init_task() to handle this
 154		 * case.
 155		 */
 156		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
 157			child->jobctl |= SIGSTOP;
 158	}
 159
 160	/*
 161	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 162	 * @child in the butt.  Note that @resume should be used iff @child
 163	 * is in TASK_TRACED; otherwise, we might unduly disrupt
 164	 * TASK_KILLABLE sleeps.
 165	 */
 166	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
 167		ptrace_signal_wake_up(child, true);
 168
 169	spin_unlock(&child->sighand->siglock);
 170}
 171
 172/* Ensure that nothing can wake it up, even SIGKILL */
 173static bool ptrace_freeze_traced(struct task_struct *task)
 174{
 175	bool ret = false;
 176
 177	/* Lockless, nobody but us can set this flag */
 178	if (task->jobctl & JOBCTL_LISTENING)
 179		return ret;
 180
 181	spin_lock_irq(&task->sighand->siglock);
 182	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
 183		task->state = __TASK_TRACED;
 184		ret = true;
 185	}
 186	spin_unlock_irq(&task->sighand->siglock);
 187
 188	return ret;
 189}
 190
 191static void ptrace_unfreeze_traced(struct task_struct *task)
 192{
 193	if (task->state != __TASK_TRACED)
 194		return;
 195
 196	WARN_ON(!task->ptrace || task->parent != current);
 197
 198	/*
 199	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
 200	 * Recheck state under the lock to close this race.
 201	 */
 202	spin_lock_irq(&task->sighand->siglock);
 203	if (task->state == __TASK_TRACED) {
 204		if (__fatal_signal_pending(task))
 205			wake_up_state(task, __TASK_TRACED);
 206		else
 207			task->state = TASK_TRACED;
 208	}
 209	spin_unlock_irq(&task->sighand->siglock);
 210}
 211
 212/**
 213 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 214 * @child: ptracee to check for
 215 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 216 *
 217 * Check whether @child is being ptraced by %current and ready for further
 218 * ptrace operations.  If @ignore_state is %false, @child also should be in
 219 * %TASK_TRACED state and on return the child is guaranteed to be traced
 220 * and not executing.  If @ignore_state is %true, @child can be in any
 221 * state.
 222 *
 223 * CONTEXT:
 224 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 225 *
 226 * RETURNS:
 227 * 0 on success, -ESRCH if %child is not ready.
 228 */
 229static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 230{
 231	int ret = -ESRCH;
 232
 233	/*
 234	 * We take the read lock around doing both checks to close a
 235	 * possible race where someone else was tracing our child and
 236	 * detached between these two checks.  After this locked check,
 237	 * we are sure that this is our traced child and that can only
 238	 * be changed by us so it's not changing right after this.
 239	 */
 240	read_lock(&tasklist_lock);
 241	if (child->ptrace && child->parent == current) {
 242		WARN_ON(child->state == __TASK_TRACED);
 243		/*
 244		 * child->sighand can't be NULL, release_task()
 245		 * does ptrace_unlink() before __exit_signal().
 246		 */
 247		if (ignore_state || ptrace_freeze_traced(child))
 248			ret = 0;
 249	}
 250	read_unlock(&tasklist_lock);
 251
 252	if (!ret && !ignore_state) {
 253		if (!wait_task_inactive(child, __TASK_TRACED)) {
 254			/*
 255			 * This can only happen if may_ptrace_stop() fails and
 256			 * ptrace_stop() changes ->state back to TASK_RUNNING,
 257			 * so we should not worry about leaking __TASK_TRACED.
 258			 */
 259			WARN_ON(child->state == __TASK_TRACED);
 260			ret = -ESRCH;
 261		}
 262	}
 263
 264	return ret;
 265}
 266
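/*
 * Userspace view of the check above (illustrative sketch, not part of
 * this file; pid and addr are hypothetical): requests other than
 * PTRACE_KILL and PTRACE_INTERRUPT fail with -ESRCH unless the tracee
 * is already frozen in ptrace-stop, so a tracer always waits first.
 */
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static long peek_after_stop(pid_t pid, unsigned long addr)
{
	int status;

	ptrace(PTRACE_ATTACH, pid, 0, 0);	/* queues SIGSTOP */
	waitpid(pid, &status, 0);		/* tracee now in TASK_TRACED */
	errno = 0;				/* -1 is a valid peeked word */
	return ptrace(PTRACE_PEEKDATA, pid, (void *)addr, 0);
}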
 267static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 268{
 269	if (mode & PTRACE_MODE_NOAUDIT)
 270		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
 271	else
 272		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
 273}
 274
 275/* Returns 0 on success, -errno on denial. */
 276static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 277{
 278	const struct cred *cred = current_cred(), *tcred;
 279	struct mm_struct *mm;
 280	kuid_t caller_uid;
 281	kgid_t caller_gid;
 282
 283	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
 284		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
 285		return -EPERM;
 286	}
 287
 288	/* May we inspect the given task?
 289	 * This check is used both for attaching with ptrace
 290	 * and for allowing access to sensitive information in /proc.
 291	 *
 292	 * ptrace_attach denies several cases that /proc allows
 293	 * because setting up the necessary parent/child relationship
 294	 * or halting the specified task is impossible.
 295	 */
 296
 297	/* Don't let security modules deny introspection */
 298	if (same_thread_group(task, current))
 299		return 0;
 300	rcu_read_lock();
 301	if (mode & PTRACE_MODE_FSCREDS) {
 302		caller_uid = cred->fsuid;
 303		caller_gid = cred->fsgid;
 304	} else {
 305		/*
 306		 * Using the euid would make more sense here, but something
 307		 * in userland might rely on the old behavior, and this
 308		 * shouldn't be a security problem since
 309		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
 310		 * used a syscall that requests access to another process
 311		 * (and not a filesystem syscall to procfs).
 312		 */
 313		caller_uid = cred->uid;
 314		caller_gid = cred->gid;
 315	}
 316	tcred = __task_cred(task);
 317	if (uid_eq(caller_uid, tcred->euid) &&
 318	    uid_eq(caller_uid, tcred->suid) &&
 319	    uid_eq(caller_uid, tcred->uid)  &&
 320	    gid_eq(caller_gid, tcred->egid) &&
 321	    gid_eq(caller_gid, tcred->sgid) &&
 322	    gid_eq(caller_gid, tcred->gid))
 323		goto ok;
 324	if (ptrace_has_cap(tcred->user_ns, mode))
 325		goto ok;
 326	rcu_read_unlock();
 327	return -EPERM;
 328ok:
 329	rcu_read_unlock();
 330	/*
 331	 * If a task drops privileges and becomes nondumpable (through a syscall
 332	 * like setresuid()) while we are trying to access it, we must ensure
 333	 * that the dumpability is read after the credentials; otherwise,
 334	 * we may be able to attach to a task that we shouldn't be able to
 335	 * attach to (as if the task had dropped privileges without becoming
 336	 * nondumpable).
 337	 * Pairs with a write barrier in commit_creds().
 338	 */
 339	smp_rmb();
 340	mm = task->mm;
 341	if (mm &&
 342	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
 343	     !ptrace_has_cap(mm->user_ns, mode)))
 344	    return -EPERM;
 345
 346	return security_ptrace_access_check(task, mode);
 347}
 348
 349bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 350{
 351	int err;
 352	task_lock(task);
 353	err = __ptrace_may_access(task, mode);
 354	task_unlock(task);
 355	return !err;
 356}
 357
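/*
 * Userspace-visible effect of the get_dumpable() test above (sketch):
 * clearing the dumpable flag makes a process unattachable even to
 * same-uid tracers that lack CAP_SYS_PTRACE.
 */
#include <sys/prctl.h>

static void refuse_same_uid_tracers(void)
{
	/* get_dumpable(mm) != SUID_DUMP_USER from here on */
	prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
}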
 358static int ptrace_attach(struct task_struct *task, long request,
 359			 unsigned long addr,
 360			 unsigned long flags)
 361{
 362	bool seize = (request == PTRACE_SEIZE);
 363	int retval;
 364
 365	retval = -EIO;
 366	if (seize) {
 367		if (addr != 0)
 368			goto out;
 369		if (flags & ~(unsigned long)PTRACE_O_MASK)
 370			goto out;
 371		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
 372	} else {
 373		flags = PT_PTRACED;
 374	}
 375
 376	audit_ptrace(task);
 377
 378	retval = -EPERM;
 379	if (unlikely(task->flags & PF_KTHREAD))
 380		goto out;
 381	if (same_thread_group(task, current))
 382		goto out;
 383
 384	/*
 385	 * Protect exec's credential calculations against our interference;
 386	 * SUID, SGID and LSM creds get determined differently
 387	 * under ptrace.
 388	 */
 389	retval = -ERESTARTNOINTR;
 390	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
 391		goto out;
 392
 393	task_lock(task);
 394	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
 395	task_unlock(task);
 396	if (retval)
 397		goto unlock_creds;
 398
 399	write_lock_irq(&tasklist_lock);
 400	retval = -EPERM;
 401	if (unlikely(task->exit_state))
 402		goto unlock_tasklist;
 403	if (task->ptrace)
 404		goto unlock_tasklist;
 405
 406	if (seize)
 407		flags |= PT_SEIZED;
 408	task->ptrace = flags;
 409
 410	ptrace_link(task, current);
 411
 412	/* SEIZE doesn't trap tracee on attach */
 413	if (!seize)
 414		send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
 415
 416	spin_lock(&task->sighand->siglock);
 417
 418	/*
 419	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
 420	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
 421	 * will be cleared if the child completes the transition or any
 422	 * event which clears the group stop states happens.  We'll wait
 423	 * for the transition to complete before returning from this
 424	 * function.
 425	 *
 426	 * This hides STOPPED -> RUNNING -> TRACED transition from the
 427	 * attaching thread but a different thread in the same group can
 428	 * still observe the transient RUNNING state.  IOW, if another
 429	 * thread's WNOHANG wait(2) on the stopped tracee races against
 430	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
 431	 *
 432	 * The following task_is_stopped() test is safe as both transitions
 433	 * in and out of STOPPED are protected by siglock.
 434	 */
 435	if (task_is_stopped(task) &&
 436	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
 437		signal_wake_up_state(task, __TASK_STOPPED);
 438
 439	spin_unlock(&task->sighand->siglock);
 440
 441	retval = 0;
 442unlock_tasklist:
 443	write_unlock_irq(&tasklist_lock);
 444unlock_creds:
 445	mutex_unlock(&task->signal->cred_guard_mutex);
 446out:
 447	if (!retval) {
 448		/*
 449		 * We do not bother to change retval or clear JOBCTL_TRAPPING
 450		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
 451		 * not return to user-mode, it will exit and clear this bit in
 452		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
 453		 * and until then nobody can ptrace this task.
 454		 */
 455		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
 456		proc_ptrace_connector(task, PTRACE_ATTACH);
 457	}
 458
 459	return retval;
 460}
 461
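/*
 * The userspace difference this function implements (sketch; pid is
 * hypothetical): PTRACE_ATTACH stops the tracee with SIGSTOP, while
 * PTRACE_SEIZE attaches without trapping and takes PTRACE_O_* bits
 * directly in the data argument, matching the flags handling above.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static int attach_without_stopping(pid_t pid)
{
	/* tracee keeps running; stop it later with PTRACE_INTERRUPT */
	return ptrace(PTRACE_SEIZE, pid, 0, (void *)PTRACE_O_TRACEEXIT);
}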
 462/**
 463 * ptrace_traceme  --  helper for PTRACE_TRACEME
 464 *
 465 * Performs checks and sets PT_PTRACED.
 466 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 467 */
 468static int ptrace_traceme(void)
 469{
 470	int ret = -EPERM;
 471
 472	write_lock_irq(&tasklist_lock);
 473	/* Are we already being traced? */
 474	if (!current->ptrace) {
 475		ret = security_ptrace_traceme(current->parent);
 476		/*
 477		 * Check PF_EXITING to ensure ->real_parent has not passed
 478		 * exit_ptrace(). Otherwise we don't report the error but
 479		 * pretend ->real_parent untraces us right after return.
 480		 */
 481		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 482			current->ptrace = PT_PTRACED;
 483			ptrace_link(current, current->real_parent);
 484		}
 485	}
 486	write_unlock_irq(&tasklist_lock);
 487
 488	return ret;
 489}
 490
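/*
 * Canonical userspace pairing for PTRACE_TRACEME (sketch, not part of
 * this file): the child marks itself traced by its real parent, and
 * the subsequent exec stops it with SIGTRAP before the new program
 * runs its first instruction.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t spawn_traced(const char *path, char *const argv[])
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		execv(path, argv);	/* on success, stops with SIGTRAP */
		_exit(127);
	}
	if (pid > 0)
		waitpid(pid, NULL, 0);	/* observe the exec stop */
	return pid;
}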
 491/*
  492 * Called with irqs disabled, returns true if children should reap themselves.
 493 */
 494static int ignoring_children(struct sighand_struct *sigh)
 495{
 496	int ret;
 497	spin_lock(&sigh->siglock);
 498	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
 499	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
 500	spin_unlock(&sigh->siglock);
 501	return ret;
 502}
 503
 504/*
 505 * Called with tasklist_lock held for writing.
 506 * Unlink a traced task, and clean it up if it was a traced zombie.
 507 * Return true if it needs to be reaped with release_task().
 508 * (We can't call release_task() here because we already hold tasklist_lock.)
 509 *
 510 * If it's a zombie, our attachedness prevented normal parent notification
 511 * or self-reaping.  Do notification now if it would have happened earlier.
 512 * If it should reap itself, return true.
 513 *
 514 * If it's our own child, there is no notification to do. But if our normal
 515 * children self-reap, then this child was prevented by ptrace and we must
  516 * reap it now; in that case we must also wake up sub-threads sleeping in
 517 * do_wait().
 518 */
 519static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 520{
 521	bool dead;
 522
 523	__ptrace_unlink(p);
 524
 525	if (p->exit_state != EXIT_ZOMBIE)
 526		return false;
 527
 528	dead = !thread_group_leader(p);
 529
 530	if (!dead && thread_group_empty(p)) {
 531		if (!same_thread_group(p->real_parent, tracer))
 532			dead = do_notify_parent(p, p->exit_signal);
 533		else if (ignoring_children(tracer->sighand)) {
 534			__wake_up_parent(p, tracer);
 535			dead = true;
 536		}
 537	}
 538	/* Mark it as in the process of being reaped. */
 539	if (dead)
 540		p->exit_state = EXIT_DEAD;
 541	return dead;
 542}
 543
 544static int ptrace_detach(struct task_struct *child, unsigned int data)
 545{
 546	if (!valid_signal(data))
 547		return -EIO;
 548
 549	/* Architecture-specific hardware disable .. */
 550	ptrace_disable(child);
 551
 552	write_lock_irq(&tasklist_lock);
 553	/*
 554	 * We rely on ptrace_freeze_traced(). It can't be killed and
 555	 * untraced by another thread, it can't be a zombie.
 556	 */
 557	WARN_ON(!child->ptrace || child->exit_state);
 558	/*
 559	 * tasklist_lock avoids the race with wait_task_stopped(), see
 560	 * the comment in ptrace_resume().
 561	 */
 562	child->exit_code = data;
 563	__ptrace_detach(current, child);
 564	write_unlock_irq(&tasklist_lock);
 565
 566	proc_ptrace_connector(child, PTRACE_DETACH);
 567
 568	return 0;
 569}
 570
 571/*
 572 * Detach all tasks we were using ptrace on. Called with tasklist held
 573 * for writing.
 574 */
 575void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
 576{
 577	struct task_struct *p, *n;
 578
 579	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 580		if (unlikely(p->ptrace & PT_EXITKILL))
 581			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
 582
 583		if (__ptrace_detach(tracer, p))
 584			list_add(&p->ptrace_entry, dead);
 585	}
 586}
 587
 588int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 589{
 590	int copied = 0;
 591
 592	while (len > 0) {
 593		char buf[128];
 594		int this_len, retval;
 595
 596		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 597		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
 598
 599		if (!retval) {
 600			if (copied)
 601				break;
 602			return -EIO;
 603		}
 604		if (copy_to_user(dst, buf, retval))
 605			return -EFAULT;
 606		copied += retval;
 607		src += retval;
 608		dst += retval;
 609		len -= retval;
 610	}
 611	return copied;
 612}
 613
 614int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 615{
 616	int copied = 0;
 617
 618	while (len > 0) {
 619		char buf[128];
 620		int this_len, retval;
 621
 622		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 623		if (copy_from_user(buf, src, this_len))
 624			return -EFAULT;
 625		retval = ptrace_access_vm(tsk, dst, buf, this_len,
 626				FOLL_FORCE | FOLL_WRITE);
 627		if (!retval) {
 628			if (copied)
 629				break;
 630			return -EIO;
 631		}
 632		copied += retval;
 633		src += retval;
 634		dst += retval;
 635		len -= retval;
 636	}
 637	return copied;
 638}
 639
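/*
 * Userspace counterpart of ptrace_readdata() above (sketch): the
 * classic PTRACE_PEEKDATA loop reads one word per call; errno must be
 * cleared first because -1 is also a valid word of tracee memory.
 */
#include <errno.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int peek_buf(pid_t pid, unsigned long addr, void *buf, size_t len)
{
	size_t off, chunk;
	long word;

	for (off = 0; off < len; off += sizeof(word)) {
		chunk = len - off < sizeof(word) ? len - off : sizeof(word);
		errno = 0;
		word = ptrace(PTRACE_PEEKDATA, pid, (void *)(addr + off), 0);
		if (word == -1 && errno)
			return -1;
		memcpy((char *)buf + off, &word, chunk);
	}
	return 0;
}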
 640static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 641{
 642	unsigned flags;
 643
 644	if (data & ~(unsigned long)PTRACE_O_MASK)
 645		return -EINVAL;
 646
 647	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
 648		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
 649		    !IS_ENABLED(CONFIG_SECCOMP))
 650			return -EINVAL;
 651
 652		if (!capable(CAP_SYS_ADMIN))
 653			return -EPERM;
 654
 655		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
 656		    current->ptrace & PT_SUSPEND_SECCOMP)
 657			return -EPERM;
 658	}
 659
 660	/* Avoid intermediate state when all opts are cleared */
 661	flags = child->ptrace;
 662	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
 663	flags |= (data << PT_OPT_FLAG_SHIFT);
 664	child->ptrace = flags;
 665
 666	return 0;
 667}
 668
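/*
 * Userspace side of the option shift above (sketch): PTRACE_O_* bits
 * are passed raw in data and land in task->ptrace at PT_OPT_FLAG_SHIFT;
 * PTRACE_O_EXITKILL corresponds to the PT_EXITKILL test in
 * exit_ptrace() above.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static int set_usual_options(pid_t pid)
{
	long opts = PTRACE_O_TRACESYSGOOD | PTRACE_O_EXITKILL;

	return ptrace(PTRACE_SETOPTIONS, pid, 0, (void *)opts);
}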
 669static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
 670{
 671	unsigned long flags;
 672	int error = -ESRCH;
 673
 674	if (lock_task_sighand(child, &flags)) {
 675		error = -EINVAL;
 676		if (likely(child->last_siginfo != NULL)) {
 677			copy_siginfo(info, child->last_siginfo);
 678			error = 0;
 679		}
 680		unlock_task_sighand(child, &flags);
 681	}
 682	return error;
 683}
 684
 685static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
 686{
 687	unsigned long flags;
 688	int error = -ESRCH;
 689
 690	if (lock_task_sighand(child, &flags)) {
 691		error = -EINVAL;
 692		if (likely(child->last_siginfo != NULL)) {
 693			copy_siginfo(child->last_siginfo, info);
 694			error = 0;
 695		}
 696		unlock_task_sighand(child, &flags);
 697	}
 698	return error;
 699}
 700
 701static int ptrace_peek_siginfo(struct task_struct *child,
 702				unsigned long addr,
 703				unsigned long data)
 704{
 705	struct ptrace_peeksiginfo_args arg;
 706	struct sigpending *pending;
 707	struct sigqueue *q;
 708	int ret, i;
 709
 710	ret = copy_from_user(&arg, (void __user *) addr,
 711				sizeof(struct ptrace_peeksiginfo_args));
 712	if (ret)
 713		return -EFAULT;
 714
 715	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
 716		return -EINVAL; /* unknown flags */
 717
 718	if (arg.nr < 0)
 719		return -EINVAL;
 720
 721	/* Ensure arg.off fits in an unsigned long */
 722	if (arg.off > ULONG_MAX)
 723		return 0;
 724
 725	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
 726		pending = &child->signal->shared_pending;
 727	else
 728		pending = &child->pending;
 729
 730	for (i = 0; i < arg.nr; ) {
 731		kernel_siginfo_t info;
 732		unsigned long off = arg.off + i;
 733		bool found = false;
 734
 735		spin_lock_irq(&child->sighand->siglock);
 736		list_for_each_entry(q, &pending->list, list) {
 737			if (!off--) {
 738				found = true;
 739				copy_siginfo(&info, &q->info);
 740				break;
 741			}
 742		}
 743		spin_unlock_irq(&child->sighand->siglock);
 744
 745		if (!found) /* beyond the end of the list */
 746			break;
 747
 748#ifdef CONFIG_COMPAT
 749		if (unlikely(in_compat_syscall())) {
 750			compat_siginfo_t __user *uinfo = compat_ptr(data);
 751
 752			if (copy_siginfo_to_user32(uinfo, &info)) {
 753				ret = -EFAULT;
 754				break;
 755			}
 756
 757		} else
 758#endif
 759		{
 760			siginfo_t __user *uinfo = (siginfo_t __user *) data;
 761
 762			if (copy_siginfo_to_user(uinfo, &info)) {
 763				ret = -EFAULT;
 764				break;
 765			}
 766		}
 767
 768		data += sizeof(siginfo_t);
 769		i++;
 770
 771		if (signal_pending(current))
 772			break;
 773
 774		cond_resched();
 775	}
 776
 777	if (i > 0)
 778		return i;
 779
 780	return ret;
 781}
 782
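/*
 * Userspace sketch of the loop above, assuming a glibc that exposes
 * struct __ptrace_peeksiginfo_args in <sys/ptrace.h>: addr carries the
 * args struct, data the output array, and the return value is the
 * number of siginfo entries actually copied.
 */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long peek_pending(pid_t pid, siginfo_t *out, int nr)
{
	struct __ptrace_peeksiginfo_args args = {
		.off	= 0,
		.flags	= 0,	/* per-thread queue, not PTRACE_PEEKSIGINFO_SHARED */
		.nr	= nr,
	};

	return ptrace(PTRACE_PEEKSIGINFO, pid, &args, out);
}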
 783#ifdef PTRACE_SINGLESTEP
 784#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
 785#else
 786#define is_singlestep(request)		0
 787#endif
 788
 789#ifdef PTRACE_SINGLEBLOCK
 790#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
 791#else
 792#define is_singleblock(request)		0
 793#endif
 794
 795#ifdef PTRACE_SYSEMU
 796#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
 797#else
 798#define is_sysemu_singlestep(request)	0
 799#endif
 800
 801static int ptrace_resume(struct task_struct *child, long request,
 802			 unsigned long data)
 803{
 804	bool need_siglock;
 805
 806	if (!valid_signal(data))
 807		return -EIO;
 808
 809	if (request == PTRACE_SYSCALL)
 810		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 811	else
 812		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 813
 814#ifdef TIF_SYSCALL_EMU
 815	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
 816		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 817	else
 818		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 819#endif
 820
 821	if (is_singleblock(request)) {
 822		if (unlikely(!arch_has_block_step()))
 823			return -EIO;
 824		user_enable_block_step(child);
 825	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 826		if (unlikely(!arch_has_single_step()))
 827			return -EIO;
 828		user_enable_single_step(child);
 829	} else {
 830		user_disable_single_step(child);
 831	}
 832
 833	/*
 834	 * Change ->exit_code and ->state under siglock to avoid the race
 835	 * with wait_task_stopped() in between; a non-zero ->exit_code will
 836	 * wrongly look like another report from tracee.
 837	 *
 838	 * Note that we need siglock even if ->exit_code == data and/or this
 839	 * status was not reported yet, the new status must not be cleared by
 840	 * wait_task_stopped() after resume.
 841	 *
 842	 * If data == 0 we do not care if wait_task_stopped() reports the old
 843	 * status and clears the code too; this can't race with the tracee, it
 844	 * takes siglock after resume.
 845	 */
 846	need_siglock = data && !thread_group_empty(current);
 847	if (need_siglock)
 848		spin_lock_irq(&child->sighand->siglock);
 849	child->exit_code = data;
 850	wake_up_state(child, __TASK_TRACED);
 851	if (need_siglock)
 852		spin_unlock_irq(&child->sighand->siglock);
 853
 854	return 0;
 855}
 856
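/*
 * The loop this function serves (sketch; pid is hypothetical): a
 * tracer resuming with PTRACE_SYSCALL gets one stop at syscall entry
 * and one at exit; data carries a signal to inject on resume, 0 for
 * none.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void syscall_stop_loop(pid_t pid)
{
	int status;

	for (;;) {
		if (ptrace(PTRACE_SYSCALL, pid, 0, 0) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
			break;
		/* entry/exit stop: inspect regs via PTRACE_GETREGSET here */
	}
}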
 857#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 858
 859static const struct user_regset *
 860find_regset(const struct user_regset_view *view, unsigned int type)
 861{
 862	const struct user_regset *regset;
 863	int n;
 864
 865	for (n = 0; n < view->n; ++n) {
 866		regset = view->regsets + n;
 867		if (regset->core_note_type == type)
 868			return regset;
 869	}
 870
 871	return NULL;
 872}
 873
 874static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 875			 struct iovec *kiov)
 876{
 877	const struct user_regset_view *view = task_user_regset_view(task);
 878	const struct user_regset *regset = find_regset(view, type);
 879	int regset_no;
 880
 881	if (!regset || (kiov->iov_len % regset->size) != 0)
 882		return -EINVAL;
 883
 884	regset_no = regset - view->regsets;
 885	kiov->iov_len = min(kiov->iov_len,
 886			    (__kernel_size_t) (regset->n * regset->size));
 887
 888	if (req == PTRACE_GETREGSET)
 889		return copy_regset_to_user(task, view, regset_no, 0,
 890					   kiov->iov_len, kiov->iov_base);
 891	else
 892		return copy_regset_from_user(task, view, regset_no, 0,
 893					     kiov->iov_len, kiov->iov_base);
 894}
 895
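/*
 * Userspace side of the iovec protocol above (sketch): the kernel
 * clamps iov_len to the regset size and writes the actual length back,
 * so callers can detect short transfers.
 */
#include <elf.h>		/* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>		/* struct user_regs_struct */

static int get_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len  = sizeof(*regs),
	};

	return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRSTATUS, &iov);
}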
 896/*
 897 * This is declared in linux/regset.h and defined in machine-dependent
 898 * code.  We put the export here, near the primary machine-neutral use,
 899 * to ensure no machine forgets it.
 900 */
 901EXPORT_SYMBOL_GPL(task_user_regset_view);
 902
 903static unsigned long
 904ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
 905			      struct ptrace_syscall_info *info)
 906{
 907	unsigned long args[ARRAY_SIZE(info->entry.args)];
 908	int i;
 909
 910	info->op = PTRACE_SYSCALL_INFO_ENTRY;
 911	info->entry.nr = syscall_get_nr(child, regs);
 912	syscall_get_arguments(child, regs, args);
 913	for (i = 0; i < ARRAY_SIZE(args); i++)
 914		info->entry.args[i] = args[i];
 915
 916	/* args is the last field in struct ptrace_syscall_info.entry */
 917	return offsetofend(struct ptrace_syscall_info, entry.args);
 918}
 919
 920static unsigned long
 921ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
 922				struct ptrace_syscall_info *info)
 923{
 924	/*
 925	 * As struct ptrace_syscall_info.entry is currently a subset
 926	 * of struct ptrace_syscall_info.seccomp, it makes sense to
 927	 * initialize that subset using ptrace_get_syscall_info_entry().
 928	 * This can be reconsidered in the future if these structures
 929	 * diverge significantly enough.
 930	 */
 931	ptrace_get_syscall_info_entry(child, regs, info);
 932	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
 933	info->seccomp.ret_data = child->ptrace_message;
 934
 935	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
 936	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
 937}
 938
 939static unsigned long
 940ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
 941			     struct ptrace_syscall_info *info)
 942{
 943	info->op = PTRACE_SYSCALL_INFO_EXIT;
 944	info->exit.rval = syscall_get_error(child, regs);
 945	info->exit.is_error = !!info->exit.rval;
 946	if (!info->exit.is_error)
 947		info->exit.rval = syscall_get_return_value(child, regs);
 948
 949	/* is_error is the last field in struct ptrace_syscall_info.exit */
 950	return offsetofend(struct ptrace_syscall_info, exit.is_error);
 951}
 952
 953static int
 954ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
 955			void __user *datavp)
 956{
 957	struct pt_regs *regs = task_pt_regs(child);
 958	struct ptrace_syscall_info info = {
 959		.op = PTRACE_SYSCALL_INFO_NONE,
 960		.arch = syscall_get_arch(child),
 961		.instruction_pointer = instruction_pointer(regs),
 962		.stack_pointer = user_stack_pointer(regs),
 963	};
 964	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
 965	unsigned long write_size;
 966
 967	/*
 968	 * This does not need lock_task_sighand() to access
 969	 * child->last_siginfo because ptrace_freeze_traced()
 970	 * called earlier by ptrace_check_attach() ensures that
 971	 * the tracee cannot go away and clear its last_siginfo.
 972	 */
 973	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
 974	case SIGTRAP | 0x80:
 975		switch (child->ptrace_message) {
 976		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
 977			actual_size = ptrace_get_syscall_info_entry(child, regs,
 978								    &info);
 979			break;
 980		case PTRACE_EVENTMSG_SYSCALL_EXIT:
 981			actual_size = ptrace_get_syscall_info_exit(child, regs,
 982								   &info);
 983			break;
 984		}
 985		break;
 986	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
 987		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
 988							      &info);
 989		break;
 990	}
 991
 992	write_size = min(actual_size, user_size);
 993	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
 994}
 995#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
 996
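/*
 * Caller-side sketch of the size protocol above, assuming a libc new
 * enough to expose PTRACE_GET_SYSCALL_INFO and struct
 * __ptrace_syscall_info in <sys/ptrace.h>: addr is the buffer size,
 * and the return value is the size the kernel wanted to write, so a
 * short buffer is detectable.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static int classify_stop(pid_t pid)
{
	struct __ptrace_syscall_info info;
	long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
			 (void *)sizeof(info), &info);

	if (sz < 0)
		return -1;
	return info.op;	/* PTRACE_SYSCALL_INFO_{NONE,ENTRY,EXIT,SECCOMP} */
}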
 997int ptrace_request(struct task_struct *child, long request,
 998		   unsigned long addr, unsigned long data)
 999{
1000	bool seized = child->ptrace & PT_SEIZED;
1001	int ret = -EIO;
1002	kernel_siginfo_t siginfo, *si;
1003	void __user *datavp = (void __user *) data;
1004	unsigned long __user *datalp = datavp;
1005	unsigned long flags;
1006
1007	switch (request) {
1008	case PTRACE_PEEKTEXT:
1009	case PTRACE_PEEKDATA:
1010		return generic_ptrace_peekdata(child, addr, data);
1011	case PTRACE_POKETEXT:
1012	case PTRACE_POKEDATA:
1013		return generic_ptrace_pokedata(child, addr, data);
1014
1015#ifdef PTRACE_OLDSETOPTIONS
1016	case PTRACE_OLDSETOPTIONS:
1017#endif
1018	case PTRACE_SETOPTIONS:
1019		ret = ptrace_setoptions(child, data);
1020		break;
1021	case PTRACE_GETEVENTMSG:
1022		ret = put_user(child->ptrace_message, datalp);
1023		break;
1024
1025	case PTRACE_PEEKSIGINFO:
1026		ret = ptrace_peek_siginfo(child, addr, data);
1027		break;
1028
1029	case PTRACE_GETSIGINFO:
1030		ret = ptrace_getsiginfo(child, &siginfo);
1031		if (!ret)
1032			ret = copy_siginfo_to_user(datavp, &siginfo);
1033		break;
1034
1035	case PTRACE_SETSIGINFO:
1036		ret = copy_siginfo_from_user(&siginfo, datavp);
1037		if (!ret)
1038			ret = ptrace_setsiginfo(child, &siginfo);
1039		break;
1040
1041	case PTRACE_GETSIGMASK: {
1042		sigset_t *mask;
1043
1044		if (addr != sizeof(sigset_t)) {
1045			ret = -EINVAL;
1046			break;
1047		}
1048
1049		if (test_tsk_restore_sigmask(child))
1050			mask = &child->saved_sigmask;
1051		else
1052			mask = &child->blocked;
1053
1054		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
1055			ret = -EFAULT;
1056		else
1057			ret = 0;
1058
1059		break;
1060	}
1061
1062	case PTRACE_SETSIGMASK: {
1063		sigset_t new_set;
1064
1065		if (addr != sizeof(sigset_t)) {
1066			ret = -EINVAL;
1067			break;
1068		}
1069
1070		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
1071			ret = -EFAULT;
1072			break;
1073		}
1074
1075		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1076
1077		/*
1078		 * Every thread does recalc_sigpending() after resume, so
1079		 * retarget_shared_pending() and recalc_sigpending() are not
1080		 * called here.
1081		 */
1082		spin_lock_irq(&child->sighand->siglock);
1083		child->blocked = new_set;
1084		spin_unlock_irq(&child->sighand->siglock);
1085
1086		clear_tsk_restore_sigmask(child);
1087
1088		ret = 0;
1089		break;
1090	}
1091
1092	case PTRACE_INTERRUPT:
1093		/*
1094		 * Stop tracee without any side-effect on signal or job
1095		 * control.  At least one trap is guaranteed to happen
1096		 * after this request.  If @child is already trapped, the
1097		 * current trap is not disturbed and another trap will
1098		 * happen after the current trap is ended with PTRACE_CONT.
1099		 *
1100		 * The actual trap might not be PTRACE_EVENT_STOP trap but
1101		 * the pending condition is cleared regardless.
1102		 */
1103		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1104			break;
1105
1106		/*
1107		 * INTERRUPT doesn't disturb existing trap sans one
1108		 * exception.  If ptracer issued LISTEN for the current
1109		 * STOP, this INTERRUPT should clear LISTEN and re-trap
1110		 * tracee into STOP.
1111		 */
1112		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
1113			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
1114
1115		unlock_task_sighand(child, &flags);
1116		ret = 0;
1117		break;
1118
1119	case PTRACE_LISTEN:
1120		/*
1121		 * Listen for events.  Tracee must be in STOP.  It's not
 1122		 * resumed per se but is not considered to be in TRACED by
1123		 * wait(2) or ptrace(2).  If an async event (e.g. group
1124		 * stop state change) happens, tracee will enter STOP trap
1125		 * again.  Alternatively, ptracer can issue INTERRUPT to
1126		 * finish listening and re-trap tracee into STOP.
1127		 */
1128		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1129			break;
1130
1131		si = child->last_siginfo;
1132		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
1133			child->jobctl |= JOBCTL_LISTENING;
1134			/*
1135			 * If NOTIFY is set, it means event happened between
1136			 * start of this trap and now.  Trigger re-trap.
1137			 */
1138			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
1139				ptrace_signal_wake_up(child, true);
1140			ret = 0;
1141		}
1142		unlock_task_sighand(child, &flags);
1143		break;
1144
1145	case PTRACE_DETACH:	 /* detach a process that was attached. */
1146		ret = ptrace_detach(child, data);
1147		break;
1148
1149#ifdef CONFIG_BINFMT_ELF_FDPIC
1150	case PTRACE_GETFDPIC: {
1151		struct mm_struct *mm = get_task_mm(child);
1152		unsigned long tmp = 0;
1153
1154		ret = -ESRCH;
1155		if (!mm)
1156			break;
1157
1158		switch (addr) {
1159		case PTRACE_GETFDPIC_EXEC:
1160			tmp = mm->context.exec_fdpic_loadmap;
1161			break;
1162		case PTRACE_GETFDPIC_INTERP:
1163			tmp = mm->context.interp_fdpic_loadmap;
1164			break;
1165		default:
1166			break;
1167		}
1168		mmput(mm);
1169
1170		ret = put_user(tmp, datalp);
1171		break;
1172	}
1173#endif
1174
1175#ifdef PTRACE_SINGLESTEP
1176	case PTRACE_SINGLESTEP:
1177#endif
1178#ifdef PTRACE_SINGLEBLOCK
1179	case PTRACE_SINGLEBLOCK:
1180#endif
1181#ifdef PTRACE_SYSEMU
1182	case PTRACE_SYSEMU:
1183	case PTRACE_SYSEMU_SINGLESTEP:
1184#endif
1185	case PTRACE_SYSCALL:
1186	case PTRACE_CONT:
1187		return ptrace_resume(child, request, data);
1188
1189	case PTRACE_KILL:
1190		if (child->exit_state)	/* already dead */
1191			return 0;
1192		return ptrace_resume(child, request, SIGKILL);
1193
1194#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1195	case PTRACE_GETREGSET:
1196	case PTRACE_SETREGSET: {
1197		struct iovec kiov;
1198		struct iovec __user *uiov = datavp;
1199
1200		if (!access_ok(uiov, sizeof(*uiov)))
1201			return -EFAULT;
1202
1203		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
1204		    __get_user(kiov.iov_len, &uiov->iov_len))
1205			return -EFAULT;
1206
1207		ret = ptrace_regset(child, request, addr, &kiov);
1208		if (!ret)
1209			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1210		break;
1211	}
1212
1213	case PTRACE_GET_SYSCALL_INFO:
1214		ret = ptrace_get_syscall_info(child, addr, datavp);
1215		break;
1216#endif
1217
1218	case PTRACE_SECCOMP_GET_FILTER:
1219		ret = seccomp_get_filter(child, addr, datavp);
1220		break;
1221
1222	case PTRACE_SECCOMP_GET_METADATA:
1223		ret = seccomp_get_metadata(child, addr, datavp);
1224		break;
1225
1226	default:
1227		break;
1228	}
1229
1230	return ret;
1231}
1232
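/*
 * Sketch of the INTERRUPT/LISTEN protocol documented above (pid is
 * hypothetical): after a group-stop trap under PTRACE_SEIZE,
 * PTRACE_LISTEN lets the tracee stay stopped like ordinary job
 * control, and PTRACE_INTERRUPT later re-traps it into ptrace-stop.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void listen_then_interrupt(pid_t pid)
{
	siginfo_t si;
	int status;

	waitpid(pid, &status, 0);
	if (!WIFSTOPPED(status))
		return;
	ptrace(PTRACE_LISTEN, pid, 0, 0);	/* don't hold the stop */
	waitid(P_PID, pid, &si, WSTOPPED | WNOWAIT);	/* async event */
	ptrace(PTRACE_INTERRUPT, pid, 0, 0);	/* re-trap into STOP */
}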
1233#ifndef arch_ptrace_attach
1234#define arch_ptrace_attach(child)	do { } while (0)
1235#endif
1236
1237SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
1238		unsigned long, data)
1239{
1240	struct task_struct *child;
1241	long ret;
1242
1243	if (request == PTRACE_TRACEME) {
1244		ret = ptrace_traceme();
1245		if (!ret)
1246			arch_ptrace_attach(current);
1247		goto out;
1248	}
1249
1250	child = find_get_task_by_vpid(pid);
1251	if (!child) {
1252		ret = -ESRCH;
1253		goto out;
1254	}
1255
1256	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1257		ret = ptrace_attach(child, request, addr, data);
1258		/*
1259		 * Some architectures need to do book-keeping after
1260		 * a ptrace attach.
1261		 */
1262		if (!ret)
1263			arch_ptrace_attach(child);
1264		goto out_put_task_struct;
1265	}
1266
1267	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1268				  request == PTRACE_INTERRUPT);
1269	if (ret < 0)
1270		goto out_put_task_struct;
1271
1272	ret = arch_ptrace(child, request, addr, data);
1273	if (ret || request != PTRACE_DETACH)
1274		ptrace_unfreeze_traced(child);
1275
1276 out_put_task_struct:
1277	put_task_struct(child);
1278 out:
1279	return ret;
1280}
1281
1282int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1283			    unsigned long data)
1284{
1285	unsigned long tmp;
1286	int copied;
1287
1288	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1289	if (copied != sizeof(tmp))
1290		return -EIO;
1291	return put_user(tmp, (unsigned long __user *)data);
1292}
1293
1294int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1295			    unsigned long data)
1296{
1297	int copied;
1298
1299	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
1300			FOLL_FORCE | FOLL_WRITE);
1301	return (copied == sizeof(data)) ? 0 : -EIO;
1302}
1303
1304#if defined CONFIG_COMPAT
1305
1306int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1307			  compat_ulong_t addr, compat_ulong_t data)
1308{
1309	compat_ulong_t __user *datap = compat_ptr(data);
1310	compat_ulong_t word;
1311	kernel_siginfo_t siginfo;
1312	int ret;
1313
1314	switch (request) {
1315	case PTRACE_PEEKTEXT:
1316	case PTRACE_PEEKDATA:
1317		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
1318				FOLL_FORCE);
1319		if (ret != sizeof(word))
1320			ret = -EIO;
1321		else
1322			ret = put_user(word, datap);
1323		break;
1324
1325	case PTRACE_POKETEXT:
1326	case PTRACE_POKEDATA:
1327		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
1328				FOLL_FORCE | FOLL_WRITE);
1329		ret = (ret != sizeof(data) ? -EIO : 0);
1330		break;
1331
1332	case PTRACE_GETEVENTMSG:
1333		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
1334		break;
1335
1336	case PTRACE_GETSIGINFO:
1337		ret = ptrace_getsiginfo(child, &siginfo);
1338		if (!ret)
1339			ret = copy_siginfo_to_user32(
1340				(struct compat_siginfo __user *) datap,
1341				&siginfo);
1342		break;
1343
1344	case PTRACE_SETSIGINFO:
1345		ret = copy_siginfo_from_user32(
1346			&siginfo, (struct compat_siginfo __user *) datap);
1347		if (!ret)
1348			ret = ptrace_setsiginfo(child, &siginfo);
1349		break;
1350#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1351	case PTRACE_GETREGSET:
1352	case PTRACE_SETREGSET:
1353	{
1354		struct iovec kiov;
1355		struct compat_iovec __user *uiov =
1356			(struct compat_iovec __user *) datap;
1357		compat_uptr_t ptr;
1358		compat_size_t len;
1359
1360		if (!access_ok(uiov, sizeof(*uiov)))
1361			return -EFAULT;
1362
1363		if (__get_user(ptr, &uiov->iov_base) ||
1364		    __get_user(len, &uiov->iov_len))
1365			return -EFAULT;
1366
1367		kiov.iov_base = compat_ptr(ptr);
1368		kiov.iov_len = len;
1369
1370		ret = ptrace_regset(child, request, addr, &kiov);
1371		if (!ret)
1372			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1373		break;
1374	}
1375#endif
1376
1377	default:
1378		ret = ptrace_request(child, request, addr, data);
1379	}
1380
1381	return ret;
1382}
1383
1384COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
1385		       compat_long_t, addr, compat_long_t, data)
1386{
1387	struct task_struct *child;
1388	long ret;
1389
1390	if (request == PTRACE_TRACEME) {
1391		ret = ptrace_traceme();
1392		goto out;
1393	}
1394
1395	child = find_get_task_by_vpid(pid);
1396	if (!child) {
1397		ret = -ESRCH;
1398		goto out;
1399	}
1400
1401	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1402		ret = ptrace_attach(child, request, addr, data);
1403		/*
1404		 * Some architectures need to do book-keeping after
1405		 * a ptrace attach.
1406		 */
1407		if (!ret)
1408			arch_ptrace_attach(child);
1409		goto out_put_task_struct;
1410	}
1411
1412	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1413				  request == PTRACE_INTERRUPT);
1414	if (!ret) {
1415		ret = compat_arch_ptrace(child, request, addr, data);
1416		if (ret || request != PTRACE_DETACH)
1417			ptrace_unfreeze_traced(child);
1418	}
1419
1420 out_put_task_struct:
1421	put_task_struct(child);
1422 out:
1423	return ret;
1424}
1425#endif	/* CONFIG_COMPAT */
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/ptrace.c
   4 *
   5 * (C) Copyright 1999 Linus Torvalds
   6 *
   7 * Common interfaces for "ptrace()" which we do not want
   8 * to continually duplicate across every architecture.
   9 */
  10
  11#include <linux/capability.h>
  12#include <linux/export.h>
  13#include <linux/sched.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/coredump.h>
  16#include <linux/sched/task.h>
  17#include <linux/errno.h>
  18#include <linux/mm.h>
  19#include <linux/highmem.h>
  20#include <linux/pagemap.h>
  21#include <linux/ptrace.h>
  22#include <linux/security.h>
  23#include <linux/signal.h>
  24#include <linux/uio.h>
  25#include <linux/audit.h>
  26#include <linux/pid_namespace.h>
  27#include <linux/syscalls.h>
  28#include <linux/uaccess.h>
  29#include <linux/regset.h>
  30#include <linux/hw_breakpoint.h>
  31#include <linux/cn_proc.h>
  32#include <linux/compat.h>
  33#include <linux/sched/signal.h>
  34#include <linux/minmax.h>
  35
  36#include <asm/syscall.h>	/* for syscall_get_* */
  37
  38/*
  39 * Access another process' address space via ptrace.
   40 * Source/target buffer must be in kernel space;
   41 * do not walk the page table directly, use get_user_pages.
  42 */
  43int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
  44		     void *buf, int len, unsigned int gup_flags)
  45{
  46	struct mm_struct *mm;
  47	int ret;
  48
  49	mm = get_task_mm(tsk);
  50	if (!mm)
  51		return 0;
  52
  53	if (!tsk->ptrace ||
  54	    (current != tsk->parent) ||
  55	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
  56	     !ptracer_capable(tsk, mm->user_ns))) {
  57		mmput(mm);
  58		return 0;
  59	}
  60
  61	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
  62	mmput(mm);
  63
  64	return ret;
  65}
  66
  67
  68void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
  69		   const struct cred *ptracer_cred)
  70{
  71	BUG_ON(!list_empty(&child->ptrace_entry));
  72	list_add(&child->ptrace_entry, &new_parent->ptraced);
  73	child->parent = new_parent;
  74	child->ptracer_cred = get_cred(ptracer_cred);
  75}
  76
  77/*
  78 * ptrace a task: make the debugger its new parent and
  79 * move it to the ptrace list.
  80 *
  81 * Must be called with the tasklist lock write-held.
  82 */
  83static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
  84{
  85	__ptrace_link(child, new_parent, current_cred());
  86}
  87
  88/**
  89 * __ptrace_unlink - unlink ptracee and restore its execution state
  90 * @child: ptracee to be unlinked
  91 *
  92 * Remove @child from the ptrace list, move it back to the original parent,
  93 * and restore the execution state so that it conforms to the group stop
  94 * state.
  95 *
  96 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
  97 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
  98 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
  99 * If the ptracer is exiting, the ptracee can be in any state.
 100 *
 101 * After detach, the ptracee should be in a state which conforms to the
 102 * group stop.  If the group is stopped or in the process of stopping, the
 103 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 104 * up from TASK_TRACED.
 105 *
 106 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 107 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 108 * to but in the opposite direction of what happens while attaching to a
 109 * stopped task.  However, in this direction, the intermediate RUNNING
 110 * state is not hidden even from the current ptracer and if it immediately
 111 * re-attaches and performs a WNOHANG wait(2), it may fail.
 112 *
 113 * CONTEXT:
 114 * write_lock_irq(tasklist_lock)
 115 */
 116void __ptrace_unlink(struct task_struct *child)
 117{
 118	const struct cred *old_cred;
 119	BUG_ON(!child->ptrace);
 120
 121	clear_task_syscall_work(child, SYSCALL_TRACE);
 122#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
 123	clear_task_syscall_work(child, SYSCALL_EMU);
 124#endif
 125
 126	child->parent = child->real_parent;
 127	list_del_init(&child->ptrace_entry);
 128	old_cred = child->ptracer_cred;
 129	child->ptracer_cred = NULL;
 130	put_cred(old_cred);
 131
 132	spin_lock(&child->sighand->siglock);
 133	child->ptrace = 0;
 134	/*
 135	 * Clear all pending traps and TRAPPING.  TRAPPING should be
 136	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
 137	 */
 138	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
 139	task_clear_jobctl_trapping(child);
 140
 141	/*
 142	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
 143	 * @child isn't dead.
 144	 */
 145	if (!(child->flags & PF_EXITING) &&
 146	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
 147	     child->signal->group_stop_count)) {
 148		child->jobctl |= JOBCTL_STOP_PENDING;
 149
 150		/*
 151		 * This is only possible if this thread was cloned by the
 152		 * traced task running in the stopped group, set the signal
 153		 * for the future reports.
 154		 * FIXME: we should change ptrace_init_task() to handle this
 155		 * case.
 156		 */
 157		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
 158			child->jobctl |= SIGSTOP;
 159	}
 160
 161	/*
 162	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 163	 * @child in the butt.  Note that @resume should be used iff @child
 164	 * is in TASK_TRACED; otherwise, we might unduly disrupt
 165	 * TASK_KILLABLE sleeps.
 166	 */
 167	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
 168		ptrace_signal_wake_up(child, true);
 169
 170	spin_unlock(&child->sighand->siglock);
 171}
 172
 173static bool looks_like_a_spurious_pid(struct task_struct *task)
 174{
 175	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
 176		return false;
 177
 178	if (task_pid_vnr(task) == task->ptrace_message)
 179		return false;
 180	/*
 181	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
 182	 * was not wait()'ed, most probably debugger targets the old
 183	 * leader which was destroyed in de_thread().
 184	 */
 185	return true;
 186}
 187
 188/* Ensure that nothing can wake it up, even SIGKILL */
 189static bool ptrace_freeze_traced(struct task_struct *task)
 190{
 191	bool ret = false;
 192
 193	/* Lockless, nobody but us can set this flag */
 194	if (task->jobctl & JOBCTL_LISTENING)
 195		return ret;
 196
 197	spin_lock_irq(&task->sighand->siglock);
 198	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
 199	    !__fatal_signal_pending(task)) {
 200		WRITE_ONCE(task->__state, __TASK_TRACED);
 201		ret = true;
 202	}
 203	spin_unlock_irq(&task->sighand->siglock);
 204
 205	return ret;
 206}
 207
 208static void ptrace_unfreeze_traced(struct task_struct *task)
 209{
 210	if (READ_ONCE(task->__state) != __TASK_TRACED)
 211		return;
 212
 213	WARN_ON(!task->ptrace || task->parent != current);
 214
 215	/*
 216	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
 217	 * Recheck state under the lock to close this race.
 218	 */
 219	spin_lock_irq(&task->sighand->siglock);
 220	if (READ_ONCE(task->__state) == __TASK_TRACED) {
 221		if (__fatal_signal_pending(task))
 222			wake_up_state(task, __TASK_TRACED);
 223		else
 224			WRITE_ONCE(task->__state, TASK_TRACED);
 225	}
 226	spin_unlock_irq(&task->sighand->siglock);
 227}
 228
 229/**
 230 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 231 * @child: ptracee to check for
 232 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 233 *
 234 * Check whether @child is being ptraced by %current and ready for further
 235 * ptrace operations.  If @ignore_state is %false, @child also should be in
 236 * %TASK_TRACED state and on return the child is guaranteed to be traced
 237 * and not executing.  If @ignore_state is %true, @child can be in any
 238 * state.
 239 *
 240 * CONTEXT:
 241 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 242 *
 243 * RETURNS:
 244 * 0 on success, -ESRCH if %child is not ready.
 245 */
 246static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 247{
 248	int ret = -ESRCH;
 249
 250	/*
 251	 * We take the read lock around doing both checks to close a
 252	 * possible race where someone else was tracing our child and
 253	 * detached between these two checks.  After this locked check,
 254	 * we are sure that this is our traced child and that can only
 255	 * be changed by us so it's not changing right after this.
 256	 */
 257	read_lock(&tasklist_lock);
 258	if (child->ptrace && child->parent == current) {
 259		WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
 260		/*
 261		 * child->sighand can't be NULL, release_task()
 262		 * does ptrace_unlink() before __exit_signal().
 263		 */
 264		if (ignore_state || ptrace_freeze_traced(child))
 265			ret = 0;
 266	}
 267	read_unlock(&tasklist_lock);
 268
 269	if (!ret && !ignore_state) {
 270		if (!wait_task_inactive(child, __TASK_TRACED)) {
 271			/*
 272			 * This can only happen if may_ptrace_stop() fails and
 273			 * ptrace_stop() changes ->state back to TASK_RUNNING,
 274			 * so we should not worry about leaking __TASK_TRACED.
 275			 */
 276			WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
 277			ret = -ESRCH;
 278		}
 279	}
 280
 281	return ret;
 282}
 283
 284static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 285{
 286	if (mode & PTRACE_MODE_NOAUDIT)
 287		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
 288	return ns_capable(ns, CAP_SYS_PTRACE);
 289}
 290
 291/* Returns 0 on success, -errno on denial. */
 292static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 293{
 294	const struct cred *cred = current_cred(), *tcred;
 295	struct mm_struct *mm;
 296	kuid_t caller_uid;
 297	kgid_t caller_gid;
 298
 299	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
 300		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
 301		return -EPERM;
 302	}
 303
 304	/* May we inspect the given task?
 305	 * This check is used both for attaching with ptrace
 306	 * and for allowing access to sensitive information in /proc.
 307	 *
 308	 * ptrace_attach denies several cases that /proc allows
 309	 * because setting up the necessary parent/child relationship
 310	 * or halting the specified task is impossible.
 311	 */
 312
 313	/* Don't let security modules deny introspection */
 314	if (same_thread_group(task, current))
 315		return 0;
 316	rcu_read_lock();
 317	if (mode & PTRACE_MODE_FSCREDS) {
 318		caller_uid = cred->fsuid;
 319		caller_gid = cred->fsgid;
 320	} else {
 321		/*
 322		 * Using the euid would make more sense here, but something
 323		 * in userland might rely on the old behavior, and this
 324		 * shouldn't be a security problem since
 325		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
 326		 * used a syscall that requests access to another process
 327		 * (and not a filesystem syscall to procfs).
 328		 */
 329		caller_uid = cred->uid;
 330		caller_gid = cred->gid;
 331	}
 332	tcred = __task_cred(task);
 333	if (uid_eq(caller_uid, tcred->euid) &&
 334	    uid_eq(caller_uid, tcred->suid) &&
 335	    uid_eq(caller_uid, tcred->uid)  &&
 336	    gid_eq(caller_gid, tcred->egid) &&
 337	    gid_eq(caller_gid, tcred->sgid) &&
 338	    gid_eq(caller_gid, tcred->gid))
 339		goto ok;
 340	if (ptrace_has_cap(tcred->user_ns, mode))
 341		goto ok;
 342	rcu_read_unlock();
 343	return -EPERM;
 344ok:
 345	rcu_read_unlock();
 346	/*
 347	 * If a task drops privileges and becomes nondumpable (through a syscall
 348	 * like setresuid()) while we are trying to access it, we must ensure
 349	 * that the dumpability is read after the credentials; otherwise,
 350	 * we may be able to attach to a task that we shouldn't be able to
 351	 * attach to (as if the task had dropped privileges without becoming
 352	 * nondumpable).
 353	 * Pairs with a write barrier in commit_creds().
 354	 */
 355	smp_rmb();
 356	mm = task->mm;
 357	if (mm &&
 358	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
 359	     !ptrace_has_cap(mm->user_ns, mode)))
 360	    return -EPERM;
 361
 362	return security_ptrace_access_check(task, mode);
 363}
 364
 365bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 366{
 367	int err;
 368	task_lock(task);
 369	err = __ptrace_may_access(task, mode);
 370	task_unlock(task);
 371	return !err;
 372}
 373
 374static int ptrace_attach(struct task_struct *task, long request,
 375			 unsigned long addr,
 376			 unsigned long flags)
 377{
 378	bool seize = (request == PTRACE_SEIZE);
 379	int retval;
 380
 381	retval = -EIO;
 382	if (seize) {
 383		if (addr != 0)
 384			goto out;
 385		if (flags & ~(unsigned long)PTRACE_O_MASK)
 386			goto out;
 387		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
 388	} else {
 389		flags = PT_PTRACED;
 390	}
 391
 392	audit_ptrace(task);
 393
 394	retval = -EPERM;
 395	if (unlikely(task->flags & PF_KTHREAD))
 396		goto out;
 397	if (same_thread_group(task, current))
 398		goto out;
 399
 400	/*
 401	 * Protect exec's credential calculations against our interference;
 402	 * SUID, SGID and LSM creds get determined differently
 403	 * under ptrace.
 404	 */
 405	retval = -ERESTARTNOINTR;
 406	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
 407		goto out;
 408
 409	task_lock(task);
 410	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
 411	task_unlock(task);
 412	if (retval)
 413		goto unlock_creds;
 414
 415	write_lock_irq(&tasklist_lock);
 416	retval = -EPERM;
 417	if (unlikely(task->exit_state))
 418		goto unlock_tasklist;
 419	if (task->ptrace)
 420		goto unlock_tasklist;
 421
 424	task->ptrace = flags;
 425
 426	ptrace_link(task, current);
 427
 428	/* SEIZE doesn't trap tracee on attach */
 429	if (!seize)
 430		send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
 431
 432	spin_lock(&task->sighand->siglock);
 433
 434	/*
 435	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
 436	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
 437	 * will be cleared if the child completes the transition or any
 438	 * event which clears the group stop states happens.  We'll wait
 439	 * for the transition to complete before returning from this
 440	 * function.
 441	 *
 442	 * This hides STOPPED -> RUNNING -> TRACED transition from the
 443	 * attaching thread but a different thread in the same group can
 444	 * still observe the transient RUNNING state.  IOW, if another
 445	 * thread's WNOHANG wait(2) on the stopped tracee races against
 446	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
 447	 *
 448	 * The following task_is_stopped() test is safe as both transitions
 449	 * in and out of STOPPED are protected by siglock.
 450	 */
 451	if (task_is_stopped(task) &&
 452	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
 453		signal_wake_up_state(task, __TASK_STOPPED);
 454
 455	spin_unlock(&task->sighand->siglock);
 456
 457	retval = 0;
 458unlock_tasklist:
 459	write_unlock_irq(&tasklist_lock);
 460unlock_creds:
 461	mutex_unlock(&task->signal->cred_guard_mutex);
 462out:
 463	if (!retval) {
 464		/*
 465		 * We do not bother to change retval or clear JOBCTL_TRAPPING
 466		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
 467		 * not return to user-mode, it will exit and clear this bit in
 468		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
 469		 * and until then nobody can ptrace this task.
 470		 */
 471		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
 472		proc_ptrace_connector(task, PTRACE_ATTACH);
 473	}
 474
 475	return retval;
 476}
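
/*
 * Example (userspace, illustrative): a typical tracer pairs PTRACE_ATTACH
 * with waitpid() for the SIGSTOP sent above, while PTRACE_SEIZE attaches
 * without trapping the tracee.  Error handling is trimmed; attach_stopped()
 * is a hypothetical helper.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	static int attach_stopped(pid_t pid)
 *	{
 *		int status;
 *
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *			return -1;
 *		if (waitpid(pid, &status, 0) == -1)
 *			return -1;
 *		return WIFSTOPPED(status) ? 0 : -1;
 *	}
 */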
 477
 478/**
 479 * ptrace_traceme  --  helper for PTRACE_TRACEME
 480 *
 481 * Performs checks and sets PT_PTRACED.
 482 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 483 */
 484static int ptrace_traceme(void)
 485{
 486	int ret = -EPERM;
 487
 488	write_lock_irq(&tasklist_lock);
 489	/* Are we already being traced? */
 490	if (!current->ptrace) {
 491		ret = security_ptrace_traceme(current->parent);
 492		/*
 493		 * Check PF_EXITING to ensure ->real_parent has not passed
 494		 * exit_ptrace(). Otherwise we don't report the error but
 495		 * pretend ->real_parent untraces us right after return.
 496		 */
 497		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 498			current->ptrace = PT_PTRACED;
 499			ptrace_link(current, current->real_parent);
 500		}
 501	}
 502	write_unlock_irq(&tasklist_lock);
 503
 504	return ret;
 505}
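
/*
 * Example (userspace, illustrative): PTRACE_TRACEME is issued by the child
 * itself, typically right before execve(); the exec then stops the child
 * with SIGTRAP so the parent can set options before any target code runs.
 *
 *	#include <sys/ptrace.h>
 *	#include <unistd.h>
 *
 *	// in the fork()ed child:
 *	if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) == -1)
 *		_exit(127);
 *	execlp("true", "true", (char *)NULL);
 *	_exit(127);
 */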
 506
 507/*
 508 * Called with irqs disabled; returns true if children should reap themselves.
 509 */
 510static int ignoring_children(struct sighand_struct *sigh)
 511{
 512	int ret;
 513	spin_lock(&sigh->siglock);
 514	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
 515	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
 516	spin_unlock(&sigh->siglock);
 517	return ret;
 518}
 519
 520/*
 521 * Called with tasklist_lock held for writing.
 522 * Unlink a traced task, and clean it up if it was a traced zombie.
 523 * Return true if it needs to be reaped with release_task().
 524 * (We can't call release_task() here because we already hold tasklist_lock.)
 525 *
 526 * If it's a zombie, our attachedness prevented normal parent notification
 527 * or self-reaping.  Do notification now if it would have happened earlier.
 528 * If it should reap itself, return true.
 529 *
 530 * If it's our own child, there is no notification to do. But if our normal
 531 * children self-reap, then this child was prevented by ptrace and we must
 532 * reap it now; in that case we must also wake up sub-threads sleeping in
 533 * do_wait().
 534 */
 535static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 536{
 537	bool dead;
 538
 539	__ptrace_unlink(p);
 540
 541	if (p->exit_state != EXIT_ZOMBIE)
 542		return false;
 543
 544	dead = !thread_group_leader(p);
 545
 546	if (!dead && thread_group_empty(p)) {
 547		if (!same_thread_group(p->real_parent, tracer))
 548			dead = do_notify_parent(p, p->exit_signal);
 549		else if (ignoring_children(tracer->sighand)) {
 550			__wake_up_parent(p, tracer);
 551			dead = true;
 552		}
 553	}
 554	/* Mark it as in the process of being reaped. */
 555	if (dead)
 556		p->exit_state = EXIT_DEAD;
 557	return dead;
 558}
 559
 560static int ptrace_detach(struct task_struct *child, unsigned int data)
 561{
 562	if (!valid_signal(data))
 563		return -EIO;
 564
 565	/* Architecture-specific hardware disable. */
 566	ptrace_disable(child);
 567
 568	write_lock_irq(&tasklist_lock);
 569	/*
 570	 * We rely on ptrace_freeze_traced(): the child can't be killed and
 571	 * untraced by another thread, and it can't be a zombie.
 572	 */
 573	WARN_ON(!child->ptrace || child->exit_state);
 574	/*
 575	 * tasklist_lock avoids the race with wait_task_stopped(), see
 576	 * the comment in ptrace_resume().
 577	 */
 578	child->exit_code = data;
 579	__ptrace_detach(current, child);
 580	write_unlock_irq(&tasklist_lock);
 581
 582	proc_ptrace_connector(child, PTRACE_DETACH);
 583
 584	return 0;
 585}
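
/*
 * Example (userspace, illustrative): the tracee must be in a ptrace-stop;
 * a nonzero data argument (checked by valid_signal() above) is delivered
 * to it as a signal when it resumes.
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGCONT);
 */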
 586
 587/*
 588 * Detach all tasks we were using ptrace on. Called with tasklist_lock held
 589 * for writing.
 590 */
 591void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
 592{
 593	struct task_struct *p, *n;
 594
 595	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 596		if (unlikely(p->ptrace & PT_EXITKILL))
 597			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
 598
 599		if (__ptrace_detach(tracer, p))
 600			list_add(&p->ptrace_entry, dead);
 601	}
 602}
 603
 604int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 605{
 606	int copied = 0;
 607
 608	while (len > 0) {
 609		char buf[128];
 610		int this_len, retval;
 611
 612		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 613		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
 614
 615		if (!retval) {
 616			if (copied)
 617				break;
 618			return -EIO;
 619		}
 620		if (copy_to_user(dst, buf, retval))
 621			return -EFAULT;
 622		copied += retval;
 623		src += retval;
 624		dst += retval;
 625		len -= retval;
 626	}
 627	return copied;
 628}
 629
 630int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 631{
 632	int copied = 0;
 633
 634	while (len > 0) {
 635		char buf[128];
 636		int this_len, retval;
 637
 638		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 639		if (copy_from_user(buf, src, this_len))
 640			return -EFAULT;
 641		retval = ptrace_access_vm(tsk, dst, buf, this_len,
 642				FOLL_FORCE | FOLL_WRITE);
 643		if (!retval) {
 644			if (copied)
 645				break;
 646			return -EIO;
 647		}
 648		copied += retval;
 649		src += retval;
 650		dst += retval;
 651		len -= retval;
 652	}
 653	return copied;
 654}
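
/*
 * Note: the two helpers above chunk the copy through a 128-byte bounce
 * buffer.  From userspace, bulk reads of a tracee's memory are usually
 * done with process_vm_readv(), which is subject to equivalent ptrace
 * access checks.  Illustrative sketch; remote_addr is a placeholder.
 *
 *	#include <sys/uio.h>
 *
 *	char buf[4096];
 *	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct iovec remote = { .iov_base = (void *)remote_addr,
 *				.iov_len = sizeof(buf) };
 *	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
 */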
 655
 656static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 657{
 658	unsigned flags;
 659
 660	if (data & ~(unsigned long)PTRACE_O_MASK)
 661		return -EINVAL;
 662
 663	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
 664		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
 665		    !IS_ENABLED(CONFIG_SECCOMP))
 666			return -EINVAL;
 667
 668		if (!capable(CAP_SYS_ADMIN))
 669			return -EPERM;
 670
 671		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
 672		    current->ptrace & PT_SUSPEND_SECCOMP)
 673			return -EPERM;
 674	}
 675
 676	/* Avoid intermediate state when all opts are cleared */
 677	flags = child->ptrace;
 678	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
 679	flags |= (data << PT_OPT_FLAG_SHIFT);
 680	child->ptrace = flags;
 681
 682	return 0;
 683}
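
/*
 * Example (userspace, illustrative): options are usually set at the first
 * stop after attach.  PTRACE_O_TRACESYSGOOD makes syscall stops report
 * SIGTRAP | 0x80, and PTRACE_O_EXITKILL (PT_EXITKILL above) kills the
 * tracee if the tracer exits.
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_EXITKILL));
 */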
 684
 685static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
 686{
 687	unsigned long flags;
 688	int error = -ESRCH;
 689
 690	if (lock_task_sighand(child, &flags)) {
 691		error = -EINVAL;
 692		if (likely(child->last_siginfo != NULL)) {
 693			copy_siginfo(info, child->last_siginfo);
 694			error = 0;
 695		}
 696		unlock_task_sighand(child, &flags);
 697	}
 698	return error;
 699}
 700
 701static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
 702{
 703	unsigned long flags;
 704	int error = -ESRCH;
 705
 706	if (lock_task_sighand(child, &flags)) {
 707		error = -EINVAL;
 708		if (likely(child->last_siginfo != NULL)) {
 709			copy_siginfo(child->last_siginfo, info);
 710			error = 0;
 711		}
 712		unlock_task_sighand(child, &flags);
 713	}
 714	return error;
 715}
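
/*
 * Example (userspace, illustrative): inspecting the signal that stopped
 * the tracee, e.g. to recover the faulting address of a SIGSEGV.
 * handle_fault() is hypothetical.
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *
 *	siginfo_t si;
 *	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == 0 &&
 *	    si.si_signo == SIGSEGV)
 *		handle_fault(si.si_addr);
 */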
 716
 717static int ptrace_peek_siginfo(struct task_struct *child,
 718				unsigned long addr,
 719				unsigned long data)
 720{
 721	struct ptrace_peeksiginfo_args arg;
 722	struct sigpending *pending;
 723	struct sigqueue *q;
 724	int ret, i;
 725
 726	ret = copy_from_user(&arg, (void __user *) addr,
 727				sizeof(struct ptrace_peeksiginfo_args));
 728	if (ret)
 729		return -EFAULT;
 730
 731	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
 732		return -EINVAL; /* unknown flags */
 733
 734	if (arg.nr < 0)
 735		return -EINVAL;
 736
 737	/* Ensure arg.off fits in an unsigned long */
 738	if (arg.off > ULONG_MAX)
 739		return 0;
 740
 741	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
 742		pending = &child->signal->shared_pending;
 743	else
 744		pending = &child->pending;
 745
 746	for (i = 0; i < arg.nr; ) {
 747		kernel_siginfo_t info;
 748		unsigned long off = arg.off + i;
 749		bool found = false;
 750
 751		spin_lock_irq(&child->sighand->siglock);
 752		list_for_each_entry(q, &pending->list, list) {
 753			if (!off--) {
 754				found = true;
 755				copy_siginfo(&info, &q->info);
 756				break;
 757			}
 758		}
 759		spin_unlock_irq(&child->sighand->siglock);
 760
 761		if (!found) /* beyond the end of the list */
 762			break;
 763
 764#ifdef CONFIG_COMPAT
 765		if (unlikely(in_compat_syscall())) {
 766			compat_siginfo_t __user *uinfo = compat_ptr(data);
 767
 768			if (copy_siginfo_to_user32(uinfo, &info)) {
 769				ret = -EFAULT;
 770				break;
 771			}
 772
 773		} else
 774#endif
 775		{
 776			siginfo_t __user *uinfo = (siginfo_t __user *) data;
 777
 778			if (copy_siginfo_to_user(uinfo, &info)) {
 779				ret = -EFAULT;
 780				break;
 781			}
 782		}
 783
 784		data += sizeof(siginfo_t);
 785		i++;
 786
 787		if (signal_pending(current))
 788			break;
 789
 790		cond_resched();
 791	}
 792
 793	if (i > 0)
 794		return i;
 795
 796	return ret;
 797}
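
/*
 * Example (userspace, illustrative): peeking at up to 16 queued signals
 * without dequeueing them; the return value is the number of entries
 * actually copied out.
 *
 *	#include <linux/ptrace.h>
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0,	// start at the head of the queue
 *		.flags = 0,	// per-thread queue; or PTRACE_PEEKSIGINFO_SHARED
 *		.nr = 16,
 *	};
 *	siginfo_t buf[16];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, buf);
 */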
 798
 799#ifdef CONFIG_RSEQ
 800static long ptrace_get_rseq_configuration(struct task_struct *task,
 801					  unsigned long size, void __user *data)
 802{
 803	struct ptrace_rseq_configuration conf = {
 804		.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
 805		.rseq_abi_size = sizeof(*task->rseq),
 806		.signature = task->rseq_sig,
 807		.flags = 0,
 808	};
 809
 810	size = min_t(unsigned long, size, sizeof(conf));
 811	if (copy_to_user(data, &conf, size))
 812		return -EFAULT;
 813	return sizeof(conf);
 814}
 815#endif
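
/*
 * Example (userspace, illustrative): here addr carries the buffer size and
 * data points at the buffer; on success the full structure size is
 * returned even if the copy was truncated.
 *
 *	struct ptrace_rseq_configuration conf;
 *	long sz = ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid,
 *			 sizeof(conf), &conf);
 *	// conf.rseq_abi_pointer is 0 if no rseq area is registered
 */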
 816
 817#ifdef PTRACE_SINGLESTEP
 818#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
 819#else
 820#define is_singlestep(request)		0
 821#endif
 822
 823#ifdef PTRACE_SINGLEBLOCK
 824#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
 825#else
 826#define is_singleblock(request)		0
 827#endif
 828
 829#ifdef PTRACE_SYSEMU
 830#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
 831#else
 832#define is_sysemu_singlestep(request)	0
 833#endif
 834
 835static int ptrace_resume(struct task_struct *child, long request,
 836			 unsigned long data)
 837{
 838	bool need_siglock;
 839
 840	if (!valid_signal(data))
 841		return -EIO;
 842
 843	if (request == PTRACE_SYSCALL)
 844		set_task_syscall_work(child, SYSCALL_TRACE);
 845	else
 846		clear_task_syscall_work(child, SYSCALL_TRACE);
 847
 848#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
 849	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
 850		set_task_syscall_work(child, SYSCALL_EMU);
 851	else
 852		clear_task_syscall_work(child, SYSCALL_EMU);
 853#endif
 854
 855	if (is_singleblock(request)) {
 856		if (unlikely(!arch_has_block_step()))
 857			return -EIO;
 858		user_enable_block_step(child);
 859	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 860		if (unlikely(!arch_has_single_step()))
 861			return -EIO;
 862		user_enable_single_step(child);
 863	} else {
 864		user_disable_single_step(child);
 865	}
 866
 867	/*
 868	 * Change ->exit_code and ->state under siglock to avoid the race
 869	 * with wait_task_stopped() in between; a non-zero ->exit_code will
 870	 * wrongly look like another report from tracee.
 871	 *
 872	 * Note that we need siglock even if ->exit_code == data and/or this
 873	 * status was not reported yet; the new status must not be cleared by
 874	 * wait_task_stopped() after resume.
 875	 *
 876	 * If data == 0 we do not care if wait_task_stopped() reports the old
 877	 * status and clears the code too; this can't race with the tracee, which
 878	 * takes siglock after resuming.
 879	 */
 880	need_siglock = data && !thread_group_empty(current);
 881	if (need_siglock)
 882		spin_lock_irq(&child->sighand->siglock);
 883	child->exit_code = data;
 884	wake_up_state(child, __TASK_TRACED);
 885	if (need_siglock)
 886		spin_unlock_irq(&child->sighand->siglock);
 887
 888	return 0;
 889}
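
/*
 * Example (userspace, illustrative): a syscall-tracing loop built on
 * PTRACE_SYSCALL; each resume runs the tracee to the next syscall entry
 * or exit (or another stop).
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	for (;;) {
 *		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
 *			break;
 *		if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
 *			break;
 *		// with PTRACE_O_TRACESYSGOOD, syscall stops have
 *		// WSTOPSIG(status) == (SIGTRAP | 0x80)
 *	}
 */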
 890
 891#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 892
 893static const struct user_regset *
 894find_regset(const struct user_regset_view *view, unsigned int type)
 895{
 896	const struct user_regset *regset;
 897	int n;
 898
 899	for (n = 0; n < view->n; ++n) {
 900		regset = view->regsets + n;
 901		if (regset->core_note_type == type)
 902			return regset;
 903	}
 904
 905	return NULL;
 906}
 907
 908static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 909			 struct iovec *kiov)
 910{
 911	const struct user_regset_view *view = task_user_regset_view(task);
 912	const struct user_regset *regset = find_regset(view, type);
 913	int regset_no;
 914
 915	if (!regset || (kiov->iov_len % regset->size) != 0)
 916		return -EINVAL;
 917
 918	regset_no = regset - view->regsets;
 919	kiov->iov_len = min(kiov->iov_len,
 920			    (__kernel_size_t) (regset->n * regset->size));
 921
 922	if (req == PTRACE_GETREGSET)
 923		return copy_regset_to_user(task, view, regset_no, 0,
 924					   kiov->iov_len, kiov->iov_base);
 925	else
 926		return copy_regset_from_user(task, view, regset_no, 0,
 927					     kiov->iov_len, kiov->iov_base);
 928}
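
/*
 * Example (userspace, illustrative): reading the general-purpose register
 * set via PTRACE_GETREGSET; the kernel clamps iov_len to the regset size
 * and writes back how much it actually filled in.  use_regs() is
 * hypothetical; struct user_regs_struct is the x86 layout.
 *
 *	#include <elf.h>		// NT_PRSTATUS
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == 0)
 *		use_regs(&regs, iov.iov_len);
 */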
 929
 930/*
 931 * This is declared in linux/regset.h and defined in machine-dependent
 932 * code.  We put the export here, near the primary machine-neutral use,
 933 * to ensure no machine forgets it.
 934 */
 935EXPORT_SYMBOL_GPL(task_user_regset_view);
 936
 937static unsigned long
 938ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
 939			      struct ptrace_syscall_info *info)
 940{
 941	unsigned long args[ARRAY_SIZE(info->entry.args)];
 942	int i;
 943
 944	info->op = PTRACE_SYSCALL_INFO_ENTRY;
 945	info->entry.nr = syscall_get_nr(child, regs);
 946	syscall_get_arguments(child, regs, args);
 947	for (i = 0; i < ARRAY_SIZE(args); i++)
 948		info->entry.args[i] = args[i];
 949
 950	/* args is the last field in struct ptrace_syscall_info.entry */
 951	return offsetofend(struct ptrace_syscall_info, entry.args);
 952}
 953
 954static unsigned long
 955ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
 956				struct ptrace_syscall_info *info)
 957{
 958	/*
 959	 * As struct ptrace_syscall_info.entry is currently a subset
 960	 * of struct ptrace_syscall_info.seccomp, it makes sense to
 961	 * initialize that subset using ptrace_get_syscall_info_entry().
 962	 * This can be reconsidered in the future if these structures
 963	 * diverge significantly enough.
 964	 */
 965	ptrace_get_syscall_info_entry(child, regs, info);
 966	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
 967	info->seccomp.ret_data = child->ptrace_message;
 968
 969	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
 970	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
 971}
 972
 973static unsigned long
 974ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
 975			     struct ptrace_syscall_info *info)
 976{
 977	info->op = PTRACE_SYSCALL_INFO_EXIT;
 978	info->exit.rval = syscall_get_error(child, regs);
 979	info->exit.is_error = !!info->exit.rval;
 980	if (!info->exit.is_error)
 981		info->exit.rval = syscall_get_return_value(child, regs);
 982
 983	/* is_error is the last field in struct ptrace_syscall_info.exit */
 984	return offsetofend(struct ptrace_syscall_info, exit.is_error);
 985}
 986
 987static int
 988ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
 989			void __user *datavp)
 990{
 991	struct pt_regs *regs = task_pt_regs(child);
 992	struct ptrace_syscall_info info = {
 993		.op = PTRACE_SYSCALL_INFO_NONE,
 994		.arch = syscall_get_arch(child),
 995		.instruction_pointer = instruction_pointer(regs),
 996		.stack_pointer = user_stack_pointer(regs),
 997	};
 998	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
 999	unsigned long write_size;
1000
1001	/*
1002	 * This does not need lock_task_sighand() to access
1003	 * child->last_siginfo because ptrace_freeze_traced()
1004	 * called earlier by ptrace_check_attach() ensures that
1005	 * the tracee cannot go away and clear its last_siginfo.
1006	 */
1007	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
1008	case SIGTRAP | 0x80:
1009		switch (child->ptrace_message) {
1010		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
1011			actual_size = ptrace_get_syscall_info_entry(child, regs,
1012								    &info);
1013			break;
1014		case PTRACE_EVENTMSG_SYSCALL_EXIT:
1015			actual_size = ptrace_get_syscall_info_exit(child, regs,
1016								   &info);
1017			break;
1018		}
1019		break;
1020	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
1021		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
1022							      &info);
1023		break;
1024	}
1025
1026	write_size = min(actual_size, user_size);
1027	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
1028}
1029#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
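
/*
 * Example (userspace, illustrative): as with the rseq request, addr is the
 * buffer size and data the buffer; the return value is the size the kernel
 * wanted to write, so callers can detect truncation.
 *
 *	struct ptrace_syscall_info info;
 *	long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid, sizeof(info), &info);
 *	// check info.op: PTRACE_SYSCALL_INFO_ENTRY / _EXIT / _SECCOMP / _NONE
 */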
1030
1031int ptrace_request(struct task_struct *child, long request,
1032		   unsigned long addr, unsigned long data)
1033{
1034	bool seized = child->ptrace & PT_SEIZED;
1035	int ret = -EIO;
1036	kernel_siginfo_t siginfo, *si;
1037	void __user *datavp = (void __user *) data;
1038	unsigned long __user *datalp = datavp;
1039	unsigned long flags;
1040
1041	switch (request) {
1042	case PTRACE_PEEKTEXT:
1043	case PTRACE_PEEKDATA:
1044		return generic_ptrace_peekdata(child, addr, data);
1045	case PTRACE_POKETEXT:
1046	case PTRACE_POKEDATA:
1047		return generic_ptrace_pokedata(child, addr, data);
1048
1049#ifdef PTRACE_OLDSETOPTIONS
1050	case PTRACE_OLDSETOPTIONS:
1051#endif
1052	case PTRACE_SETOPTIONS:
1053		ret = ptrace_setoptions(child, data);
1054		break;
1055	case PTRACE_GETEVENTMSG:
1056		ret = put_user(child->ptrace_message, datalp);
1057		break;
1058
1059	case PTRACE_PEEKSIGINFO:
1060		ret = ptrace_peek_siginfo(child, addr, data);
1061		break;
1062
1063	case PTRACE_GETSIGINFO:
1064		ret = ptrace_getsiginfo(child, &siginfo);
1065		if (!ret)
1066			ret = copy_siginfo_to_user(datavp, &siginfo);
1067		break;
1068
1069	case PTRACE_SETSIGINFO:
1070		ret = copy_siginfo_from_user(&siginfo, datavp);
1071		if (!ret)
1072			ret = ptrace_setsiginfo(child, &siginfo);
1073		break;
1074
1075	case PTRACE_GETSIGMASK: {
1076		sigset_t *mask;
1077
1078		if (addr != sizeof(sigset_t)) {
1079			ret = -EINVAL;
1080			break;
1081		}
1082
1083		if (test_tsk_restore_sigmask(child))
1084			mask = &child->saved_sigmask;
1085		else
1086			mask = &child->blocked;
1087
1088		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
1089			ret = -EFAULT;
1090		else
1091			ret = 0;
1092
1093		break;
1094	}
1095
1096	case PTRACE_SETSIGMASK: {
1097		sigset_t new_set;
1098
1099		if (addr != sizeof(sigset_t)) {
1100			ret = -EINVAL;
1101			break;
1102		}
1103
1104		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
1105			ret = -EFAULT;
1106			break;
1107		}
1108
1109		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1110
1111		/*
1112		 * Every thread does recalc_sigpending() after resume, so
1113		 * retarget_shared_pending() and recalc_sigpending() are not
1114		 * called here.
1115		 */
1116		spin_lock_irq(&child->sighand->siglock);
1117		child->blocked = new_set;
1118		spin_unlock_irq(&child->sighand->siglock);
1119
1120		clear_tsk_restore_sigmask(child);
1121
1122		ret = 0;
1123		break;
1124	}
1125
1126	case PTRACE_INTERRUPT:
1127		/*
1128		 * Stop tracee without any side-effect on signal or job
1129		 * control.  At least one trap is guaranteed to happen
1130		 * after this request.  If @child is already trapped, the
1131		 * current trap is not disturbed and another trap will
1132		 * happen after the current trap is ended with PTRACE_CONT.
1133		 *
1134	 * The actual trap might not be a PTRACE_EVENT_STOP trap, but
1135		 * the pending condition is cleared regardless.
1136		 */
1137		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1138			break;
1139
1140		/*
1141		 * INTERRUPT doesn't disturb an existing trap, with one
1142		 * exception.  If ptracer issued LISTEN for the current
1143		 * STOP, this INTERRUPT should clear LISTEN and re-trap
1144		 * tracee into STOP.
1145		 */
1146		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
1147			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
1148
1149		unlock_task_sighand(child, &flags);
1150		ret = 0;
1151		break;
1152
1153	case PTRACE_LISTEN:
1154		/*
1155		 * Listen for events.  Tracee must be in STOP.  It's not
1156	 * resumed per se but is not considered to be in TRACED by
1157		 * wait(2) or ptrace(2).  If an async event (e.g. group
1158		 * stop state change) happens, tracee will enter STOP trap
1159		 * again.  Alternatively, ptracer can issue INTERRUPT to
1160		 * finish listening and re-trap tracee into STOP.
1161		 */
1162		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1163			break;
1164
1165		si = child->last_siginfo;
1166		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
1167			child->jobctl |= JOBCTL_LISTENING;
1168			/*
1169			 * If NOTIFY is set, it means event happened between
1170			 * start of this trap and now.  Trigger re-trap.
1171			 */
1172			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
1173				ptrace_signal_wake_up(child, true);
1174			ret = 0;
1175		}
1176		unlock_task_sighand(child, &flags);
1177		break;
1178
1179	case PTRACE_DETACH:	 /* detach a process that was attached. */
1180		ret = ptrace_detach(child, data);
1181		break;
1182
1183#ifdef CONFIG_BINFMT_ELF_FDPIC
1184	case PTRACE_GETFDPIC: {
1185		struct mm_struct *mm = get_task_mm(child);
1186		unsigned long tmp = 0;
1187
1188		ret = -ESRCH;
1189		if (!mm)
1190			break;
1191
1192		switch (addr) {
1193		case PTRACE_GETFDPIC_EXEC:
1194			tmp = mm->context.exec_fdpic_loadmap;
1195			break;
1196		case PTRACE_GETFDPIC_INTERP:
1197			tmp = mm->context.interp_fdpic_loadmap;
1198			break;
1199		default:
1200			break;
1201		}
1202		mmput(mm);
1203
1204		ret = put_user(tmp, datalp);
1205		break;
1206	}
1207#endif
1208
1209#ifdef PTRACE_SINGLESTEP
1210	case PTRACE_SINGLESTEP:
1211#endif
1212#ifdef PTRACE_SINGLEBLOCK
1213	case PTRACE_SINGLEBLOCK:
1214#endif
1215#ifdef PTRACE_SYSEMU
1216	case PTRACE_SYSEMU:
1217	case PTRACE_SYSEMU_SINGLESTEP:
1218#endif
1219	case PTRACE_SYSCALL:
1220	case PTRACE_CONT:
1221		return ptrace_resume(child, request, data);
1222
1223	case PTRACE_KILL:
1224		if (child->exit_state)	/* already dead */
1225			return 0;
1226		return ptrace_resume(child, request, SIGKILL);
1227
1228#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1229	case PTRACE_GETREGSET:
1230	case PTRACE_SETREGSET: {
1231		struct iovec kiov;
1232		struct iovec __user *uiov = datavp;
1233
1234		if (!access_ok(uiov, sizeof(*uiov)))
1235			return -EFAULT;
1236
1237		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
1238		    __get_user(kiov.iov_len, &uiov->iov_len))
1239			return -EFAULT;
1240
1241		ret = ptrace_regset(child, request, addr, &kiov);
1242		if (!ret)
1243			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1244		break;
1245	}
1246
1247	case PTRACE_GET_SYSCALL_INFO:
1248		ret = ptrace_get_syscall_info(child, addr, datavp);
1249		break;
1250#endif
1251
1252	case PTRACE_SECCOMP_GET_FILTER:
1253		ret = seccomp_get_filter(child, addr, datavp);
1254		break;
1255
1256	case PTRACE_SECCOMP_GET_METADATA:
1257		ret = seccomp_get_metadata(child, addr, datavp);
1258		break;
1259
1260#ifdef CONFIG_RSEQ
1261	case PTRACE_GET_RSEQ_CONFIGURATION:
1262		ret = ptrace_get_rseq_configuration(child, addr, datavp);
1263		break;
1264#endif
1265
1266	default:
1267		break;
1268	}
1269
1270	return ret;
1271}
1272
1273#ifndef arch_ptrace_attach
1274#define arch_ptrace_attach(child)	do { } while (0)
1275#endif
1276
1277SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
1278		unsigned long, data)
1279{
1280	struct task_struct *child;
1281	long ret;
1282
1283	if (request == PTRACE_TRACEME) {
1284		ret = ptrace_traceme();
1285		if (!ret)
1286			arch_ptrace_attach(current);
1287		goto out;
1288	}
1289
1290	child = find_get_task_by_vpid(pid);
1291	if (!child) {
1292		ret = -ESRCH;
1293		goto out;
1294	}
1295
1296	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1297		ret = ptrace_attach(child, request, addr, data);
1298		/*
1299		 * Some architectures need to do book-keeping after
1300		 * a ptrace attach.
1301		 */
1302		if (!ret)
1303			arch_ptrace_attach(child);
1304		goto out_put_task_struct;
1305	}
1306
1307	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1308				  request == PTRACE_INTERRUPT);
1309	if (ret < 0)
1310		goto out_put_task_struct;
1311
1312	ret = arch_ptrace(child, request, addr, data);
1313	if (ret || request != PTRACE_DETACH)
1314		ptrace_unfreeze_traced(child);
1315
1316 out_put_task_struct:
1317	put_task_struct(child);
1318 out:
1319	return ret;
1320}
1321
1322int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1323			    unsigned long data)
1324{
1325	unsigned long tmp;
1326	int copied;
1327
1328	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1329	if (copied != sizeof(tmp))
1330		return -EIO;
1331	return put_user(tmp, (unsigned long __user *)data);
1332}
1333
1334int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1335			    unsigned long data)
1336{
1337	int copied;
1338
1339	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
1340			FOLL_FORCE | FOLL_WRITE);
1341	return (copied == sizeof(data)) ? 0 : -EIO;
1342}
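
/*
 * Example (userspace, illustrative): the word-sized PEEKDATA/POKEDATA
 * interface these helpers implement.  Since -1 is a valid word value,
 * errno must be cleared and checked explicitly.  remote_addr is a
 * placeholder.
 *
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)remote_addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 *	else
 *		ptrace(PTRACE_POKEDATA, pid, (void *)remote_addr, (void *)word);
 */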
1343
1344#if defined CONFIG_COMPAT
1345
1346int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1347			  compat_ulong_t addr, compat_ulong_t data)
1348{
1349	compat_ulong_t __user *datap = compat_ptr(data);
1350	compat_ulong_t word;
1351	kernel_siginfo_t siginfo;
1352	int ret;
1353
1354	switch (request) {
1355	case PTRACE_PEEKTEXT:
1356	case PTRACE_PEEKDATA:
1357		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
1358				FOLL_FORCE);
1359		if (ret != sizeof(word))
1360			ret = -EIO;
1361		else
1362			ret = put_user(word, datap);
1363		break;
1364
1365	case PTRACE_POKETEXT:
1366	case PTRACE_POKEDATA:
1367		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
1368				FOLL_FORCE | FOLL_WRITE);
1369		ret = (ret != sizeof(data) ? -EIO : 0);
1370		break;
1371
1372	case PTRACE_GETEVENTMSG:
1373		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
1374		break;
1375
1376	case PTRACE_GETSIGINFO:
1377		ret = ptrace_getsiginfo(child, &siginfo);
1378		if (!ret)
1379			ret = copy_siginfo_to_user32(
1380				(struct compat_siginfo __user *) datap,
1381				&siginfo);
1382		break;
1383
1384	case PTRACE_SETSIGINFO:
1385		ret = copy_siginfo_from_user32(
1386			&siginfo, (struct compat_siginfo __user *) datap);
1387		if (!ret)
1388			ret = ptrace_setsiginfo(child, &siginfo);
1389		break;
1390#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1391	case PTRACE_GETREGSET:
1392	case PTRACE_SETREGSET:
1393	{
1394		struct iovec kiov;
1395		struct compat_iovec __user *uiov =
1396			(struct compat_iovec __user *) datap;
1397		compat_uptr_t ptr;
1398		compat_size_t len;
1399
1400		if (!access_ok(uiov, sizeof(*uiov)))
1401			return -EFAULT;
1402
1403		if (__get_user(ptr, &uiov->iov_base) ||
1404		    __get_user(len, &uiov->iov_len))
1405			return -EFAULT;
1406
1407		kiov.iov_base = compat_ptr(ptr);
1408		kiov.iov_len = len;
1409
1410		ret = ptrace_regset(child, request, addr, &kiov);
1411		if (!ret)
1412			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1413		break;
1414	}
1415#endif
1416
1417	default:
1418		ret = ptrace_request(child, request, addr, data);
1419	}
1420
1421	return ret;
1422}
1423
1424COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
1425		       compat_long_t, addr, compat_long_t, data)
1426{
1427	struct task_struct *child;
1428	long ret;
1429
1430	if (request == PTRACE_TRACEME) {
1431		ret = ptrace_traceme();
1432		goto out;
1433	}
1434
1435	child = find_get_task_by_vpid(pid);
1436	if (!child) {
1437		ret = -ESRCH;
1438		goto out;
1439	}
1440
1441	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1442		ret = ptrace_attach(child, request, addr, data);
1443		/*
1444		 * Some architectures need to do book-keeping after
1445		 * a ptrace attach.
1446		 */
1447		if (!ret)
1448			arch_ptrace_attach(child);
1449		goto out_put_task_struct;
1450	}
1451
1452	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1453				  request == PTRACE_INTERRUPT);
1454	if (!ret) {
1455		ret = compat_arch_ptrace(child, request, addr, data);
1456		if (ret || request != PTRACE_DETACH)
1457			ptrace_unfreeze_traced(child);
1458	}
1459
1460 out_put_task_struct:
1461	put_task_struct(child);
1462 out:
1463	return ret;
1464}
1465#endif	/* CONFIG_COMPAT */