Linux v4.10.11: kernel/ptrace.c
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}
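
/*
 * Example (userspace, illustrative): a tracer's PTRACE_PEEKDATA request
 * is one of the callers that ultimately funnels into ptrace_access_vm()
 * above.  A minimal sketch, assuming `pid` is a tracee already stopped
 * under our control and `addr` is a hypothetical address in its
 * address space:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>
#include <stdio.h>

static long peek_word(pid_t pid, unsigned long addr)
{
	long word;

	errno = 0;	/* PEEK requests return the data itself, so errno
			 * is the only way to tell -1 from a real word */
	word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKDATA");
	return word;
}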
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	rcu_read_lock();
	child->ptracer_cred = get_cred(__task_cred(new_parent));
	rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
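
/*
 * Example (userspace, illustrative): the TRACED -> STOPPED transition
 * described above is what a tracer can force on detach.  Passing
 * SIGSTOP as the detach signal leaves the tracee stopped rather than
 * letting it run.  A sketch, assuming `pid` is currently in ptrace-stop:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

static long detach_keep_stopped(pid_t pid)
{
	/* the data argument is the signal delivered to the tracee */
	return ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGSTOP);
}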

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}
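
/*
 * Example (userspace, illustrative): ptrace_check_attach() is the
 * reason most requests fail with -ESRCH until the tracee has actually
 * entered ptrace-stop, so tracers wait for the stop first.  A sketch,
 * assuming we just attached to or signalled `pid`:
 */
#include <sys/types.h>
#include <sys/wait.h>

static int wait_for_stop(pid_t pid)
{
	int status;

	if (waitpid(pid, &status, 0) < 0)
		return -1;
	if (!WIFSTOPPED(status))	/* it died instead of stopping */
		return -1;
	/* only now will requests such as PTRACE_GETREGSET reliably
	 * succeed instead of returning -ESRCH */
	return 0;
}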

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
	    return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
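
/*
 * Example (userspace, illustrative): the same __ptrace_may_access()
 * logic (in PTRACE_MODE_ATTACH form) also gates /proc/<pid>/mem, so a
 * read there is denied in exactly the cases an attach would be.  A
 * sketch; the path format is real, everything else is illustrative:
 */
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static ssize_t read_proc_mem(pid_t pid, unsigned long addr,
			     void *buf, size_t len)
{
	char path[64];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
	fd = open(path, O_RDONLY);	/* the access check happens here */
	if (fd < 0)
		return -1;
	n = pread(fd, buf, len, (off_t)addr);
	close(fd);
	return n;
}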

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
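
/*
 * Example (userspace, illustrative): the two attach flavours handled
 * above differ exactly as the code says -- PTRACE_ATTACH sends SIGSTOP
 * as a side effect, while PTRACE_SEIZE does not trap the tracee at
 * all.  A sketch with error handling trimmed:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int attach_classic(pid_t pid)
{
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0)
		return -1;
	return waitpid(pid, NULL, 0) < 0 ? -1 : 0;	/* consume the stop */
}

static int attach_quiet(pid_t pid)
{
	/* addr must be 0; data may carry PTRACE_O_* options */
	return ptrace(PTRACE_SEIZE, pid, NULL, NULL) < 0 ? -1 : 0;
}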

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
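
/*
 * Example (userspace, illustrative): the canonical PTRACE_TRACEME user
 * is a debugger spawning its target -- the child asks to be traced by
 * its parent, and the subsequent exec stops it with SIGTRAP.  A sketch:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t spawn_traced(char *const argv[])
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execvp(argv[0], argv);
		_exit(127);
	}
	if (pid > 0)
		waitpid(pid, NULL, 0);	/* wait for the exec SIGTRAP stop */
	return pid;
}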

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
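
/*
 * Example (userspace, illustrative): the 128-byte chunking above has a
 * userspace mirror -- before process_vm_readv(), tracers copied remote
 * memory one word at a time.  A sketch, assuming `pid` is stopped and
 * `len` is a multiple of the word size for brevity:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <string.h>
#include <errno.h>

static int read_remote(pid_t pid, unsigned long src, char *dst, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += sizeof(long)) {
		long word;

		errno = 0;
		word = ptrace(PTRACE_PEEKDATA, pid, (void *)(src + i), NULL);
		if (word == -1 && errno)
			return -1;
		memcpy(dst + i, &word, sizeof(long));
	}
	return 0;
}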

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
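
/*
 * Example (userspace, illustrative): a typical PTRACE_SETOPTIONS call.
 * PTRACE_O_TRACESYSGOOD makes syscall stops report SIGTRAP|0x80 so they
 * can be told apart from genuine SIGTRAPs; PTRACE_O_EXITKILL sets the
 * PT_EXITKILL flag consulted by exit_ptrace() above.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static int set_usual_options(pid_t pid)
{
	long opts = PTRACE_O_TRACESYSGOOD | PTRACE_O_EXITKILL;

	return ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts) < 0 ? -1 : 0;
}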

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
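
/*
 * Example (userspace, illustrative): the pair above backs the common
 * "inspect and possibly rewrite the stop signal" pattern.  A sketch,
 * assuming `pid` is stopped on signal delivery:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

static int inspect_stop_signal(pid_t pid)
{
	siginfo_t si;

	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) < 0)
		return -1;
	/* ... examine si.si_signo / si.si_code, optionally modify ... */
	return ptrace(PTRACE_SETSIGINFO, pid, NULL, &si) < 0 ? -1 : 0;
}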

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
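
/*
 * Example (userspace, illustrative): peeking at queued signals without
 * dequeueing them, mirroring the loop above.  Uses glibc's
 * struct __ptrace_peeksiginfo_args from <sys/ptrace.h>; a sketch,
 * assuming `pid` is a stopped tracee:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>
#include <stdio.h>

static void dump_pending_signals(pid_t pid)
{
	siginfo_t si[8];
	struct __ptrace_peeksiginfo_args arg = {
		.off = 0,	/* start of the queue */
		.flags = 0,	/* per-thread queue, not the shared one */
		.nr = 8,
	};
	long i, n = ptrace(PTRACE_PEEKSIGINFO, pid, &arg, si);

	for (i = 0; i < n; i++)
		printf("pending signal %d (si_code %d)\n",
		       si[i].si_signo, si[i].si_code);
}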

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
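
/*
 * Example (userspace, illustrative): PTRACE_SYSCALL as wired up above
 * (via TIF_SYSCALL_TRACE) drives the classic strace-style loop that
 * stops the tracee at each syscall entry and exit.  A sketch, assuming
 * the tracee starts out stopped:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void syscall_stop_loop(pid_t pid)
{
	int status;

	for (;;) {
		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) < 0)
			break;
		if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
			break;
		/* stopped at syscall entry or exit; registers can be
		 * examined here, e.g. with PTRACE_GETREGSET below */
	}
}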

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
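
/*
 * Example (userspace, illustrative): PTRACE_GETREGSET with the
 * NT_PRSTATUS note type is the portable way into ptrace_regset()
 * above; the kernel clamps iov_len to the regset size on return.
 * x86-64's user_regs_struct shown; other architectures differ:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static int get_gp_registers(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len = sizeof(*regs),
	};

	/* the regset type travels in the addr argument */
	return ptrace(PTRACE_GETREGSET, pid,
		      (void *)(long)NT_PRSTATUS, &iov) < 0 ? -1 : 0;
}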

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}
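
/*
 * Example (userspace, illustrative): the SEIZE-only requests handled
 * above compose as follows -- INTERRUPT traps a running tracee without
 * injecting a signal, and LISTEN (accepted only from a
 * PTRACE_EVENT_STOP trap, which is what INTERRUPT produces) parks the
 * tracee so it stays stopped for the rest of the system while the
 * tracer keeps waiting for async events.  A sketch, error handling
 * trimmed:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int interrupt_then_listen(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) < 0)
		return -1;
	if (waitpid(pid, &status, 0) < 0 || !WIFSTOPPED(status))
		return -1;
	/* LISTEN keeps the tracee stopped without holding it in
	 * ptrace-stop ourselves */
	return ptrace(PTRACE_LISTEN, pid, NULL, NULL) < 0 ? -1 : 0;
}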

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */
Linux v4.17: kernel/ptrace.c
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}


void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	rcu_read_lock();
	__ptrace_link(child, new_parent, __task_cred(new_parent));
	rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
	    return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}
 572
 573int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 574{
 575	int copied = 0;
 576
 577	while (len > 0) {
 578		char buf[128];
 579		int this_len, retval;
 580
 581		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 582		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
 583
 584		if (!retval) {
 585			if (copied)
 586				break;
 587			return -EIO;
 588		}
 589		if (copy_to_user(dst, buf, retval))
 590			return -EFAULT;
 591		copied += retval;
 592		src += retval;
 593		dst += retval;
 594		len -= retval;
 595	}
 596	return copied;
 597}
 598
 599int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 600{
 601	int copied = 0;
 602
 603	while (len > 0) {
 604		char buf[128];
 605		int this_len, retval;
 606
 607		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 608		if (copy_from_user(buf, src, this_len))
 609			return -EFAULT;
 610		retval = ptrace_access_vm(tsk, dst, buf, this_len,
 611				FOLL_FORCE | FOLL_WRITE);
 612		if (!retval) {
 613			if (copied)
 614				break;
 615			return -EIO;
 616		}
 617		copied += retval;
 618		src += retval;
 619		dst += retval;
 620		len -= retval;
 621	}
 622	return copied;
 623}
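/*
 * Editorial example, not part of this file: userspace has no bulk
 * read/write request, so the 128-byte chunking of ptrace_readdata() and
 * ptrace_writedata() above is approximated one word at a time.
 * read_remote() is a hypothetical helper.
 */
#if 0	/* illustrative userspace code only */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <string.h>
#include <errno.h>

static ssize_t read_remote(pid_t pid, unsigned long addr,
			   char *buf, size_t len)
{
	size_t copied = 0;

	while (copied < len) {
		size_t chunk;
		long word;

		errno = 0;
		word = ptrace(PTRACE_PEEKDATA, pid,
			      (void *)(addr + copied), NULL);
		if (word == -1 && errno)	/* -1 can also be valid data */
			return copied ? (ssize_t)copied : -1;

		/* The final word may be partial. */
		chunk = len - copied < sizeof(word) ? len - copied : sizeof(word);
		memcpy(buf + copied, &word, chunk);
		copied += chunk;
	}
	return copied;
}
#endif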
 624
 625static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 626{
 627	unsigned flags;
 628
 629	if (data & ~(unsigned long)PTRACE_O_MASK)
 630		return -EINVAL;
 631
 632	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
 633		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
 634		    !IS_ENABLED(CONFIG_SECCOMP))
 635			return -EINVAL;
 636
 637		if (!capable(CAP_SYS_ADMIN))
 638			return -EPERM;
 639
 640		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
 641		    current->ptrace & PT_SUSPEND_SECCOMP)
 642			return -EPERM;
 643	}
 644
 645	/* Avoid intermediate state when all opts are cleared */
 646	flags = child->ptrace;
 647	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
 648	flags |= (data << PT_OPT_FLAG_SHIFT);
 649	child->ptrace = flags;
 650
 651	return 0;
 652}
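/*
 * Editorial example, not part of this file: once the tracee has stopped,
 * a tracer typically enables several events in one PTRACE_SETOPTIONS
 * call; bits outside PTRACE_O_MASK fail with -EINVAL per the check
 * above.  enable_common_options() is a hypothetical helper.
 */
#if 0	/* illustrative userspace code only */
#include <sys/ptrace.h>
#include <sys/types.h>

static int enable_common_options(pid_t pid)
{
	long opts = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXEC |
		    PTRACE_O_TRACEEXIT;

	return ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts);
}
#endif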
 653
 654static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
 655{
 656	unsigned long flags;
 657	int error = -ESRCH;
 658
 659	if (lock_task_sighand(child, &flags)) {
 660		error = -EINVAL;
 661		if (likely(child->last_siginfo != NULL)) {
 662			copy_siginfo(info, child->last_siginfo);
 663			error = 0;
 664		}
 665		unlock_task_sighand(child, &flags);
 666	}
 667	return error;
 668}
 669
 670static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
 671{
 672	unsigned long flags;
 673	int error = -ESRCH;
 674
 675	if (lock_task_sighand(child, &flags)) {
 676		error = -EINVAL;
 677		if (likely(child->last_siginfo != NULL)) {
 678			copy_siginfo(child->last_siginfo, info);
 679			error = 0;
 680		}
 681		unlock_task_sighand(child, &flags);
 682	}
 683	return error;
 684}
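/*
 * Editorial example, not part of this file: fetching the siginfo of the
 * signal that stopped the tracee.  If the tracee is not in a
 * signal-delivery stop, last_siginfo is NULL above and the request fails
 * with -EINVAL.  get_stop_siginfo() is a hypothetical helper.
 */
#if 0	/* illustrative userspace code only */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

static int get_stop_siginfo(pid_t pid, siginfo_t *si)
{
	return ptrace(PTRACE_GETSIGINFO, pid, NULL, si);
}
#endif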
 685
 686static int ptrace_peek_siginfo(struct task_struct *child,
 687				unsigned long addr,
 688				unsigned long data)
 689{
 690	struct ptrace_peeksiginfo_args arg;
 691	struct sigpending *pending;
 692	struct sigqueue *q;
 693	int ret, i;
 694
 695	ret = copy_from_user(&arg, (void __user *) addr,
 696				sizeof(struct ptrace_peeksiginfo_args));
 697	if (ret)
 698		return -EFAULT;
 699
 700	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
 701		return -EINVAL; /* unknown flags */
 702
 703	if (arg.nr < 0)
 704		return -EINVAL;
 705
 706	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
 707		pending = &child->signal->shared_pending;
 708	else
 709		pending = &child->pending;
 710
 711	for (i = 0; i < arg.nr; ) {
 712		siginfo_t info;
 713		s32 off = arg.off + i;
 714
 715		spin_lock_irq(&child->sighand->siglock);
 716		list_for_each_entry(q, &pending->list, list) {
 717			if (!off--) {
 718				copy_siginfo(&info, &q->info);
 719				break;
 720			}
 721		}
 722		spin_unlock_irq(&child->sighand->siglock);
 723
 724		if (off >= 0) /* beyond the end of the list */
 725			break;
 726
 727#ifdef CONFIG_COMPAT
 728		if (unlikely(in_compat_syscall())) {
 729			compat_siginfo_t __user *uinfo = compat_ptr(data);
 730
 731			if (copy_siginfo_to_user32(uinfo, &info)) {
 732				ret = -EFAULT;
 733				break;
 734			}
 735
 736		} else
 737#endif
 738		{
 739			siginfo_t __user *uinfo = (siginfo_t __user *) data;
 740
 741			if (copy_siginfo_to_user(uinfo, &info)) {
 742				ret = -EFAULT;
 743				break;
 744			}
 745		}
 746
 747		data += sizeof(siginfo_t);
 748		i++;
 749
 750		if (signal_pending(current))
 751			break;
 752
 753		cond_resched();
 754	}
 755
 756	if (i > 0)
 757		return i;
 758
 759	return ret;
 760}
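/*
 * Editorial example, not part of this file: peeking at queued signals
 * without dequeueing them.  On success the return value is the number of
 * siginfo_t entries copied, matching the "if (i > 0) return i" logic
 * above.  peek_pending() is a hypothetical helper; the argument struct
 * is glibc's struct __ptrace_peeksiginfo_args.
 */
#if 0	/* illustrative userspace code only */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

static int peek_pending(pid_t pid, siginfo_t *out)
{
	struct __ptrace_peeksiginfo_args args = {
		.off	= 0,	/* start of the queue */
		.flags	= 0,	/* per-thread queue, not shared_pending */
		.nr	= 8,	/* at most 8 entries */
	};

	return ptrace(PTRACE_PEEKSIGINFO, pid, &args, out);
}
#endif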
 761
 762#ifdef PTRACE_SINGLESTEP
 763#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
 764#else
 765#define is_singlestep(request)		0
 766#endif
 767
 768#ifdef PTRACE_SINGLEBLOCK
 769#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
 770#else
 771#define is_singleblock(request)		0
 772#endif
 773
 774#ifdef PTRACE_SYSEMU
 775#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
 776#else
 777#define is_sysemu_singlestep(request)	0
 778#endif
 779
 780static int ptrace_resume(struct task_struct *child, long request,
 781			 unsigned long data)
 782{
 783	bool need_siglock;
 784
 785	if (!valid_signal(data))
 786		return -EIO;
 787
 788	if (request == PTRACE_SYSCALL)
 789		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 790	else
 791		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 792
 793#ifdef TIF_SYSCALL_EMU
 794	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
 795		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 796	else
 797		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 798#endif
 799
 800	if (is_singleblock(request)) {
 801		if (unlikely(!arch_has_block_step()))
 802			return -EIO;
 803		user_enable_block_step(child);
 804	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 805		if (unlikely(!arch_has_single_step()))
 806			return -EIO;
 807		user_enable_single_step(child);
 808	} else {
 809		user_disable_single_step(child);
 810	}
 811
 812	/*
 813	 * Change ->exit_code and ->state under siglock to avoid the race
 814	 * with wait_task_stopped() in between; a non-zero ->exit_code will
 815	 * wrongly look like another report from tracee.
 816	 *
 817	 * Note that we need siglock even if ->exit_code == data and/or this
 818	 * status was not reported yet; the new status must not be cleared by
 819	 * wait_task_stopped() after resume.
 820	 *
 821	 * If data == 0 we do not care if wait_task_stopped() reports the old
 822	 * status and clears the code too; this can't race with the tracee, which
 823	 * takes siglock after resume.
 824	 */
 825	need_siglock = data && !thread_group_empty(current);
 826	if (need_siglock)
 827		spin_lock_irq(&child->sighand->siglock);
 828	child->exit_code = data;
 829	wake_up_state(child, __TASK_TRACED);
 830	if (need_siglock)
 831		spin_unlock_irq(&child->sighand->siglock);
 832
 833	return 0;
 834}
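/*
 * Editorial example, not part of this file: the tracer side of
 * ptrace_resume().  "data" is the signal injected on resume; using
 * PTRACE_SYSCALL instead of PTRACE_CONT would set TIF_SYSCALL_TRACE as
 * above and stop again at the next syscall boundary.  resume_tracee()
 * is a hypothetical helper.
 */
#if 0	/* illustrative userspace code only */
#include <sys/ptrace.h>
#include <sys/types.h>

static int resume_tracee(pid_t pid, int sig)
{
	return ptrace(PTRACE_CONT, pid, NULL, (void *)(long)sig);
}
#endif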
 835
 836#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 837
 838static const struct user_regset *
 839find_regset(const struct user_regset_view *view, unsigned int type)
 840{
 841	const struct user_regset *regset;
 842	int n;
 843
 844	for (n = 0; n < view->n; ++n) {
 845		regset = view->regsets + n;
 846		if (regset->core_note_type == type)
 847			return regset;
 848	}
 849
 850	return NULL;
 851}
 852
 853static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 854			 struct iovec *kiov)
 855{
 856	const struct user_regset_view *view = task_user_regset_view(task);
 857	const struct user_regset *regset = find_regset(view, type);
 858	int regset_no;
 859
 860	if (!regset || (kiov->iov_len % regset->size) != 0)
 861		return -EINVAL;
 862
 863	regset_no = regset - view->regsets;
 864	kiov->iov_len = min(kiov->iov_len,
 865			    (__kernel_size_t) (regset->n * regset->size));
 866
 867	if (req == PTRACE_GETREGSET)
 868		return copy_regset_to_user(task, view, regset_no, 0,
 869					   kiov->iov_len, kiov->iov_base);
 870	else
 871		return copy_regset_from_user(task, view, regset_no, 0,
 872					     kiov->iov_len, kiov->iov_base);
 873}
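/*
 * Editorial example, not part of this file: reading the general-purpose
 * registers with PTRACE_GETREGSET.  ptrace_regset() above clamps iov_len
 * to the regset size, so the caller learns the actual length from the
 * updated iovec.  get_gp_regs() is a hypothetical helper and assumes an
 * x86-64/glibc environment for struct user_regs_struct.
 */
#if 0	/* illustrative userspace code only */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static int get_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len  = sizeof(*regs),
	};

	return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRSTATUS, &iov);
}
#endif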
 874
 875/*
 876 * This is declared in linux/regset.h and defined in machine-dependent
 877 * code.  We put the export here, near the primary machine-neutral use,
 878 * to ensure no machine forgets it.
 879 */
 880EXPORT_SYMBOL_GPL(task_user_regset_view);
 881#endif
 882
 883int ptrace_request(struct task_struct *child, long request,
 884		   unsigned long addr, unsigned long data)
 885{
 886	bool seized = child->ptrace & PT_SEIZED;
 887	int ret = -EIO;
 888	siginfo_t siginfo, *si;
 889	void __user *datavp = (void __user *) data;
 890	unsigned long __user *datalp = datavp;
 891	unsigned long flags;
 892
 893	switch (request) {
 894	case PTRACE_PEEKTEXT:
 895	case PTRACE_PEEKDATA:
 896		return generic_ptrace_peekdata(child, addr, data);
 897	case PTRACE_POKETEXT:
 898	case PTRACE_POKEDATA:
 899		return generic_ptrace_pokedata(child, addr, data);
 900
 901#ifdef PTRACE_OLDSETOPTIONS
 902	case PTRACE_OLDSETOPTIONS:
 903#endif
 904	case PTRACE_SETOPTIONS:
 905		ret = ptrace_setoptions(child, data);
 906		break;
 907	case PTRACE_GETEVENTMSG:
 908		ret = put_user(child->ptrace_message, datalp);
 909		break;
 910
 911	case PTRACE_PEEKSIGINFO:
 912		ret = ptrace_peek_siginfo(child, addr, data);
 913		break;
 914
 915	case PTRACE_GETSIGINFO:
 916		ret = ptrace_getsiginfo(child, &siginfo);
 917		if (!ret)
 918			ret = copy_siginfo_to_user(datavp, &siginfo);
 919		break;
 920
 921	case PTRACE_SETSIGINFO:
 922		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
 923			ret = -EFAULT;
 924		else
 925			ret = ptrace_setsiginfo(child, &siginfo);
 926		break;
 927
 928	case PTRACE_GETSIGMASK:
 929		if (addr != sizeof(sigset_t)) {
 930			ret = -EINVAL;
 931			break;
 932		}
 933
 934		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
 935			ret = -EFAULT;
 936		else
 937			ret = 0;
 938
 939		break;
 940
 941	case PTRACE_SETSIGMASK: {
 942		sigset_t new_set;
 943
 944		if (addr != sizeof(sigset_t)) {
 945			ret = -EINVAL;
 946			break;
 947		}
 948
 949		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
 950			ret = -EFAULT;
 951			break;
 952		}
 953
 954		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
 955
 956		/*
 957		 * Every thread does recalc_sigpending() after resume, so
 958		 * retarget_shared_pending() and recalc_sigpending() are not
 959		 * called here.
 960		 */
 961		spin_lock_irq(&child->sighand->siglock);
 962		child->blocked = new_set;
 963		spin_unlock_irq(&child->sighand->siglock);
 964
 965		ret = 0;
 966		break;
 967	}
 968
 969	case PTRACE_INTERRUPT:
 970		/*
 971		 * Stop tracee without any side-effect on signal or job
 972		 * control.  At least one trap is guaranteed to happen
 973		 * after this request.  If @child is already trapped, the
 974		 * current trap is not disturbed and another trap will
 975		 * happen after the current trap is ended with PTRACE_CONT.
 976		 *
 977		 * The actual trap might not be PTRACE_EVENT_STOP trap but
 978		 * the pending condition is cleared regardless.
 979		 */
 980		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
 981			break;
 982
 983		/*
 984		 * INTERRUPT doesn't disturb an existing trap, with one
 985		 * exception: if the ptracer issued LISTEN for the current
 986		 * STOP, this INTERRUPT should clear LISTEN and re-trap
 987		 * the tracee into STOP.
 988		 */
 989		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
 990			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 991
 992		unlock_task_sighand(child, &flags);
 993		ret = 0;
 994		break;
 995
 996	case PTRACE_LISTEN:
 997		/*
 998		 * Listen for events.  Tracee must be in STOP.  It's not
 999		 * resumed per se but is not considered to be in TRACED by
1000		 * wait(2) or ptrace(2).  If an async event (e.g. group
1001		 * stop state change) happens, tracee will enter STOP trap
1002		 * again.  Alternatively, ptracer can issue INTERRUPT to
1003		 * finish listening and re-trap tracee into STOP.
1004		 */
1005		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
1006			break;
1007
1008		si = child->last_siginfo;
1009		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
1010			child->jobctl |= JOBCTL_LISTENING;
1011			/*
1012			 * If NOTIFY is set, it means event happened between
1013			 * start of this trap and now.  Trigger re-trap.
1014			 */
1015			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
1016				ptrace_signal_wake_up(child, true);
1017			ret = 0;
1018		}
1019		unlock_task_sighand(child, &flags);
1020		break;
1021
1022	case PTRACE_DETACH:	 /* detach a process that was attached. */
1023		ret = ptrace_detach(child, data);
1024		break;
1025
1026#ifdef CONFIG_BINFMT_ELF_FDPIC
1027	case PTRACE_GETFDPIC: {
1028		struct mm_struct *mm = get_task_mm(child);
1029		unsigned long tmp = 0;
1030
1031		ret = -ESRCH;
1032		if (!mm)
1033			break;
1034
1035		switch (addr) {
1036		case PTRACE_GETFDPIC_EXEC:
1037			tmp = mm->context.exec_fdpic_loadmap;
1038			break;
1039		case PTRACE_GETFDPIC_INTERP:
1040			tmp = mm->context.interp_fdpic_loadmap;
1041			break;
1042		default:
1043			break;
1044		}
1045		mmput(mm);
1046
1047		ret = put_user(tmp, datalp);
1048		break;
1049	}
1050#endif
1051
1052#ifdef PTRACE_SINGLESTEP
1053	case PTRACE_SINGLESTEP:
1054#endif
1055#ifdef PTRACE_SINGLEBLOCK
1056	case PTRACE_SINGLEBLOCK:
1057#endif
1058#ifdef PTRACE_SYSEMU
1059	case PTRACE_SYSEMU:
1060	case PTRACE_SYSEMU_SINGLESTEP:
1061#endif
1062	case PTRACE_SYSCALL:
1063	case PTRACE_CONT:
1064		return ptrace_resume(child, request, data);
1065
1066	case PTRACE_KILL:
1067		if (child->exit_state)	/* already dead */
1068			return 0;
1069		return ptrace_resume(child, request, SIGKILL);
1070
1071#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1072	case PTRACE_GETREGSET:
1073	case PTRACE_SETREGSET: {
1074		struct iovec kiov;
1075		struct iovec __user *uiov = datavp;
1076
1077		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
1078			return -EFAULT;
1079
1080		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
1081		    __get_user(kiov.iov_len, &uiov->iov_len))
1082			return -EFAULT;
1083
1084		ret = ptrace_regset(child, request, addr, &kiov);
1085		if (!ret)
1086			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1087		break;
1088	}
1089#endif
1090
1091	case PTRACE_SECCOMP_GET_FILTER:
1092		ret = seccomp_get_filter(child, addr, datavp);
1093		break;
1094
1095	case PTRACE_SECCOMP_GET_METADATA:
1096		ret = seccomp_get_metadata(child, addr, datavp);
1097		break;
1098
1099	default:
1100		break;
1101	}
1102
1103	return ret;
1104}
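/*
 * Editorial example, not part of this file: PTRACE_INTERRUPT is only
 * valid for seized tracees (the "seized" check above), so the usual
 * pairing is PTRACE_SEIZE followed by PTRACE_INTERRUPT to force a trap
 * without signal or job-control side effects.  seize_and_stop() is a
 * hypothetical helper.
 */
#if 0	/* illustrative userspace code only */
#include <sys/ptrace.h>
#include <sys/types.h>

static int seize_and_stop(pid_t pid)
{
	if (ptrace(PTRACE_SEIZE, pid, NULL, NULL) == -1)
		return -1;
	return ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
}
#endif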
1105
1106#ifndef arch_ptrace_attach
1107#define arch_ptrace_attach(child)	do { } while (0)
1108#endif
1109
1110SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
1111		unsigned long, data)
1112{
1113	struct task_struct *child;
1114	long ret;
1115
1116	if (request == PTRACE_TRACEME) {
1117		ret = ptrace_traceme();
1118		if (!ret)
1119			arch_ptrace_attach(current);
1120		goto out;
1121	}
1122
1123	child = find_get_task_by_vpid(pid);
1124	if (!child) {
1125		ret = -ESRCH;
1126		goto out;
1127	}
1128
1129	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1130		ret = ptrace_attach(child, request, addr, data);
1131		/*
1132		 * Some architectures need to do book-keeping after
1133		 * a ptrace attach.
1134		 */
1135		if (!ret)
1136			arch_ptrace_attach(child);
1137		goto out_put_task_struct;
1138	}
1139
1140	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1141				  request == PTRACE_INTERRUPT);
1142	if (ret < 0)
1143		goto out_put_task_struct;
1144
1145	ret = arch_ptrace(child, request, addr, data);
1146	if (ret || request != PTRACE_DETACH)
1147		ptrace_unfreeze_traced(child);
1148
1149 out_put_task_struct:
1150	put_task_struct(child);
1151 out:
1152	return ret;
1153}
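/*
 * Editorial example, not part of this file: the round trip the syscall
 * entry above dispatches - attach, wait for the stop, issue one request,
 * detach.  peek_one_word() is a hypothetical helper with minimal error
 * handling.
 */
#if 0	/* illustrative userspace code only */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static long peek_one_word(pid_t pid, unsigned long addr)
{
	long word;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;
	waitpid(pid, NULL, 0);		/* consume the attach stop */
	word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return word;
}
#endif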
1154
1155int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1156			    unsigned long data)
1157{
1158	unsigned long tmp;
1159	int copied;
1160
1161	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1162	if (copied != sizeof(tmp))
1163		return -EIO;
1164	return put_user(tmp, (unsigned long __user *)data);
1165}
1166
1167int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1168			    unsigned long data)
1169{
1170	int copied;
1171
1172	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
1173			FOLL_FORCE | FOLL_WRITE);
1174	return (copied == sizeof(data)) ? 0 : -EIO;
1175}
1176
1177#if defined CONFIG_COMPAT
1178
1179int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1180			  compat_ulong_t addr, compat_ulong_t data)
1181{
1182	compat_ulong_t __user *datap = compat_ptr(data);
1183	compat_ulong_t word;
1184	siginfo_t siginfo;
1185	int ret;
1186
1187	switch (request) {
1188	case PTRACE_PEEKTEXT:
1189	case PTRACE_PEEKDATA:
1190		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
1191				FOLL_FORCE);
1192		if (ret != sizeof(word))
1193			ret = -EIO;
1194		else
1195			ret = put_user(word, datap);
1196		break;
1197
1198	case PTRACE_POKETEXT:
1199	case PTRACE_POKEDATA:
1200		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
1201				FOLL_FORCE | FOLL_WRITE);
1202		ret = (ret != sizeof(data) ? -EIO : 0);
1203		break;
1204
1205	case PTRACE_GETEVENTMSG:
1206		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
1207		break;
1208
1209	case PTRACE_GETSIGINFO:
1210		ret = ptrace_getsiginfo(child, &siginfo);
1211		if (!ret)
1212			ret = copy_siginfo_to_user32(
1213				(struct compat_siginfo __user *) datap,
1214				&siginfo);
1215		break;
1216
1217	case PTRACE_SETSIGINFO:
1218		if (copy_siginfo_from_user32(
1219			    &siginfo, (struct compat_siginfo __user *) datap))
1220			ret = -EFAULT;
1221		else
1222			ret = ptrace_setsiginfo(child, &siginfo);
1223		break;
1224#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1225	case PTRACE_GETREGSET:
1226	case PTRACE_SETREGSET:
1227	{
1228		struct iovec kiov;
1229		struct compat_iovec __user *uiov =
1230			(struct compat_iovec __user *) datap;
1231		compat_uptr_t ptr;
1232		compat_size_t len;
1233
1234		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
1235			return -EFAULT;
1236
1237		if (__get_user(ptr, &uiov->iov_base) ||
1238		    __get_user(len, &uiov->iov_len))
1239			return -EFAULT;
1240
1241		kiov.iov_base = compat_ptr(ptr);
1242		kiov.iov_len = len;
1243
1244		ret = ptrace_regset(child, request, addr, &kiov);
1245		if (!ret)
1246			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1247		break;
1248	}
1249#endif
1250
1251	default:
1252		ret = ptrace_request(child, request, addr, data);
1253	}
1254
1255	return ret;
1256}
1257
1258COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
1259		       compat_long_t, addr, compat_long_t, data)
1260{
1261	struct task_struct *child;
1262	long ret;
1263
1264	if (request == PTRACE_TRACEME) {
1265		ret = ptrace_traceme();
1266		goto out;
1267	}
1268
1269	child = find_get_task_by_vpid(pid);
1270	if (!child) {
1271		ret = -ESRCH;
1272		goto out;
1273	}
1274
1275	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1276		ret = ptrace_attach(child, request, addr, data);
1277		/*
1278		 * Some architectures need to do book-keeping after
1279		 * a ptrace attach.
1280		 */
1281		if (!ret)
1282			arch_ptrace_attach(child);
1283		goto out_put_task_struct;
1284	}
1285
1286	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1287				  request == PTRACE_INTERRUPT);
1288	if (!ret) {
1289		ret = compat_arch_ptrace(child, request, addr, data);
1290		if (ret || request != PTRACE_DETACH)
1291			ptrace_unfreeze_traced(child);
1292	}
1293
1294 out_put_task_struct:
1295	put_task_struct(child);
1296 out:
1297	return ret;
1298}
1299#endif	/* CONFIG_COMPAT */