v4.10.11
   1/*
   2 * linux/kernel/ptrace.c
   3 *
   4 * (C) Copyright 1999 Linus Torvalds
   5 *
   6 * Common interfaces for "ptrace()" which we do not want
   7 * to continually duplicate across every architecture.
   8 */
   9
  10#include <linux/capability.h>
  11#include <linux/export.h>
  12#include <linux/sched.h>
  13#include <linux/errno.h>
  14#include <linux/mm.h>
  15#include <linux/highmem.h>
  16#include <linux/pagemap.h>
  17#include <linux/ptrace.h>
  18#include <linux/security.h>
  19#include <linux/signal.h>
  20#include <linux/uio.h>
  21#include <linux/audit.h>
  22#include <linux/pid_namespace.h>
  23#include <linux/syscalls.h>
  24#include <linux/uaccess.h>
  25#include <linux/regset.h>
  26#include <linux/hw_breakpoint.h>
  27#include <linux/cn_proc.h>
  28#include <linux/compat.h>
  29
  30/*
  31 * Access another process' address space via ptrace.
   32 * Source/target buffer must be kernel space.
   33 * Do not walk the page table directly; use get_user_pages.
  34 */
  35int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
  36		     void *buf, int len, unsigned int gup_flags)
  37{
  38	struct mm_struct *mm;
  39	int ret;
  40
  41	mm = get_task_mm(tsk);
  42	if (!mm)
  43		return 0;
  44
  45	if (!tsk->ptrace ||
  46	    (current != tsk->parent) ||
  47	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
  48	     !ptracer_capable(tsk, mm->user_ns))) {
  49		mmput(mm);
  50		return 0;
  51	}
  52
  53	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
  54	mmput(mm);
  55
  56	return ret;
  57}
  58
  59
  60/*
  61 * ptrace a task: make the debugger its new parent and
  62 * move it to the ptrace list.
  63 *
  64 * Must be called with the tasklist lock write-held.
  65 */
  66void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
  67{
  68	BUG_ON(!list_empty(&child->ptrace_entry));
  69	list_add(&child->ptrace_entry, &new_parent->ptraced);
  70	child->parent = new_parent;
  71	rcu_read_lock();
  72	child->ptracer_cred = get_cred(__task_cred(new_parent));
  73	rcu_read_unlock();
  74}
  75
  76/**
  77 * __ptrace_unlink - unlink ptracee and restore its execution state
  78 * @child: ptracee to be unlinked
  79 *
  80 * Remove @child from the ptrace list, move it back to the original parent,
  81 * and restore the execution state so that it conforms to the group stop
  82 * state.
  83 *
  84 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
  85 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
  86 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
  87 * If the ptracer is exiting, the ptracee can be in any state.
  88 *
  89 * After detach, the ptracee should be in a state which conforms to the
  90 * group stop.  If the group is stopped or in the process of stopping, the
  91 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
  92 * up from TASK_TRACED.
  93 *
  94 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
  95 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
  96 * to but in the opposite direction of what happens while attaching to a
  97 * stopped task.  However, in this direction, the intermediate RUNNING
  98 * state is not hidden even from the current ptracer and if it immediately
  99 * re-attaches and performs a WNOHANG wait(2), it may fail.
 100 *
 101 * CONTEXT:
 102 * write_lock_irq(tasklist_lock)
 103 */
 104void __ptrace_unlink(struct task_struct *child)
 105{
 106	const struct cred *old_cred;
 107	BUG_ON(!child->ptrace);
 108
 109	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 110
 111	child->parent = child->real_parent;
 112	list_del_init(&child->ptrace_entry);
 113	old_cred = child->ptracer_cred;
 114	child->ptracer_cred = NULL;
 115	put_cred(old_cred);
 116
 117	spin_lock(&child->sighand->siglock);
 118	child->ptrace = 0;
 119	/*
 120	 * Clear all pending traps and TRAPPING.  TRAPPING should be
 121	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
 122	 */
 123	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
 124	task_clear_jobctl_trapping(child);
 125
 126	/*
 127	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
 128	 * @child isn't dead.
 129	 */
 130	if (!(child->flags & PF_EXITING) &&
 131	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
 132	     child->signal->group_stop_count)) {
 133		child->jobctl |= JOBCTL_STOP_PENDING;
 134
 135		/*
 136		 * This is only possible if this thread was cloned by the
 137		 * traced task running in the stopped group, set the signal
 138		 * for the future reports.
 139		 * FIXME: we should change ptrace_init_task() to handle this
 140		 * case.
 141		 */
 142		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
 143			child->jobctl |= SIGSTOP;
 144	}
 145
 146	/*
 147	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 148	 * @child in the butt.  Note that @resume should be used iff @child
 149	 * is in TASK_TRACED; otherwise, we might unduly disrupt
 150	 * TASK_KILLABLE sleeps.
 151	 */
 152	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
 153		ptrace_signal_wake_up(child, true);
 154
 155	spin_unlock(&child->sighand->siglock);
 156}
 157
 158/* Ensure that nothing can wake it up, even SIGKILL */
 159static bool ptrace_freeze_traced(struct task_struct *task)
 160{
 161	bool ret = false;
 162
 163	/* Lockless, nobody but us can set this flag */
 164	if (task->jobctl & JOBCTL_LISTENING)
 165		return ret;
 166
 167	spin_lock_irq(&task->sighand->siglock);
 168	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
 169		task->state = __TASK_TRACED;
 170		ret = true;
 171	}
 172	spin_unlock_irq(&task->sighand->siglock);
 173
 174	return ret;
 175}
 176
 177static void ptrace_unfreeze_traced(struct task_struct *task)
 178{
 179	if (task->state != __TASK_TRACED)
 180		return;
 181
 182	WARN_ON(!task->ptrace || task->parent != current);
 183
 184	/*
 185	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
 186	 * Recheck state under the lock to close this race.
 187	 */
 188	spin_lock_irq(&task->sighand->siglock);
 189	if (task->state == __TASK_TRACED) {
 190		if (__fatal_signal_pending(task))
 191			wake_up_state(task, __TASK_TRACED);
 192		else
 193			task->state = TASK_TRACED;
 194	}
 195	spin_unlock_irq(&task->sighand->siglock);
 196}
 197
 198/**
 199 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 200 * @child: ptracee to check for
 201 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 202 *
 203 * Check whether @child is being ptraced by %current and ready for further
 204 * ptrace operations.  If @ignore_state is %false, @child also should be in
 205 * %TASK_TRACED state and on return the child is guaranteed to be traced
 206 * and not executing.  If @ignore_state is %true, @child can be in any
 207 * state.
 208 *
 209 * CONTEXT:
 210 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 211 *
 212 * RETURNS:
 213 * 0 on success, -ESRCH if %child is not ready.
 214 */
 215static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 216{
 217	int ret = -ESRCH;
 218
 219	/*
 220	 * We take the read lock around doing both checks to close a
 221	 * possible race where someone else was tracing our child and
 222	 * detached between these two checks.  After this locked check,
 223	 * we are sure that this is our traced child and that can only
 224	 * be changed by us so it's not changing right after this.
 225	 */
 226	read_lock(&tasklist_lock);
 227	if (child->ptrace && child->parent == current) {
 228		WARN_ON(child->state == __TASK_TRACED);
 229		/*
 230		 * child->sighand can't be NULL, release_task()
 231		 * does ptrace_unlink() before __exit_signal().
 232		 */
 233		if (ignore_state || ptrace_freeze_traced(child))
  234			ret = 0;
 235	}
 236	read_unlock(&tasklist_lock);
 237
 238	if (!ret && !ignore_state) {
 239		if (!wait_task_inactive(child, __TASK_TRACED)) {
 240			/*
 241			 * This can only happen if may_ptrace_stop() fails and
 242			 * ptrace_stop() changes ->state back to TASK_RUNNING,
 243			 * so we should not worry about leaking __TASK_TRACED.
 244			 */
 245			WARN_ON(child->state == __TASK_TRACED);
 246			ret = -ESRCH;
 247		}
 248	}
 249
 250	return ret;
 251}
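
/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * check above is why most requests fail with -ESRCH when the tracee is
 * attached but not currently in a ptrace stop. A tracer can probe for
 * that case with a side-effect-free request such as PTRACE_GETEVENTMSG.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>

/* Returns 1 if @pid is our tracee and stopped for us, 0 otherwise. */
static int tracee_ready(pid_t pid)
{
	unsigned long msg;

	errno = 0;
	if (ptrace(PTRACE_GETEVENTMSG, pid, NULL, &msg) == -1 && errno == ESRCH)
		return 0;	/* running, dying, or not traced by us */
	return 1;
}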
 252
 253static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 254{
 255	if (mode & PTRACE_MODE_NOAUDIT)
 256		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
 257	else
 258		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
 259}
 260
 261/* Returns 0 on success, -errno on denial. */
 262static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 263{
 264	const struct cred *cred = current_cred(), *tcred;
 265	struct mm_struct *mm;
 266	kuid_t caller_uid;
 267	kgid_t caller_gid;
 268
 269	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
 270		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
 271		return -EPERM;
 272	}
 273
 274	/* May we inspect the given task?
 275	 * This check is used both for attaching with ptrace
 276	 * and for allowing access to sensitive information in /proc.
 277	 *
 278	 * ptrace_attach denies several cases that /proc allows
 279	 * because setting up the necessary parent/child relationship
 280	 * or halting the specified task is impossible.
 281	 */
 282
 283	/* Don't let security modules deny introspection */
 284	if (same_thread_group(task, current))
 285		return 0;
 286	rcu_read_lock();
 287	if (mode & PTRACE_MODE_FSCREDS) {
 288		caller_uid = cred->fsuid;
 289		caller_gid = cred->fsgid;
 290	} else {
 291		/*
 292		 * Using the euid would make more sense here, but something
 293		 * in userland might rely on the old behavior, and this
 294		 * shouldn't be a security problem since
 295		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
 296		 * used a syscall that requests access to another process
 297		 * (and not a filesystem syscall to procfs).
 298		 */
 299		caller_uid = cred->uid;
 300		caller_gid = cred->gid;
 301	}
 302	tcred = __task_cred(task);
 303	if (uid_eq(caller_uid, tcred->euid) &&
 304	    uid_eq(caller_uid, tcred->suid) &&
 305	    uid_eq(caller_uid, tcred->uid)  &&
 306	    gid_eq(caller_gid, tcred->egid) &&
 307	    gid_eq(caller_gid, tcred->sgid) &&
 308	    gid_eq(caller_gid, tcred->gid))
 309		goto ok;
 310	if (ptrace_has_cap(tcred->user_ns, mode))
 311		goto ok;
 312	rcu_read_unlock();
 313	return -EPERM;
 314ok:
 315	rcu_read_unlock();
 316	mm = task->mm;
 317	if (mm &&
 318	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
 319	     !ptrace_has_cap(mm->user_ns, mode)))
 320	    return -EPERM;
 321
 322	return security_ptrace_access_check(task, mode);
 323}
 324
 325bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 326{
 327	int err;
 328	task_lock(task);
 329	err = __ptrace_may_access(task, mode);
 330	task_unlock(task);
 331	return !err;
 332}
 333
 334static int ptrace_attach(struct task_struct *task, long request,
 335			 unsigned long addr,
 336			 unsigned long flags)
 337{
 338	bool seize = (request == PTRACE_SEIZE);
 339	int retval;
 340
 341	retval = -EIO;
 342	if (seize) {
 343		if (addr != 0)
 344			goto out;
 345		if (flags & ~(unsigned long)PTRACE_O_MASK)
 346			goto out;
 347		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
 348	} else {
 349		flags = PT_PTRACED;
 350	}
 351
 352	audit_ptrace(task);
 353
 354	retval = -EPERM;
 355	if (unlikely(task->flags & PF_KTHREAD))
 356		goto out;
 357	if (same_thread_group(task, current))
 358		goto out;
 359
 360	/*
 361	 * Protect exec's credential calculations against our interference;
 362	 * SUID, SGID and LSM creds get determined differently
 363	 * under ptrace.
 364	 */
 365	retval = -ERESTARTNOINTR;
 366	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
 367		goto out;
 368
 369	task_lock(task);
 370	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
 371	task_unlock(task);
 372	if (retval)
 373		goto unlock_creds;
 374
 375	write_lock_irq(&tasklist_lock);
 376	retval = -EPERM;
 377	if (unlikely(task->exit_state))
 378		goto unlock_tasklist;
 379	if (task->ptrace)
 380		goto unlock_tasklist;
 381
 382	if (seize)
 383		flags |= PT_SEIZED;
 384	task->ptrace = flags;
 385
 386	__ptrace_link(task, current);
 387
 388	/* SEIZE doesn't trap tracee on attach */
 389	if (!seize)
 390		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
 391
 392	spin_lock(&task->sighand->siglock);
 393
 394	/*
 395	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
 396	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
 397	 * will be cleared if the child completes the transition or any
 398	 * event which clears the group stop states happens.  We'll wait
 399	 * for the transition to complete before returning from this
 400	 * function.
 401	 *
 402	 * This hides STOPPED -> RUNNING -> TRACED transition from the
 403	 * attaching thread but a different thread in the same group can
 404	 * still observe the transient RUNNING state.  IOW, if another
 405	 * thread's WNOHANG wait(2) on the stopped tracee races against
 406	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
 407	 *
 408	 * The following task_is_stopped() test is safe as both transitions
 409	 * in and out of STOPPED are protected by siglock.
 410	 */
 411	if (task_is_stopped(task) &&
 412	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
 413		signal_wake_up_state(task, __TASK_STOPPED);
 414
 415	spin_unlock(&task->sighand->siglock);
 416
 417	retval = 0;
 418unlock_tasklist:
 419	write_unlock_irq(&tasklist_lock);
 420unlock_creds:
 421	mutex_unlock(&task->signal->cred_guard_mutex);
 422out:
 423	if (!retval) {
 424		/*
 425		 * We do not bother to change retval or clear JOBCTL_TRAPPING
 426		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
 427		 * not return to user-mode, it will exit and clear this bit in
 428		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
 429		 * and until then nobody can ptrace this task.
 430		 */
 431		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
 432		proc_ptrace_connector(task, PTRACE_ATTACH);
 433	}
 434
 435	return retval;
 436}
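
/*
 * Illustrative userspace sketch, not part of the kernel source: the two
 * attach flavors handled above. PTRACE_ATTACH sends SIGSTOP, so the
 * tracer must wait for the stop; PTRACE_SEIZE attaches without trapping
 * the tracee and takes PTRACE_O_* options in the data argument (addr
 * must be 0). Error handling is abbreviated.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int attach_stopped(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;
	/* tracee traps with SIGSTOP; after this it is safe to issue requests */
	return waitpid(pid, &status, 0) == pid ? 0 : -1;
}

static int seize_running(pid_t pid)
{
	/* tracee keeps running until PTRACE_INTERRUPT or an event fires */
	return ptrace(PTRACE_SEIZE, pid, NULL,
		      (void *)(long)PTRACE_O_TRACEEXIT) ? -1 : 0;
}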
 437
 438/**
 439 * ptrace_traceme  --  helper for PTRACE_TRACEME
 440 *
 441 * Performs checks and sets PT_PTRACED.
 442 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 443 */
 444static int ptrace_traceme(void)
 445{
 446	int ret = -EPERM;
 447
 448	write_lock_irq(&tasklist_lock);
 449	/* Are we already being traced? */
 450	if (!current->ptrace) {
 451		ret = security_ptrace_traceme(current->parent);
 452		/*
 453		 * Check PF_EXITING to ensure ->real_parent has not passed
 454		 * exit_ptrace(). Otherwise we don't report the error but
 455		 * pretend ->real_parent untraces us right after return.
 456		 */
 457		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 458			current->ptrace = PT_PTRACED;
 459			__ptrace_link(current, current->real_parent);
 460		}
 461	}
 462	write_unlock_irq(&tasklist_lock);
 463
 464	return ret;
 465}
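
/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * classic PTRACE_TRACEME handshake this helper implements the kernel
 * side of. The child volunteers to be traced by its parent, then stops
 * itself so the parent gains control at a well-defined point.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>

static void traceme_demo(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* trap to the parent */
		_exit(0);
	}
	waitpid(pid, &status, 0);	/* child is now in a ptrace stop */
	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, &status, 0);	/* reap the exit */
}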
 466
 467/*
  468 * Called with irqs disabled, returns true if children should reap themselves.
 469 */
 470static int ignoring_children(struct sighand_struct *sigh)
 471{
 472	int ret;
 473	spin_lock(&sigh->siglock);
 474	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
 475	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
 476	spin_unlock(&sigh->siglock);
 477	return ret;
 478}
 479
 480/*
 481 * Called with tasklist_lock held for writing.
 482 * Unlink a traced task, and clean it up if it was a traced zombie.
 483 * Return true if it needs to be reaped with release_task().
 484 * (We can't call release_task() here because we already hold tasklist_lock.)
 485 *
 486 * If it's a zombie, our attachedness prevented normal parent notification
 487 * or self-reaping.  Do notification now if it would have happened earlier.
 488 * If it should reap itself, return true.
 489 *
 490 * If it's our own child, there is no notification to do. But if our normal
 491 * children self-reap, then this child was prevented by ptrace and we must
 492 * reap it now, in that case we must also wake up sub-threads sleeping in
 493 * do_wait().
 494 */
 495static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 496{
 497	bool dead;
 498
 499	__ptrace_unlink(p);
 500
 501	if (p->exit_state != EXIT_ZOMBIE)
 502		return false;
 503
 504	dead = !thread_group_leader(p);
 505
 506	if (!dead && thread_group_empty(p)) {
 507		if (!same_thread_group(p->real_parent, tracer))
 508			dead = do_notify_parent(p, p->exit_signal);
 509		else if (ignoring_children(tracer->sighand)) {
 510			__wake_up_parent(p, tracer);
 511			dead = true;
 512		}
 513	}
 514	/* Mark it as in the process of being reaped. */
 515	if (dead)
 516		p->exit_state = EXIT_DEAD;
 517	return dead;
 518}
 519
 520static int ptrace_detach(struct task_struct *child, unsigned int data)
 521{
 522	if (!valid_signal(data))
 523		return -EIO;
 524
 525	/* Architecture-specific hardware disable .. */
 526	ptrace_disable(child);
 527
 528	write_lock_irq(&tasklist_lock);
 529	/*
 530	 * We rely on ptrace_freeze_traced(). It can't be killed and
 531	 * untraced by another thread, it can't be a zombie.
 532	 */
 533	WARN_ON(!child->ptrace || child->exit_state);
 534	/*
 535	 * tasklist_lock avoids the race with wait_task_stopped(), see
 536	 * the comment in ptrace_resume().
 537	 */
 538	child->exit_code = data;
 539	__ptrace_detach(current, child);
 540	write_unlock_irq(&tasklist_lock);
 541
 542	proc_ptrace_connector(child, PTRACE_DETACH);
 543
 544	return 0;
 545}
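
/*
 * Illustrative userspace sketch, not part of the kernel source: the data
 * argument to PTRACE_DETACH is a signal number delivered to the tracee
 * on release (or 0 for none), which is what the valid_signal() check
 * above vets.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

static int detach_with_cont(pid_t pid)
{
	/* release the tracee; SIGCONT, for example, nudges it out of a stop */
	return ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGCONT) ? -1 : 0;
}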
 546
 547/*
 548 * Detach all tasks we were using ptrace on. Called with tasklist held
 549 * for writing.
 550 */
 551void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
 552{
 553	struct task_struct *p, *n;
 554
 555	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 556		if (unlikely(p->ptrace & PT_EXITKILL))
 557			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
 558
 559		if (__ptrace_detach(tracer, p))
 560			list_add(&p->ptrace_entry, dead);
 561	}
 562}
 563
 564int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 565{
 566	int copied = 0;
 567
 568	while (len > 0) {
 569		char buf[128];
 570		int this_len, retval;
 571
 572		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 573		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
 574
 575		if (!retval) {
 576			if (copied)
 577				break;
 578			return -EIO;
 579		}
 580		if (copy_to_user(dst, buf, retval))
 581			return -EFAULT;
 582		copied += retval;
 583		src += retval;
 584		dst += retval;
 585		len -= retval;
 586	}
 587	return copied;
 588}
 589
 590int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 591{
 592	int copied = 0;
 593
 594	while (len > 0) {
 595		char buf[128];
 596		int this_len, retval;
 597
 598		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 599		if (copy_from_user(buf, src, this_len))
 600			return -EFAULT;
 601		retval = ptrace_access_vm(tsk, dst, buf, this_len,
 602				FOLL_FORCE | FOLL_WRITE);
 603		if (!retval) {
 604			if (copied)
 605				break;
 606			return -EIO;
 607		}
 608		copied += retval;
 609		src += retval;
 610		dst += retval;
 611		len -= retval;
 612	}
 613	return copied;
 614}
 615
 616static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 617{
 618	unsigned flags;
 619
 620	if (data & ~(unsigned long)PTRACE_O_MASK)
 621		return -EINVAL;
 622
 623	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
 624		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
 625		    !IS_ENABLED(CONFIG_SECCOMP))
 626			return -EINVAL;
 627
 628		if (!capable(CAP_SYS_ADMIN))
 629			return -EPERM;
 630
 631		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
 632		    current->ptrace & PT_SUSPEND_SECCOMP)
 633			return -EPERM;
 634	}
 635
 636	/* Avoid intermediate state when all opts are cleared */
 637	flags = child->ptrace;
 638	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
 639	flags |= (data << PT_OPT_FLAG_SHIFT);
 640	child->ptrace = flags;
 641
 642	return 0;
 643}
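
/*
 * Illustrative userspace sketch, not part of the kernel source: options
 * arrive as a PTRACE_O_* mask in the data argument, as handled above.
 * With PTRACE_O_TRACESYSGOOD, syscall stops report (SIGTRAP | 0x80) so
 * the tracer can distinguish them from genuine SIGTRAPs.
 */
#include <sys/ptrace.h>
#include <sys/types.h>

static int set_trace_options(pid_t pid)
{
	long opts = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXIT;

	/* the tracee must currently be in a ptrace stop */
	return ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts) ? -1 : 0;
}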
 644
 645static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
 646{
 647	unsigned long flags;
 648	int error = -ESRCH;
 649
 650	if (lock_task_sighand(child, &flags)) {
 651		error = -EINVAL;
 652		if (likely(child->last_siginfo != NULL)) {
 653			*info = *child->last_siginfo;
 654			error = 0;
 655		}
 656		unlock_task_sighand(child, &flags);
 657	}
 658	return error;
 659}
 660
 661static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
 662{
 663	unsigned long flags;
 664	int error = -ESRCH;
 665
 666	if (lock_task_sighand(child, &flags)) {
 667		error = -EINVAL;
 668		if (likely(child->last_siginfo != NULL)) {
 669			*child->last_siginfo = *info;
 670			error = 0;
 671		}
 672		unlock_task_sighand(child, &flags);
 673	}
 674	return error;
 675}
 676
 677static int ptrace_peek_siginfo(struct task_struct *child,
 678				unsigned long addr,
 679				unsigned long data)
 680{
 681	struct ptrace_peeksiginfo_args arg;
 682	struct sigpending *pending;
 683	struct sigqueue *q;
 684	int ret, i;
 685
 686	ret = copy_from_user(&arg, (void __user *) addr,
 687				sizeof(struct ptrace_peeksiginfo_args));
 688	if (ret)
 689		return -EFAULT;
 690
 691	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
 692		return -EINVAL; /* unknown flags */
 693
 694	if (arg.nr < 0)
 695		return -EINVAL;
 696
 697	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
 698		pending = &child->signal->shared_pending;
 699	else
 700		pending = &child->pending;
 701
 702	for (i = 0; i < arg.nr; ) {
 703		siginfo_t info;
 704		s32 off = arg.off + i;
 705
 706		spin_lock_irq(&child->sighand->siglock);
 707		list_for_each_entry(q, &pending->list, list) {
 708			if (!off--) {
 709				copy_siginfo(&info, &q->info);
 710				break;
 711			}
 712		}
 713		spin_unlock_irq(&child->sighand->siglock);
 714
 715		if (off >= 0) /* beyond the end of the list */
 716			break;
 717
 718#ifdef CONFIG_COMPAT
 719		if (unlikely(in_compat_syscall())) {
 720			compat_siginfo_t __user *uinfo = compat_ptr(data);
 721
 722			if (copy_siginfo_to_user32(uinfo, &info) ||
 723			    __put_user(info.si_code, &uinfo->si_code)) {
 724				ret = -EFAULT;
 725				break;
 726			}
 727
 728		} else
 729#endif
 730		{
 731			siginfo_t __user *uinfo = (siginfo_t __user *) data;
 732
 733			if (copy_siginfo_to_user(uinfo, &info) ||
 734			    __put_user(info.si_code, &uinfo->si_code)) {
 735				ret = -EFAULT;
 736				break;
 737			}
 738		}
 739
 740		data += sizeof(siginfo_t);
 741		i++;
 742
 743		if (signal_pending(current))
 744			break;
 745
 746		cond_resched();
 747	}
 748
 749	if (i > 0)
 750		return i;
 751
 752	return ret;
 753}
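
/*
 * Illustrative userspace sketch, not part of the kernel source: reading
 * queued siginfo without dequeueing it. The argument block mirrors
 * struct ptrace_peeksiginfo_args consumed above; glibc spells it
 * struct __ptrace_peeksiginfo_args in <sys/ptrace.h> (an assumption
 * about the libc headers in use).
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

/* Returns the number of siginfo records copied into @buf, or -1. */
static long peek_pending_signals(pid_t pid, siginfo_t *buf, int nr)
{
	struct __ptrace_peeksiginfo_args args = {
		.off   = 0,	/* start at the head of the queue */
		.flags = 0,	/* 0: per-thread; PTRACE_PEEKSIGINFO_SHARED: process-wide */
		.nr    = nr,
	};

	return ptrace(PTRACE_PEEKSIGINFO, pid, &args, buf);
}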
 754
 755#ifdef PTRACE_SINGLESTEP
 756#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
 757#else
 758#define is_singlestep(request)		0
 759#endif
 760
 761#ifdef PTRACE_SINGLEBLOCK
 762#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
 763#else
 764#define is_singleblock(request)		0
 765#endif
 766
 767#ifdef PTRACE_SYSEMU
 768#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
 769#else
 770#define is_sysemu_singlestep(request)	0
 771#endif
 772
 773static int ptrace_resume(struct task_struct *child, long request,
 774			 unsigned long data)
 775{
 776	bool need_siglock;
 777
 778	if (!valid_signal(data))
 779		return -EIO;
 780
 781	if (request == PTRACE_SYSCALL)
 782		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 783	else
 784		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 785
 786#ifdef TIF_SYSCALL_EMU
 787	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
 788		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 789	else
 790		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 791#endif
 792
 793	if (is_singleblock(request)) {
 794		if (unlikely(!arch_has_block_step()))
 795			return -EIO;
 796		user_enable_block_step(child);
 797	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 798		if (unlikely(!arch_has_single_step()))
 799			return -EIO;
 800		user_enable_single_step(child);
 801	} else {
 802		user_disable_single_step(child);
 803	}
 804
 805	/*
 806	 * Change ->exit_code and ->state under siglock to avoid the race
 807	 * with wait_task_stopped() in between; a non-zero ->exit_code will
 808	 * wrongly look like another report from tracee.
 809	 *
 810	 * Note that we need siglock even if ->exit_code == data and/or this
 811	 * status was not reported yet, the new status must not be cleared by
 812	 * wait_task_stopped() after resume.
 813	 *
 814	 * If data == 0 we do not care if wait_task_stopped() reports the old
 815	 * status and clears the code too; this can't race with the tracee, it
 816	 * takes siglock after resume.
 817	 */
 818	need_siglock = data && !thread_group_empty(current);
 819	if (need_siglock)
 820		spin_lock_irq(&child->sighand->siglock);
 821	child->exit_code = data;
 822	wake_up_state(child, __TASK_TRACED);
 823	if (need_siglock)
 824		spin_unlock_irq(&child->sighand->siglock);
 825
 826	return 0;
 827}
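
/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * resume requests dispatched above, seen from the tracer's side.
 * PTRACE_SYSCALL re-traps the tracee at each syscall entry and exit; the
 * data argument is a signal number to inject on resume, or 0.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void step_syscalls(pid_t pid)
{
	int status;

	for (;;) {
		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
			break;
		/* tracee stopped at syscall entry or exit; inspect it here */
	}
}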
 828
 829#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 830
 831static const struct user_regset *
 832find_regset(const struct user_regset_view *view, unsigned int type)
 833{
 834	const struct user_regset *regset;
 835	int n;
 836
 837	for (n = 0; n < view->n; ++n) {
 838		regset = view->regsets + n;
 839		if (regset->core_note_type == type)
 840			return regset;
 841	}
 842
 843	return NULL;
 844}
 845
 846static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 847			 struct iovec *kiov)
 848{
 849	const struct user_regset_view *view = task_user_regset_view(task);
 850	const struct user_regset *regset = find_regset(view, type);
 851	int regset_no;
 852
 853	if (!regset || (kiov->iov_len % regset->size) != 0)
 854		return -EINVAL;
 855
 856	regset_no = regset - view->regsets;
 857	kiov->iov_len = min(kiov->iov_len,
 858			    (__kernel_size_t) (regset->n * regset->size));
 859
 860	if (req == PTRACE_GETREGSET)
 861		return copy_regset_to_user(task, view, regset_no, 0,
 862					   kiov->iov_len, kiov->iov_base);
 863	else
 864		return copy_regset_from_user(task, view, regset_no, 0,
 865					     kiov->iov_len, kiov->iov_base);
 866}
 867
 868/*
 869 * This is declared in linux/regset.h and defined in machine-dependent
 870 * code.  We put the export here, near the primary machine-neutral use,
 871 * to ensure no machine forgets it.
 872 */
 873EXPORT_SYMBOL_GPL(task_user_regset_view);
 874#endif
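
/*
 * Illustrative userspace sketch, not part of the kernel source:
 * PTRACE_GETREGSET selects a regset by ELF note type in addr and passes
 * a struct iovec in data; ptrace_regset() above trims iov_len to what
 * was actually transferred.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static int read_gp_registers(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len  = sizeof(*regs),
	};

	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) ? -1 : 0;
}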
 875
 876int ptrace_request(struct task_struct *child, long request,
 877		   unsigned long addr, unsigned long data)
 878{
 879	bool seized = child->ptrace & PT_SEIZED;
 880	int ret = -EIO;
 881	siginfo_t siginfo, *si;
 882	void __user *datavp = (void __user *) data;
 883	unsigned long __user *datalp = datavp;
 884	unsigned long flags;
 885
 886	switch (request) {
 887	case PTRACE_PEEKTEXT:
 888	case PTRACE_PEEKDATA:
 889		return generic_ptrace_peekdata(child, addr, data);
 890	case PTRACE_POKETEXT:
 891	case PTRACE_POKEDATA:
 892		return generic_ptrace_pokedata(child, addr, data);
 893
 894#ifdef PTRACE_OLDSETOPTIONS
 895	case PTRACE_OLDSETOPTIONS:
 896#endif
 897	case PTRACE_SETOPTIONS:
 898		ret = ptrace_setoptions(child, data);
 899		break;
 900	case PTRACE_GETEVENTMSG:
 901		ret = put_user(child->ptrace_message, datalp);
 902		break;
 903
 904	case PTRACE_PEEKSIGINFO:
 905		ret = ptrace_peek_siginfo(child, addr, data);
 906		break;
 907
 908	case PTRACE_GETSIGINFO:
 909		ret = ptrace_getsiginfo(child, &siginfo);
 910		if (!ret)
 911			ret = copy_siginfo_to_user(datavp, &siginfo);
 912		break;
 913
 914	case PTRACE_SETSIGINFO:
 915		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
 916			ret = -EFAULT;
 917		else
 918			ret = ptrace_setsiginfo(child, &siginfo);
 919		break;
 920
 921	case PTRACE_GETSIGMASK:
 922		if (addr != sizeof(sigset_t)) {
 923			ret = -EINVAL;
 924			break;
 925		}
 926
 927		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
 928			ret = -EFAULT;
 929		else
 930			ret = 0;
 931
 932		break;
 933
 934	case PTRACE_SETSIGMASK: {
 935		sigset_t new_set;
 936
 937		if (addr != sizeof(sigset_t)) {
 938			ret = -EINVAL;
 939			break;
 940		}
 941
 942		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
 943			ret = -EFAULT;
 944			break;
 945		}
 946
 947		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
 948
 949		/*
 950		 * Every thread does recalc_sigpending() after resume, so
 951		 * retarget_shared_pending() and recalc_sigpending() are not
 952		 * called here.
 953		 */
 954		spin_lock_irq(&child->sighand->siglock);
 955		child->blocked = new_set;
 956		spin_unlock_irq(&child->sighand->siglock);
 957
 958		ret = 0;
 959		break;
 960	}
 961
 962	case PTRACE_INTERRUPT:
 963		/*
 964		 * Stop tracee without any side-effect on signal or job
 965		 * control.  At least one trap is guaranteed to happen
 966		 * after this request.  If @child is already trapped, the
 967		 * current trap is not disturbed and another trap will
 968		 * happen after the current trap is ended with PTRACE_CONT.
 969		 *
 970		 * The actual trap might not be PTRACE_EVENT_STOP trap but
 971		 * the pending condition is cleared regardless.
 972		 */
 973		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
 974			break;
 975
 976		/*
 977		 * INTERRUPT doesn't disturb existing trap sans one
 978		 * exception.  If ptracer issued LISTEN for the current
 979		 * STOP, this INTERRUPT should clear LISTEN and re-trap
 980		 * tracee into STOP.
 981		 */
 982		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
 983			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 984
 985		unlock_task_sighand(child, &flags);
 986		ret = 0;
 987		break;
 988
 989	case PTRACE_LISTEN:
 990		/*
 991		 * Listen for events.  Tracee must be in STOP.  It's not
 992		 * resumed per-se but is not considered to be in TRACED by
 993		 * wait(2) or ptrace(2).  If an async event (e.g. group
 994		 * stop state change) happens, tracee will enter STOP trap
 995		 * again.  Alternatively, ptracer can issue INTERRUPT to
 996		 * finish listening and re-trap tracee into STOP.
 997		 */
 998		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
 999			break;
1000
1001		si = child->last_siginfo;
1002		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
1003			child->jobctl |= JOBCTL_LISTENING;
1004			/*
1005			 * If NOTIFY is set, it means event happened between
1006			 * start of this trap and now.  Trigger re-trap.
1007			 */
1008			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
1009				ptrace_signal_wake_up(child, true);
1010			ret = 0;
1011		}
1012		unlock_task_sighand(child, &flags);
1013		break;
1014
1015	case PTRACE_DETACH:	 /* detach a process that was attached. */
1016		ret = ptrace_detach(child, data);
1017		break;
1018
1019#ifdef CONFIG_BINFMT_ELF_FDPIC
1020	case PTRACE_GETFDPIC: {
1021		struct mm_struct *mm = get_task_mm(child);
1022		unsigned long tmp = 0;
1023
1024		ret = -ESRCH;
1025		if (!mm)
1026			break;
1027
1028		switch (addr) {
1029		case PTRACE_GETFDPIC_EXEC:
1030			tmp = mm->context.exec_fdpic_loadmap;
1031			break;
1032		case PTRACE_GETFDPIC_INTERP:
1033			tmp = mm->context.interp_fdpic_loadmap;
1034			break;
1035		default:
1036			break;
1037		}
1038		mmput(mm);
1039
1040		ret = put_user(tmp, datalp);
1041		break;
1042	}
1043#endif
1044
1045#ifdef PTRACE_SINGLESTEP
1046	case PTRACE_SINGLESTEP:
1047#endif
1048#ifdef PTRACE_SINGLEBLOCK
1049	case PTRACE_SINGLEBLOCK:
1050#endif
1051#ifdef PTRACE_SYSEMU
1052	case PTRACE_SYSEMU:
1053	case PTRACE_SYSEMU_SINGLESTEP:
1054#endif
1055	case PTRACE_SYSCALL:
1056	case PTRACE_CONT:
1057		return ptrace_resume(child, request, data);
1058
1059	case PTRACE_KILL:
1060		if (child->exit_state)	/* already dead */
1061			return 0;
1062		return ptrace_resume(child, request, SIGKILL);
1063
1064#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1065	case PTRACE_GETREGSET:
1066	case PTRACE_SETREGSET: {
1067		struct iovec kiov;
1068		struct iovec __user *uiov = datavp;
1069
1070		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
1071			return -EFAULT;
1072
1073		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
1074		    __get_user(kiov.iov_len, &uiov->iov_len))
1075			return -EFAULT;
1076
1077		ret = ptrace_regset(child, request, addr, &kiov);
1078		if (!ret)
1079			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1080		break;
1081	}
1082#endif
1083
1084	case PTRACE_SECCOMP_GET_FILTER:
1085		ret = seccomp_get_filter(child, addr, datavp);
1086		break;
1087
1088	default:
1089		break;
1090	}
1091
1092	return ret;
1093}
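
/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * PTRACE_INTERRUPT and PTRACE_LISTEN requests handled above only work on
 * a tracee attached with PTRACE_SEIZE. INTERRUPT forces a trap without
 * touching signal or job-control state; LISTEN parks a group-stopped
 * tracee so it can still report async events.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int interrupt_tracee(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) == -1)
		return -1;
	/* the tracee enters a PTRACE_EVENT_STOP trap */
	return waitpid(pid, &status, 0) == pid ? 0 : -1;
}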
1094
1095static struct task_struct *ptrace_get_task_struct(pid_t pid)
1096{
1097	struct task_struct *child;
1098
1099	rcu_read_lock();
1100	child = find_task_by_vpid(pid);
1101	if (child)
1102		get_task_struct(child);
1103	rcu_read_unlock();
1104
1105	if (!child)
1106		return ERR_PTR(-ESRCH);
1107	return child;
1108}
1109
1110#ifndef arch_ptrace_attach
1111#define arch_ptrace_attach(child)	do { } while (0)
1112#endif
1113
1114SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
1115		unsigned long, data)
1116{
1117	struct task_struct *child;
1118	long ret;
1119
1120	if (request == PTRACE_TRACEME) {
1121		ret = ptrace_traceme();
1122		if (!ret)
1123			arch_ptrace_attach(current);
1124		goto out;
1125	}
1126
1127	child = ptrace_get_task_struct(pid);
1128	if (IS_ERR(child)) {
1129		ret = PTR_ERR(child);
1130		goto out;
1131	}
1132
1133	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1134		ret = ptrace_attach(child, request, addr, data);
1135		/*
1136		 * Some architectures need to do book-keeping after
1137		 * a ptrace attach.
1138		 */
1139		if (!ret)
1140			arch_ptrace_attach(child);
1141		goto out_put_task_struct;
1142	}
1143
1144	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1145				  request == PTRACE_INTERRUPT);
1146	if (ret < 0)
1147		goto out_put_task_struct;
1148
1149	ret = arch_ptrace(child, request, addr, data);
1150	if (ret || request != PTRACE_DETACH)
1151		ptrace_unfreeze_traced(child);
1152
1153 out_put_task_struct:
1154	put_task_struct(child);
1155 out:
1156	return ret;
1157}
1158
1159int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1160			    unsigned long data)
1161{
1162	unsigned long tmp;
1163	int copied;
1164
1165	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1166	if (copied != sizeof(tmp))
1167		return -EIO;
1168	return put_user(tmp, (unsigned long __user *)data);
1169}
1170
1171int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1172			    unsigned long data)
1173{
1174	int copied;
1175
1176	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
1177			FOLL_FORCE | FOLL_WRITE);
1178	return (copied == sizeof(data)) ? 0 : -EIO;
1179}
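
/*
 * Illustrative userspace sketch, not part of the kernel source: the
 * word-at-a-time transfers these helpers implement. PTRACE_PEEKDATA
 * returns the word as ptrace()'s return value, so errno must be cleared
 * first to tell a stored -1 apart from an error.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>

static int poke_word(pid_t pid, void *addr, long word)
{
	return ptrace(PTRACE_POKEDATA, pid, addr, (void *)word) ? -1 : 0;
}

static long peek_word(pid_t pid, void *addr, int *errp)
{
	long word;

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
	*errp = (word == -1 && errno != 0);
	return word;
}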
1180
1181#if defined CONFIG_COMPAT
1182
1183int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1184			  compat_ulong_t addr, compat_ulong_t data)
1185{
1186	compat_ulong_t __user *datap = compat_ptr(data);
1187	compat_ulong_t word;
1188	siginfo_t siginfo;
1189	int ret;
1190
1191	switch (request) {
1192	case PTRACE_PEEKTEXT:
1193	case PTRACE_PEEKDATA:
1194		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
1195				FOLL_FORCE);
1196		if (ret != sizeof(word))
1197			ret = -EIO;
1198		else
1199			ret = put_user(word, datap);
1200		break;
1201
1202	case PTRACE_POKETEXT:
1203	case PTRACE_POKEDATA:
1204		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
1205				FOLL_FORCE | FOLL_WRITE);
1206		ret = (ret != sizeof(data) ? -EIO : 0);
1207		break;
1208
1209	case PTRACE_GETEVENTMSG:
1210		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
1211		break;
1212
1213	case PTRACE_GETSIGINFO:
1214		ret = ptrace_getsiginfo(child, &siginfo);
1215		if (!ret)
1216			ret = copy_siginfo_to_user32(
1217				(struct compat_siginfo __user *) datap,
1218				&siginfo);
1219		break;
1220
1221	case PTRACE_SETSIGINFO:
1222		memset(&siginfo, 0, sizeof siginfo);
1223		if (copy_siginfo_from_user32(
1224			    &siginfo, (struct compat_siginfo __user *) datap))
1225			ret = -EFAULT;
1226		else
1227			ret = ptrace_setsiginfo(child, &siginfo);
1228		break;
1229#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
1230	case PTRACE_GETREGSET:
1231	case PTRACE_SETREGSET:
1232	{
1233		struct iovec kiov;
1234		struct compat_iovec __user *uiov =
1235			(struct compat_iovec __user *) datap;
1236		compat_uptr_t ptr;
1237		compat_size_t len;
1238
1239		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
1240			return -EFAULT;
1241
1242		if (__get_user(ptr, &uiov->iov_base) ||
1243		    __get_user(len, &uiov->iov_len))
1244			return -EFAULT;
1245
1246		kiov.iov_base = compat_ptr(ptr);
1247		kiov.iov_len = len;
1248
1249		ret = ptrace_regset(child, request, addr, &kiov);
1250		if (!ret)
1251			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1252		break;
1253	}
1254#endif
1255
1256	default:
1257		ret = ptrace_request(child, request, addr, data);
1258	}
1259
1260	return ret;
1261}
1262
1263COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
1264		       compat_long_t, addr, compat_long_t, data)
1265{
1266	struct task_struct *child;
1267	long ret;
1268
1269	if (request == PTRACE_TRACEME) {
1270		ret = ptrace_traceme();
1271		goto out;
1272	}
1273
1274	child = ptrace_get_task_struct(pid);
1275	if (IS_ERR(child)) {
1276		ret = PTR_ERR(child);
1277		goto out;
1278	}
1279
1280	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1281		ret = ptrace_attach(child, request, addr, data);
1282		/*
1283		 * Some architectures need to do book-keeping after
1284		 * a ptrace attach.
1285		 */
1286		if (!ret)
1287			arch_ptrace_attach(child);
1288		goto out_put_task_struct;
1289	}
1290
1291	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1292				  request == PTRACE_INTERRUPT);
1293	if (!ret) {
1294		ret = compat_arch_ptrace(child, request, addr, data);
1295		if (ret || request != PTRACE_DETACH)
1296			ptrace_unfreeze_traced(child);
1297	}
1298
1299 out_put_task_struct:
1300	put_task_struct(child);
1301 out:
1302	return ret;
1303}
1304#endif	/* CONFIG_COMPAT */
v3.1
   1/*
   2 * linux/kernel/ptrace.c
   3 *
   4 * (C) Copyright 1999 Linus Torvalds
   5 *
   6 * Common interfaces for "ptrace()" which we do not want
   7 * to continually duplicate across every architecture.
   8 */
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/sched.h>
  13#include <linux/errno.h>
  14#include <linux/mm.h>
  15#include <linux/highmem.h>
  16#include <linux/pagemap.h>
  17#include <linux/ptrace.h>
  18#include <linux/security.h>
  19#include <linux/signal.h>
 
  20#include <linux/audit.h>
  21#include <linux/pid_namespace.h>
  22#include <linux/syscalls.h>
  23#include <linux/uaccess.h>
  24#include <linux/regset.h>
  25#include <linux/hw_breakpoint.h>
  26#include <linux/cn_proc.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  27
 
 
  28
  29static int ptrace_trapping_sleep_fn(void *flags)
  30{
  31	schedule();
  32	return 0;
  33}
  34
 
  35/*
  36 * ptrace a task: make the debugger its new parent and
  37 * move it to the ptrace list.
  38 *
  39 * Must be called with the tasklist lock write-held.
  40 */
  41void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
  42{
  43	BUG_ON(!list_empty(&child->ptrace_entry));
  44	list_add(&child->ptrace_entry, &new_parent->ptraced);
  45	child->parent = new_parent;
 
 
 
  46}
  47
  48/**
  49 * __ptrace_unlink - unlink ptracee and restore its execution state
  50 * @child: ptracee to be unlinked
  51 *
  52 * Remove @child from the ptrace list, move it back to the original parent,
  53 * and restore the execution state so that it conforms to the group stop
  54 * state.
  55 *
  56 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
  57 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
  58 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
  59 * If the ptracer is exiting, the ptracee can be in any state.
  60 *
  61 * After detach, the ptracee should be in a state which conforms to the
  62 * group stop.  If the group is stopped or in the process of stopping, the
  63 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
  64 * up from TASK_TRACED.
  65 *
  66 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
  67 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
  68 * to but in the opposite direction of what happens while attaching to a
  69 * stopped task.  However, in this direction, the intermediate RUNNING
  70 * state is not hidden even from the current ptracer and if it immediately
  71 * re-attaches and performs a WNOHANG wait(2), it may fail.
  72 *
  73 * CONTEXT:
  74 * write_lock_irq(tasklist_lock)
  75 */
  76void __ptrace_unlink(struct task_struct *child)
  77{
 
  78	BUG_ON(!child->ptrace);
  79
  80	child->ptrace = 0;
 
  81	child->parent = child->real_parent;
  82	list_del_init(&child->ptrace_entry);
 
 
 
  83
  84	spin_lock(&child->sighand->siglock);
  85
  86	/*
  87	 * Clear all pending traps and TRAPPING.  TRAPPING should be
  88	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
  89	 */
  90	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
  91	task_clear_jobctl_trapping(child);
  92
  93	/*
  94	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
  95	 * @child isn't dead.
  96	 */
  97	if (!(child->flags & PF_EXITING) &&
  98	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
  99	     child->signal->group_stop_count))
 100		child->jobctl |= JOBCTL_STOP_PENDING;
 101
 
 
 
 
 
 
 
 
 
 
 
 102	/*
 103	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
 104	 * @child in the butt.  Note that @resume should be used iff @child
 105	 * is in TASK_TRACED; otherwise, we might unduly disrupt
 106	 * TASK_KILLABLE sleeps.
 107	 */
 108	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
 109		signal_wake_up(child, task_is_traced(child));
 110
 111	spin_unlock(&child->sighand->siglock);
 112}
 113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 114/**
 115 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 116 * @child: ptracee to check for
 117 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 118 *
 119 * Check whether @child is being ptraced by %current and ready for further
 120 * ptrace operations.  If @ignore_state is %false, @child also should be in
 121 * %TASK_TRACED state and on return the child is guaranteed to be traced
 122 * and not executing.  If @ignore_state is %true, @child can be in any
 123 * state.
 124 *
 125 * CONTEXT:
 126 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 127 *
 128 * RETURNS:
 129 * 0 on success, -ESRCH if %child is not ready.
 130 */
 131int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 132{
 133	int ret = -ESRCH;
 134
 135	/*
 136	 * We take the read lock around doing both checks to close a
 137	 * possible race where someone else was tracing our child and
 138	 * detached between these two checks.  After this locked check,
 139	 * we are sure that this is our traced child and that can only
 140	 * be changed by us so it's not changing right after this.
 141	 */
 142	read_lock(&tasklist_lock);
 143	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
 
 144		/*
 145		 * child->sighand can't be NULL, release_task()
 146		 * does ptrace_unlink() before __exit_signal().
 147		 */
 148		spin_lock_irq(&child->sighand->siglock);
 149		WARN_ON_ONCE(task_is_stopped(child));
 150		if (ignore_state || (task_is_traced(child) &&
 151				     !(child->jobctl & JOBCTL_LISTENING)))
 152			ret = 0;
 153		spin_unlock_irq(&child->sighand->siglock);
 154	}
 155	read_unlock(&tasklist_lock);
 156
 157	if (!ret && !ignore_state)
 158		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
 
 
 
 
 
 
 
 
 
 159
 160	/* All systems go.. */
 161	return ret;
 162}
 163
 164int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 
 
 
 
 
 
 
 
 
 165{
 166	const struct cred *cred = current_cred(), *tcred;
 
 
 
 
 
 
 
 
 167
 168	/* May we inspect the given task?
 169	 * This check is used both for attaching with ptrace
 170	 * and for allowing access to sensitive information in /proc.
 171	 *
 172	 * ptrace_attach denies several cases that /proc allows
 173	 * because setting up the necessary parent/child relationship
 174	 * or halting the specified task is impossible.
 175	 */
 176	int dumpable = 0;
 177	/* Don't let security modules deny introspection */
 178	if (task == current)
 179		return 0;
 180	rcu_read_lock();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 181	tcred = __task_cred(task);
 182	if (cred->user->user_ns == tcred->user->user_ns &&
 183	    (cred->uid == tcred->euid &&
 184	     cred->uid == tcred->suid &&
 185	     cred->uid == tcred->uid  &&
 186	     cred->gid == tcred->egid &&
 187	     cred->gid == tcred->sgid &&
 188	     cred->gid == tcred->gid))
 189		goto ok;
 190	if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
 191		goto ok;
 192	rcu_read_unlock();
 193	return -EPERM;
 194ok:
 195	rcu_read_unlock();
 196	smp_rmb();
 197	if (task->mm)
 198		dumpable = get_dumpable(task->mm);
 199	if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
 200		return -EPERM;
 201
 202	return security_ptrace_access_check(task, mode);
 203}
 204
 205bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 206{
 207	int err;
 208	task_lock(task);
 209	err = __ptrace_may_access(task, mode);
 210	task_unlock(task);
 211	return !err;
 212}
 213
 214static int ptrace_attach(struct task_struct *task, long request,
 
 215			 unsigned long flags)
 216{
 217	bool seize = (request == PTRACE_SEIZE);
 218	int retval;
 219
 220	/*
 221	 * SEIZE will enable new ptrace behaviors which will be implemented
 222	 * gradually.  SEIZE_DEVEL is used to prevent applications
 223	 * expecting full SEIZE behaviors trapping on kernel commits which
 224	 * are still in the process of implementing them.
 225	 *
 226	 * Only test programs for new ptrace behaviors being implemented
 227	 * should set SEIZE_DEVEL.  If unset, SEIZE will fail with -EIO.
 228	 *
 229	 * Once SEIZE behaviors are completely implemented, this flag and
 230	 * the following test will be removed.
 231	 */
 232	retval = -EIO;
 233	if (seize && !(flags & PTRACE_SEIZE_DEVEL))
 234		goto out;
 
 
 
 
 
 
 
 235
 236	audit_ptrace(task);
 237
 238	retval = -EPERM;
 239	if (unlikely(task->flags & PF_KTHREAD))
 240		goto out;
 241	if (same_thread_group(task, current))
 242		goto out;
 243
 244	/*
 245	 * Protect exec's credential calculations against our interference;
 246	 * interference; SUID, SGID and LSM creds get determined differently
 247	 * under ptrace.
 248	 */
 249	retval = -ERESTARTNOINTR;
 250	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
 251		goto out;
 252
 253	task_lock(task);
 254	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
 255	task_unlock(task);
 256	if (retval)
 257		goto unlock_creds;
 258
 259	write_lock_irq(&tasklist_lock);
 260	retval = -EPERM;
 261	if (unlikely(task->exit_state))
 262		goto unlock_tasklist;
 263	if (task->ptrace)
 264		goto unlock_tasklist;
 265
 266	task->ptrace = PT_PTRACED;
 267	if (seize)
 268		task->ptrace |= PT_SEIZED;
 269	if (task_ns_capable(task, CAP_SYS_PTRACE))
 270		task->ptrace |= PT_PTRACE_CAP;
 271
 272	__ptrace_link(task, current);
 273
 274	/* SEIZE doesn't trap tracee on attach */
 275	if (!seize)
 276		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
 277
 278	spin_lock(&task->sighand->siglock);
 279
 280	/*
 281	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
 282	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
 283	 * will be cleared if the child completes the transition or any
 284	 * event which clears the group stop states happens.  We'll wait
 285	 * for the transition to complete before returning from this
 286	 * function.
 287	 *
 288	 * This hides STOPPED -> RUNNING -> TRACED transition from the
 289	 * attaching thread but a different thread in the same group can
 290	 * still observe the transient RUNNING state.  IOW, if another
 291	 * thread's WNOHANG wait(2) on the stopped tracee races against
 292	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
 293	 *
 294	 * The following task_is_stopped() test is safe as both transitions
 295	 * in and out of STOPPED are protected by siglock.
 296	 */
 297	if (task_is_stopped(task) &&
 298	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
 299		signal_wake_up(task, 1);
 300
 301	spin_unlock(&task->sighand->siglock);
 302
 303	retval = 0;
 304unlock_tasklist:
 305	write_unlock_irq(&tasklist_lock);
 306unlock_creds:
 307	mutex_unlock(&task->signal->cred_guard_mutex);
 308out:
 309	if (!retval) {
 310		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
 311			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
 
 
 
 
 
 
 312		proc_ptrace_connector(task, PTRACE_ATTACH);
 313	}
 314
 315	return retval;
 316}
 317
 318/**
 319 * ptrace_traceme  --  helper for PTRACE_TRACEME
 320 *
 321 * Performs checks and sets PT_PTRACED.
 322 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 323 */
 324static int ptrace_traceme(void)
 325{
 326	int ret = -EPERM;
 327
 328	write_lock_irq(&tasklist_lock);
 329	/* Are we already being traced? */
 330	if (!current->ptrace) {
 331		ret = security_ptrace_traceme(current->parent);
 332		/*
 333		 * Check PF_EXITING to ensure ->real_parent has not passed
 334		 * exit_ptrace(). Otherwise we don't report the error but
 335		 * pretend ->real_parent untraces us right after return.
 336		 */
 337		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 338			current->ptrace = PT_PTRACED;
 339			__ptrace_link(current, current->real_parent);
 340		}
 341	}
 342	write_unlock_irq(&tasklist_lock);
 343
 344	return ret;
 345}
 346
 347/*
 348 * Called with irqs disabled, returns true if childs should reap themselves.
 349 */
 350static int ignoring_children(struct sighand_struct *sigh)
 351{
 352	int ret;
 353	spin_lock(&sigh->siglock);
 354	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
 355	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
 356	spin_unlock(&sigh->siglock);
 357	return ret;
 358}
 359
 360/*
 361 * Called with tasklist_lock held for writing.
 362 * Unlink a traced task, and clean it up if it was a traced zombie.
 363 * Return true if it needs to be reaped with release_task().
 364 * (We can't call release_task() here because we already hold tasklist_lock.)
 365 *
 366 * If it's a zombie, our attachedness prevented normal parent notification
 367 * or self-reaping.  Do notification now if it would have happened earlier.
 368 * If it should reap itself, return true.
 369 *
 370 * If it's our own child, there is no notification to do. But if our normal
 371 * children self-reap, then this child was prevented by ptrace and we must
 372 * reap it now, in that case we must also wake up sub-threads sleeping in
 373 * do_wait().
 374 */
 375static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 376{
 377	bool dead;
 378
 379	__ptrace_unlink(p);
 380
 381	if (p->exit_state != EXIT_ZOMBIE)
 382		return false;
 383
 384	dead = !thread_group_leader(p);
 385
 386	if (!dead && thread_group_empty(p)) {
 387		if (!same_thread_group(p->real_parent, tracer))
 388			dead = do_notify_parent(p, p->exit_signal);
 389		else if (ignoring_children(tracer->sighand)) {
 390			__wake_up_parent(p, tracer);
 391			dead = true;
 392		}
 393	}
 394	/* Mark it as in the process of being reaped. */
 395	if (dead)
 396		p->exit_state = EXIT_DEAD;
 397	return dead;
 398}
 399
 400static int ptrace_detach(struct task_struct *child, unsigned int data)
 401{
 402	bool dead = false;
 403
 404	if (!valid_signal(data))
 405		return -EIO;
 406
 407	/* Architecture-specific hardware disable .. */
 408	ptrace_disable(child);
 409	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 410
 411	write_lock_irq(&tasklist_lock);
 412	/*
 413	 * This child can be already killed. Make sure de_thread() or
 414	 * our sub-thread doing do_wait() didn't do release_task() yet.
 415	 */
 416	if (child->ptrace) {
 417		child->exit_code = data;
 418		dead = __ptrace_detach(current, child);
 419	}
 
 
 
 420	write_unlock_irq(&tasklist_lock);
 421
 422	proc_ptrace_connector(child, PTRACE_DETACH);
 423	if (unlikely(dead))
 424		release_task(child);
 425
 426	return 0;
 427}
 428
 429/*
 430 * Detach all tasks we were using ptrace on. Called with tasklist held
 431 * for writing, and returns with it held too. But note it can release
 432 * and reacquire the lock.
 433 */
 434void exit_ptrace(struct task_struct *tracer)
 435	__releases(&tasklist_lock)
 436	__acquires(&tasklist_lock)
 437{
 438	struct task_struct *p, *n;
 439	LIST_HEAD(ptrace_dead);
 440
 441	if (likely(list_empty(&tracer->ptraced)))
 442		return;
 
 443
 444	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 445		if (__ptrace_detach(tracer, p))
 446			list_add(&p->ptrace_entry, &ptrace_dead);
 447	}
 448
 449	write_unlock_irq(&tasklist_lock);
 450	BUG_ON(!list_empty(&tracer->ptraced));
 451
 452	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
 453		list_del_init(&p->ptrace_entry);
 454		release_task(p);
 455	}
 456
 457	write_lock_irq(&tasklist_lock);
 458}
 459
 460int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 461{
 462	int copied = 0;
 463
 464	while (len > 0) {
 465		char buf[128];
 466		int this_len, retval;
 467
 468		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 469		retval = access_process_vm(tsk, src, buf, this_len, 0);
 
 470		if (!retval) {
 471			if (copied)
 472				break;
 473			return -EIO;
 474		}
 475		if (copy_to_user(dst, buf, retval))
 476			return -EFAULT;
 477		copied += retval;
 478		src += retval;
 479		dst += retval;
 480		len -= retval;
 481	}
 482	return copied;
 483}
 484
 485int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 486{
 487	int copied = 0;
 488
 489	while (len > 0) {
 490		char buf[128];
 491		int this_len, retval;
 492
 493		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 494		if (copy_from_user(buf, src, this_len))
 495			return -EFAULT;
 496		retval = access_process_vm(tsk, dst, buf, this_len, 1);
 
 497		if (!retval) {
 498			if (copied)
 499				break;
 500			return -EIO;
 501		}
 502		copied += retval;
 503		src += retval;
 504		dst += retval;
 505		len -= retval;
 506	}
 507	return copied;
 508}
 509
 510static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 511{
 512	child->ptrace &= ~PT_TRACE_MASK;
 513
 514	if (data & PTRACE_O_TRACESYSGOOD)
 515		child->ptrace |= PT_TRACESYSGOOD;
 516
 517	if (data & PTRACE_O_TRACEFORK)
 518		child->ptrace |= PT_TRACE_FORK;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 519
 520	if (data & PTRACE_O_TRACEVFORK)
 521		child->ptrace |= PT_TRACE_VFORK;
 522
 523	if (data & PTRACE_O_TRACECLONE)
 524		child->ptrace |= PT_TRACE_CLONE;
 525
 526	if (data & PTRACE_O_TRACEEXEC)
 527		child->ptrace |= PT_TRACE_EXEC;
 528
 529	if (data & PTRACE_O_TRACEVFORKDONE)
 530		child->ptrace |= PT_TRACE_VFORK_DONE;
 531
 532	if (data & PTRACE_O_TRACEEXIT)
 533		child->ptrace |= PT_TRACE_EXIT;
 534
 535	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
 536}
 537
 538static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
 539{
 540	unsigned long flags;
 541	int error = -ESRCH;
 542
 543	if (lock_task_sighand(child, &flags)) {
 544		error = -EINVAL;
 545		if (likely(child->last_siginfo != NULL)) {
 546			*info = *child->last_siginfo;
 547			error = 0;
 548		}
 549		unlock_task_sighand(child, &flags);
 550	}
 551	return error;
 552}
 553
 554static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
 555{
 556	unsigned long flags;
 557	int error = -ESRCH;
 558
 559	if (lock_task_sighand(child, &flags)) {
 560		error = -EINVAL;
 561		if (likely(child->last_siginfo != NULL)) {
 562			*child->last_siginfo = *info;
 563			error = 0;
 564		}
 565		unlock_task_sighand(child, &flags);
 566	}
 567	return error;
 568}
 569
 570
 571#ifdef PTRACE_SINGLESTEP
 572#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
 573#else
 574#define is_singlestep(request)		0
 575#endif
 576
 577#ifdef PTRACE_SINGLEBLOCK
 578#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
 579#else
 580#define is_singleblock(request)		0
 581#endif
 582
 583#ifdef PTRACE_SYSEMU
 584#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
 585#else
 586#define is_sysemu_singlestep(request)	0
 587#endif
 588
 589static int ptrace_resume(struct task_struct *child, long request,
 590			 unsigned long data)
 591{
 592	if (!valid_signal(data))
 593		return -EIO;
 594
 595	if (request == PTRACE_SYSCALL)
 596		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 597	else
 598		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 599
 600#ifdef TIF_SYSCALL_EMU
 601	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
 602		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 603	else
 604		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 605#endif
 606
 607	if (is_singleblock(request)) {
 608		if (unlikely(!arch_has_block_step()))
 609			return -EIO;
 610		user_enable_block_step(child);
 611	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 612		if (unlikely(!arch_has_single_step()))
 613			return -EIO;
 614		user_enable_single_step(child);
 615	} else {
 616		user_disable_single_step(child);
 617	}
 618
 619	child->exit_code = data;
 620	wake_up_state(child, __TASK_TRACED);
 621
 622	return 0;
 623}
 624
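/*
 * Illustrative user-space sketch (not part of this kernel file): the
 * classic syscall-tracing loop driven by ptrace_resume() above.  Each
 * PTRACE_SYSCALL resume runs the tracee to the next syscall entry or
 * exit; with PTRACE_O_TRACESYSGOOD set, those stops report
 * SIGTRAP | 0x80.  The function name syscall_loop() is hypothetical.
 */
#if 0	/* example only, never compiled */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static void syscall_loop(pid_t pid)
{
	int status;

	for (;;) {
		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
			break;
		if (WIFSTOPPED(status) &&
		    WSTOPSIG(status) == (SIGTRAP | 0x80)) {
			/* syscall entry or exit: inspect registers here */
		}
	}
}
#endif
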
 625#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 626
 627static const struct user_regset *
 628find_regset(const struct user_regset_view *view, unsigned int type)
 629{
 630	const struct user_regset *regset;
 631	int n;
 632
 633	for (n = 0; n < view->n; ++n) {
 634		regset = view->regsets + n;
 635		if (regset->core_note_type == type)
 636			return regset;
 637	}
 638
 639	return NULL;
 640}
 641
 642static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 643			 struct iovec *kiov)
 644{
 645	const struct user_regset_view *view = task_user_regset_view(task);
 646	const struct user_regset *regset = find_regset(view, type);
 647	int regset_no;
 648
 649	if (!regset || (kiov->iov_len % regset->size) != 0)
 650		return -EINVAL;
 651
 652	regset_no = regset - view->regsets;
 653	kiov->iov_len = min(kiov->iov_len,
 654			    (__kernel_size_t) (regset->n * regset->size));
 655
 656	if (req == PTRACE_GETREGSET)
 657		return copy_regset_to_user(task, view, regset_no, 0,
 658					   kiov->iov_len, kiov->iov_base);
 659	else
 660		return copy_regset_from_user(task, view, regset_no, 0,
 661					     kiov->iov_len, kiov->iov_base);
 662}
 663
 664#endif
 665
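/*
 * Illustrative user-space sketch (not part of this kernel file): calling
 * the regset interface implemented by ptrace_regset() above.  NT_PRSTATUS
 * selects the general-purpose registers; the kernel truncates iov_len to
 * the regset size and writes the actual length back.  The helper name
 * get_gp_regs() is hypothetical.
 */
#if 0	/* example only, never compiled */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static int get_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };

	/* On success, iov.iov_len holds the number of bytes filled in. */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
}
#endif
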
 666int ptrace_request(struct task_struct *child, long request,
 667		   unsigned long addr, unsigned long data)
 668{
 669	bool seized = child->ptrace & PT_SEIZED;
 670	int ret = -EIO;
 671	siginfo_t siginfo, *si;
 672	void __user *datavp = (void __user *) data;
 673	unsigned long __user *datalp = datavp;
 674	unsigned long flags;
 675
 676	switch (request) {
 677	case PTRACE_PEEKTEXT:
 678	case PTRACE_PEEKDATA:
 679		return generic_ptrace_peekdata(child, addr, data);
 680	case PTRACE_POKETEXT:
 681	case PTRACE_POKEDATA:
 682		return generic_ptrace_pokedata(child, addr, data);
 683
 684#ifdef PTRACE_OLDSETOPTIONS
 685	case PTRACE_OLDSETOPTIONS:
 686#endif
 687	case PTRACE_SETOPTIONS:
 688		ret = ptrace_setoptions(child, data);
 689		break;
 690	case PTRACE_GETEVENTMSG:
 691		ret = put_user(child->ptrace_message, datalp);
 692		break;
 693
 694	case PTRACE_GETSIGINFO:
 695		ret = ptrace_getsiginfo(child, &siginfo);
 696		if (!ret)
 697			ret = copy_siginfo_to_user(datavp, &siginfo);
 698		break;
 699
 700	case PTRACE_SETSIGINFO:
 701		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
 702			ret = -EFAULT;
 703		else
 704			ret = ptrace_setsiginfo(child, &siginfo);
 705		break;
 706
 707	case PTRACE_INTERRUPT:
 708		/*
 709		 * Stop tracee without any side-effect on signal or job
 710		 * control.  At least one trap is guaranteed to happen
 711		 * after this request.  If @child is already trapped, the
 712		 * current trap is not disturbed and another trap will
 713		 * happen after the current trap is ended with PTRACE_CONT.
 714		 *
 715		 * The actual trap might not be a PTRACE_EVENT_STOP trap;
 716		 * the pending condition is cleared regardless.
 717		 */
 718		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
 719			break;
 720
 721		/*
 722		 * INTERRUPT doesn't disturb an existing trap, with one
 723		 * exception: if the ptracer issued LISTEN for the current
 724		 * STOP, this INTERRUPT should clear LISTEN and re-trap the
 725		 * tracee into STOP.
 726		 */
 727		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
 728			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 729
 730		unlock_task_sighand(child, &flags);
 731		ret = 0;
 732		break;
 733
 734	case PTRACE_LISTEN:
 735		/*
 736		 * Listen for events.  The tracee must be in STOP.  It's not
 737		 * resumed per se, but it is not considered to be in TRACED by
 738		 * wait(2) or ptrace(2).  If an async event (e.g. a group-stop
 739		 * state change) happens, the tracee will enter the STOP trap
 740		 * again.  Alternatively, the ptracer can issue INTERRUPT to
 741		 * finish listening and re-trap the tracee into STOP.
 742		 */
 743		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
 744			break;
 745
 746		si = child->last_siginfo;
 747		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
 748			child->jobctl |= JOBCTL_LISTENING;
 749			/*
 750			 * If NOTIFY is set, an event happened between the
 751			 * start of this trap and now.  Trigger a re-trap.
 752			 */
 753			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
 754				signal_wake_up(child, true);
 755			ret = 0;
 756		}
 757		unlock_task_sighand(child, &flags);
 758		break;
 759
 760	case PTRACE_DETACH:	 /* detach a process that was attached. */
 761		ret = ptrace_detach(child, data);
 762		break;
 763
 764#ifdef CONFIG_BINFMT_ELF_FDPIC
 765	case PTRACE_GETFDPIC: {
 766		struct mm_struct *mm = get_task_mm(child);
 767		unsigned long tmp = 0;
 768
 769		ret = -ESRCH;
 770		if (!mm)
 771			break;
 772
 773		switch (addr) {
 774		case PTRACE_GETFDPIC_EXEC:
 775			tmp = mm->context.exec_fdpic_loadmap;
 776			break;
 777		case PTRACE_GETFDPIC_INTERP:
 778			tmp = mm->context.interp_fdpic_loadmap;
 779			break;
 780		default:
 781			break;
 782		}
 783		mmput(mm);
 784
 785		ret = put_user(tmp, datalp);
 786		break;
 787	}
 788#endif
 789
 790#ifdef PTRACE_SINGLESTEP
 791	case PTRACE_SINGLESTEP:
 792#endif
 793#ifdef PTRACE_SINGLEBLOCK
 794	case PTRACE_SINGLEBLOCK:
 795#endif
 796#ifdef PTRACE_SYSEMU
 797	case PTRACE_SYSEMU:
 798	case PTRACE_SYSEMU_SINGLESTEP:
 799#endif
 800	case PTRACE_SYSCALL:
 801	case PTRACE_CONT:
 802		return ptrace_resume(child, request, data);
 803
 804	case PTRACE_KILL:
 805		if (child->exit_state)	/* already dead */
 806			return 0;
 807		return ptrace_resume(child, request, SIGKILL);
 808
 809#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 810	case PTRACE_GETREGSET:
 811	case PTRACE_SETREGSET:
 812	{
 813		struct iovec kiov;
 814		struct iovec __user *uiov = datavp;
 815
 816		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
 817			return -EFAULT;
 818
 819		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
 820		    __get_user(kiov.iov_len, &uiov->iov_len))
 821			return -EFAULT;
 822
 823		ret = ptrace_regset(child, request, addr, &kiov);
 824		if (!ret)
 825			ret = __put_user(kiov.iov_len, &uiov->iov_len);
 826		break;
 827	}
 828#endif
 829	default:
 830		break;
 831	}
 832
 833	return ret;
 834}
 835
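/*
 * Illustrative user-space sketch (not part of this kernel file): the
 * SEIZE-based flow handled in ptrace_request() above.  INTERRUPT and
 * LISTEN are only valid on tracees attached with PTRACE_SEIZE, and
 * LISTEN additionally requires the tracee to be in a PTRACE_EVENT_STOP
 * trap.  The helper name seize_and_interrupt() is hypothetical.
 */
#if 0	/* example only, never compiled */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static int seize_and_interrupt(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_SEIZE, pid, NULL, NULL) == -1)
		return -1;
	/* Unlike ATTACH, SEIZE doesn't stop the tracee; INTERRUPT does. */
	if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) == -1)
		return -1;
	return waitpid(pid, &status, 0) == -1 ? -1 : 0;
}
#endif
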
 836static struct task_struct *ptrace_get_task_struct(pid_t pid)
 837{
 838	struct task_struct *child;
 839
 840	rcu_read_lock();
 841	child = find_task_by_vpid(pid);
 842	if (child)
 843		get_task_struct(child);
 844	rcu_read_unlock();
 845
 846	if (!child)
 847		return ERR_PTR(-ESRCH);
 848	return child;
 849}
 850
 851#ifndef arch_ptrace_attach
 852#define arch_ptrace_attach(child)	do { } while (0)
 853#endif
 854
 855SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 856		unsigned long, data)
 857{
 858	struct task_struct *child;
 859	long ret;
 860
 861	if (request == PTRACE_TRACEME) {
 862		ret = ptrace_traceme();
 863		if (!ret)
 864			arch_ptrace_attach(current);
 865		goto out;
 866	}
 867
 868	child = ptrace_get_task_struct(pid);
 869	if (IS_ERR(child)) {
 870		ret = PTR_ERR(child);
 871		goto out;
 872	}
 873
 874	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
 875		ret = ptrace_attach(child, request, data);
 876		/*
 877		 * Some architectures need to do book-keeping after
 878		 * a ptrace attach.
 879		 */
 880		if (!ret)
 881			arch_ptrace_attach(child);
 882		goto out_put_task_struct;
 883	}
 884
 885	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
 886				  request == PTRACE_INTERRUPT);
 887	if (ret < 0)
 888		goto out_put_task_struct;
 889
 890	ret = arch_ptrace(child, request, addr, data);
 891
 892 out_put_task_struct:
 893	put_task_struct(child);
 894 out:
 895	return ret;
 896}
 897
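/*
 * Illustrative user-space sketch (not part of this kernel file): the
 * minimal attach/resume/detach lifecycle that exercises the syscall
 * entry point above.  PTRACE_ATTACH sends SIGSTOP, so the tracer must
 * wait for the stop before issuing further requests.  The helper name
 * attach_poke_detach() is hypothetical.
 */
#if 0	/* example only, never compiled */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static int attach_poke_detach(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;
	if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
		return -1;
	/* ... PEEK/POKE/GETREGSET requests go here ... */
	return ptrace(PTRACE_DETACH, pid, NULL, NULL);
}
#endif
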
 898int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
 899			    unsigned long data)
 900{
 901	unsigned long tmp;
 902	int copied;
 903
 904	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
 905	if (copied != sizeof(tmp))
 906		return -EIO;
 907	return put_user(tmp, (unsigned long __user *)data);
 908}
 909
 910int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
 911			    unsigned long data)
 912{
 913	int copied;
 914
 915	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
 916	return (copied == sizeof(data)) ? 0 : -EIO;
 917}
 918
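/*
 * Illustrative user-space sketch (not part of this kernel file): the
 * classic software-breakpoint trick built on the two word-sized
 * primitives above -- read a word with PEEKDATA, patch an x86 int3
 * opcode (0xcc) into its low byte, and write it back with POKEDATA.
 * x86-specific; the helper name set_breakpoint() is hypothetical.
 */
#if 0	/* example only, never compiled */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <errno.h>

static int set_breakpoint(pid_t pid, unsigned long addr, long *saved)
{
	long word;

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
	if (word == -1 && errno != 0)
		return -1;
	*saved = word;	/* keep the original word to restore later */
	word = (word & ~0xffUL) | 0xcc;	/* int3 in the low byte */
	return ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)word);
}
#endif
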
 919#if defined CONFIG_COMPAT
 920#include <linux/compat.h>
 921
 922int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 923			  compat_ulong_t addr, compat_ulong_t data)
 924{
 925	compat_ulong_t __user *datap = compat_ptr(data);
 926	compat_ulong_t word;
 927	siginfo_t siginfo;
 928	int ret;
 929
 930	switch (request) {
 931	case PTRACE_PEEKTEXT:
 932	case PTRACE_PEEKDATA:
 933		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
 934		if (ret != sizeof(word))
 935			ret = -EIO;
 936		else
 937			ret = put_user(word, datap);
 938		break;
 939
 940	case PTRACE_POKETEXT:
 941	case PTRACE_POKEDATA:
 942		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
 943		ret = (ret != sizeof(data) ? -EIO : 0);
 944		break;
 945
 946	case PTRACE_GETEVENTMSG:
 947		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
 948		break;
 949
 950	case PTRACE_GETSIGINFO:
 951		ret = ptrace_getsiginfo(child, &siginfo);
 952		if (!ret)
 953			ret = copy_siginfo_to_user32(
 954				(struct compat_siginfo __user *) datap,
 955				&siginfo);
 956		break;
 957
 958	case PTRACE_SETSIGINFO:
 959		memset(&siginfo, 0, sizeof siginfo);
 960		if (copy_siginfo_from_user32(
 961			    &siginfo, (struct compat_siginfo __user *) datap))
 962			ret = -EFAULT;
 963		else
 964			ret = ptrace_setsiginfo(child, &siginfo);
 965		break;
 966#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 967	case PTRACE_GETREGSET:
 968	case PTRACE_SETREGSET:
 969	{
 970		struct iovec kiov;
 971		struct compat_iovec __user *uiov =
 972			(struct compat_iovec __user *) datap;
 973		compat_uptr_t ptr;
 974		compat_size_t len;
 975
 976		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
 977			return -EFAULT;
 978
 979		if (__get_user(ptr, &uiov->iov_base) ||
 980		    __get_user(len, &uiov->iov_len))
 981			return -EFAULT;
 982
 983		kiov.iov_base = compat_ptr(ptr);
 984		kiov.iov_len = len;
 985
 986		ret = ptrace_regset(child, request, addr, &kiov);
 987		if (!ret)
 988			ret = __put_user(kiov.iov_len, &uiov->iov_len);
 989		break;
 990	}
 991#endif
 992
 993	default:
 994		ret = ptrace_request(child, request, addr, data);
 995	}
 996
 997	return ret;
 998}
 999
1000asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
1001				  compat_long_t addr, compat_long_t data)
1002{
1003	struct task_struct *child;
1004	long ret;
1005
1006	if (request == PTRACE_TRACEME) {
1007		ret = ptrace_traceme();
1008		goto out;
1009	}
1010
1011	child = ptrace_get_task_struct(pid);
1012	if (IS_ERR(child)) {
1013		ret = PTR_ERR(child);
1014		goto out;
1015	}
1016
1017	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1018		ret = ptrace_attach(child, request, data);
1019		/*
1020		 * Some architectures need to do book-keeping after
1021		 * a ptrace attach.
1022		 */
1023		if (!ret)
1024			arch_ptrace_attach(child);
1025		goto out_put_task_struct;
1026	}
1027
1028	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1029				  request == PTRACE_INTERRUPT);
1030	if (!ret)
1031		ret = compat_arch_ptrace(child, request, addr, data);
1032
1033 out_put_task_struct:
1034	put_task_struct(child);
1035 out:
1036	return ret;
1037}
1038#endif	/* CONFIG_COMPAT */
1039
1040#ifdef CONFIG_HAVE_HW_BREAKPOINT
1041int ptrace_get_breakpoints(struct task_struct *tsk)
1042{
1043	if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
1044		return 0;
1045
1046	return -1;
1047}
1048
1049void ptrace_put_breakpoints(struct task_struct *tsk)
1050{
1051	if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
1052		flush_ptrace_hw_breakpoint(tsk);
1053}
1054#endif /* CONFIG_HAVE_HW_BREAKPOINT */
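
/*
 * Descriptive note on the pair above: arch ptrace code takes a
 * reference around hardware-breakpoint manipulation --
 * ptrace_get_breakpoints() before touching the debug registers,
 * ptrace_put_breakpoints() when done -- so that
 * flush_ptrace_hw_breakpoint() runs only once the last user drops
 * its reference.
 */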