   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/kernel/signal.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   8 *
   9 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
  10 *		Changes to use preallocated sigqueue structures
  11 *		to allow signals to be sent reliably.
  12 */
  13
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/init.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/user.h>
  19#include <linux/sched/debug.h>
  20#include <linux/sched/task.h>
  21#include <linux/sched/task_stack.h>
  22#include <linux/sched/cputime.h>
  23#include <linux/file.h>
  24#include <linux/fs.h>
  25#include <linux/mm.h>
  26#include <linux/proc_fs.h>
  27#include <linux/tty.h>
  28#include <linux/binfmts.h>
  29#include <linux/coredump.h>
  30#include <linux/security.h>
  31#include <linux/syscalls.h>
  32#include <linux/ptrace.h>
  33#include <linux/signal.h>
  34#include <linux/signalfd.h>
  35#include <linux/ratelimit.h>
  36#include <linux/task_work.h>
  37#include <linux/capability.h>
  38#include <linux/freezer.h>
  39#include <linux/pid_namespace.h>
  40#include <linux/nsproxy.h>
  41#include <linux/user_namespace.h>
  42#include <linux/uprobes.h>
  43#include <linux/compat.h>
  44#include <linux/cn_proc.h>
  45#include <linux/compiler.h>
  46#include <linux/posix-timers.h>
  47#include <linux/cgroup.h>
  48#include <linux/audit.h>
  49#include <linux/sysctl.h>
  50
  51#define CREATE_TRACE_POINTS
  52#include <trace/events/signal.h>
  53
  54#include <asm/param.h>
  55#include <linux/uaccess.h>
  56#include <asm/unistd.h>
  57#include <asm/siginfo.h>
  58#include <asm/cacheflush.h>
  59#include <asm/syscall.h>	/* for syscall_get_* */
  60
  61/*
  62 * SLAB caches for signal bits.
  63 */
  64
  65static struct kmem_cache *sigqueue_cachep;
  66
  67int print_fatal_signals __read_mostly;
  68
  69static void __user *sig_handler(struct task_struct *t, int sig)
  70{
  71	return t->sighand->action[sig - 1].sa.sa_handler;
  72}
  73
  74static inline bool sig_handler_ignored(void __user *handler, int sig)
  75{
  76	/* Is it explicitly or implicitly ignored? */
  77	return handler == SIG_IGN ||
  78	       (handler == SIG_DFL && sig_kernel_ignore(sig));
  79}
  80
  81static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
  82{
  83	void __user *handler;
  84
  85	handler = sig_handler(t, sig);
  86
  87	/* SIGKILL and SIGSTOP may not be sent to the global init */
  88	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
  89		return true;
  90
  91	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  92	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
  93		return true;
  94
  95	/* Only allow kernel generated signals to this kthread */
  96	if (unlikely((t->flags & PF_KTHREAD) &&
  97		     (handler == SIG_KTHREAD_KERNEL) && !force))
  98		return true;
  99
 100	return sig_handler_ignored(handler, sig);
 101}
 102
 103static bool sig_ignored(struct task_struct *t, int sig, bool force)
 104{
 105	/*
 106	 * Blocked signals are never ignored, since the
 107	 * signal handler may change by the time it is
 108	 * unblocked.
 109	 */
 110	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 111		return false;
 112
 113	/*
  114	 * Tracers may want to know about even ignored signals, unless it
  115	 * is SIGKILL, which can't be reported anyway but can be ignored
  116	 * by a SIGNAL_UNKILLABLE task.
 117	 */
 118	if (t->ptrace && sig != SIGKILL)
 119		return false;
 120
 121	return sig_task_ignored(t, sig, force);
 122}
 123
 124/*
 125 * Re-calculate pending state from the set of locally pending
 126 * signals, globally pending signals, and blocked signals.
 127 */
 128static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 129{
 130	unsigned long ready;
 131	long i;
 132
 133	switch (_NSIG_WORDS) {
 134	default:
 135		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 136			ready |= signal->sig[i] &~ blocked->sig[i];
 137		break;
 138
 139	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 140		ready |= signal->sig[2] &~ blocked->sig[2];
 141		ready |= signal->sig[1] &~ blocked->sig[1];
 142		ready |= signal->sig[0] &~ blocked->sig[0];
 143		break;
 144
 145	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 146		ready |= signal->sig[0] &~ blocked->sig[0];
 147		break;
 148
 149	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 150	}
  151	return ready != 0;
 152}
 153
 154#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 155
 156static bool recalc_sigpending_tsk(struct task_struct *t)
 157{
 158	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
 159	    PENDING(&t->pending, &t->blocked) ||
 160	    PENDING(&t->signal->shared_pending, &t->blocked) ||
 161	    cgroup_task_frozen(t)) {
 162		set_tsk_thread_flag(t, TIF_SIGPENDING);
 163		return true;
 164	}
 165
 166	/*
 167	 * We must never clear the flag in another thread, or in current
 168	 * when it's possible the current syscall is returning -ERESTART*.
  169	 * So we don't clear it here; only callers that know it is safe clear it.
 170	 */
 171	return false;
 172}
 173
 174void recalc_sigpending(void)
 175{
 176	if (!recalc_sigpending_tsk(current) && !freezing(current))
 177		clear_thread_flag(TIF_SIGPENDING);
 178
 179}
 180EXPORT_SYMBOL(recalc_sigpending);
 181
 182void calculate_sigpending(void)
 183{
 184	/* Have any signals or users of TIF_SIGPENDING been delayed
 185	 * until after fork?
 186	 */
 187	spin_lock_irq(&current->sighand->siglock);
 188	set_tsk_thread_flag(current, TIF_SIGPENDING);
 189	recalc_sigpending();
 190	spin_unlock_irq(&current->sighand->siglock);
 191}
 192
 193/* Given the mask, find the first available signal that should be serviced. */
 194
 195#define SYNCHRONOUS_MASK \
 196	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 197	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 198
 199int next_signal(struct sigpending *pending, sigset_t *mask)
 200{
 201	unsigned long i, *s, *m, x;
 202	int sig = 0;
 203
 204	s = pending->signal.sig;
 205	m = mask->sig;
 206
 207	/*
 208	 * Handle the first word specially: it contains the
 209	 * synchronous signals that need to be dequeued first.
 210	 */
 211	x = *s &~ *m;
 212	if (x) {
 213		if (x & SYNCHRONOUS_MASK)
 214			x &= SYNCHRONOUS_MASK;
 215		sig = ffz(~x) + 1;
 216		return sig;
 217	}
 218
 219	switch (_NSIG_WORDS) {
 220	default:
 221		for (i = 1; i < _NSIG_WORDS; ++i) {
 222			x = *++s &~ *++m;
 223			if (!x)
 224				continue;
 225			sig = ffz(~x) + i*_NSIG_BPW + 1;
 226			break;
 227		}
 228		break;
 229
 230	case 2:
 231		x = s[1] &~ m[1];
 232		if (!x)
 233			break;
 234		sig = ffz(~x) + _NSIG_BPW + 1;
 235		break;
 236
 237	case 1:
 238		/* Nothing to do */
 239		break;
 240	}
 241
 242	return sig;
 243}
 244
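/*
 * Example (editor's illustrative sketch, not kernel code): on x86, if
 * SIGUSR1 (10) and SIGSEGV (11) are both pending in the first word and
 * neither is blocked, the SYNCHRONOUS_MASK filtering above narrows x
 * to the SIGSEGV bit, so the synchronous signal is dequeued first:
 *
 *	x = sigmask(SIGUSR1) | sigmask(SIGSEGV);	bits 9 and 10 set
 *	x &= SYNCHRONOUS_MASK;				only bit 10 survives
 *	sig = ffz(~x) + 1;				10 + 1 == SIGSEGV
 */
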
 245static inline void print_dropped_signal(int sig)
 246{
 247	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 248
 249	if (!print_fatal_signals)
 250		return;
 251
 252	if (!__ratelimit(&ratelimit_state))
 253		return;
 254
 255	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 256				current->comm, current->pid, sig);
 257}
 258
 259/**
 260 * task_set_jobctl_pending - set jobctl pending bits
 261 * @task: target task
 262 * @mask: pending bits to set
 263 *
  264 * Set @mask in @task->jobctl.  @mask must be a subset of
 265 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 266 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 267 * cleared.  If @task is already being killed or exiting, this function
  268 * becomes a no-op.
 269 *
 270 * CONTEXT:
 271 * Must be called with @task->sighand->siglock held.
 272 *
 273 * RETURNS:
  274 * %true if @mask is set, %false if it became a no-op because @task was dying.
 275 */
 276bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 277{
 278	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 279			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 280	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 281
 282	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 283		return false;
 284
 285	if (mask & JOBCTL_STOP_SIGMASK)
 286		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 287
 288	task->jobctl |= mask;
 289	return true;
 290}
 291
 292/**
 293 * task_clear_jobctl_trapping - clear jobctl trapping bit
 294 * @task: target task
 295 *
 296 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 297 * Clear it and wake up the ptracer.  Note that we don't need any further
 298 * locking.  @task->siglock guarantees that @task->parent points to the
 299 * ptracer.
 300 *
 301 * CONTEXT:
 302 * Must be called with @task->sighand->siglock held.
 303 */
 304void task_clear_jobctl_trapping(struct task_struct *task)
 305{
 306	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 307		task->jobctl &= ~JOBCTL_TRAPPING;
 308		smp_mb();	/* advised by wake_up_bit() */
 309		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 310	}
 311}
 312
 313/**
 314 * task_clear_jobctl_pending - clear jobctl pending bits
 315 * @task: target task
 316 * @mask: pending bits to clear
 317 *
  318 * Clear @mask from @task->jobctl.  @mask must be a subset of
 319 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 320 * STOP bits are cleared together.
 321 *
 322 * If clearing of @mask leaves no stop or trap pending, this function calls
 323 * task_clear_jobctl_trapping().
 324 *
 325 * CONTEXT:
 326 * Must be called with @task->sighand->siglock held.
 327 */
 328void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 329{
 330	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 331
 332	if (mask & JOBCTL_STOP_PENDING)
 333		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 334
 335	task->jobctl &= ~mask;
 336
 337	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 338		task_clear_jobctl_trapping(task);
 339}
 340
 341/**
 342 * task_participate_group_stop - participate in a group stop
 343 * @task: task participating in a group stop
 344 *
 345 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 346 * Group stop states are cleared and the group stop count is consumed if
 347 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
  348 * stop, the appropriate SIGNAL_* flags are set.
 349 *
 350 * CONTEXT:
 351 * Must be called with @task->sighand->siglock held.
 352 *
 353 * RETURNS:
 354 * %true if group stop completion should be notified to the parent, %false
 355 * otherwise.
 356 */
 357static bool task_participate_group_stop(struct task_struct *task)
 358{
 359	struct signal_struct *sig = task->signal;
 360	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 361
 362	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 363
 364	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 365
 366	if (!consume)
 367		return false;
 368
 369	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 370		sig->group_stop_count--;
 371
 372	/*
 373	 * Tell the caller to notify completion iff we are entering into a
 374	 * fresh group stop.  Read comment in do_signal_stop() for details.
 375	 */
 376	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 377		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 378		return true;
 379	}
 380	return false;
 381}
 382
 383void task_join_group_stop(struct task_struct *task)
 384{
 385	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
 386	struct signal_struct *sig = current->signal;
 387
 388	if (sig->group_stop_count) {
 389		sig->group_stop_count++;
 390		mask |= JOBCTL_STOP_CONSUME;
 391	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
 392		return;
 393
 394	/* Have the new thread join an on-going signal group stop */
 395	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 396}
 397
 398/*
 399 * allocate a new signal queue record
 400 * - this may be called without locks if and only if t == current, otherwise an
 401 *   appropriate lock must be held to stop the target task from exiting
 402 */
 403static struct sigqueue *
 404__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 405		 int override_rlimit, const unsigned int sigqueue_flags)
 406{
 407	struct sigqueue *q = NULL;
 408	struct ucounts *ucounts;
 409	long sigpending;
 410
 411	/*
 412	 * Protect access to @t credentials. This can go away when all
 413	 * callers hold rcu read lock.
 414	 *
 415	 * NOTE! A pending signal will hold on to the user refcount,
 416	 * and we get/put the refcount only when the sigpending count
 417	 * changes from/to zero.
 418	 */
 419	rcu_read_lock();
 420	ucounts = task_ucounts(t);
 421	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 422	rcu_read_unlock();
 423	if (!sigpending)
 424		return NULL;
 425
 426	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
 427		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
 428	} else {
 429		print_dropped_signal(sig);
 430	}
 431
 432	if (unlikely(q == NULL)) {
 433		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 434	} else {
 435		INIT_LIST_HEAD(&q->list);
 436		q->flags = sigqueue_flags;
 437		q->ucounts = ucounts;
 438	}
 439	return q;
 440}
 441
 442static void __sigqueue_free(struct sigqueue *q)
 443{
 444	if (q->flags & SIGQUEUE_PREALLOC)
 445		return;
 446	if (q->ucounts) {
 447		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
 448		q->ucounts = NULL;
 449	}
 450	kmem_cache_free(sigqueue_cachep, q);
 451}
 452
 453void flush_sigqueue(struct sigpending *queue)
 454{
 455	struct sigqueue *q;
 456
 457	sigemptyset(&queue->signal);
 458	while (!list_empty(&queue->list)) {
  459		q = list_entry(queue->list.next, struct sigqueue, list);
 460		list_del_init(&q->list);
 461		__sigqueue_free(q);
 462	}
 463}
 464
 465/*
 466 * Flush all pending signals for this kthread.
 467 */
 468void flush_signals(struct task_struct *t)
 469{
 470	unsigned long flags;
 471
 472	spin_lock_irqsave(&t->sighand->siglock, flags);
 473	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 474	flush_sigqueue(&t->pending);
 475	flush_sigqueue(&t->signal->shared_pending);
 476	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 477}
 478EXPORT_SYMBOL(flush_signals);
 479
 480#ifdef CONFIG_POSIX_TIMERS
 481static void __flush_itimer_signals(struct sigpending *pending)
 482{
 483	sigset_t signal, retain;
 484	struct sigqueue *q, *n;
 485
 486	signal = pending->signal;
 487	sigemptyset(&retain);
 488
 489	list_for_each_entry_safe(q, n, &pending->list, list) {
 490		int sig = q->info.si_signo;
 491
 492		if (likely(q->info.si_code != SI_TIMER)) {
 493			sigaddset(&retain, sig);
 494		} else {
 495			sigdelset(&signal, sig);
 496			list_del_init(&q->list);
 497			__sigqueue_free(q);
 498		}
 499	}
 500
 501	sigorsets(&pending->signal, &signal, &retain);
 502}
 503
 504void flush_itimer_signals(void)
 505{
 506	struct task_struct *tsk = current;
 507	unsigned long flags;
 508
 509	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 510	__flush_itimer_signals(&tsk->pending);
 511	__flush_itimer_signals(&tsk->signal->shared_pending);
 512	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 513}
 514#endif
 515
 516void ignore_signals(struct task_struct *t)
 517{
 518	int i;
 519
 520	for (i = 0; i < _NSIG; ++i)
 521		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 522
 523	flush_signals(t);
 524}
 525
 526/*
 527 * Flush all handlers for a task.
 528 */
 529
 530void
 531flush_signal_handlers(struct task_struct *t, int force_default)
 532{
 533	int i;
 534	struct k_sigaction *ka = &t->sighand->action[0];
 535	for (i = _NSIG ; i != 0 ; i--) {
 536		if (force_default || ka->sa.sa_handler != SIG_IGN)
 537			ka->sa.sa_handler = SIG_DFL;
 538		ka->sa.sa_flags = 0;
 539#ifdef __ARCH_HAS_SA_RESTORER
 540		ka->sa.sa_restorer = NULL;
 541#endif
 542		sigemptyset(&ka->sa.sa_mask);
 543		ka++;
 544	}
 545}
 546
 547bool unhandled_signal(struct task_struct *tsk, int sig)
 548{
 549	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 550	if (is_global_init(tsk))
 551		return true;
 552
 553	if (handler != SIG_IGN && handler != SIG_DFL)
 554		return false;
 555
 556	/* If dying, we handle all new signals by ignoring them */
 557	if (fatal_signal_pending(tsk))
 558		return false;
 559
 560	/* if ptraced, let the tracer determine */
 561	return !tsk->ptrace;
 562}
 563
 564static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
 565			   bool *resched_timer)
 566{
 567	struct sigqueue *q, *first = NULL;
 568
 569	/*
 570	 * Collect the siginfo appropriate to this signal.  Check if
 571	 * there is another siginfo for the same signal.
  572	 */
 573	list_for_each_entry(q, &list->list, list) {
 574		if (q->info.si_signo == sig) {
 575			if (first)
 576				goto still_pending;
 577			first = q;
 578		}
 579	}
 580
 581	sigdelset(&list->signal, sig);
 582
 583	if (first) {
 584still_pending:
 585		list_del_init(&first->list);
 586		copy_siginfo(info, &first->info);
 587
 588		*resched_timer =
 589			(first->flags & SIGQUEUE_PREALLOC) &&
 590			(info->si_code == SI_TIMER) &&
 591			(info->si_sys_private);
 592
 593		__sigqueue_free(first);
 594	} else {
 595		/*
 596		 * Ok, it wasn't in the queue.  This must be
 597		 * a fast-pathed signal or we must have been
 598		 * out of queue space.  So zero out the info.
 599		 */
 600		clear_siginfo(info);
 601		info->si_signo = sig;
 602		info->si_errno = 0;
 603		info->si_code = SI_USER;
 604		info->si_pid = 0;
 605		info->si_uid = 0;
 606	}
 607}
 608
 609static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 610			kernel_siginfo_t *info, bool *resched_timer)
 611{
 612	int sig = next_signal(pending, mask);
 613
 614	if (sig)
 615		collect_signal(sig, pending, info, resched_timer);
 616	return sig;
 617}
 618
 619/*
 620 * Dequeue a signal and return the element to the caller, which is
 621 * expected to free it.
 622 *
 623 * All callers have to hold the siglock.
 624 */
 625int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
 626		   kernel_siginfo_t *info, enum pid_type *type)
 627{
 628	bool resched_timer = false;
 629	int signr;
 630
  631	/* We only dequeue private signals from ourselves; we don't let
 632	 * signalfd steal them
 633	 */
 634	*type = PIDTYPE_PID;
 635	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 636	if (!signr) {
 637		*type = PIDTYPE_TGID;
 638		signr = __dequeue_signal(&tsk->signal->shared_pending,
 639					 mask, info, &resched_timer);
 640#ifdef CONFIG_POSIX_TIMERS
 641		/*
 642		 * itimer signal ?
 643		 *
 644		 * itimers are process shared and we restart periodic
 645		 * itimers in the signal delivery path to prevent DoS
 646		 * attacks in the high resolution timer case. This is
 647		 * compliant with the old way of self-restarting
 648		 * itimers, as the SIGALRM is a legacy signal and only
 649		 * queued once. Changing the restart behaviour to
 650		 * restart the timer in the signal dequeue path is
  651	 * reducing the timer noise on heavily loaded !highres
 652		 * systems too.
 653		 */
 654		if (unlikely(signr == SIGALRM)) {
 655			struct hrtimer *tmr = &tsk->signal->real_timer;
 656
 657			if (!hrtimer_is_queued(tmr) &&
 658			    tsk->signal->it_real_incr != 0) {
 659				hrtimer_forward(tmr, tmr->base->get_time(),
 660						tsk->signal->it_real_incr);
 661				hrtimer_restart(tmr);
 662			}
 663		}
 664#endif
 665	}
 666
 667	recalc_sigpending();
 668	if (!signr)
 669		return 0;
 670
 671	if (unlikely(sig_kernel_stop(signr))) {
 672		/*
 673		 * Set a marker that we have dequeued a stop signal.  Our
 674		 * caller might release the siglock and then the pending
 675		 * stop signal it is about to process is no longer in the
 676		 * pending bitmasks, but must still be cleared by a SIGCONT
 677		 * (and overruled by a SIGKILL).  So those cases clear this
 678		 * shared flag after we've set it.  Note that this flag may
 679		 * remain set after the signal we return is ignored or
 680		 * handled.  That doesn't matter because its only purpose
 681		 * is to alert stop-signal processing code when another
 682		 * processor has come along and cleared the flag.
 683		 */
 684		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 685	}
 686#ifdef CONFIG_POSIX_TIMERS
 687	if (resched_timer) {
 688		/*
 689		 * Release the siglock to ensure proper locking order
 690		 * of timer locks outside of siglocks.  Note, we leave
 691		 * irqs disabled here, since the posix-timers code is
 692		 * about to disable them again anyway.
 693		 */
 694		spin_unlock(&tsk->sighand->siglock);
 695		posixtimer_rearm(info);
 696		spin_lock(&tsk->sighand->siglock);
 697
 698		/* Don't expose the si_sys_private value to userspace */
 699		info->si_sys_private = 0;
 700	}
 701#endif
 702	return signr;
 703}
 704EXPORT_SYMBOL_GPL(dequeue_signal);
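
/*
 * Minimal caller sketch (editor's illustration, not a real call site).
 * As required above, ->siglock must be held across the call:
 *
 *	kernel_siginfo_t info;
 *	enum pid_type type;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info, &type);
 *	spin_unlock_irq(&current->sighand->siglock);
 */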
 705
 706static int dequeue_synchronous_signal(kernel_siginfo_t *info)
 707{
 708	struct task_struct *tsk = current;
 709	struct sigpending *pending = &tsk->pending;
 710	struct sigqueue *q, *sync = NULL;
 711
 712	/*
 713	 * Might a synchronous signal be in the queue?
 714	 */
 715	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
 716		return 0;
 717
 718	/*
 719	 * Return the first synchronous signal in the queue.
 720	 */
 721	list_for_each_entry(q, &pending->list, list) {
 722		/* Synchronous signals have a positive si_code */
 723		if ((q->info.si_code > SI_USER) &&
 724		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
 725			sync = q;
 726			goto next;
 727		}
 728	}
 729	return 0;
 730next:
 731	/*
 732	 * Check if there is another siginfo for the same signal.
 733	 */
 734	list_for_each_entry_continue(q, &pending->list, list) {
 735		if (q->info.si_signo == sync->info.si_signo)
 736			goto still_pending;
 737	}
 738
 739	sigdelset(&pending->signal, sync->info.si_signo);
 740	recalc_sigpending();
 741still_pending:
 742	list_del_init(&sync->list);
 743	copy_siginfo(info, &sync->info);
 744	__sigqueue_free(sync);
 745	return info->si_signo;
 746}
 747
 748/*
  749 * Tell a process that it has a new active signal.
 750 *
 751 * NOTE! we rely on the previous spin_lock to
 752 * lock interrupts for us! We can only be called with
 753 * "siglock" held, and the local interrupt must
 754 * have been disabled when that got acquired!
 755 *
 756 * No need to set need_resched since signal event passing
 757 * goes through ->blocked
 758 */
 759void signal_wake_up_state(struct task_struct *t, unsigned int state)
 760{
 761	lockdep_assert_held(&t->sighand->siglock);
 762
 763	set_tsk_thread_flag(t, TIF_SIGPENDING);
 764
 765	/*
 766	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 767	 * case. We don't check t->state here because there is a race with it
  768	 * executing on another processor and just now entering stopped state.
 769	 * By using wake_up_state, we ensure the process will wake up and
 770	 * handle its death signal.
 771	 */
 772	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 773		kick_process(t);
 774}
 775
 776/*
 777 * Remove signals in mask from the pending set and queue.
 779 *
 780 * All callers must be holding the siglock.
 781 */
 782static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 783{
 784	struct sigqueue *q, *n;
 785	sigset_t m;
 786
 787	sigandsets(&m, mask, &s->signal);
 788	if (sigisemptyset(&m))
 789		return;
 790
 791	sigandnsets(&s->signal, &s->signal, mask);
 792	list_for_each_entry_safe(q, n, &s->list, list) {
 793		if (sigismember(mask, q->info.si_signo)) {
 794			list_del_init(&q->list);
 795			__sigqueue_free(q);
 796		}
 797	}
 798}
 799
 800static inline int is_si_special(const struct kernel_siginfo *info)
 801{
 802	return info <= SEND_SIG_PRIV;
 803}
 804
 805static inline bool si_fromuser(const struct kernel_siginfo *info)
 806{
 807	return info == SEND_SIG_NOINFO ||
 808		(!is_si_special(info) && SI_FROMUSER(info));
 809}
 810
 811/*
 812 * called with RCU read lock from check_kill_permission()
 813 */
 814static bool kill_ok_by_cred(struct task_struct *t)
 815{
 816	const struct cred *cred = current_cred();
 817	const struct cred *tcred = __task_cred(t);
 818
 819	return uid_eq(cred->euid, tcred->suid) ||
 820	       uid_eq(cred->euid, tcred->uid) ||
 821	       uid_eq(cred->uid, tcred->suid) ||
 822	       uid_eq(cred->uid, tcred->uid) ||
 823	       ns_capable(tcred->user_ns, CAP_KILL);
 824}
 825
 826/*
 827 * Bad permissions for sending the signal
 828 * - the caller must hold the RCU read lock
 829 */
 830static int check_kill_permission(int sig, struct kernel_siginfo *info,
 831				 struct task_struct *t)
 832{
 833	struct pid *sid;
 834	int error;
 835
 836	if (!valid_signal(sig))
 837		return -EINVAL;
 838
 839	if (!si_fromuser(info))
 840		return 0;
 841
 842	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 843	if (error)
 844		return error;
 845
 846	if (!same_thread_group(current, t) &&
 847	    !kill_ok_by_cred(t)) {
 848		switch (sig) {
 849		case SIGCONT:
 850			sid = task_session(t);
 851			/*
 852			 * We don't return the error if sid == NULL. The
  853			 * task was unhashed; the caller must notice this.
 854			 */
 855			if (!sid || sid == task_session(current))
 856				break;
 857			fallthrough;
 858		default:
 859			return -EPERM;
 860		}
 861	}
 862
 863	return security_task_kill(t, info, sig, NULL);
 864}
 865
 866/**
 867 * ptrace_trap_notify - schedule trap to notify ptracer
 868 * @t: tracee wanting to notify tracer
 869 *
  870 * This function schedules a sticky ptrace trap which is cleared on the next
 871 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 872 * ptracer.
 873 *
 874 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 875 * ptracer is listening for events, tracee is woken up so that it can
 876 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 877 * eventually taken without returning to userland after the existing traps
 878 * are finished by PTRACE_CONT.
 879 *
 880 * CONTEXT:
  881 * Must be called with @t->sighand->siglock held.
 882 */
 883static void ptrace_trap_notify(struct task_struct *t)
 884{
 885	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 886	lockdep_assert_held(&t->sighand->siglock);
 887
 888	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 889	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 890}
 891
 892/*
 893 * Handle magic process-wide effects of stop/continue signals. Unlike
 894 * the signal actions, these happen immediately at signal-generation
 895 * time regardless of blocking, ignoring, or handling.  This does the
 896 * actual continuing for SIGCONT, but not the actual stopping for stop
 897 * signals. The process stop is done as a signal action for SIG_DFL.
 898 *
 899 * Returns true if the signal should be actually delivered, otherwise
 900 * it should be dropped.
 901 */
 902static bool prepare_signal(int sig, struct task_struct *p, bool force)
 903{
 904	struct signal_struct *signal = p->signal;
 905	struct task_struct *t;
 906	sigset_t flush;
 907
 908	if (signal->flags & SIGNAL_GROUP_EXIT) {
 909		if (signal->core_state)
 910			return sig == SIGKILL;
 911		/*
 912		 * The process is in the middle of dying, drop the signal.
 913		 */
 914		return false;
 915	} else if (sig_kernel_stop(sig)) {
 916		/*
 917		 * This is a stop signal.  Remove SIGCONT from all queues.
 918		 */
 919		siginitset(&flush, sigmask(SIGCONT));
 920		flush_sigqueue_mask(&flush, &signal->shared_pending);
 921		for_each_thread(p, t)
 922			flush_sigqueue_mask(&flush, &t->pending);
 923	} else if (sig == SIGCONT) {
 924		unsigned int why;
 925		/*
 926		 * Remove all stop signals from all queues, wake all threads.
 927		 */
 928		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 929		flush_sigqueue_mask(&flush, &signal->shared_pending);
 930		for_each_thread(p, t) {
 931			flush_sigqueue_mask(&flush, &t->pending);
 932			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 933			if (likely(!(t->ptrace & PT_SEIZED))) {
 934				t->jobctl &= ~JOBCTL_STOPPED;
 935				wake_up_state(t, __TASK_STOPPED);
 936			} else
 937				ptrace_trap_notify(t);
 938		}
 939
 940		/*
 941		 * Notify the parent with CLD_CONTINUED if we were stopped.
 942		 *
 943		 * If we were in the middle of a group stop, we pretend it
 944		 * was already finished, and then continued. Since SIGCHLD
 945		 * doesn't queue we report only CLD_STOPPED, as if the next
 946		 * CLD_CONTINUED was dropped.
 947		 */
 948		why = 0;
 949		if (signal->flags & SIGNAL_STOP_STOPPED)
 950			why |= SIGNAL_CLD_CONTINUED;
 951		else if (signal->group_stop_count)
 952			why |= SIGNAL_CLD_STOPPED;
 953
 954		if (why) {
 955			/*
 956			 * The first thread which returns from do_signal_stop()
 957			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 958			 * notify its parent. See get_signal().
 959			 */
 960			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 961			signal->group_stop_count = 0;
 962			signal->group_exit_code = 0;
 963		}
 964	}
 965
 966	return !sig_ignored(p, sig, force);
 967}
 968
 969/*
 970 * Test if P wants to take SIG.  After we've checked all threads with this,
 971 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 972 * blocking SIG were ruled out because they are not running and already
 973 * have pending signals.  Such threads will dequeue from the shared queue
 974 * as soon as they're available, so putting the signal on the shared queue
 975 * will be equivalent to sending it to one such thread.
 976 */
 977static inline bool wants_signal(int sig, struct task_struct *p)
 978{
 979	if (sigismember(&p->blocked, sig))
 980		return false;
 981
 982	if (p->flags & PF_EXITING)
 983		return false;
 984
 985	if (sig == SIGKILL)
 986		return true;
 987
 988	if (task_is_stopped_or_traced(p))
 989		return false;
 990
 991	return task_curr(p) || !task_sigpending(p);
 992}
 993
 994static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 995{
 996	struct signal_struct *signal = p->signal;
 997	struct task_struct *t;
 998
 999	/*
1000	 * Now find a thread we can wake up to take the signal off the queue.
1001	 *
1002	 * Try the suggested task first (may or may not be the main thread).
1003	 */
1004	if (wants_signal(sig, p))
1005		t = p;
1006	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1007		/*
1008		 * There is just one thread and it does not need to be woken.
1009		 * It will dequeue unblocked signals before it runs again.
1010		 */
1011		return;
1012	else {
1013		/*
1014		 * Otherwise try to find a suitable thread.
1015		 */
1016		t = signal->curr_target;
1017		while (!wants_signal(sig, t)) {
1018			t = next_thread(t);
1019			if (t == signal->curr_target)
1020				/*
1021				 * No thread needs to be woken.
1022				 * Any eligible threads will see
1023				 * the signal in the queue soon.
1024				 */
1025				return;
1026		}
1027		signal->curr_target = t;
1028	}
1029
1030	/*
1031	 * Found a killable thread.  If the signal will be fatal,
1032	 * then start taking the whole group down immediately.
1033	 */
1034	if (sig_fatal(p, sig) &&
1035	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1036	    !sigismember(&t->real_blocked, sig) &&
1037	    (sig == SIGKILL || !p->ptrace)) {
1038		/*
1039		 * This signal will be fatal to the whole group.
1040		 */
1041		if (!sig_kernel_coredump(sig)) {
1042			/*
1043			 * Start a group exit and wake everybody up.
1044			 * This way we don't have other threads
1045			 * running and doing things after a slower
1046			 * thread has the fatal signal pending.
1047			 */
1048			signal->flags = SIGNAL_GROUP_EXIT;
1049			signal->group_exit_code = sig;
1050			signal->group_stop_count = 0;
1051			__for_each_thread(signal, t) {
1052				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1053				sigaddset(&t->pending.signal, SIGKILL);
1054				signal_wake_up(t, 1);
1055			}
1056			return;
1057		}
1058	}
1059
1060	/*
1061	 * The signal is already in the shared-pending queue.
1062	 * Tell the chosen thread to wake up and dequeue it.
1063	 */
1064	signal_wake_up(t, sig == SIGKILL);
1065	return;
1066}
1067
1068static inline bool legacy_queue(struct sigpending *signals, int sig)
1069{
1070	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1071}
1072
1073static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1074				struct task_struct *t, enum pid_type type, bool force)
1075{
1076	struct sigpending *pending;
1077	struct sigqueue *q;
1078	int override_rlimit;
1079	int ret = 0, result;
1080
1081	lockdep_assert_held(&t->sighand->siglock);
1082
1083	result = TRACE_SIGNAL_IGNORED;
1084	if (!prepare_signal(sig, t, force))
1085		goto ret;
1086
1087	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1088	/*
1089	 * Short-circuit ignored signals and support queuing
1090	 * exactly one non-rt signal, so that we can get more
1091	 * detailed information about the cause of the signal.
1092	 */
1093	result = TRACE_SIGNAL_ALREADY_PENDING;
1094	if (legacy_queue(pending, sig))
1095		goto ret;
1096
1097	result = TRACE_SIGNAL_DELIVERED;
1098	/*
1099	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1100	 */
1101	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1102		goto out_set;
1103
1104	/*
1105	 * Real-time signals must be queued if sent by sigqueue, or
1106	 * some other real-time mechanism.  It is implementation
1107	 * defined whether kill() does so.  We attempt to do so, on
1108	 * the principle of least surprise, but since kill is not
1109	 * allowed to fail with EAGAIN when low on memory we just
1110	 * make sure at least one signal gets delivered and don't
1111	 * pass on the info struct.
1112	 */
1113	if (sig < SIGRTMIN)
1114		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1115	else
1116		override_rlimit = 0;
1117
1118	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1119
1120	if (q) {
1121		list_add_tail(&q->list, &pending->list);
1122		switch ((unsigned long) info) {
1123		case (unsigned long) SEND_SIG_NOINFO:
1124			clear_siginfo(&q->info);
1125			q->info.si_signo = sig;
1126			q->info.si_errno = 0;
1127			q->info.si_code = SI_USER;
1128			q->info.si_pid = task_tgid_nr_ns(current,
1129							task_active_pid_ns(t));
1130			rcu_read_lock();
1131			q->info.si_uid =
1132				from_kuid_munged(task_cred_xxx(t, user_ns),
1133						 current_uid());
1134			rcu_read_unlock();
1135			break;
1136		case (unsigned long) SEND_SIG_PRIV:
1137			clear_siginfo(&q->info);
1138			q->info.si_signo = sig;
1139			q->info.si_errno = 0;
1140			q->info.si_code = SI_KERNEL;
1141			q->info.si_pid = 0;
1142			q->info.si_uid = 0;
1143			break;
1144		default:
1145			copy_siginfo(&q->info, info);
1146			break;
1147		}
1148	} else if (!is_si_special(info) &&
1149		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1150		/*
1151		 * Queue overflow, abort.  We may abort if the
1152		 * signal was rt and sent by user using something
1153		 * other than kill().
1154		 */
1155		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1156		ret = -EAGAIN;
1157		goto ret;
1158	} else {
1159		/*
1160		 * This is a silent loss of information.  We still
1161		 * send the signal, but the *info bits are lost.
1162		 */
1163		result = TRACE_SIGNAL_LOSE_INFO;
1164	}
1165
1166out_set:
1167	signalfd_notify(t, sig);
1168	sigaddset(&pending->signal, sig);
1169
1170	/* Let multiprocess signals appear after on-going forks */
1171	if (type > PIDTYPE_TGID) {
1172		struct multiprocess_signals *delayed;
1173		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1174			sigset_t *signal = &delayed->signal;
1175			/* Can't queue both a stop and a continue signal */
1176			if (sig == SIGCONT)
1177				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1178			else if (sig_kernel_stop(sig))
1179				sigdelset(signal, SIGCONT);
1180			sigaddset(signal, sig);
1181		}
1182	}
1183
1184	complete_signal(sig, t, type);
1185ret:
1186	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1187	return ret;
1188}
1189
1190static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1191{
1192	bool ret = false;
1193	switch (siginfo_layout(info->si_signo, info->si_code)) {
1194	case SIL_KILL:
1195	case SIL_CHLD:
1196	case SIL_RT:
1197		ret = true;
1198		break;
1199	case SIL_TIMER:
1200	case SIL_POLL:
1201	case SIL_FAULT:
1202	case SIL_FAULT_TRAPNO:
1203	case SIL_FAULT_MCEERR:
1204	case SIL_FAULT_BNDERR:
1205	case SIL_FAULT_PKUERR:
1206	case SIL_FAULT_PERF_EVENT:
1207	case SIL_SYS:
1208		ret = false;
1209		break;
1210	}
1211	return ret;
1212}
1213
1214int send_signal_locked(int sig, struct kernel_siginfo *info,
1215		       struct task_struct *t, enum pid_type type)
1216{
1217	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1218	bool force = false;
1219
1220	if (info == SEND_SIG_NOINFO) {
1221		/* Force if sent from an ancestor pid namespace */
1222		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1223	} else if (info == SEND_SIG_PRIV) {
1224		/* Don't ignore kernel generated signals */
1225		force = true;
1226	} else if (has_si_pid_and_uid(info)) {
 1227		/* SIGKILL and SIGSTOP are special or have ids */
1228		struct user_namespace *t_user_ns;
1229
1230		rcu_read_lock();
1231		t_user_ns = task_cred_xxx(t, user_ns);
1232		if (current_user_ns() != t_user_ns) {
1233			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1234			info->si_uid = from_kuid_munged(t_user_ns, uid);
1235		}
1236		rcu_read_unlock();
1237
1238		/* A kernel generated signal? */
1239		force = (info->si_code == SI_KERNEL);
1240
1241		/* From an ancestor pid namespace? */
1242		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1243			info->si_pid = 0;
1244			force = true;
1245		}
1246	}
1247	return __send_signal_locked(sig, info, t, type, force);
1248}
1249
1250static void print_fatal_signal(int signr)
1251{
1252	struct pt_regs *regs = task_pt_regs(current);
1253	struct file *exe_file;
1254
1255	exe_file = get_task_exe_file(current);
1256	if (exe_file) {
1257		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1258			exe_file, current->comm, signr);
1259		fput(exe_file);
1260	} else {
1261		pr_info("%s: potentially unexpected fatal signal %d.\n",
1262			current->comm, signr);
1263	}
1264
1265#if defined(__i386__) && !defined(__arch_um__)
1266	pr_info("code at %08lx: ", regs->ip);
1267	{
1268		int i;
1269		for (i = 0; i < 16; i++) {
1270			unsigned char insn;
1271
1272			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1273				break;
1274			pr_cont("%02x ", insn);
1275		}
1276	}
1277	pr_cont("\n");
1278#endif
1279	preempt_disable();
1280	show_regs(regs);
1281	preempt_enable();
1282}
1283
1284static int __init setup_print_fatal_signals(char *str)
1285{
1286	get_option (&str, &print_fatal_signals);
1287
1288	return 1;
1289}
1290
1291__setup("print-fatal-signals=", setup_print_fatal_signals);
1292
1293int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1294			enum pid_type type)
1295{
1296	unsigned long flags;
1297	int ret = -ESRCH;
1298
1299	if (lock_task_sighand(p, &flags)) {
1300		ret = send_signal_locked(sig, info, p, type);
1301		unlock_task_sighand(p, &flags);
1302	}
1303
1304	return ret;
1305}
1306
1307enum sig_handler {
1308	HANDLER_CURRENT, /* If reachable use the current handler */
1309	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1310	HANDLER_EXIT,	 /* Only visible as the process exit code */
1311};
1312
1313/*
1314 * Force a signal that the process can't ignore: if necessary
1315 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1316 *
1317 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1318 * since we do not want to have a signal handler that was blocked
1319 * be invoked when user space had explicitly blocked it.
1320 *
1321 * We don't want to have recursive SIGSEGV's etc, for example,
1322 * that is why we also clear SIGNAL_UNKILLABLE.
1323 */
1324static int
1325force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1326	enum sig_handler handler)
1327{
1328	unsigned long int flags;
1329	int ret, blocked, ignored;
1330	struct k_sigaction *action;
1331	int sig = info->si_signo;
1332
1333	spin_lock_irqsave(&t->sighand->siglock, flags);
1334	action = &t->sighand->action[sig-1];
1335	ignored = action->sa.sa_handler == SIG_IGN;
1336	blocked = sigismember(&t->blocked, sig);
1337	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1338		action->sa.sa_handler = SIG_DFL;
1339		if (handler == HANDLER_EXIT)
1340			action->sa.sa_flags |= SA_IMMUTABLE;
1341		if (blocked)
1342			sigdelset(&t->blocked, sig);
1343	}
1344	/*
1345	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1346	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1347	 */
1348	if (action->sa.sa_handler == SIG_DFL &&
1349	    (!t->ptrace || (handler == HANDLER_EXIT)))
1350		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1351	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1352	/* This can happen if the signal was already pending and blocked */
1353	if (!task_sigpending(t))
1354		signal_wake_up(t, 0);
1355	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1356
1357	return ret;
1358}
1359
1360int force_sig_info(struct kernel_siginfo *info)
1361{
1362	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1363}
1364
1365/*
1366 * Nuke all other threads in the group.
1367 */
1368int zap_other_threads(struct task_struct *p)
1369{
1370	struct task_struct *t;
1371	int count = 0;
1372
1373	p->signal->group_stop_count = 0;
1374
1375	for_other_threads(p, t) {
1376		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1377		/* Don't require de_thread to wait for the vhost_worker */
1378		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
1379			count++;
1380
1381		/* Don't bother with already dead threads */
1382		if (t->exit_state)
1383			continue;
1384		sigaddset(&t->pending.signal, SIGKILL);
1385		signal_wake_up(t, 1);
1386	}
1387
1388	return count;
1389}
1390
1391struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1392					   unsigned long *flags)
1393{
1394	struct sighand_struct *sighand;
1395
1396	rcu_read_lock();
1397	for (;;) {
1398		sighand = rcu_dereference(tsk->sighand);
1399		if (unlikely(sighand == NULL))
1400			break;
1401
1402		/*
1403		 * This sighand can be already freed and even reused, but
1404		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1405		 * initializes ->siglock: this slab can't go away, it has
1406		 * the same object type, ->siglock can't be reinitialized.
1407		 *
1408		 * We need to ensure that tsk->sighand is still the same
1409		 * after we take the lock, we can race with de_thread() or
1410		 * __exit_signal(). In the latter case the next iteration
1411		 * must see ->sighand == NULL.
1412		 */
1413		spin_lock_irqsave(&sighand->siglock, *flags);
1414		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1415			break;
1416		spin_unlock_irqrestore(&sighand->siglock, *flags);
1417	}
1418	rcu_read_unlock();
1419
1420	return sighand;
1421}
1422
1423#ifdef CONFIG_LOCKDEP
1424void lockdep_assert_task_sighand_held(struct task_struct *task)
1425{
1426	struct sighand_struct *sighand;
1427
1428	rcu_read_lock();
1429	sighand = rcu_dereference(task->sighand);
1430	if (sighand)
1431		lockdep_assert_held(&sighand->siglock);
1432	else
1433		WARN_ON_ONCE(1);
1434	rcu_read_unlock();
1435}
1436#endif
1437
1438/*
1439 * send signal info to all the members of a group
1440 */
1441int group_send_sig_info(int sig, struct kernel_siginfo *info,
1442			struct task_struct *p, enum pid_type type)
1443{
1444	int ret;
1445
1446	rcu_read_lock();
1447	ret = check_kill_permission(sig, info, p);
1448	rcu_read_unlock();
1449
1450	if (!ret && sig)
1451		ret = do_send_sig_info(sig, info, p, type);
1452
1453	return ret;
1454}
1455
1456/*
1457 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1458 * control characters do (^C, ^Z etc)
1459 * - the caller must hold at least a readlock on tasklist_lock
1460 */
1461int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1462{
1463	struct task_struct *p = NULL;
1464	int ret = -ESRCH;
1465
1466	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1467		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1468		/*
1469		 * If group_send_sig_info() succeeds at least once ret
1470		 * becomes 0 and after that the code below has no effect.
1471		 * Otherwise we return the last err or -ESRCH if this
1472		 * process group is empty.
1473		 */
1474		if (ret)
1475			ret = err;
1476	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1477
1478	return ret;
1479}
1480
1481int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1482{
1483	int error = -ESRCH;
1484	struct task_struct *p;
1485
1486	for (;;) {
1487		rcu_read_lock();
1488		p = pid_task(pid, PIDTYPE_PID);
1489		if (p)
1490			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1491		rcu_read_unlock();
1492		if (likely(!p || error != -ESRCH))
1493			return error;
1494
1495		/*
1496		 * The task was unhashed in between, try again.  If it
1497		 * is dead, pid_task() will return NULL, if we race with
1498		 * de_thread() it will find the new leader.
1499		 */
1500	}
1501}
1502
1503static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1504{
1505	int error;
1506	rcu_read_lock();
1507	error = kill_pid_info(sig, info, find_vpid(pid));
1508	rcu_read_unlock();
1509	return error;
1510}
1511
1512static inline bool kill_as_cred_perm(const struct cred *cred,
1513				     struct task_struct *target)
1514{
1515	const struct cred *pcred = __task_cred(target);
1516
1517	return uid_eq(cred->euid, pcred->suid) ||
1518	       uid_eq(cred->euid, pcred->uid) ||
1519	       uid_eq(cred->uid, pcred->suid) ||
1520	       uid_eq(cred->uid, pcred->uid);
1521}
1522
1523/*
1524 * The usb asyncio usage of siginfo is wrong.  The glibc support
1525 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1526 * AKA after the generic fields:
1527 *	kernel_pid_t	si_pid;
1528 *	kernel_uid32_t	si_uid;
1529 *	sigval_t	si_value;
1530 *
1531 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1532 * after the generic fields is:
1533 *	void __user 	*si_addr;
1534 *
1535 * This is a practical problem when there is a 64bit big endian kernel
 1536 * and a 32bit userspace.  As the 32bit address will be encoded in the
 1537 * low 32bits of the pointer, those low 32bits are stored at a higher
 1538 * address than they would be in a 32 bit pointer.  So userspace will
 1539 * not see the address it was expecting for its completions.
1540 *
1541 * There is nothing in the encoding that can allow
1542 * copy_siginfo_to_user32 to detect this confusion of formats, so
1543 * handle this by requiring the caller of kill_pid_usb_asyncio to
 1544 * notice when this situation takes place and to store the 32bit
 1545 * pointer in sival_int, instead of sival_ptr of the sigval_t addr
1546 * parameter.
1547 */
1548int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1549			 struct pid *pid, const struct cred *cred)
1550{
1551	struct kernel_siginfo info;
1552	struct task_struct *p;
1553	unsigned long flags;
1554	int ret = -EINVAL;
1555
1556	if (!valid_signal(sig))
1557		return ret;
1558
1559	clear_siginfo(&info);
1560	info.si_signo = sig;
1561	info.si_errno = errno;
1562	info.si_code = SI_ASYNCIO;
1563	*((sigval_t *)&info.si_pid) = addr;
1564
1565	rcu_read_lock();
1566	p = pid_task(pid, PIDTYPE_PID);
1567	if (!p) {
1568		ret = -ESRCH;
1569		goto out_unlock;
1570	}
1571	if (!kill_as_cred_perm(cred, p)) {
1572		ret = -EPERM;
1573		goto out_unlock;
1574	}
1575	ret = security_task_kill(p, &info, sig, cred);
1576	if (ret)
1577		goto out_unlock;
1578
1579	if (sig) {
1580		if (lock_task_sighand(p, &flags)) {
1581			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1582			unlock_task_sighand(p, &flags);
1583		} else
1584			ret = -ESRCH;
1585	}
1586out_unlock:
1587	rcu_read_unlock();
1588	return ret;
1589}
1590EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1591
1592/*
1593 * kill_something_info() interprets pid in interesting ways just like kill(2).
1594 *
1595 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1596 * is probably wrong.  Should make it like BSD or SYSV.
1597 */
1598
1599static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1600{
1601	int ret;
1602
1603	if (pid > 0)
1604		return kill_proc_info(sig, info, pid);
1605
1606	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1607	if (pid == INT_MIN)
1608		return -ESRCH;
1609
1610	read_lock(&tasklist_lock);
1611	if (pid != -1) {
1612		ret = __kill_pgrp_info(sig, info,
1613				pid ? find_vpid(-pid) : task_pgrp(current));
1614	} else {
1615		int retval = 0, count = 0;
1616		struct task_struct * p;
1617
1618		for_each_process(p) {
1619			if (task_pid_vnr(p) > 1 &&
1620					!same_thread_group(p, current)) {
1621				int err = group_send_sig_info(sig, info, p,
1622							      PIDTYPE_MAX);
1623				++count;
1624				if (err != -EPERM)
1625					retval = err;
1626			}
1627		}
1628		ret = count ? retval : -ESRCH;
1629	}
1630	read_unlock(&tasklist_lock);
1631
1632	return ret;
1633}
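
/*
 * Summary of the pid interpretation above (editor's note; mirrors
 * kill(2)):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process with vpid > 1 that the caller
 *			may signal, except the caller's own thread group
 *	pid < -1	signal every process in the process group -pid
 */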
1634
1635/*
1636 * These are for backward compatibility with the rest of the kernel source.
1637 */
1638
1639int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1640{
1641	/*
1642	 * Make sure legacy kernel users don't send in bad values
1643	 * (normal paths check this in check_kill_permission).
1644	 */
1645	if (!valid_signal(sig))
1646		return -EINVAL;
1647
1648	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1649}
1650EXPORT_SYMBOL(send_sig_info);
1651
1652#define __si_special(priv) \
1653	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1654
1655int
1656send_sig(int sig, struct task_struct *p, int priv)
1657{
1658	return send_sig_info(sig, __si_special(priv), p);
1659}
1660EXPORT_SYMBOL(send_sig);
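
/*
 * Example (editor's illustration): __si_special() maps priv onto the
 * special info pointers, so
 *
 *	send_sig(SIGTERM, p, 1);	kernel-private (SEND_SIG_PRIV)
 *	send_sig(SIGTERM, p, 0);	as if from userspace (SEND_SIG_NOINFO)
 *
 * selects whether the signal is treated as kernel generated.
 */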
1661
1662void force_sig(int sig)
1663{
1664	struct kernel_siginfo info;
1665
1666	clear_siginfo(&info);
1667	info.si_signo = sig;
1668	info.si_errno = 0;
1669	info.si_code = SI_KERNEL;
1670	info.si_pid = 0;
1671	info.si_uid = 0;
1672	force_sig_info(&info);
1673}
1674EXPORT_SYMBOL(force_sig);
1675
1676void force_fatal_sig(int sig)
1677{
1678	struct kernel_siginfo info;
1679
1680	clear_siginfo(&info);
1681	info.si_signo = sig;
1682	info.si_errno = 0;
1683	info.si_code = SI_KERNEL;
1684	info.si_pid = 0;
1685	info.si_uid = 0;
1686	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1687}
1688
1689void force_exit_sig(int sig)
1690{
1691	struct kernel_siginfo info;
1692
1693	clear_siginfo(&info);
1694	info.si_signo = sig;
1695	info.si_errno = 0;
1696	info.si_code = SI_KERNEL;
1697	info.si_pid = 0;
1698	info.si_uid = 0;
1699	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1700}
1701
1702/*
1703 * When things go south during signal handling, we
1704 * will force a SIGSEGV. And if the signal that caused
1705 * the problem was already a SIGSEGV, we'll want to
 1706 * make sure we don't even try to deliver the signal.
1707 */
1708void force_sigsegv(int sig)
1709{
1710	if (sig == SIGSEGV)
1711		force_fatal_sig(SIGSEGV);
1712	else
1713		force_sig(SIGSEGV);
1714}
1715
1716int force_sig_fault_to_task(int sig, int code, void __user *addr,
1717			    struct task_struct *t)
1718{
1719	struct kernel_siginfo info;
1720
1721	clear_siginfo(&info);
1722	info.si_signo = sig;
1723	info.si_errno = 0;
1724	info.si_code  = code;
1725	info.si_addr  = addr;
1726	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1727}
1728
1729int force_sig_fault(int sig, int code, void __user *addr)
1730{
1731	return force_sig_fault_to_task(sig, code, addr, current);
1732}
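
/*
 * Typical use (editor's illustrative sketch): an architecture fault
 * handler reporting a bad user access on the current task would do
 * something like
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
 *
 * which queues a SIL_FAULT siginfo that cannot stay blocked or ignored.
 */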
1733
1734int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1735{
1736	struct kernel_siginfo info;
1737
1738	clear_siginfo(&info);
1739	info.si_signo = sig;
1740	info.si_errno = 0;
1741	info.si_code  = code;
1742	info.si_addr  = addr;
1743	return send_sig_info(info.si_signo, &info, t);
1744}
1745
1746int force_sig_mceerr(int code, void __user *addr, short lsb)
1747{
1748	struct kernel_siginfo info;
1749
1750	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1751	clear_siginfo(&info);
1752	info.si_signo = SIGBUS;
1753	info.si_errno = 0;
1754	info.si_code = code;
1755	info.si_addr = addr;
1756	info.si_addr_lsb = lsb;
1757	return force_sig_info(&info);
1758}
1759
1760int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1761{
1762	struct kernel_siginfo info;
1763
1764	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1765	clear_siginfo(&info);
1766	info.si_signo = SIGBUS;
1767	info.si_errno = 0;
1768	info.si_code = code;
1769	info.si_addr = addr;
1770	info.si_addr_lsb = lsb;
1771	return send_sig_info(info.si_signo, &info, t);
1772}
1773EXPORT_SYMBOL(send_sig_mceerr);
1774
1775int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1776{
1777	struct kernel_siginfo info;
1778
1779	clear_siginfo(&info);
1780	info.si_signo = SIGSEGV;
1781	info.si_errno = 0;
1782	info.si_code  = SEGV_BNDERR;
1783	info.si_addr  = addr;
1784	info.si_lower = lower;
1785	info.si_upper = upper;
1786	return force_sig_info(&info);
1787}
1788
1789#ifdef SEGV_PKUERR
1790int force_sig_pkuerr(void __user *addr, u32 pkey)
1791{
1792	struct kernel_siginfo info;
1793
1794	clear_siginfo(&info);
1795	info.si_signo = SIGSEGV;
1796	info.si_errno = 0;
1797	info.si_code  = SEGV_PKUERR;
1798	info.si_addr  = addr;
1799	info.si_pkey  = pkey;
1800	return force_sig_info(&info);
1801}
1802#endif
1803
1804int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1805{
1806	struct kernel_siginfo info;
1807
1808	clear_siginfo(&info);
1809	info.si_signo     = SIGTRAP;
1810	info.si_errno     = 0;
1811	info.si_code      = TRAP_PERF;
1812	info.si_addr      = addr;
1813	info.si_perf_data = sig_data;
1814	info.si_perf_type = type;
1815
1816	/*
1817	 * Signals generated by perf events should not terminate the whole
 1818 * process if SIGTRAP is blocked; however, delivering the signal
1819	 * asynchronously is better than not delivering at all. But tell user
1820	 * space if the signal was asynchronous, so it can clearly be
1821	 * distinguished from normal synchronous ones.
1822	 */
1823	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1824				     TRAP_PERF_FLAG_ASYNC :
1825				     0;
1826
1827	return send_sig_info(info.si_signo, &info, current);
1828}
1829
1830/**
1831 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1832 * @syscall: syscall number to send to userland
1833 * @reason: filter-supplied reason code to send to userland (via si_errno)
1834 * @force_coredump: true to trigger a coredump
1835 *
1836 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1837 */
1838int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1839{
1840	struct kernel_siginfo info;
1841
1842	clear_siginfo(&info);
1843	info.si_signo = SIGSYS;
1844	info.si_code = SYS_SECCOMP;
1845	info.si_call_addr = (void __user *)KSTK_EIP(current);
1846	info.si_errno = reason;
1847	info.si_arch = syscall_get_arch(current);
1848	info.si_syscall = syscall;
1849	return force_sig_info_to_task(&info, current,
1850		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1851}
1852
1853/* For the crazy architectures that include trap information in
1854 * the errno field, instead of an actual errno value.
1855 */
1856int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1857{
1858	struct kernel_siginfo info;
1859
1860	clear_siginfo(&info);
1861	info.si_signo = SIGTRAP;
1862	info.si_errno = errno;
1863	info.si_code  = TRAP_HWBKPT;
1864	info.si_addr  = addr;
1865	return force_sig_info(&info);
1866}
1867
1868/* For the rare architectures that include trap information using
1869 * si_trapno.
1870 */
1871int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1872{
1873	struct kernel_siginfo info;
1874
1875	clear_siginfo(&info);
1876	info.si_signo = sig;
1877	info.si_errno = 0;
1878	info.si_code  = code;
1879	info.si_addr  = addr;
1880	info.si_trapno = trapno;
1881	return force_sig_info(&info);
1882}
1883
1884/* For the rare architectures that include trap information using
1885 * si_trapno.
1886 */
1887int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1888			  struct task_struct *t)
1889{
1890	struct kernel_siginfo info;
1891
1892	clear_siginfo(&info);
1893	info.si_signo = sig;
1894	info.si_errno = 0;
1895	info.si_code  = code;
1896	info.si_addr  = addr;
1897	info.si_trapno = trapno;
1898	return send_sig_info(info.si_signo, &info, t);
1899}
1900
1901int kill_pgrp(struct pid *pid, int sig, int priv)
1902{
1903	int ret;
1904
1905	read_lock(&tasklist_lock);
1906	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1907	read_unlock(&tasklist_lock);
1908
1909	return ret;
1910}
1911EXPORT_SYMBOL(kill_pgrp);
1912
1913int kill_pid(struct pid *pid, int sig, int priv)
1914{
1915	return kill_pid_info(sig, __si_special(priv), pid);
1916}
1917EXPORT_SYMBOL(kill_pid);
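
/*
 * Illustrative sketch: kill_pgrp()/kill_pid() let kernel code signal a
 * process group or a single process via struct pid references alone, no
 * task_struct needed.  The pid source below is an assumption (e.g. a
 * tty's saved foreground process group); priv=1 marks a kernel-internal
 * sender:
 *
 *	struct pid *pgrp = ...;
 *
 *	kill_pgrp(pgrp, SIGHUP, 1);
 *	kill_pgrp(pgrp, SIGCONT, 1);
 */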
1918
1919/*
1920 * These functions support sending signals using preallocated sigqueue
1921 * structures.  This is needed "because realtime applications cannot
1922 * afford to lose notifications of asynchronous events, like timer
1923 * expirations or I/O completions".  In the case of POSIX timers
1924 * we allocate the sigqueue structure in timer_create().  If this
1925 * allocation fails, we are able to report the failure to the application
1926 * with an EAGAIN error (see the illustrative sketch after sigqueue_free()).
1927 */
1928struct sigqueue *sigqueue_alloc(void)
1929{
1930	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1931}
1932
1933void sigqueue_free(struct sigqueue *q)
1934{
1935	unsigned long flags;
1936	spinlock_t *lock = &current->sighand->siglock;
1937
1938	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1939	/*
1940	 * We must hold ->siglock while testing q->list
1941	 * to serialize with collect_signal() or with
1942	 * __exit_signal()->flush_sigqueue().
1943	 */
1944	spin_lock_irqsave(lock, flags);
1945	q->flags &= ~SIGQUEUE_PREALLOC;
1946	/*
1947	 * If it is queued it will be freed when dequeued,
1948	 * like the "regular" sigqueue.
1949	 */
1950	if (!list_empty(&q->list))
1951		q = NULL;
1952	spin_unlock_irqrestore(lock, flags);
1953
1954	if (q)
1955		__sigqueue_free(q);
1956}
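
/*
 * Illustrative sketch of the preallocation lifecycle described above,
 * loosely modeled on the POSIX-timer code (locking and error paths
 * omitted; 'q' and 'pid' are assumed to be stashed in the timer):
 *
 *	// timer_create(): preallocate so a later expiry cannot hit ENOMEM
 *	q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;		// reportable at creation time
 *
 *	// timer expiry: queue the preallocated entry; if it is still
 *	// queued from an earlier expiry, si_overrun is bumped instead
 *	send_sigqueue(q, pid, PIDTYPE_TGID);
 *
 *	// timer_delete(): release it (deferred if currently queued)
 *	sigqueue_free(q);
 */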
1957
1958int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1959{
1960	int sig = q->info.si_signo;
1961	struct sigpending *pending;
1962	struct task_struct *t;
1963	unsigned long flags;
1964	int ret, result;
1965
1966	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1967
1968	ret = -1;
1969	rcu_read_lock();
1970
1971	/*
1972	 * This function is used by POSIX timers to deliver a timer signal.
1973	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1974	 * set), the signal must be delivered to the specific thread (queues
1975	 * into t->pending).
1976	 *
1977	 * Where type is not PIDTYPE_PID, signals must be delivered to the
1978	 * process. In this case, prefer to deliver to current if it is in
1979	 * the same thread group as the target process, which avoids
1980	 * unnecessarily waking up a potentially idle task.
1981	 */
1982	t = pid_task(pid, type);
1983	if (!t)
1984		goto ret;
1985	if (type != PIDTYPE_PID && same_thread_group(t, current))
1986		t = current;
1987	if (!likely(lock_task_sighand(t, &flags)))
1988		goto ret;
1989
1990	ret = 1; /* the signal is ignored */
1991	result = TRACE_SIGNAL_IGNORED;
1992	if (!prepare_signal(sig, t, false))
1993		goto out;
1994
1995	ret = 0;
1996	if (unlikely(!list_empty(&q->list))) {
1997		/*
1998		 * If an SI_TIMER entry is already queued, just increment
1999		 * the overrun count.
2000		 */
2001		BUG_ON(q->info.si_code != SI_TIMER);
2002		q->info.si_overrun++;
2003		result = TRACE_SIGNAL_ALREADY_PENDING;
2004		goto out;
2005	}
2006	q->info.si_overrun = 0;
2007
2008	signalfd_notify(t, sig);
2009	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2010	list_add_tail(&q->list, &pending->list);
2011	sigaddset(&pending->signal, sig);
2012	complete_signal(sig, t, type);
2013	result = TRACE_SIGNAL_DELIVERED;
2014out:
2015	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2016	unlock_task_sighand(t, &flags);
2017ret:
2018	rcu_read_unlock();
2019	return ret;
2020}
2021
2022static void do_notify_pidfd(struct task_struct *task)
2023{
2024	struct pid *pid;
2025
2026	WARN_ON(task->exit_state == 0);
2027	pid = task_pid(task);
2028	wake_up_all(&pid->wait_pidfd);
2029}
2030
2031/*
2032 * Let a parent know about the death of a child.
2033 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2034 *
2035 * Returns true if our parent ignored us and so we've switched to
2036 * self-reaping.
2037 */
2038bool do_notify_parent(struct task_struct *tsk, int sig)
2039{
2040	struct kernel_siginfo info;
2041	unsigned long flags;
2042	struct sighand_struct *psig;
2043	bool autoreap = false;
2044	u64 utime, stime;
2045
2046	WARN_ON_ONCE(sig == -1);
2047
2048	/* do_notify_parent_cldstop should have been called instead.  */
2049	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2050
2051	WARN_ON_ONCE(!tsk->ptrace &&
2052	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2053
2054	/* Wake up all pidfd waiters */
2055	do_notify_pidfd(tsk);
2056
2057	if (sig != SIGCHLD) {
2058		/*
2059		 * This is only possible if parent == real_parent.
2060		 * Check if it has changed security domain.
2061		 */
2062		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2063			sig = SIGCHLD;
2064	}
2065
2066	clear_siginfo(&info);
2067	info.si_signo = sig;
2068	info.si_errno = 0;
2069	/*
2070	 * We are under tasklist_lock here so our parent is tied to
2071	 * us and cannot change.
2072	 *
2073	 * task_active_pid_ns will always return the same pid namespace
2074	 * until a task passes through release_task.
2075	 *
2076	 * write_lock() currently calls preempt_disable(), which is the
2077	 * same as rcu_read_lock(), but according to Oleg it is not
2078	 * correct to rely on this.
2079	 */
2080	rcu_read_lock();
2081	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2082	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2083				       task_uid(tsk));
2084	rcu_read_unlock();
2085
2086	task_cputime(tsk, &utime, &stime);
2087	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2088	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2089
2090	info.si_status = tsk->exit_code & 0x7f;
2091	if (tsk->exit_code & 0x80)
2092		info.si_code = CLD_DUMPED;
2093	else if (tsk->exit_code & 0x7f)
2094		info.si_code = CLD_KILLED;
2095	else {
2096		info.si_code = CLD_EXITED;
2097		info.si_status = tsk->exit_code >> 8;
2098	}
2099
2100	psig = tsk->parent->sighand;
2101	spin_lock_irqsave(&psig->siglock, flags);
2102	if (!tsk->ptrace && sig == SIGCHLD &&
2103	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2104	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2105		/*
2106		 * We are exiting and our parent doesn't care.  POSIX.1
2107		 * defines special semantics for setting SIGCHLD to SIG_IGN
2108		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2109		 * automatically and not left for our parent's wait4 call.
2110		 * Rather than having the parent do it as a magic kind of
2111		 * signal handler, we just set this to tell do_exit that we
2112		 * can be cleaned up without becoming a zombie.  Note that
2113		 * we still call __wake_up_parent in this case, because a
2114		 * blocked sys_wait4 might now return -ECHILD.
2115		 *
2116		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2117		 * is implementation-defined: we do (if you don't want
2118		 * it, just use SIG_IGN instead).
2119		 */
2120		autoreap = true;
2121		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2122			sig = 0;
2123	}
2124	/*
2125	 * Send with __send_signal as si_pid and si_uid are in the
2126	 * parent's namespaces.
2127	 */
2128	if (valid_signal(sig) && sig)
2129		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2130	__wake_up_parent(tsk, tsk->parent);
2131	spin_unlock_irqrestore(&psig->siglock, flags);
2132
2133	return autoreap;
2134}
2135
2136/**
2137 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2138 * @tsk: task reporting the state change
2139 * @for_ptracer: the notification is for ptracer
2140 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2141 *
2142 * Notify @tsk's parent that the stopped/continued state has changed.  If
2143 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2144 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2145 *
2146 * CONTEXT:
2147 * Must be called with tasklist_lock at least read locked.
2148 */
2149static void do_notify_parent_cldstop(struct task_struct *tsk,
2150				     bool for_ptracer, int why)
2151{
2152	struct kernel_siginfo info;
2153	unsigned long flags;
2154	struct task_struct *parent;
2155	struct sighand_struct *sighand;
2156	u64 utime, stime;
2157
2158	if (for_ptracer) {
2159		parent = tsk->parent;
2160	} else {
2161		tsk = tsk->group_leader;
2162		parent = tsk->real_parent;
2163	}
2164
2165	clear_siginfo(&info);
2166	info.si_signo = SIGCHLD;
2167	info.si_errno = 0;
2168	/*
2169	 * see comment in do_notify_parent() about the following 4 lines
2170	 */
2171	rcu_read_lock();
2172	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2173	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2174	rcu_read_unlock();
2175
2176	task_cputime(tsk, &utime, &stime);
2177	info.si_utime = nsec_to_clock_t(utime);
2178	info.si_stime = nsec_to_clock_t(stime);
2179
2180	info.si_code = why;
2181	switch (why) {
2182	case CLD_CONTINUED:
2183		info.si_status = SIGCONT;
2184		break;
2185	case CLD_STOPPED:
2186		info.si_status = tsk->signal->group_exit_code & 0x7f;
2187		break;
2188	case CLD_TRAPPED:
2189		info.si_status = tsk->exit_code & 0x7f;
2190		break;
2191	default:
2192		BUG();
2193	}
2194
2195	sighand = parent->sighand;
2196	spin_lock_irqsave(&sighand->siglock, flags);
2197	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2198	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2199		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2200	/*
2201	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2202	 */
2203	__wake_up_parent(tsk, parent);
2204	spin_unlock_irqrestore(&sighand->siglock, flags);
2205}
2206
2207/*
2208 * This must be called with current->sighand->siglock held.
2209 *
2210 * This should be the path for all ptrace stops.
2211 * We always set current->last_siginfo while stopped here.
2212 * That makes it a way to test a stopped process for
2213 * being ptrace-stopped vs being job-control-stopped.
2214 *
2215 * Returns the signal the ptracer requested the code resume
2216 * with.  If the code did not stop because the tracer is gone,
2217 * the stop signal remains unchanged.
2218 */
2219static int ptrace_stop(int exit_code, int why, unsigned long message,
2220		       kernel_siginfo_t *info)
2221	__releases(&current->sighand->siglock)
2222	__acquires(&current->sighand->siglock)
2223{
2224	bool gstop_done = false;
2225
2226	if (arch_ptrace_stop_needed()) {
2227		/*
2228		 * The arch code has something special to do before a
2229		 * ptrace stop.  This is allowed to block, e.g. for faults
2230		 * on user stack pages.  We can't keep the siglock while
2231		 * calling arch_ptrace_stop, so we must release it now.
2232		 * To preserve proper semantics, we must do this before
2233		 * any signal bookkeeping like checking group_stop_count.
2234		 */
2235		spin_unlock_irq(&current->sighand->siglock);
2236		arch_ptrace_stop();
2237		spin_lock_irq(&current->sighand->siglock);
2238	}
2239
2240	/*
2241	 * After this point ptrace_signal_wake_up or signal_wake_up
2242	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2243	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2244	 * signals here to prevent ptrace_stop sleeping in schedule.
2245	 */
2246	if (!current->ptrace || __fatal_signal_pending(current))
2247		return exit_code;
2248
2249	set_special_state(TASK_TRACED);
2250	current->jobctl |= JOBCTL_TRACED;
2251
2252	/*
2253	 * We're committing to trapping.  TRACED should be visible before
2254	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2255	 * Also, transition to TRACED and updates to ->jobctl should be
2256	 * atomic with respect to siglock and should be done after the arch
2257	 * hook as siglock is released and regrabbed across it.
2258	 *
2259	 *     TRACER				    TRACEE
2260	 *
2261	 *     ptrace_attach()
2262	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2263	 *     do_wait()
2264	 *       set_current_state()                smp_wmb();
2265	 *       ptrace_do_wait()
2266	 *         wait_task_stopped()
2267	 *           task_stopped_code()
2268	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2269	 */
2270	smp_wmb();
2271
2272	current->ptrace_message = message;
2273	current->last_siginfo = info;
2274	current->exit_code = exit_code;
2275
2276	/*
2277	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2278	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2279	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2280	 * could be clear now.  We act as if SIGCONT is received after
2281	 * TASK_TRACED is entered - ignore it.
2282	 */
2283	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2284		gstop_done = task_participate_group_stop(current);
2285
2286	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2287	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2288	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2289		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2290
2291	/* entering a trap, clear TRAPPING */
2292	task_clear_jobctl_trapping(current);
2293
2294	spin_unlock_irq(&current->sighand->siglock);
2295	read_lock(&tasklist_lock);
2296	/*
2297	 * Notify parents of the stop.
2298	 *
2299	 * While ptraced, there are two parents - the ptracer and
2300	 * the real_parent of the group_leader.  The ptracer should
2301	 * know about every stop while the real parent is only
2302	 * interested in the completion of group stop.  The states
2303	 * for the two don't interact with each other.  Notify
2304	 * separately unless they're gonna be duplicates.
2305	 */
2306	if (current->ptrace)
2307		do_notify_parent_cldstop(current, true, why);
2308	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2309		do_notify_parent_cldstop(current, false, why);
2310
2311	/*
2312	 * The previous do_notify_parent_cldstop() invocation woke the ptracer.
2313	 * On a PREEMPTION kernel this can result in a preemption requirement
2314	 * which will be fulfilled after read_unlock(), and the ptracer will be
2315	 * put on the CPU.
2316	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2317	 * this task to wait in schedule(). If this task gets preempted then it
2318	 * remains enqueued on the runqueue. The ptracer will observe this and
2319	 * then sleep for a delay of one HZ tick. In the meantime this task
2320	 * gets scheduled, enters schedule() and will wait for the ptracer.
2321	 *
2322	 * This preemption point is not bad from a correctness point of
2323	 * view but extends the runtime by one HZ tick due to the
2324	 * ptracer's sleep.  The preempt-disable section ensures that there
2325	 * will be no preemption between unlock and schedule(), thereby
2326	 * improving performance, since the ptracer will observe that
2327	 * the tracee is scheduled out once it gets on the CPU.
2328	 *
2329	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2330	 * Therefore the task can be preempted after do_notify_parent_cldstop()
2331	 * before unlocking tasklist_lock so there is no benefit in doing this.
2332	 *
2333	 * In fact disabling preemption is harmful on PREEMPT_RT because
2334	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2335	 * with preemption disabled due to the 'sleeping' spinlock
2336	 * substitution of RT.
2337	 */
2338	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2339		preempt_disable();
2340	read_unlock(&tasklist_lock);
2341	cgroup_enter_frozen();
2342	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2343		preempt_enable_no_resched();
2344	schedule();
2345	cgroup_leave_frozen(true);
2346
2347	/*
2348	 * We are back.  Now reacquire the siglock before touching
2349	 * last_siginfo, so that we are sure to have synchronized with
2350	 * any signal-sending on another CPU that wants to examine it.
2351	 */
2352	spin_lock_irq(&current->sighand->siglock);
2353	exit_code = current->exit_code;
2354	current->last_siginfo = NULL;
2355	current->ptrace_message = 0;
2356	current->exit_code = 0;
2357
2358	/* LISTENING can be set only during STOP traps, clear it */
2359	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2360
2361	/*
2362	 * Queued signals ignored us while we were stopped for tracing.
2363	 * So check for any that we should take before resuming user mode.
2364	 * This sets TIF_SIGPENDING, but never clears it.
2365	 */
2366	recalc_sigpending_tsk(current);
2367	return exit_code;
2368}
2369
2370static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2371{
2372	kernel_siginfo_t info;
2373
2374	clear_siginfo(&info);
2375	info.si_signo = signr;
2376	info.si_code = exit_code;
2377	info.si_pid = task_pid_vnr(current);
2378	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2379
2380	/* Let the debugger run.  */
2381	return ptrace_stop(exit_code, why, message, &info);
2382}
2383
2384int ptrace_notify(int exit_code, unsigned long message)
2385{
2386	int signr;
2387
2388	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2389	if (unlikely(task_work_pending(current)))
2390		task_work_run();
2391
2392	spin_lock_irq(&current->sighand->siglock);
2393	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2394	spin_unlock_irq(&current->sighand->siglock);
2395	return signr;
2396}
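
/*
 * Illustrative sketch, modeled on the ptrace_event() helper: callers
 * report a ptrace event by packing the event number into bits 8-15 of
 * exit_code while keeping SIGTRAP in the low bits, exactly what the
 * BUG_ON() above enforces:
 *
 *	if (ptrace_event_enabled(current, PTRACE_EVENT_EXEC))
 *		ptrace_notify(SIGTRAP | (PTRACE_EVENT_EXEC << 8), message);
 */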
2397
2398/**
2399 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2400 * @signr: signr causing group stop if initiating
2401 *
2402 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2403 * and participate in it.  If already set, participate in the existing
2404 * group stop.  If participated in a group stop (and thus slept), %true is
2405 * returned with siglock released.
2406 *
2407 * If ptraced, this function doesn't handle stop itself.  Instead,
2408 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2409 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2410 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2411 * place afterwards.
2412 * CONTEXT:
2413 * Must be called with @current->sighand->siglock held, which is released
2414 * on %true return.
2415 *
2416 * RETURNS:
2417 * %false if group stop is already cancelled or ptrace trap is scheduled.
2418 * %true if participated in group stop.
2419 */
2420static bool do_signal_stop(int signr)
2421	__releases(&current->sighand->siglock)
2422{
2423	struct signal_struct *sig = current->signal;
2424
2425	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2426		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2427		struct task_struct *t;
2428
2429		/* signr will be recorded in task->jobctl for retries */
2430		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2431
2432		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2433		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2434		    unlikely(sig->group_exec_task))
2435			return false;
2436		/*
2437		 * There is no group stop already in progress.  We must
2438		 * initiate one now.
2439		 *
2440		 * While ptraced, a task may be resumed while group stop is
2441		 * still in effect and then receive a stop signal and
2442		 * initiate another group stop.  This deviates from the
2443		 * usual behavior as two consecutive stop signals can't
2444		 * cause two group stops when !ptraced.  That is why we
2445		 * also check !task_is_stopped(t) below.
2446		 *
2447		 * The condition can be distinguished by testing whether
2448		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2449		 * group_exit_code in such case.
2450		 *
2451		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2452		 * an intervening stop signal is required to cause two
2453		 * continued events regardless of ptrace.
2454		 */
2455		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2456			sig->group_exit_code = signr;
2457
2458		sig->group_stop_count = 0;
2459		if (task_set_jobctl_pending(current, signr | gstop))
2460			sig->group_stop_count++;
2461
2462		for_other_threads(current, t) {
2463			/*
2464			 * Setting state to TASK_STOPPED for a group
2465			 * stop is always done with the siglock held,
2466			 * so this check has no races.
2467			 */
2468			if (!task_is_stopped(t) &&
2469			    task_set_jobctl_pending(t, signr | gstop)) {
2470				sig->group_stop_count++;
2471				if (likely(!(t->ptrace & PT_SEIZED)))
2472					signal_wake_up(t, 0);
2473				else
2474					ptrace_trap_notify(t);
2475			}
2476		}
2477	}
2478
2479	if (likely(!current->ptrace)) {
2480		int notify = 0;
2481
2482		/*
2483		 * If there are no other threads in the group, or if there
2484		 * is a group stop in progress and we are the last to stop,
2485		 * report to the parent.
2486		 */
2487		if (task_participate_group_stop(current))
2488			notify = CLD_STOPPED;
2489
2490		current->jobctl |= JOBCTL_STOPPED;
2491		set_special_state(TASK_STOPPED);
2492		spin_unlock_irq(&current->sighand->siglock);
2493
2494		/*
2495		 * Notify the parent of the group stop completion.  Because
2496		 * we're not holding either the siglock or tasklist_lock
2497		 * here, the ptracer may attach in between; however, this is for
2498		 * group stop and should always be delivered to the real
2499		 * parent of the group leader.  The new ptracer will get
2500		 * its notification when this task transitions into
2501		 * TASK_TRACED.
2502		 */
2503		if (notify) {
2504			read_lock(&tasklist_lock);
2505			do_notify_parent_cldstop(current, false, notify);
2506			read_unlock(&tasklist_lock);
2507		}
2508
2509		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2510		cgroup_enter_frozen();
2511		schedule();
2512		return true;
2513	} else {
2514		/*
2515		 * While ptraced, group stop is handled by STOP trap.
2516		 * Schedule it and let the caller deal with it.
2517		 */
2518		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2519		return false;
2520	}
2521}
2522
2523/**
2524 * do_jobctl_trap - take care of ptrace jobctl traps
2525 *
2526 * When PT_SEIZED, it's used for both group stop and explicit
2527 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2528 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2529 * the stop signal; otherwise, %SIGTRAP.
2530 *
2531 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2532 * number as exit_code and no siginfo.
2533 *
2534 * CONTEXT:
2535 * Must be called with @current->sighand->siglock held, which may be
2536 * released and re-acquired before returning with intervening sleep.
2537 */
2538static void do_jobctl_trap(void)
2539{
2540	struct signal_struct *signal = current->signal;
2541	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2542
2543	if (current->ptrace & PT_SEIZED) {
2544		if (!signal->group_stop_count &&
2545		    !(signal->flags & SIGNAL_STOP_STOPPED))
2546			signr = SIGTRAP;
2547		WARN_ON_ONCE(!signr);
2548		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2549				 CLD_STOPPED, 0);
2550	} else {
2551		WARN_ON_ONCE(!signr);
2552		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2553	}
2554}
2555
2556/**
2557 * do_freezer_trap - handle the freezer jobctl trap
2558 *
2559 * Puts the task into the frozen state, provided the task is not about to
2560 * quit; in that case it instead drops JOBCTL_TRAP_FREEZE.
2561 *
2562 * CONTEXT:
2563 * Must be called with @current->sighand->siglock held,
2564 * which is always released before returning.
2565 */
2566static void do_freezer_trap(void)
2567	__releases(&current->sighand->siglock)
2568{
2569	/*
2570	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2571	 * let's make another loop to give it a chance to be handled.
2572	 * In any case, we'll end up back here.
2573	 */
2574	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2575	     JOBCTL_TRAP_FREEZE) {
2576		spin_unlock_irq(&current->sighand->siglock);
2577		return;
2578	}
2579
2580	/*
2581	 * Now we're sure that there is no pending fatal signal and no
2582	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2583	 * immediately (if there is a non-fatal signal pending), and
2584	 * put the task into sleep.
2585	 */
2586	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2587	clear_thread_flag(TIF_SIGPENDING);
2588	spin_unlock_irq(&current->sighand->siglock);
2589	cgroup_enter_frozen();
2590	schedule();
2591}
2592
2593static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2594{
2595	/*
2596	 * We do not check sig_kernel_stop(signr) but set this marker
2597	 * unconditionally because we do not know whether the debugger will
2598	 * change signr. This flag has no meaning unless we are going
2599	 * to stop after return from ptrace_stop(). In this case it will
2600	 * be checked in do_signal_stop(), we should only stop if it was
2601	 * not cleared by SIGCONT while we were sleeping. See also the
2602	 * comment in dequeue_signal().
2603	 */
2604	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2605	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2606
2607	/* We're back.  Did the debugger cancel the sig?  */
2608	if (signr == 0)
2609		return signr;
2610
2611	/*
2612	 * Update the siginfo structure if the signal has
2613	 * changed.  If the debugger wanted something
2614	 * specific in the siginfo structure then it should
2615	 * have updated *info via PTRACE_SETSIGINFO.
2616	 */
2617	if (signr != info->si_signo) {
2618		clear_siginfo(info);
2619		info->si_signo = signr;
2620		info->si_errno = 0;
2621		info->si_code = SI_USER;
2622		rcu_read_lock();
2623		info->si_pid = task_pid_vnr(current->parent);
2624		info->si_uid = from_kuid_munged(current_user_ns(),
2625						task_uid(current->parent));
2626		rcu_read_unlock();
2627	}
2628
2629	/* If the (new) signal is now blocked, requeue it.  */
2630	if (sigismember(&current->blocked, signr) ||
2631	    fatal_signal_pending(current)) {
2632		send_signal_locked(signr, info, current, type);
2633		signr = 0;
2634	}
2635
2636	return signr;
2637}
2638
2639static void hide_si_addr_tag_bits(struct ksignal *ksig)
2640{
2641	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2642	case SIL_FAULT:
2643	case SIL_FAULT_TRAPNO:
2644	case SIL_FAULT_MCEERR:
2645	case SIL_FAULT_BNDERR:
2646	case SIL_FAULT_PKUERR:
2647	case SIL_FAULT_PERF_EVENT:
2648		ksig->info.si_addr = arch_untagged_si_addr(
2649			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2650		break;
2651	case SIL_KILL:
2652	case SIL_TIMER:
2653	case SIL_POLL:
2654	case SIL_CHLD:
2655	case SIL_RT:
2656	case SIL_SYS:
2657		break;
2658	}
2659}
2660
2661bool get_signal(struct ksignal *ksig)
2662{
2663	struct sighand_struct *sighand = current->sighand;
2664	struct signal_struct *signal = current->signal;
2665	int signr;
2666
2667	clear_notify_signal();
2668	if (unlikely(task_work_pending(current)))
2669		task_work_run();
2670
2671	if (!task_sigpending(current))
2672		return false;
2673
2674	if (unlikely(uprobe_deny_signal()))
2675		return false;
2676
2677	/*
2678	 * Do this once, we can't return to user-mode if freezing() == T.
2679	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2680	 * thus do not need another check after return.
2681	 */
2682	try_to_freeze();
2683
2684relock:
2685	spin_lock_irq(&sighand->siglock);
2686
2687	/*
2688	 * Every stopped thread goes here after wakeup. Check to see if
2689	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2690	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2691	 */
2692	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2693		int why;
2694
2695		if (signal->flags & SIGNAL_CLD_CONTINUED)
2696			why = CLD_CONTINUED;
2697		else
2698			why = CLD_STOPPED;
2699
2700		signal->flags &= ~SIGNAL_CLD_MASK;
2701
2702		spin_unlock_irq(&sighand->siglock);
2703
2704		/*
2705		 * Notify the parent that we're continuing.  This event is
2706		 * always per-process and doesn't make a whole lot of sense
2707		 * for ptracers, who shouldn't consume the state via
2708		 * wait(2) either, but, for backward compatibility, notify
2709		 * the ptracer of the group leader too unless it's gonna be
2710		 * a duplicate.
2711		 */
2712		read_lock(&tasklist_lock);
2713		do_notify_parent_cldstop(current, false, why);
2714
2715		if (ptrace_reparented(current->group_leader))
2716			do_notify_parent_cldstop(current->group_leader,
2717						true, why);
2718		read_unlock(&tasklist_lock);
2719
2720		goto relock;
2721	}
2722
2723	for (;;) {
2724		struct k_sigaction *ka;
2725		enum pid_type type;
2726
2727		/* Has this task already been marked for death? */
2728		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2729		     signal->group_exec_task) {
2730			clear_siginfo(&ksig->info);
2731			ksig->info.si_signo = signr = SIGKILL;
2732			sigdelset(&current->pending.signal, SIGKILL);
2733			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2734				&sighand->action[SIGKILL - 1]);
2735			recalc_sigpending();
2736			goto fatal;
2737		}
2738
2739		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2740		    do_signal_stop(0))
2741			goto relock;
2742
2743		if (unlikely(current->jobctl &
2744			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2745			if (current->jobctl & JOBCTL_TRAP_MASK) {
2746				do_jobctl_trap();
2747				spin_unlock_irq(&sighand->siglock);
2748			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2749				do_freezer_trap();
2750
2751			goto relock;
2752		}
2753
2754		/*
2755		 * If the task is leaving the frozen state, let's update
2756		 * cgroup counters and reset the frozen bit.
2757		 */
2758		if (unlikely(cgroup_task_frozen(current))) {
2759			spin_unlock_irq(&sighand->siglock);
2760			cgroup_leave_frozen(false);
2761			goto relock;
2762		}
2763
2764		/*
2765		 * Signals generated by the execution of an instruction
2766		 * need to be delivered before any other pending signals
2767		 * so that the instruction pointer in the signal stack
2768		 * frame points to the faulting instruction.
2769		 */
2770		type = PIDTYPE_PID;
2771		signr = dequeue_synchronous_signal(&ksig->info);
2772		if (!signr)
2773			signr = dequeue_signal(current, &current->blocked,
2774					       &ksig->info, &type);
2775
2776		if (!signr)
2777			break; /* will return 0 */
2778
2779		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2780		    !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2781			signr = ptrace_signal(signr, &ksig->info, type);
2782			if (!signr)
2783				continue;
2784		}
2785
2786		ka = &sighand->action[signr-1];
2787
2788		/* Trace actually delivered signals. */
2789		trace_signal_deliver(signr, &ksig->info, ka);
2790
2791		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2792			continue;
2793		if (ka->sa.sa_handler != SIG_DFL) {
2794			/* Run the handler.  */
2795			ksig->ka = *ka;
2796
2797			if (ka->sa.sa_flags & SA_ONESHOT)
2798				ka->sa.sa_handler = SIG_DFL;
2799
2800			break; /* will return non-zero "signr" value */
2801		}
2802
2803		/*
2804		 * Now we are doing the default action for this signal.
2805		 */
2806		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2807			continue;
2808
2809		/*
2810		 * Global init gets no signals it doesn't want.
2811		 * Container-init gets no signals it doesn't want from the
2812		 * same container.
2813		 *
2814		 * Note that if global/container-init sees a sig_kernel_only()
2815		 * signal here, the signal must have been generated internally
2816		 * or must have come from an ancestor namespace. In either
2817		 * case, the signal cannot be dropped.
2818		 */
2819		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2820				!sig_kernel_only(signr))
2821			continue;
2822
2823		if (sig_kernel_stop(signr)) {
2824			/*
2825			 * The default action is to stop all threads in
2826			 * the thread group.  The job control signals
2827			 * do nothing in an orphaned pgrp, but SIGSTOP
2828			 * always works.  Note that siglock needs to be
2829			 * dropped during the call to is_orphaned_pgrp()
2830			 * because of lock ordering with tasklist_lock.
2831			 * This allows an intervening SIGCONT to be posted.
2832			 * We need to check for that and bail out if necessary.
2833			 */
2834			if (signr != SIGSTOP) {
2835				spin_unlock_irq(&sighand->siglock);
2836
2837				/* signals can be posted during this window */
2838
2839				if (is_current_pgrp_orphaned())
2840					goto relock;
2841
2842				spin_lock_irq(&sighand->siglock);
2843			}
2844
2845			if (likely(do_signal_stop(ksig->info.si_signo))) {
2846				/* It released the siglock.  */
2847				goto relock;
2848			}
2849
2850			/*
2851			 * We didn't actually stop, due to a race
2852			 * with SIGCONT or something like that.
2853			 */
2854			continue;
2855		}
2856
2857	fatal:
2858		spin_unlock_irq(&sighand->siglock);
2859		if (unlikely(cgroup_task_frozen(current)))
2860			cgroup_leave_frozen(true);
2861
2862		/*
2863		 * Anything else is fatal, maybe with a core dump.
2864		 */
2865		current->flags |= PF_SIGNALED;
2866
2867		if (sig_kernel_coredump(signr)) {
2868			if (print_fatal_signals)
2869				print_fatal_signal(ksig->info.si_signo);
2870			proc_coredump_connector(current);
2871			/*
2872			 * If it was able to dump core, this kills all
2873			 * other threads in the group and synchronizes with
2874			 * their demise.  If we lost the race with another
2875			 * thread getting here, it set group_exit_code
2876			 * first and our do_group_exit call below will use
2877			 * that value and ignore the one we pass it.
2878			 */
2879			do_coredump(&ksig->info);
2880		}
2881
2882		/*
2883		 * PF_USER_WORKER threads will catch and exit on fatal signals
2884		 * themselves. They have cleanup that must be performed, so
2885		 * we cannot call do_exit() on their behalf.
2886		 */
2887		if (current->flags & PF_USER_WORKER)
2888			goto out;
2889
2890		/*
2891		 * Death signals, no core dump.
2892		 */
2893		do_group_exit(ksig->info.si_signo);
2894		/* NOTREACHED */
2895	}
2896	spin_unlock_irq(&sighand->siglock);
2897out:
2898	ksig->sig = signr;
2899
2900	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2901		hide_si_addr_tag_bits(ksig);
2902
2903	return ksig->sig > 0;
2904}
2905
2906/**
2907 * signal_delivered - called after signal delivery to update blocked signals
2908 * @ksig:		kernel signal struct
2909 * @stepping:		nonzero if debugger single-step or block-step in use
2910 *
2911 * This function should be called when a signal has successfully been
2912 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2913 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2914 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
2915 */
2916static void signal_delivered(struct ksignal *ksig, int stepping)
2917{
2918	sigset_t blocked;
2919
2920	/* A signal was successfully delivered, and the
2921	   saved sigmask was stored on the signal frame,
2922	   and will be restored by sigreturn.  So we can
2923	   simply clear the restore sigmask flag.  */
2924	clear_restore_sigmask();
2925
2926	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2927	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2928		sigaddset(&blocked, ksig->sig);
2929	set_current_blocked(&blocked);
2930	if (current->sas_ss_flags & SS_AUTODISARM)
2931		sas_ss_reset(current);
2932	if (stepping)
2933		ptrace_notify(SIGTRAP, 0);
2934}
2935
2936void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2937{
2938	if (failed)
2939		force_sigsegv(ksig->sig);
2940	else
2941		signal_delivered(ksig, stepping);
2942}
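
/*
 * Illustrative sketch of how arch signal-delivery code ties
 * get_signal() and signal_setup_done() together.  setup_rt_frame() is a
 * hypothetical stand-in for the arch routine that builds the user-mode
 * signal frame, and the single-step test is likewise arch-specific:
 *
 *	void arch_do_signal_or_restart(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			int failed = setup_rt_frame(&ksig, regs);
 *
 *			signal_setup_done(failed, &ksig, 0);	// 0: not stepping
 *			return;
 *		}
 *		// no handler to run: handle syscall restart, then put back
 *		// the mask saved by set_user_sigmask()-style callers
 *		restore_saved_sigmask();
 *	}
 */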
2943
2944/*
2945 * It could be that complete_signal() picked us to notify about the
2946 * group-wide signal. Other threads should be notified now to take
2947 * the shared signals in @which since we will not.
2948 */
2949static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2950{
2951	sigset_t retarget;
2952	struct task_struct *t;
2953
2954	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2955	if (sigisemptyset(&retarget))
2956		return;
2957
2958	for_other_threads(tsk, t) {
2959		if (t->flags & PF_EXITING)
2960			continue;
2961
2962		if (!has_pending_signals(&retarget, &t->blocked))
2963			continue;
2964		/* Remove the signals this thread can handle. */
2965		sigandsets(&retarget, &retarget, &t->blocked);
2966
2967		if (!task_sigpending(t))
2968			signal_wake_up(t, 0);
2969
2970		if (sigisemptyset(&retarget))
2971			break;
2972	}
2973}
2974
2975void exit_signals(struct task_struct *tsk)
2976{
2977	int group_stop = 0;
2978	sigset_t unblocked;
2979
2980	/*
2981	 * @tsk is about to have PF_EXITING set - lock out users which
2982	 * expect stable threadgroup.
2983	 */
2984	cgroup_threadgroup_change_begin(tsk);
2985
2986	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2987		sched_mm_cid_exit_signals(tsk);
2988		tsk->flags |= PF_EXITING;
2989		cgroup_threadgroup_change_end(tsk);
2990		return;
2991	}
2992
2993	spin_lock_irq(&tsk->sighand->siglock);
2994	/*
2995	 * From now this task is not visible for group-wide signals,
2996	 * see wants_signal(), do_signal_stop().
2997	 */
2998	sched_mm_cid_exit_signals(tsk);
2999	tsk->flags |= PF_EXITING;
3000
3001	cgroup_threadgroup_change_end(tsk);
3002
3003	if (!task_sigpending(tsk))
3004		goto out;
3005
3006	unblocked = tsk->blocked;
3007	signotset(&unblocked);
3008	retarget_shared_pending(tsk, &unblocked);
3009
3010	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3011	    task_participate_group_stop(tsk))
3012		group_stop = CLD_STOPPED;
3013out:
3014	spin_unlock_irq(&tsk->sighand->siglock);
3015
3016	/*
3017	 * If group stop has completed, deliver the notification.  This
3018	 * should always go to the real parent of the group leader.
3019	 */
3020	if (unlikely(group_stop)) {
3021		read_lock(&tasklist_lock);
3022		do_notify_parent_cldstop(tsk, false, group_stop);
3023		read_unlock(&tasklist_lock);
3024	}
3025}
3026
3027/*
3028 * System call entry points.
3029 */
3030
3031/**
3032 *  sys_restart_syscall - restart a system call
3033 */
3034SYSCALL_DEFINE0(restart_syscall)
3035{
3036	struct restart_block *restart = &current->restart_block;
3037	return restart->fn(restart);
3038}
3039
3040long do_no_restart_syscall(struct restart_block *param)
3041{
3042	return -EINTR;
3043}
3044
3045static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3046{
3047	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3048		sigset_t newblocked;
3049		/* A set of now blocked but previously unblocked signals. */
3050		sigandnsets(&newblocked, newset, &current->blocked);
3051		retarget_shared_pending(tsk, &newblocked);
3052	}
3053	tsk->blocked = *newset;
3054	recalc_sigpending();
3055}
3056
3057/**
3058 * set_current_blocked - change current->blocked mask
3059 * @newset: new mask
3060 *
3061 * It is wrong to change ->blocked directly; this helper should be used
3062 * to ensure the process can't miss a shared signal we are going to block.
3063 */
3064void set_current_blocked(sigset_t *newset)
3065{
3066	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3067	__set_current_blocked(newset);
3068}
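
/*
 * Illustrative sketch: the supported way for in-kernel code to adjust
 * the current task's mask (SIGUSR1 is an arbitrary example; never
 * assign to current->blocked directly, per the comment above):
 *
 *	sigset_t newset = current->blocked;
 *
 *	sigaddset(&newset, SIGUSR1);
 *	set_current_blocked(&newset);
 */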
3069
3070void __set_current_blocked(const sigset_t *newset)
3071{
3072	struct task_struct *tsk = current;
3073
3074	/*
3075	 * In case the signal mask hasn't changed, there is nothing we need
3076	 * to do. current->blocked shouldn't be modified by any other task.
3077	 */
3078	if (sigequalsets(&tsk->blocked, newset))
3079		return;
3080
3081	spin_lock_irq(&tsk->sighand->siglock);
3082	__set_task_blocked(tsk, newset);
3083	spin_unlock_irq(&tsk->sighand->siglock);
3084}
3085
3086/*
3087 * This is also useful for kernel threads that want to temporarily
3088 * (or permanently) block certain signals.
3089 *
3090 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3091 * interface happily blocks "unblockable" signals like SIGKILL
3092 * and friends.
3093 */
3094int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3095{
3096	struct task_struct *tsk = current;
3097	sigset_t newset;
3098
3099	/* Lockless, only current can change ->blocked, never from irq */
3100	if (oldset)
3101		*oldset = tsk->blocked;
3102
3103	switch (how) {
3104	case SIG_BLOCK:
3105		sigorsets(&newset, &tsk->blocked, set);
3106		break;
3107	case SIG_UNBLOCK:
3108		sigandnsets(&newset, &tsk->blocked, set);
3109		break;
3110	case SIG_SETMASK:
3111		newset = *set;
3112		break;
3113	default:
3114		return -EINVAL;
3115	}
3116
3117	__set_current_blocked(&newset);
3118	return 0;
3119}
3120EXPORT_SYMBOL(sigprocmask);
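
/*
 * Illustrative sketch of the in-kernel interface: a kernel thread that
 * wants to sleep interruptibly for SIGKILL only could block everything
 * else (and, unlike userspace, this interface would even let it block
 * SIGKILL itself):
 *
 *	sigset_t all_but_kill;
 *
 *	siginitsetinv(&all_but_kill, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &all_but_kill, NULL);
 */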
3121
3122/*
3123 * This API helps set app-provided sigmasks.
3124 *
3125 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3126 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3127 *
3128 * Note that it does set_restore_sigmask() in advance, so it must always be
3129 * paired with restore_saved_sigmask_unless() before returning from the syscall.
3130 */
3131int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3132{
3133	sigset_t kmask;
3134
3135	if (!umask)
3136		return 0;
3137	if (sigsetsize != sizeof(sigset_t))
3138		return -EINVAL;
3139	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3140		return -EFAULT;
3141
3142	set_restore_sigmask();
3143	current->saved_sigmask = current->blocked;
3144	set_current_blocked(&kmask);
3145
3146	return 0;
3147}
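
/*
 * Illustrative sketch of the required pairing, modeled on ppoll-style
 * syscalls.  do_the_wait() is a hypothetical blocking operation and the
 * -EINTR test stands in for whatever restart errno the real caller
 * checks:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_wait(...);
 *
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */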
3148
3149#ifdef CONFIG_COMPAT
3150int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3151			    size_t sigsetsize)
3152{
3153	sigset_t kmask;
3154
3155	if (!umask)
3156		return 0;
3157	if (sigsetsize != sizeof(compat_sigset_t))
3158		return -EINVAL;
3159	if (get_compat_sigset(&kmask, umask))
3160		return -EFAULT;
3161
3162	set_restore_sigmask();
3163	current->saved_sigmask = current->blocked;
3164	set_current_blocked(&kmask);
3165
3166	return 0;
3167}
3168#endif
3169
3170/**
3171 *  sys_rt_sigprocmask - change the list of currently blocked signals
3172 *  @how: whether to add, remove, or set signals
3173 *  @nset: new signal mask to apply, if non-null
3174 *  @oset: previous value of signal mask if non-null
3175 *  @sigsetsize: size of sigset_t type
3176 */
3177SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3178		sigset_t __user *, oset, size_t, sigsetsize)
3179{
3180	sigset_t old_set, new_set;
3181	int error;
3182
3183	/* XXX: Don't preclude handling different sized sigset_t's.  */
3184	if (sigsetsize != sizeof(sigset_t))
3185		return -EINVAL;
3186
3187	old_set = current->blocked;
3188
3189	if (nset) {
3190		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3191			return -EFAULT;
3192		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3193
3194		error = sigprocmask(how, &new_set, NULL);
3195		if (error)
3196			return error;
3197	}
3198
3199	if (oset) {
3200		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3201			return -EFAULT;
3202	}
3203
3204	return 0;
3205}
3206
3207#ifdef CONFIG_COMPAT
3208COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3209		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3210{
3211	sigset_t old_set = current->blocked;
3212
3213	/* XXX: Don't preclude handling different sized sigset_t's.  */
3214	if (sigsetsize != sizeof(sigset_t))
3215		return -EINVAL;
3216
3217	if (nset) {
3218		sigset_t new_set;
3219		int error;
3220		if (get_compat_sigset(&new_set, nset))
3221			return -EFAULT;
3222		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3223
3224		error = sigprocmask(how, &new_set, NULL);
3225		if (error)
3226			return error;
3227	}
3228	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3229}
3230#endif
3231
3232static void do_sigpending(sigset_t *set)
3233{
3234	spin_lock_irq(&current->sighand->siglock);
3235	sigorsets(set, &current->pending.signal,
3236		  &current->signal->shared_pending.signal);
3237	spin_unlock_irq(&current->sighand->siglock);
3238
3239	/* Outside the lock because only this thread touches it.  */
3240	sigandsets(set, &current->blocked, set);
3241}
3242
3243/**
3244 *  sys_rt_sigpending - examine pending signals that have been raised
3245 *			while blocked
3246 *  @uset: stores pending signals
3247 *  @sigsetsize: size of sigset_t type or smaller
3248 */
3249SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3250{
3251	sigset_t set;
3252
3253	if (sigsetsize > sizeof(*uset))
3254		return -EINVAL;
3255
3256	do_sigpending(&set);
3257
3258	if (copy_to_user(uset, &set, sigsetsize))
3259		return -EFAULT;
3260
3261	return 0;
3262}
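
/*
 * Illustrative userspace sketch (via the libc wrappers): block a signal,
 * then use sigpending() to see whether an instance arrived while it was
 * blocked.  handle_it() is hypothetical:
 *
 *	sigset_t set, pending;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	// ... window in which SIGUSR1 may be raised ...
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGUSR1))
 *		handle_it();	// signal is queued, not delivered
 */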
3263
3264#ifdef CONFIG_COMPAT
3265COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3266		compat_size_t, sigsetsize)
3267{
3268	sigset_t set;
3269
3270	if (sigsetsize > sizeof(*uset))
3271		return -EINVAL;
3272
3273	do_sigpending(&set);
3274
3275	return put_compat_sigset(uset, &set, sigsetsize);
3276}
3277#endif
3278
3279static const struct {
3280	unsigned char limit, layout;
3281} sig_sicodes[] = {
3282	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3283	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3284	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3285	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3286	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3287#if defined(SIGEMT)
3288	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3289#endif
3290	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3291	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3292	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3293};
3294
3295static bool known_siginfo_layout(unsigned sig, int si_code)
3296{
3297	if (si_code == SI_KERNEL)
3298		return true;
3299	else if ((si_code > SI_USER)) {
3300		if (sig_specific_sicodes(sig)) {
3301			if (si_code <= sig_sicodes[sig].limit)
3302				return true;
3303		}
3304		else if (si_code <= NSIGPOLL)
3305			return true;
3306	}
3307	else if (si_code >= SI_DETHREAD)
3308		return true;
3309	else if (si_code == SI_ASYNCNL)
3310		return true;
3311	return false;
3312}
3313
3314enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3315{
3316	enum siginfo_layout layout = SIL_KILL;
3317	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3318		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3319		    (si_code <= sig_sicodes[sig].limit)) {
3320			layout = sig_sicodes[sig].layout;
3321			/* Handle the exceptions */
3322			if ((sig == SIGBUS) &&
3323			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3324				layout = SIL_FAULT_MCEERR;
3325			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3326				layout = SIL_FAULT_BNDERR;
3327#ifdef SEGV_PKUERR
3328			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3329				layout = SIL_FAULT_PKUERR;
3330#endif
3331			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3332				layout = SIL_FAULT_PERF_EVENT;
3333			else if (IS_ENABLED(CONFIG_SPARC) &&
3334				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3335				layout = SIL_FAULT_TRAPNO;
3336			else if (IS_ENABLED(CONFIG_ALPHA) &&
3337				 ((sig == SIGFPE) ||
3338				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3339				layout = SIL_FAULT_TRAPNO;
3340		}
3341		else if (si_code <= NSIGPOLL)
3342			layout = SIL_POLL;
3343	} else {
3344		if (si_code == SI_TIMER)
3345			layout = SIL_TIMER;
3346		else if (si_code == SI_SIGIO)
3347			layout = SIL_POLL;
3348		else if (si_code < 0)
3349			layout = SIL_RT;
3350	}
3351	return layout;
3352}
3353
3354static inline char __user *si_expansion(const siginfo_t __user *info)
3355{
3356	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3357}
3358
3359int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3360{
3361	char __user *expansion = si_expansion(to);
3362	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3363		return -EFAULT;
3364	if (clear_user(expansion, SI_EXPANSION_SIZE))
3365		return -EFAULT;
3366	return 0;
3367}
3368
3369static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3370				       const siginfo_t __user *from)
3371{
3372	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3373		char __user *expansion = si_expansion(from);
3374		char buf[SI_EXPANSION_SIZE];
3375		int i;
3376		/*
3377		 * An unknown si_code might need more than
3378		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3379		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3380		 * will return this data to userspace exactly.
3381		 */
3382		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3383			return -EFAULT;
3384		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3385			if (buf[i] != 0)
3386				return -E2BIG;
3387		}
3388	}
3389	return 0;
3390}
3391
3392static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3393				    const siginfo_t __user *from)
3394{
3395	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3396		return -EFAULT;
3397	to->si_signo = signo;
3398	return post_copy_siginfo_from_user(to, from);
3399}
3400
3401int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3402{
3403	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3404		return -EFAULT;
3405	return post_copy_siginfo_from_user(to, from);
3406}
3407
3408#ifdef CONFIG_COMPAT
3409/**
3410 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3411 * @to: compat siginfo destination
3412 * @from: kernel siginfo source
3413 *
3414 * Note: This function does not work properly for SIGCHLD on x32, but
3415 * fortunately it doesn't have to.  The only valid callers for this function are
3416 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3417 * The latter does not care because SIGCHLD will never cause a coredump.
3418 */
3419void copy_siginfo_to_external32(struct compat_siginfo *to,
3420		const struct kernel_siginfo *from)
3421{
3422	memset(to, 0, sizeof(*to));
3423
3424	to->si_signo = from->si_signo;
3425	to->si_errno = from->si_errno;
3426	to->si_code  = from->si_code;
3427	switch(siginfo_layout(from->si_signo, from->si_code)) {
3428	case SIL_KILL:
3429		to->si_pid = from->si_pid;
3430		to->si_uid = from->si_uid;
3431		break;
3432	case SIL_TIMER:
3433		to->si_tid     = from->si_tid;
3434		to->si_overrun = from->si_overrun;
3435		to->si_int     = from->si_int;
3436		break;
3437	case SIL_POLL:
3438		to->si_band = from->si_band;
3439		to->si_fd   = from->si_fd;
3440		break;
3441	case SIL_FAULT:
3442		to->si_addr = ptr_to_compat(from->si_addr);
3443		break;
3444	case SIL_FAULT_TRAPNO:
3445		to->si_addr = ptr_to_compat(from->si_addr);
3446		to->si_trapno = from->si_trapno;
3447		break;
3448	case SIL_FAULT_MCEERR:
3449		to->si_addr = ptr_to_compat(from->si_addr);
3450		to->si_addr_lsb = from->si_addr_lsb;
3451		break;
3452	case SIL_FAULT_BNDERR:
3453		to->si_addr = ptr_to_compat(from->si_addr);
3454		to->si_lower = ptr_to_compat(from->si_lower);
3455		to->si_upper = ptr_to_compat(from->si_upper);
3456		break;
3457	case SIL_FAULT_PKUERR:
3458		to->si_addr = ptr_to_compat(from->si_addr);
3459		to->si_pkey = from->si_pkey;
3460		break;
3461	case SIL_FAULT_PERF_EVENT:
3462		to->si_addr = ptr_to_compat(from->si_addr);
3463		to->si_perf_data = from->si_perf_data;
3464		to->si_perf_type = from->si_perf_type;
3465		to->si_perf_flags = from->si_perf_flags;
3466		break;
3467	case SIL_CHLD:
3468		to->si_pid = from->si_pid;
3469		to->si_uid = from->si_uid;
3470		to->si_status = from->si_status;
3471		to->si_utime = from->si_utime;
3472		to->si_stime = from->si_stime;
3473		break;
3474	case SIL_RT:
3475		to->si_pid = from->si_pid;
3476		to->si_uid = from->si_uid;
3477		to->si_int = from->si_int;
3478		break;
3479	case SIL_SYS:
3480		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3481		to->si_syscall   = from->si_syscall;
3482		to->si_arch      = from->si_arch;
3483		break;
3484	}
3485}
3486
3487int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3488			   const struct kernel_siginfo *from)
3489{
3490	struct compat_siginfo new;
3491
3492	copy_siginfo_to_external32(&new, from);
3493	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3494		return -EFAULT;
3495	return 0;
3496}
3497
3498static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3499					 const struct compat_siginfo *from)
3500{
3501	clear_siginfo(to);
3502	to->si_signo = from->si_signo;
3503	to->si_errno = from->si_errno;
3504	to->si_code  = from->si_code;
3505	switch(siginfo_layout(from->si_signo, from->si_code)) {
3506	case SIL_KILL:
3507		to->si_pid = from->si_pid;
3508		to->si_uid = from->si_uid;
3509		break;
3510	case SIL_TIMER:
3511		to->si_tid     = from->si_tid;
3512		to->si_overrun = from->si_overrun;
3513		to->si_int     = from->si_int;
3514		break;
3515	case SIL_POLL:
3516		to->si_band = from->si_band;
3517		to->si_fd   = from->si_fd;
3518		break;
3519	case SIL_FAULT:
3520		to->si_addr = compat_ptr(from->si_addr);
3521		break;
3522	case SIL_FAULT_TRAPNO:
3523		to->si_addr = compat_ptr(from->si_addr);
3524		to->si_trapno = from->si_trapno;
3525		break;
3526	case SIL_FAULT_MCEERR:
3527		to->si_addr = compat_ptr(from->si_addr);
3528		to->si_addr_lsb = from->si_addr_lsb;
3529		break;
3530	case SIL_FAULT_BNDERR:
3531		to->si_addr = compat_ptr(from->si_addr);
3532		to->si_lower = compat_ptr(from->si_lower);
3533		to->si_upper = compat_ptr(from->si_upper);
3534		break;
3535	case SIL_FAULT_PKUERR:
3536		to->si_addr = compat_ptr(from->si_addr);
3537		to->si_pkey = from->si_pkey;
3538		break;
3539	case SIL_FAULT_PERF_EVENT:
3540		to->si_addr = compat_ptr(from->si_addr);
3541		to->si_perf_data = from->si_perf_data;
3542		to->si_perf_type = from->si_perf_type;
3543		to->si_perf_flags = from->si_perf_flags;
3544		break;
3545	case SIL_CHLD:
3546		to->si_pid    = from->si_pid;
3547		to->si_uid    = from->si_uid;
3548		to->si_status = from->si_status;
3549#ifdef CONFIG_X86_X32_ABI
3550		if (in_x32_syscall()) {
3551			to->si_utime = from->_sifields._sigchld_x32._utime;
3552			to->si_stime = from->_sifields._sigchld_x32._stime;
3553		} else
3554#endif
3555		{
3556			to->si_utime = from->si_utime;
3557			to->si_stime = from->si_stime;
3558		}
3559		break;
3560	case SIL_RT:
3561		to->si_pid = from->si_pid;
3562		to->si_uid = from->si_uid;
3563		to->si_int = from->si_int;
3564		break;
3565	case SIL_SYS:
3566		to->si_call_addr = compat_ptr(from->si_call_addr);
3567		to->si_syscall   = from->si_syscall;
3568		to->si_arch      = from->si_arch;
3569		break;
3570	}
3571	return 0;
3572}
3573
3574static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3575				      const struct compat_siginfo __user *ufrom)
3576{
3577	struct compat_siginfo from;
3578
3579	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3580		return -EFAULT;
3581
3582	from.si_signo = signo;
3583	return post_copy_siginfo_from_user32(to, &from);
3584}
3585
3586int copy_siginfo_from_user32(struct kernel_siginfo *to,
3587			     const struct compat_siginfo __user *ufrom)
3588{
3589	struct compat_siginfo from;
3590
3591	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3592		return -EFAULT;
3593
3594	return post_copy_siginfo_from_user32(to, &from);
3595}
3596#endif /* CONFIG_COMPAT */
3597
3598/**
3599 *  do_sigtimedwait - wait for queued signals specified in @which
3600 *  @which: queued signals to wait for
3601 *  @info: if non-null, the signal's siginfo is returned here
3602 *  @ts: upper bound on process time suspension
3603 */
3604static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3605		    const struct timespec64 *ts)
3606{
3607	ktime_t *to = NULL, timeout = KTIME_MAX;
3608	struct task_struct *tsk = current;
3609	sigset_t mask = *which;
3610	enum pid_type type;
3611	int sig, ret = 0;
3612
3613	if (ts) {
3614		if (!timespec64_valid(ts))
3615			return -EINVAL;
3616		timeout = timespec64_to_ktime(*ts);
3617		to = &timeout;
3618	}
3619
3620	/*
3621	 * Invert the set of allowed signals to get those we want to block.
3622	 */
3623	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3624	signotset(&mask);
3625
3626	spin_lock_irq(&tsk->sighand->siglock);
3627	sig = dequeue_signal(tsk, &mask, info, &type);
3628	if (!sig && timeout) {
3629		/*
3630		 * None ready; temporarily unblock those we're interested in
3631		 * while we are sleeping, so that we'll be awakened when
3632		 * they arrive. Unblocking is always fine; we can avoid
3633		 * set_current_blocked().
3634		 */
3635		tsk->real_blocked = tsk->blocked;
3636		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3637		recalc_sigpending();
3638		spin_unlock_irq(&tsk->sighand->siglock);
3639
3640		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3641		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3642					       HRTIMER_MODE_REL);
3643		spin_lock_irq(&tsk->sighand->siglock);
3644		__set_task_blocked(tsk, &tsk->real_blocked);
3645		sigemptyset(&tsk->real_blocked);
3646		sig = dequeue_signal(tsk, &mask, info, &type);
3647	}
3648	spin_unlock_irq(&tsk->sighand->siglock);
3649
3650	if (sig)
3651		return sig;
3652	return ret ? -EINTR : -EAGAIN;
3653}
3654
3655/**
3656 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3657 *			in @uthese
3658 *  @uthese: queued signals to wait for
3659 *  @uinfo: if non-null, the signal's siginfo is returned here
3660 *  @uts: upper bound on process time suspension
3661 *  @sigsetsize: size of sigset_t type
3662 */
3663SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3664		siginfo_t __user *, uinfo,
3665		const struct __kernel_timespec __user *, uts,
3666		size_t, sigsetsize)
3667{
3668	sigset_t these;
3669	struct timespec64 ts;
3670	kernel_siginfo_t info;
3671	int ret;
3672
3673	/* XXX: Don't preclude handling different sized sigset_t's.  */
3674	if (sigsetsize != sizeof(sigset_t))
3675		return -EINVAL;
3676
3677	if (copy_from_user(&these, uthese, sizeof(these)))
3678		return -EFAULT;
3679
3680	if (uts) {
3681		if (get_timespec64(&ts, uts))
3682			return -EFAULT;
3683	}
3684
3685	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3686
3687	if (ret > 0 && uinfo) {
3688		if (copy_siginfo_to_user(uinfo, &info))
3689			ret = -EFAULT;
3690	}
3691
3692	return ret;
3693}
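
/*
 * Illustrative userspace sketch (not part of this file): how userspace
 * typically reaches do_sigtimedwait() above, via the POSIX sigtimedwait()
 * wrapper. Error handling is kept minimal on purpose.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int wait_usr1_example(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* The signal must be blocked first, or it may be delivered
	 * asynchronously instead of being dequeued here. */
	sigprocmask(SIG_BLOCK, &set, NULL);

	int sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0)
		perror("sigtimedwait");	/* errno is EAGAIN on timeout */
	else
		printf("got %d from pid %d\n", sig, (int)info.si_pid);
	return sig;
}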
3694
3695#ifdef CONFIG_COMPAT_32BIT_TIME
3696SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3697		siginfo_t __user *, uinfo,
3698		const struct old_timespec32 __user *, uts,
3699		size_t, sigsetsize)
3700{
3701	sigset_t these;
3702	struct timespec64 ts;
3703	kernel_siginfo_t info;
3704	int ret;
3705
3706	if (sigsetsize != sizeof(sigset_t))
3707		return -EINVAL;
3708
3709	if (copy_from_user(&these, uthese, sizeof(these)))
3710		return -EFAULT;
3711
3712	if (uts) {
3713		if (get_old_timespec32(&ts, uts))
3714			return -EFAULT;
3715	}
3716
3717	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3718
3719	if (ret > 0 && uinfo) {
3720		if (copy_siginfo_to_user(uinfo, &info))
3721			ret = -EFAULT;
3722	}
3723
3724	return ret;
3725}
3726#endif
3727
3728#ifdef CONFIG_COMPAT
3729COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3730		struct compat_siginfo __user *, uinfo,
3731		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3732{
3733	sigset_t s;
3734	struct timespec64 t;
3735	kernel_siginfo_t info;
3736	long ret;
3737
3738	if (sigsetsize != sizeof(sigset_t))
3739		return -EINVAL;
3740
3741	if (get_compat_sigset(&s, uthese))
3742		return -EFAULT;
3743
3744	if (uts) {
3745		if (get_timespec64(&t, uts))
3746			return -EFAULT;
3747	}
3748
3749	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3750
3751	if (ret > 0 && uinfo) {
3752		if (copy_siginfo_to_user32(uinfo, &info))
3753			ret = -EFAULT;
3754	}
3755
3756	return ret;
3757}
3758
3759#ifdef CONFIG_COMPAT_32BIT_TIME
3760COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3761		struct compat_siginfo __user *, uinfo,
3762		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3763{
3764	sigset_t s;
3765	struct timespec64 t;
3766	kernel_siginfo_t info;
3767	long ret;
3768
3769	if (sigsetsize != sizeof(sigset_t))
3770		return -EINVAL;
3771
3772	if (get_compat_sigset(&s, uthese))
3773		return -EFAULT;
3774
3775	if (uts) {
3776		if (get_old_timespec32(&t, uts))
3777			return -EFAULT;
3778	}
3779
3780	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3781
3782	if (ret > 0 && uinfo) {
3783		if (copy_siginfo_to_user32(uinfo, &info))
3784			ret = -EFAULT;
3785	}
3786
3787	return ret;
3788}
3789#endif
3790#endif
3791
3792static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3793{
3794	clear_siginfo(info);
3795	info->si_signo = sig;
3796	info->si_errno = 0;
3797	info->si_code = SI_USER;
3798	info->si_pid = task_tgid_vnr(current);
3799	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3800}
3801
3802/**
3803 *  sys_kill - send a signal to a process
3804 *  @pid: the PID of the process
3805 *  @sig: signal to be sent
3806 */
3807SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3808{
3809	struct kernel_siginfo info;
3810
3811	prepare_kill_siginfo(sig, &info);
3812
3813	return kill_something_info(sig, &info, pid);
3814}
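
/*
 * Illustrative sketch (not part of this file): the pid encodings accepted
 * by kill(2) and decoded by kill_something_info(). The pid values below
 * are placeholders.
 */
#include <signal.h>

void kill_encodings_example(void)
{
	kill(1234, SIGTERM);	/* pid > 0: exactly that process */
	kill(0, SIGTERM);	/* pid == 0: caller's own process group */
	kill(-1, SIGTERM);	/* pid == -1: every process we may signal */
	kill(-5678, SIGTERM);	/* pid < -1: process group 5678 */
}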
3815
3816/*
3817 * Verify that the signaler and signalee either are in the same pid namespace
3818 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3819 * namespace.
3820 */
3821static bool access_pidfd_pidns(struct pid *pid)
3822{
3823	struct pid_namespace *active = task_active_pid_ns(current);
3824	struct pid_namespace *p = ns_of_pid(pid);
3825
3826	for (;;) {
3827		if (!p)
3828			return false;
3829		if (p == active)
3830			break;
3831		p = p->parent;
3832	}
3833
3834	return true;
3835}
3836
3837static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3838		siginfo_t __user *info)
3839{
3840#ifdef CONFIG_COMPAT
3841	/*
3842	 * Avoid hooking up compat syscalls and instead handle necessary
3843	 * conversions here. Note, this is a stop-gap measure and should not be
3844	 * considered a generic solution.
3845	 */
3846	if (in_compat_syscall())
3847		return copy_siginfo_from_user32(
3848			kinfo, (struct compat_siginfo __user *)info);
3849#endif
3850	return copy_siginfo_from_user(kinfo, info);
3851}
3852
3853static struct pid *pidfd_to_pid(const struct file *file)
3854{
3855	struct pid *pid;
3856
3857	pid = pidfd_pid(file);
3858	if (!IS_ERR(pid))
3859		return pid;
3860
3861	return tgid_pidfd_to_pid(file);
3862}
3863
3864/**
3865 * sys_pidfd_send_signal - Signal a process through a pidfd
3866 * @pidfd:  file descriptor of the process
3867 * @sig:    signal to send
3868 * @info:   signal info
3869 * @flags:  future flags
3870 *
3871 * The syscall currently only signals via PIDTYPE_PID which covers
3872 * kill(<positive-pid>, <signal>). It does not signal threads or process
3873 * groups.
3874 * In order to extend the syscall to threads and process groups the @flags
3875 * argument should be used. In essence, the @flags argument will determine
3876 * what is signaled and not the file descriptor itself. In other words,
3877 * grouping is a property of the flags argument, not a property of the file
3878 * descriptor.
3879 *
3880 * Return: 0 on success, negative errno on failure
3881 */
3882SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3883		siginfo_t __user *, info, unsigned int, flags)
3884{
3885	int ret;
3886	struct fd f;
3887	struct pid *pid;
3888	kernel_siginfo_t kinfo;
3889
3890	/* Enforce flags be set to 0 until we add an extension. */
3891	if (flags)
3892		return -EINVAL;
3893
3894	f = fdget(pidfd);
3895	if (!f.file)
3896		return -EBADF;
3897
3898	/* Is this a pidfd? */
3899	pid = pidfd_to_pid(f.file);
3900	if (IS_ERR(pid)) {
3901		ret = PTR_ERR(pid);
3902		goto err;
3903	}
3904
3905	ret = -EINVAL;
3906	if (!access_pidfd_pidns(pid))
3907		goto err;
3908
3909	if (info) {
3910		ret = copy_siginfo_from_user_any(&kinfo, info);
3911		if (unlikely(ret))
3912			goto err;
3913
3914		ret = -EINVAL;
3915		if (unlikely(sig != kinfo.si_signo))
3916			goto err;
3917
3918		/* Only allow sending arbitrary signals to yourself. */
3919		ret = -EPERM;
3920		if ((task_pid(current) != pid) &&
3921		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3922			goto err;
3923	} else {
3924		prepare_kill_siginfo(sig, &kinfo);
3925	}
3926
3927	ret = kill_pid_info(sig, &kinfo, pid);
3928
3929err:
3930	fdput(f);
3931	return ret;
3932}
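
/*
 * Illustrative userspace sketch (not part of this file): signalling a
 * process through a pidfd. Assumes headers new enough to define
 * SYS_pidfd_open and SYS_pidfd_send_signal; raw syscall(2) is used since
 * libc wrappers may be absent.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int pidfd_kill_example(pid_t pid, int sig)
{
	int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0)
		return -1;
	/* info == NULL: the kernel builds SI_USER siginfo itself via
	 * prepare_kill_siginfo() above; flags must currently be 0. */
	ret = syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
	close(pidfd);
	return ret;
}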
3933
3934static int
3935do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3936{
3937	struct task_struct *p;
3938	int error = -ESRCH;
3939
3940	rcu_read_lock();
3941	p = find_task_by_vpid(pid);
3942	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3943		error = check_kill_permission(sig, info, p);
3944		/*
3945		 * The null signal is a permissions and process existence
3946		 * probe.  No signal is actually delivered.
3947		 */
3948		if (!error && sig) {
3949			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3950			/*
3951			 * If lock_task_sighand() failed we pretend the task
3952			 * dies after receiving the signal. The window is tiny,
3953			 * and the signal is private anyway.
3954			 */
3955			if (unlikely(error == -ESRCH))
3956				error = 0;
3957		}
3958	}
3959	rcu_read_unlock();
3960
3961	return error;
3962}
3963
3964static int do_tkill(pid_t tgid, pid_t pid, int sig)
3965{
3966	struct kernel_siginfo info;
3967
3968	clear_siginfo(&info);
3969	info.si_signo = sig;
3970	info.si_errno = 0;
3971	info.si_code = SI_TKILL;
3972	info.si_pid = task_tgid_vnr(current);
3973	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3974
3975	return do_send_specific(tgid, pid, sig, &info);
3976}
3977
3978/**
3979 *  sys_tgkill - send signal to one specific thread
3980 *  @tgid: the thread group ID of the thread
3981 *  @pid: the PID of the thread
3982 *  @sig: signal to be sent
3983 *
3984 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3985 *  exists but no longer belongs to the target thread group. This
3986 *  method solves the problem of threads exiting and PIDs getting reused.
3987 */
3988SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3989{
3990	/* This is only valid for single tasks */
3991	if (pid <= 0 || tgid <= 0)
3992		return -EINVAL;
3993
3994	return do_tkill(tgid, pid, sig);
3995}
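
/*
 * Illustrative sketch (not part of this file): directing a signal at one
 * specific thread from userspace. Assumes glibc >= 2.30 for the tgkill()
 * and gettid() wrappers.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <unistd.h>

void signal_self_thread_example(void)
{
	/* Passing the tgid alongside the tid guards against tid reuse,
	 * as the comment above sys_tgkill explains. */
	tgkill(getpid(), gettid(), SIGUSR1);
}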
3996
3997/**
3998 *  sys_tkill - send signal to one specific task
3999 *  @pid: the PID of the task
4000 *  @sig: signal to be sent
4001 *
4002 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4003 */
4004SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4005{
4006	/* This is only valid for single tasks */
4007	if (pid <= 0)
4008		return -EINVAL;
4009
4010	return do_tkill(0, pid, sig);
4011}
4012
4013static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4014{
4015	/* Not even root can pretend to send signals from the kernel.
4016	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4017	 */
4018	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4019	    (task_pid_vnr(current) != pid))
4020		return -EPERM;
4021
4022	/* POSIX.1b doesn't mention process groups.  */
4023	return kill_proc_info(sig, info, pid);
4024}
4025
4026/**
4027 *  sys_rt_sigqueueinfo - send signal information to a process
4028 *  @pid: the PID of the process
4029 *  @sig: signal to be sent
4030 *  @uinfo: signal info to be sent
4031 */
4032SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4033		siginfo_t __user *, uinfo)
4034{
4035	kernel_siginfo_t info;
4036	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4037	if (unlikely(ret))
4038		return ret;
4039	return do_rt_sigqueueinfo(pid, sig, &info);
4040}
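
/*
 * Illustrative sketch (not part of this file): userspace normally reaches
 * rt_sigqueueinfo through sigqueue(3), which queues a realtime signal with
 * a payload and a libc-filled si_code of SI_QUEUE (negative, so it passes
 * the self-signalling check above).
 */
#include <signal.h>

int sigqueue_example(pid_t pid, int value)
{
	union sigval sv = { .sival_int = value };

	/* The receiver reads the payload from info->si_value / si_int. */
	return sigqueue(pid, SIGRTMIN, sv);
}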
4041
4042#ifdef CONFIG_COMPAT
4043COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4044			compat_pid_t, pid,
4045			int, sig,
4046			struct compat_siginfo __user *, uinfo)
4047{
4048	kernel_siginfo_t info;
4049	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4050	if (unlikely(ret))
4051		return ret;
4052	return do_rt_sigqueueinfo(pid, sig, &info);
4053}
4054#endif
4055
4056static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4057{
4058	/* This is only valid for single tasks */
4059	if (pid <= 0 || tgid <= 0)
4060		return -EINVAL;
4061
4062	/* Not even root can pretend to send signals from the kernel.
4063	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4064	 */
4065	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4066	    (task_pid_vnr(current) != pid))
4067		return -EPERM;
4068
4069	return do_send_specific(tgid, pid, sig, info);
4070}
4071
4072SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4073		siginfo_t __user *, uinfo)
4074{
4075	kernel_siginfo_t info;
4076	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4077	if (unlikely(ret))
4078		return ret;
4079	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4080}
4081
4082#ifdef CONFIG_COMPAT
4083COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4084			compat_pid_t, tgid,
4085			compat_pid_t, pid,
4086			int, sig,
4087			struct compat_siginfo __user *, uinfo)
4088{
4089	kernel_siginfo_t info;
4090	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4091	if (unlikely(ret))
4092		return ret;
4093	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4094}
4095#endif
4096
4097/*
4098 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4099 */
4100void kernel_sigaction(int sig, __sighandler_t action)
4101{
4102	spin_lock_irq(&current->sighand->siglock);
4103	current->sighand->action[sig - 1].sa.sa_handler = action;
4104	if (action == SIG_IGN) {
4105		sigset_t mask;
4106
4107		sigemptyset(&mask);
4108		sigaddset(&mask, sig);
4109
4110		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4111		flush_sigqueue_mask(&mask, &current->pending);
4112		recalc_sigpending();
4113	}
4114	spin_unlock_irq(&current->sighand->siglock);
4115}
4116EXPORT_SYMBOL(kernel_sigaction);
4117
4118void __weak sigaction_compat_abi(struct k_sigaction *act,
4119		struct k_sigaction *oact)
4120{
4121}
4122
4123int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4124{
4125	struct task_struct *p = current, *t;
4126	struct k_sigaction *k;
4127	sigset_t mask;
4128
4129	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4130		return -EINVAL;
4131
4132	k = &p->sighand->action[sig-1];
4133
4134	spin_lock_irq(&p->sighand->siglock);
4135	if (k->sa.sa_flags & SA_IMMUTABLE) {
4136		spin_unlock_irq(&p->sighand->siglock);
4137		return -EINVAL;
4138	}
4139	if (oact)
4140		*oact = *k;
4141
4142	/*
4143	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4144	 * e.g. by having an architecture use the bit in their uapi.
4145	 */
4146	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4147
4148	/*
4149	 * Clear unknown flag bits in order to allow userspace to detect missing
4150	 * support for flag bits and to allow the kernel to use non-uapi bits
4151	 * internally.
4152	 */
4153	if (act)
4154		act->sa.sa_flags &= UAPI_SA_FLAGS;
4155	if (oact)
4156		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4157
4158	sigaction_compat_abi(act, oact);
4159
4160	if (act) {
4161		sigdelsetmask(&act->sa.sa_mask,
4162			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4163		*k = *act;
4164		/*
4165		 * POSIX 3.3.1.3:
4166		 *  "Setting a signal action to SIG_IGN for a signal that is
4167		 *   pending shall cause the pending signal to be discarded,
4168		 *   whether or not it is blocked."
4169		 *
4170		 *  "Setting a signal action to SIG_DFL for a signal that is
4171		 *   pending and whose default action is to ignore the signal
4172		 *   (for example, SIGCHLD), shall cause the pending signal to
4173		 *   be discarded, whether or not it is blocked"
4174		 */
4175		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4176			sigemptyset(&mask);
4177			sigaddset(&mask, sig);
4178			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4179			for_each_thread(p, t)
4180				flush_sigqueue_mask(&mask, &t->pending);
4181		}
4182	}
4183
4184	spin_unlock_irq(&p->sighand->siglock);
4185	return 0;
4186}
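
/*
 * Illustrative sketch (not part of this file): the POSIX 3.3.1.3 rule
 * quoted above, as observable from userspace — ignoring a signal discards
 * a pending instance even while it is blocked.
 */
#include <signal.h>

void discard_pending_example(void)
{
	sigset_t s;

	sigemptyset(&s);
	sigaddset(&s, SIGUSR1);
	sigprocmask(SIG_BLOCK, &s, NULL);

	raise(SIGUSR1);			/* now pending, not delivered */
	signal(SIGUSR1, SIG_IGN);	/* pending instance is flushed */
	signal(SIGUSR1, SIG_DFL);
	sigprocmask(SIG_UNBLOCK, &s, NULL);	/* nothing arrives */
}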
4187
4188#ifdef CONFIG_DYNAMIC_SIGFRAME
4189static inline void sigaltstack_lock(void)
4190	__acquires(&current->sighand->siglock)
4191{
4192	spin_lock_irq(&current->sighand->siglock);
4193}
4194
4195static inline void sigaltstack_unlock(void)
4196	__releases(&current->sighand->siglock)
4197{
4198	spin_unlock_irq(&current->sighand->siglock);
4199}
4200#else
4201static inline void sigaltstack_lock(void) { }
4202static inline void sigaltstack_unlock(void) { }
4203#endif
4204
4205static int
4206do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4207		size_t min_ss_size)
4208{
4209	struct task_struct *t = current;
4210	int ret = 0;
4211
4212	if (oss) {
4213		memset(oss, 0, sizeof(stack_t));
4214		oss->ss_sp = (void __user *) t->sas_ss_sp;
4215		oss->ss_size = t->sas_ss_size;
4216		oss->ss_flags = sas_ss_flags(sp) |
4217			(current->sas_ss_flags & SS_FLAG_BITS);
4218	}
4219
4220	if (ss) {
4221		void __user *ss_sp = ss->ss_sp;
4222		size_t ss_size = ss->ss_size;
4223		unsigned ss_flags = ss->ss_flags;
4224		int ss_mode;
4225
4226		if (unlikely(on_sig_stack(sp)))
4227			return -EPERM;
4228
4229		ss_mode = ss_flags & ~SS_FLAG_BITS;
4230		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4231				ss_mode != 0))
4232			return -EINVAL;
4233
4234		/*
4235		 * Return before taking any locks if no actual
4236		 * sigaltstack changes were requested.
4237		 */
4238		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4239		    t->sas_ss_size == ss_size &&
4240		    t->sas_ss_flags == ss_flags)
4241			return 0;
4242
4243		sigaltstack_lock();
4244		if (ss_mode == SS_DISABLE) {
4245			ss_size = 0;
4246			ss_sp = NULL;
4247		} else {
4248			if (unlikely(ss_size < min_ss_size))
4249				ret = -ENOMEM;
4250			if (!sigaltstack_size_valid(ss_size))
4251				ret = -ENOMEM;
4252		}
4253		if (!ret) {
4254			t->sas_ss_sp = (unsigned long) ss_sp;
4255			t->sas_ss_size = ss_size;
4256			t->sas_ss_flags = ss_flags;
4257		}
4258		sigaltstack_unlock();
4259	}
4260	return ret;
4261}
4262
4263SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4264{
4265	stack_t new, old;
4266	int err;
4267	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4268		return -EFAULT;
4269	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4270			      current_user_stack_pointer(),
4271			      MINSIGSTKSZ);
4272	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4273		err = -EFAULT;
4274	return err;
4275}
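
/*
 * Illustrative userspace sketch (not part of this file): installing an
 * alternate stack so a SIGSEGV handler can still run after the main stack
 * overflows. Allocation-failure handling is omitted for brevity.
 */
#include <signal.h>
#include <stdlib.h>

void altstack_example(void (*handler)(int))
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};
	struct sigaction sa = {
		.sa_handler = handler,
		.sa_flags = SA_ONSTACK,	/* deliver on the alternate stack */
	};

	sigaltstack(&ss, NULL);		/* ends up in do_sigaltstack() */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
}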
4276
4277int restore_altstack(const stack_t __user *uss)
4278{
4279	stack_t new;
4280	if (copy_from_user(&new, uss, sizeof(stack_t)))
4281		return -EFAULT;
4282	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4283			     MINSIGSTKSZ);
4284	/* squash all but EFAULT for now */
4285	return 0;
4286}
4287
4288int __save_altstack(stack_t __user *uss, unsigned long sp)
4289{
4290	struct task_struct *t = current;
4291	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4292		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4293		__put_user(t->sas_ss_size, &uss->ss_size);
4294	return err;
4295}
4296
4297#ifdef CONFIG_COMPAT
4298static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4299				 compat_stack_t __user *uoss_ptr)
4300{
4301	stack_t uss, uoss;
4302	int ret;
4303
4304	if (uss_ptr) {
4305		compat_stack_t uss32;
4306		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4307			return -EFAULT;
4308		uss.ss_sp = compat_ptr(uss32.ss_sp);
4309		uss.ss_flags = uss32.ss_flags;
4310		uss.ss_size = uss32.ss_size;
4311	}
4312	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4313			     compat_user_stack_pointer(),
4314			     COMPAT_MINSIGSTKSZ);
4315	if (ret >= 0 && uoss_ptr)  {
4316		compat_stack_t old;
4317		memset(&old, 0, sizeof(old));
4318		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4319		old.ss_flags = uoss.ss_flags;
4320		old.ss_size = uoss.ss_size;
4321		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4322			ret = -EFAULT;
4323	}
4324	return ret;
4325}
4326
4327COMPAT_SYSCALL_DEFINE2(sigaltstack,
4328			const compat_stack_t __user *, uss_ptr,
4329			compat_stack_t __user *, uoss_ptr)
4330{
4331	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4332}
4333
4334int compat_restore_altstack(const compat_stack_t __user *uss)
4335{
4336	int err = do_compat_sigaltstack(uss, NULL);
4337	/* squash all but -EFAULT for now */
4338	return err == -EFAULT ? err : 0;
4339}
4340
4341int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4342{
4343	int err;
4344	struct task_struct *t = current;
4345	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4346			 &uss->ss_sp) |
4347		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4348		__put_user(t->sas_ss_size, &uss->ss_size);
4349	return err;
4350}
4351#endif
4352
4353#ifdef __ARCH_WANT_SYS_SIGPENDING
4354
4355/**
4356 *  sys_sigpending - examine pending signals
4357 *  @uset: where the mask of pending signals is returned
4358 */
4359SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4360{
4361	sigset_t set;
4362
4363	if (sizeof(old_sigset_t) > sizeof(*uset))
4364		return -EINVAL;
4365
4366	do_sigpending(&set);
4367
4368	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4369		return -EFAULT;
4370
4371	return 0;
4372}
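
/*
 * Illustrative sketch (not part of this file): querying the pending set
 * from userspace with the sigpending(2) wrapper.
 */
#include <signal.h>

int usr1_pending_example(void)
{
	sigset_t pending;

	sigpending(&pending);
	return sigismember(&pending, SIGUSR1);
}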
4373
4374#ifdef CONFIG_COMPAT
4375COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4376{
4377	sigset_t set;
4378
4379	do_sigpending(&set);
4380
4381	return put_user(set.sig[0], set32);
4382}
4383#endif
4384
4385#endif
4386
4387#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4388/**
4389 *  sys_sigprocmask - examine and change blocked signals
4390 *  @how: whether to add, remove, or set signals
4391 *  @nset: signals to add or remove (if non-null)
4392 *  @oset: previous value of signal mask if non-null
4393 *
4394 * Some platforms have their own version with special arguments;
4395 * others support only sys_rt_sigprocmask.
4396 */
4397
4398SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4399		old_sigset_t __user *, oset)
4400{
4401	old_sigset_t old_set, new_set;
4402	sigset_t new_blocked;
4403
4404	old_set = current->blocked.sig[0];
4405
4406	if (nset) {
4407		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4408			return -EFAULT;
4409
4410		new_blocked = current->blocked;
4411
4412		switch (how) {
4413		case SIG_BLOCK:
4414			sigaddsetmask(&new_blocked, new_set);
4415			break;
4416		case SIG_UNBLOCK:
4417			sigdelsetmask(&new_blocked, new_set);
4418			break;
4419		case SIG_SETMASK:
4420			new_blocked.sig[0] = new_set;
4421			break;
4422		default:
4423			return -EINVAL;
4424		}
4425
4426		set_current_blocked(&new_blocked);
4427	}
4428
4429	if (oset) {
4430		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4431			return -EFAULT;
4432	}
4433
4434	return 0;
4435}
4436#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
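
/*
 * Illustrative sketch (not part of this file): blocking SIGINT around a
 * critical section and then restoring the previous mask — the userspace
 * counterpart of the SIG_BLOCK/SIG_SETMASK cases handled above.
 */
#include <signal.h>

void masked_section_example(void (*fn)(void))
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, &old);	/* add to blocked set */
	fn();					/* SIGINT stays pending */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore the old mask */
}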
4437
4438#ifndef CONFIG_ODD_RT_SIGACTION
4439/**
4440 *  sys_rt_sigaction - alter an action taken by a process
4441 *  @sig: signal to be sent
4442 *  @act: new sigaction
4443 *  @oact: used to save the previous sigaction
4444 *  @sigsetsize: size of sigset_t type
4445 */
4446SYSCALL_DEFINE4(rt_sigaction, int, sig,
4447		const struct sigaction __user *, act,
4448		struct sigaction __user *, oact,
4449		size_t, sigsetsize)
4450{
4451	struct k_sigaction new_sa, old_sa;
4452	int ret;
4453
4454	/* XXX: Don't preclude handling different sized sigset_t's.  */
4455	if (sigsetsize != sizeof(sigset_t))
4456		return -EINVAL;
4457
4458	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4459		return -EFAULT;
4460
4461	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4462	if (ret)
4463		return ret;
4464
4465	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4466		return -EFAULT;
4467
4468	return 0;
4469}
4470#ifdef CONFIG_COMPAT
4471COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4472		const struct compat_sigaction __user *, act,
4473		struct compat_sigaction __user *, oact,
4474		compat_size_t, sigsetsize)
4475{
4476	struct k_sigaction new_ka, old_ka;
4477#ifdef __ARCH_HAS_SA_RESTORER
4478	compat_uptr_t restorer;
4479#endif
4480	int ret;
4481
4482	/* XXX: Don't preclude handling different sized sigset_t's.  */
4483	if (sigsetsize != sizeof(compat_sigset_t))
4484		return -EINVAL;
4485
4486	if (act) {
4487		compat_uptr_t handler;
4488		ret = get_user(handler, &act->sa_handler);
4489		new_ka.sa.sa_handler = compat_ptr(handler);
4490#ifdef __ARCH_HAS_SA_RESTORER
4491		ret |= get_user(restorer, &act->sa_restorer);
4492		new_ka.sa.sa_restorer = compat_ptr(restorer);
4493#endif
4494		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4495		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4496		if (ret)
4497			return -EFAULT;
4498	}
4499
4500	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4501	if (!ret && oact) {
4502		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
4503			       &oact->sa_handler);
4504		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4505					 sizeof(oact->sa_mask));
4506		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4507#ifdef __ARCH_HAS_SA_RESTORER
4508		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4509				&oact->sa_restorer);
4510#endif
4511	}
4512	return ret;
4513}
4514#endif
4515#endif /* !CONFIG_ODD_RT_SIGACTION */
4516
4517#ifdef CONFIG_OLD_SIGACTION
4518SYSCALL_DEFINE3(sigaction, int, sig,
4519		const struct old_sigaction __user *, act,
4520	        struct old_sigaction __user *, oact)
4521{
4522	struct k_sigaction new_ka, old_ka;
4523	int ret;
4524
4525	if (act) {
4526		old_sigset_t mask;
4527		if (!access_ok(act, sizeof(*act)) ||
4528		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4529		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4530		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4531		    __get_user(mask, &act->sa_mask))
4532			return -EFAULT;
4533#ifdef __ARCH_HAS_KA_RESTORER
4534		new_ka.ka_restorer = NULL;
4535#endif
4536		siginitset(&new_ka.sa.sa_mask, mask);
4537	}
4538
4539	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4540
4541	if (!ret && oact) {
4542		if (!access_ok(oact, sizeof(*oact)) ||
4543		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4544		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4545		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4546		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4547			return -EFAULT;
4548	}
4549
4550	return ret;
4551}
4552#endif
4553#ifdef CONFIG_COMPAT_OLD_SIGACTION
4554COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4555		const struct compat_old_sigaction __user *, act,
4556	        struct compat_old_sigaction __user *, oact)
4557{
4558	struct k_sigaction new_ka, old_ka;
4559	int ret;
4560	compat_old_sigset_t mask;
4561	compat_uptr_t handler, restorer;
4562
4563	if (act) {
4564		if (!access_ok(act, sizeof(*act)) ||
4565		    __get_user(handler, &act->sa_handler) ||
4566		    __get_user(restorer, &act->sa_restorer) ||
4567		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4568		    __get_user(mask, &act->sa_mask))
4569			return -EFAULT;
4570
4571#ifdef __ARCH_HAS_KA_RESTORER
4572		new_ka.ka_restorer = NULL;
4573#endif
4574		new_ka.sa.sa_handler = compat_ptr(handler);
4575		new_ka.sa.sa_restorer = compat_ptr(restorer);
4576		siginitset(&new_ka.sa.sa_mask, mask);
4577	}
4578
4579	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4580
4581	if (!ret && oact) {
4582		if (!access_ok(oact, sizeof(*oact)) ||
4583		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4584			       &oact->sa_handler) ||
4585		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4586			       &oact->sa_restorer) ||
4587		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4588		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4589			return -EFAULT;
4590	}
4591	return ret;
4592}
4593#endif
4594
4595#ifdef CONFIG_SGETMASK_SYSCALL
4596
4597/*
4598 * For backwards compatibility.  Functionality superseded by sigprocmask.
4599 */
4600SYSCALL_DEFINE0(sgetmask)
4601{
4602	/* SMP safe */
4603	return current->blocked.sig[0];
4604}
4605
4606SYSCALL_DEFINE1(ssetmask, int, newmask)
4607{
4608	int old = current->blocked.sig[0];
4609	sigset_t newset;
4610
4611	siginitset(&newset, newmask);
4612	set_current_blocked(&newset);
4613
4614	return old;
4615}
4616#endif /* CONFIG_SGETMASK_SYSCALL */
4617
4618#ifdef __ARCH_WANT_SYS_SIGNAL
4619/*
4620 * For backwards compatibility.  Functionality superseded by sigaction.
4621 */
4622SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4623{
4624	struct k_sigaction new_sa, old_sa;
4625	int ret;
4626
4627	new_sa.sa.sa_handler = handler;
4628	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4629	sigemptyset(&new_sa.sa.sa_mask);
4630
4631	ret = do_sigaction(sig, &new_sa, &old_sa);
4632
4633	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4634}
4635#endif /* __ARCH_WANT_SYS_SIGNAL */
4636
4637#ifdef __ARCH_WANT_SYS_PAUSE
4638
4639SYSCALL_DEFINE0(pause)
4640{
4641	while (!signal_pending(current)) {
4642		__set_current_state(TASK_INTERRUPTIBLE);
4643		schedule();
4644	}
4645	return -ERESTARTNOHAND;
4646}
4647
4648#endif
4649
4650static int sigsuspend(sigset_t *set)
4651{
4652	current->saved_sigmask = current->blocked;
4653	set_current_blocked(set);
4654
4655	while (!signal_pending(current)) {
4656		__set_current_state(TASK_INTERRUPTIBLE);
4657		schedule();
4658	}
4659	set_restore_sigmask();
4660	return -ERESTARTNOHAND;
4661}
4662
4663/**
4664 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
4665 *	until a signal is received
4666 *  @unewset: new signal mask value
4667 *  @sigsetsize: size of sigset_t type
4668 */
4669SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4670{
4671	sigset_t newset;
4672
4673	/* XXX: Don't preclude handling different sized sigset_t's.  */
4674	if (sigsetsize != sizeof(sigset_t))
4675		return -EINVAL;
4676
4677	if (copy_from_user(&newset, unewset, sizeof(newset)))
4678		return -EFAULT;
4679	return sigsuspend(&newset);
4680}
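
/*
 * Illustrative sketch (not part of this file): the classic race-free wait
 * built on sigsuspend(), which atomically installs the saved mask and
 * sleeps, exactly as sigsuspend() above implements.
 */
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig) { (void)sig; got_usr1 = 1; }

void suspend_example(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */
	signal(SIGUSR1, on_usr1);

	while (!got_usr1)
		sigsuspend(&old);	/* returns -1/EINTR per delivery */

	sigprocmask(SIG_SETMASK, &old, NULL);
}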
4681 
4682#ifdef CONFIG_COMPAT
4683COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4684{
4685	sigset_t newset;
4686
4687	/* XXX: Don't preclude handling different sized sigset_t's.  */
4688	if (sigsetsize != sizeof(sigset_t))
4689		return -EINVAL;
4690
4691	if (get_compat_sigset(&newset, unewset))
4692		return -EFAULT;
4693	return sigsuspend(&newset);
4694}
4695#endif
4696
4697#ifdef CONFIG_OLD_SIGSUSPEND
4698SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4699{
4700	sigset_t blocked;
4701	siginitset(&blocked, mask);
4702	return sigsuspend(&blocked);
4703}
4704#endif
4705#ifdef CONFIG_OLD_SIGSUSPEND3
4706SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4707{
4708	sigset_t blocked;
4709	siginitset(&blocked, mask);
4710	return sigsuspend(&blocked);
4711}
4712#endif
4713
4714__weak const char *arch_vma_name(struct vm_area_struct *vma)
4715{
4716	return NULL;
4717}
4718
4719static inline void siginfo_buildtime_checks(void)
4720{
4721	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4722
4723	/* Verify the offsets in the two siginfos match */
4724#define CHECK_OFFSET(field) \
4725	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4726
4727	/* kill */
4728	CHECK_OFFSET(si_pid);
4729	CHECK_OFFSET(si_uid);
4730
4731	/* timer */
4732	CHECK_OFFSET(si_tid);
4733	CHECK_OFFSET(si_overrun);
4734	CHECK_OFFSET(si_value);
4735
4736	/* rt */
4737	CHECK_OFFSET(si_pid);
4738	CHECK_OFFSET(si_uid);
4739	CHECK_OFFSET(si_value);
4740
4741	/* sigchld */
4742	CHECK_OFFSET(si_pid);
4743	CHECK_OFFSET(si_uid);
4744	CHECK_OFFSET(si_status);
4745	CHECK_OFFSET(si_utime);
4746	CHECK_OFFSET(si_stime);
4747
4748	/* sigfault */
4749	CHECK_OFFSET(si_addr);
4750	CHECK_OFFSET(si_trapno);
4751	CHECK_OFFSET(si_addr_lsb);
4752	CHECK_OFFSET(si_lower);
4753	CHECK_OFFSET(si_upper);
4754	CHECK_OFFSET(si_pkey);
4755	CHECK_OFFSET(si_perf_data);
4756	CHECK_OFFSET(si_perf_type);
4757	CHECK_OFFSET(si_perf_flags);
4758
4759	/* sigpoll */
4760	CHECK_OFFSET(si_band);
4761	CHECK_OFFSET(si_fd);
4762
4763	/* sigsys */
4764	CHECK_OFFSET(si_call_addr);
4765	CHECK_OFFSET(si_syscall);
4766	CHECK_OFFSET(si_arch);
4767#undef CHECK_OFFSET
4768
4769	/* usb asyncio */
4770	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4771		     offsetof(struct siginfo, si_addr));
4772	if (sizeof(int) == sizeof(void __user *)) {
4773		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4774			     sizeof(void __user *));
4775	} else {
4776		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4777			      sizeof_field(struct siginfo, si_uid)) !=
4778			     sizeof(void __user *));
4779		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4780			     offsetof(struct siginfo, si_uid));
4781	}
4782#ifdef CONFIG_COMPAT
4783	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4784		     offsetof(struct compat_siginfo, si_addr));
4785	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4786		     sizeof(compat_uptr_t));
4787	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4788		     sizeof_field(struct siginfo, si_pid));
4789#endif
4790}
4791
4792#if defined(CONFIG_SYSCTL)
4793static struct ctl_table signal_debug_table[] = {
4794#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4795	{
4796		.procname	= "exception-trace",
4797		.data		= &show_unhandled_signals,
4798		.maxlen		= sizeof(int),
4799		.mode		= 0644,
4800		.proc_handler	= proc_dointvec
4801	},
4802#endif
4803	{ }
4804};
4805
4806static int __init init_signal_sysctls(void)
4807{
4808	register_sysctl_init("debug", signal_debug_table);
4809	return 0;
4810}
4811early_initcall(init_signal_sysctls);
4812#endif /* CONFIG_SYSCTL */
4813
4814void __init signals_init(void)
4815{
4816	siginfo_buildtime_checks();
4817
4818	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4819}
4820
4821#ifdef CONFIG_KGDB_KDB
4822#include <linux/kdb.h>
4823/*
4824 * kdb_send_sig - Allows kdb to send signals without exposing
4825 * signal internals.  This function checks if the required locks are
4826 * available before calling the main signal code, to avoid kdb
4827 * deadlocks.
4828 */
4829void kdb_send_sig(struct task_struct *t, int sig)
4830{
4831	static struct task_struct *kdb_prev_t;
4832	int new_t, ret;
4833	if (!spin_trylock(&t->sighand->siglock)) {
4834		kdb_printf("Can't do kill command now.\n"
4835			   "The sigmask lock is held somewhere else in "
4836			   "the kernel, try again later\n");
4837		return;
4838	}
4839	new_t = kdb_prev_t != t;
4840	kdb_prev_t = t;
4841	if (!task_is_running(t) && new_t) {
4842		spin_unlock(&t->sighand->siglock);
4843		kdb_printf("Process is not RUNNING, sending a signal from "
4844			   "kdb risks deadlock\n"
4845			   "on the run queue locks. "
4846			   "The signal has _not_ been sent.\n"
4847			   "Reissue the kill command if you want to risk "
4848			   "the deadlock.\n");
4849		return;
4850	}
4851	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4852	spin_unlock(&t->sighand->siglock);
4853	if (ret)
4854		kdb_printf("Failed to deliver signal %d to process %d.\n",
4855			   sig, t->pid);
4856	else
4857		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4858}
4859#endif	/* CONFIG_KGDB_KDB */
v3.1
 
   1/*
   2 *  linux/kernel/signal.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *
   6 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   7 *
   8 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
   9 *		Changes to use preallocated sigqueue structures
  10 *		to allow signals to be sent reliably.
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/module.h>
  15#include <linux/init.h>
  16#include <linux/sched.h>
 
 
 
 
 
 
  17#include <linux/fs.h>
 
 
  18#include <linux/tty.h>
  19#include <linux/binfmts.h>
 
  20#include <linux/security.h>
  21#include <linux/syscalls.h>
  22#include <linux/ptrace.h>
  23#include <linux/signal.h>
  24#include <linux/signalfd.h>
  25#include <linux/ratelimit.h>
  26#include <linux/tracehook.h>
  27#include <linux/capability.h>
  28#include <linux/freezer.h>
  29#include <linux/pid_namespace.h>
  30#include <linux/nsproxy.h>
 
 
 
 
 
 
 
 
 
 
  31#define CREATE_TRACE_POINTS
  32#include <trace/events/signal.h>
  33
  34#include <asm/param.h>
  35#include <asm/uaccess.h>
  36#include <asm/unistd.h>
  37#include <asm/siginfo.h>
  38#include "audit.h"	/* audit_signal_info() */
 
  39
  40/*
  41 * SLAB caches for signal bits.
  42 */
  43
  44static struct kmem_cache *sigqueue_cachep;
  45
  46int print_fatal_signals __read_mostly;
  47
  48static void __user *sig_handler(struct task_struct *t, int sig)
  49{
  50	return t->sighand->action[sig - 1].sa.sa_handler;
  51}
  52
  53static int sig_handler_ignored(void __user *handler, int sig)
  54{
  55	/* Is it explicitly or implicitly ignored? */
  56	return handler == SIG_IGN ||
  57		(handler == SIG_DFL && sig_kernel_ignore(sig));
  58}
  59
  60static int sig_task_ignored(struct task_struct *t, int sig,
  61		int from_ancestor_ns)
  62{
  63	void __user *handler;
  64
  65	handler = sig_handler(t, sig);
  66
 
 
 
 
  67	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  68			handler == SIG_DFL && !from_ancestor_ns)
  69		return 1;
 
 
 
 
 
  70
  71	return sig_handler_ignored(handler, sig);
  72}
  73
  74static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
  75{
  76	/*
  77	 * Blocked signals are never ignored, since the
  78	 * signal handler may change by the time it is
  79	 * unblocked.
  80	 */
  81	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
  82		return 0;
  83
  84	if (!sig_task_ignored(t, sig, from_ancestor_ns))
  85		return 0;
  86
  87	/*
  88	 * Tracers may want to know about even ignored signals.
 
 
  89	 */
  90	return !t->ptrace;
 
 
 
  91}
  92
  93/*
  94 * Re-calculate pending state from the set of locally pending
  95 * signals, globally pending signals, and blocked signals.
  96 */
  97static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
  98{
  99	unsigned long ready;
 100	long i;
 101
 102	switch (_NSIG_WORDS) {
 103	default:
 104		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 105			ready |= signal->sig[i] &~ blocked->sig[i];
 106		break;
 107
 108	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 109		ready |= signal->sig[2] &~ blocked->sig[2];
 110		ready |= signal->sig[1] &~ blocked->sig[1];
 111		ready |= signal->sig[0] &~ blocked->sig[0];
 112		break;
 113
 114	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 115		ready |= signal->sig[0] &~ blocked->sig[0];
 116		break;
 117
 118	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 119	}
 120	return ready !=	0;
 121}
 122
 123#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 124
 125static int recalc_sigpending_tsk(struct task_struct *t)
 126{
 127	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
 128	    PENDING(&t->pending, &t->blocked) ||
 129	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 
 130		set_tsk_thread_flag(t, TIF_SIGPENDING);
 131		return 1;
 132	}
 
 133	/*
 134	 * We must never clear the flag in another thread, or in current
 135	 * when it's possible the current syscall is returning -ERESTART*.
 136	 * So we don't clear it here, and only callers who know they should do.
 137	 */
 138	return 0;
 139}
 140
 141/*
 142 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 143 * This is superfluous when called on current, the wakeup is a harmless no-op.
 144 */
 145void recalc_sigpending_and_wake(struct task_struct *t)
 146{
 147	if (recalc_sigpending_tsk(t))
 148		signal_wake_up(t, 0);
 149}
 150
 151void recalc_sigpending(void)
 152{
 153	if (!recalc_sigpending_tsk(current) && !freezing(current))
 154		clear_thread_flag(TIF_SIGPENDING);
 155
 156}
 
 
 
 
 
 
 
 
 
 
 
 
 157
 158/* Given the mask, find the first available signal that should be serviced. */
 159
 160#define SYNCHRONOUS_MASK \
 161	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 162	 sigmask(SIGTRAP) | sigmask(SIGFPE))
 163
 164int next_signal(struct sigpending *pending, sigset_t *mask)
 165{
 166	unsigned long i, *s, *m, x;
 167	int sig = 0;
 168
 169	s = pending->signal.sig;
 170	m = mask->sig;
 171
 172	/*
 173	 * Handle the first word specially: it contains the
 174	 * synchronous signals that need to be dequeued first.
 175	 */
 176	x = *s &~ *m;
 177	if (x) {
 178		if (x & SYNCHRONOUS_MASK)
 179			x &= SYNCHRONOUS_MASK;
 180		sig = ffz(~x) + 1;
 181		return sig;
 182	}
 183
 184	switch (_NSIG_WORDS) {
 185	default:
 186		for (i = 1; i < _NSIG_WORDS; ++i) {
 187			x = *++s &~ *++m;
 188			if (!x)
 189				continue;
 190			sig = ffz(~x) + i*_NSIG_BPW + 1;
 191			break;
 192		}
 193		break;
 194
 195	case 2:
 196		x = s[1] &~ m[1];
 197		if (!x)
 198			break;
 199		sig = ffz(~x) + _NSIG_BPW + 1;
 200		break;
 201
 202	case 1:
 203		/* Nothing to do */
 204		break;
 205	}
 206
 207	return sig;
 208}
 209
 210static inline void print_dropped_signal(int sig)
 211{
 212	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 213
 214	if (!print_fatal_signals)
 215		return;
 216
 217	if (!__ratelimit(&ratelimit_state))
 218		return;
 219
 220	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 221				current->comm, current->pid, sig);
 222}
 223
 224/**
 225 * task_set_jobctl_pending - set jobctl pending bits
 226 * @task: target task
 227 * @mask: pending bits to set
 228 *
 229 * Clear @mask from @task->jobctl.  @mask must be subset of
 230 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 231 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 232 * cleared.  If @task is already being killed or exiting, this function
 233 * becomes noop.
 234 *
 235 * CONTEXT:
 236 * Must be called with @task->sighand->siglock held.
 237 *
 238 * RETURNS:
 239 * %true if @mask is set, %false if made noop because @task was dying.
 240 */
 241bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
 242{
 243	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 244			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 245	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 246
 247	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 248		return false;
 249
 250	if (mask & JOBCTL_STOP_SIGMASK)
 251		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 252
 253	task->jobctl |= mask;
 254	return true;
 255}
 256
 257/**
 258 * task_clear_jobctl_trapping - clear jobctl trapping bit
 259 * @task: target task
 260 *
 261 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 262 * Clear it and wake up the ptracer.  Note that we don't need any further
 263 * locking.  @task->siglock guarantees that @task->parent points to the
 264 * ptracer.
 265 *
 266 * CONTEXT:
 267 * Must be called with @task->sighand->siglock held.
 268 */
 269void task_clear_jobctl_trapping(struct task_struct *task)
 270{
 271	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 272		task->jobctl &= ~JOBCTL_TRAPPING;
 
 273		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 274	}
 275}
 276
 277/**
 278 * task_clear_jobctl_pending - clear jobctl pending bits
 279 * @task: target task
 280 * @mask: pending bits to clear
 281 *
 282 * Clear @mask from @task->jobctl.  @mask must be subset of
 283 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 284 * STOP bits are cleared together.
 285 *
 286 * If clearing of @mask leaves no stop or trap pending, this function calls
 287 * task_clear_jobctl_trapping().
 288 *
 289 * CONTEXT:
 290 * Must be called with @task->sighand->siglock held.
 291 */
 292void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
 293{
 294	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 295
 296	if (mask & JOBCTL_STOP_PENDING)
 297		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 298
 299	task->jobctl &= ~mask;
 300
 301	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 302		task_clear_jobctl_trapping(task);
 303}
 304
 305/**
 306 * task_participate_group_stop - participate in a group stop
 307 * @task: task participating in a group stop
 308 *
 309 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 310 * Group stop states are cleared and the group stop count is consumed if
 311 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 312 * stop, the appropriate %SIGNAL_* flags are set.
 313 *
 314 * CONTEXT:
 315 * Must be called with @task->sighand->siglock held.
 316 *
 317 * RETURNS:
 318 * %true if group stop completion should be notified to the parent, %false
 319 * otherwise.
 320 */
 321static bool task_participate_group_stop(struct task_struct *task)
 322{
 323	struct signal_struct *sig = task->signal;
 324	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 325
 326	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 327
 328	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 329
 330	if (!consume)
 331		return false;
 332
 333	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 334		sig->group_stop_count--;
 335
 336	/*
 337	 * Tell the caller to notify completion iff we are entering into a
 338	 * fresh group stop.  Read comment in do_signal_stop() for details.
 339	 */
 340	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 341		sig->flags = SIGNAL_STOP_STOPPED;
 342		return true;
 343	}
 344	return false;
 345}
 346
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 347/*
 348 * allocate a new signal queue record
 349 * - this may be called without locks if and only if t == current, otherwise an
 350 *   appropriate lock must be held to stop the target task from exiting
 351 */
 352static struct sigqueue *
 353__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 
 354{
 355	struct sigqueue *q = NULL;
 356	struct user_struct *user;
 
 357
 358	/*
 359	 * Protect access to @t credentials. This can go away when all
 360	 * callers hold rcu read lock.
 
 
 
 
 361	 */
 362	rcu_read_lock();
 363	user = get_uid(__task_cred(t)->user);
 364	atomic_inc(&user->sigpending);
 365	rcu_read_unlock();
 
 
 366
 367	if (override_rlimit ||
 368	    atomic_read(&user->sigpending) <=
 369			task_rlimit(t, RLIMIT_SIGPENDING)) {
 370		q = kmem_cache_alloc(sigqueue_cachep, flags);
 371	} else {
 372		print_dropped_signal(sig);
 373	}
 374
 375	if (unlikely(q == NULL)) {
 376		atomic_dec(&user->sigpending);
 377		free_uid(user);
 378	} else {
 379		INIT_LIST_HEAD(&q->list);
 380		q->flags = 0;
 381		q->user = user;
 382	}
 383
 384	return q;
 385}
 386
 387static void __sigqueue_free(struct sigqueue *q)
 388{
 389	if (q->flags & SIGQUEUE_PREALLOC)
 390		return;
 391	atomic_dec(&q->user->sigpending);
 392	free_uid(q->user);
 
 
 393	kmem_cache_free(sigqueue_cachep, q);
 394}
 395
 396void flush_sigqueue(struct sigpending *queue)
 397{
 398	struct sigqueue *q;
 399
 400	sigemptyset(&queue->signal);
 401	while (!list_empty(&queue->list)) {
 402		q = list_entry(queue->list.next, struct sigqueue , list);
 403		list_del_init(&q->list);
 404		__sigqueue_free(q);
 405	}
 406}
 407
 408/*
 409 * Flush all pending signals for a task.
 410 */
 411void __flush_signals(struct task_struct *t)
 412{
 413	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 414	flush_sigqueue(&t->pending);
 415	flush_sigqueue(&t->signal->shared_pending);
 416}
 417
 418void flush_signals(struct task_struct *t)
 419{
 420	unsigned long flags;
 421
 422	spin_lock_irqsave(&t->sighand->siglock, flags);
 423	__flush_signals(t);
 
 
 424	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 425}
 
 426
 
 427static void __flush_itimer_signals(struct sigpending *pending)
 428{
 429	sigset_t signal, retain;
 430	struct sigqueue *q, *n;
 431
 432	signal = pending->signal;
 433	sigemptyset(&retain);
 434
 435	list_for_each_entry_safe(q, n, &pending->list, list) {
 436		int sig = q->info.si_signo;
 437
 438		if (likely(q->info.si_code != SI_TIMER)) {
 439			sigaddset(&retain, sig);
 440		} else {
 441			sigdelset(&signal, sig);
 442			list_del_init(&q->list);
 443			__sigqueue_free(q);
 444		}
 445	}
 446
 447	sigorsets(&pending->signal, &signal, &retain);
 448}
 449
 450void flush_itimer_signals(void)
 451{
 452	struct task_struct *tsk = current;
 453	unsigned long flags;
 454
 455	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 456	__flush_itimer_signals(&tsk->pending);
 457	__flush_itimer_signals(&tsk->signal->shared_pending);
 458	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 459}
 
 460
 461void ignore_signals(struct task_struct *t)
 462{
 463	int i;
 464
 465	for (i = 0; i < _NSIG; ++i)
 466		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 467
 468	flush_signals(t);
 469}
 470
 471/*
 472 * Flush all handlers for a task.
 473 */
 474
 475void
 476flush_signal_handlers(struct task_struct *t, int force_default)
 477{
 478	int i;
 479	struct k_sigaction *ka = &t->sighand->action[0];
 480	for (i = _NSIG ; i != 0 ; i--) {
 481		if (force_default || ka->sa.sa_handler != SIG_IGN)
 482			ka->sa.sa_handler = SIG_DFL;
 483		ka->sa.sa_flags = 0;
 
 
 
 484		sigemptyset(&ka->sa.sa_mask);
 485		ka++;
 486	}
 487}
 488
 489int unhandled_signal(struct task_struct *tsk, int sig)
 490{
 491	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 492	if (is_global_init(tsk))
 493		return 1;
 
 494	if (handler != SIG_IGN && handler != SIG_DFL)
 495		return 0;
 
 
 
 
 
 496	/* if ptraced, let the tracer determine */
 497	return !tsk->ptrace;
 498}
 499
 500/*
 501 * Notify the system that a driver wants to block all signals for this
 502 * process, and wants to be notified if any signals at all were to be
 503 * sent/acted upon.  If the notifier routine returns non-zero, then the
 504 * signal will be acted upon after all.  If the notifier routine returns 0,
 505 * then then signal will be blocked.  Only one block per process is
 506 * allowed.  priv is a pointer to private data that the notifier routine
 507 * can use to determine if the signal should be blocked or not.
 508 */
 509void
 510block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
 511{
 512	unsigned long flags;
 513
 514	spin_lock_irqsave(&current->sighand->siglock, flags);
 515	current->notifier_mask = mask;
 516	current->notifier_data = priv;
 517	current->notifier = notifier;
 518	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 519}
 520
 521/* Notify the system that blocking has ended. */
 522
 523void
 524unblock_all_signals(void)
 525{
 526	unsigned long flags;
 527
 528	spin_lock_irqsave(&current->sighand->siglock, flags);
 529	current->notifier = NULL;
 530	current->notifier_data = NULL;
 531	recalc_sigpending();
 532	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 533}
 534
 535static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 536{
 537	struct sigqueue *q, *first = NULL;
 538
 539	/*
 540	 * Collect the siginfo appropriate to this signal.  Check if
 541	 * there is another siginfo for the same signal.
 542	*/
 543	list_for_each_entry(q, &list->list, list) {
 544		if (q->info.si_signo == sig) {
 545			if (first)
 546				goto still_pending;
 547			first = q;
 548		}
 549	}
 550
 551	sigdelset(&list->signal, sig);
 552
 553	if (first) {
 554still_pending:
 555		list_del_init(&first->list);
 556		copy_siginfo(info, &first->info);
 
 
 
 
 
 
 557		__sigqueue_free(first);
 558	} else {
 559		/*
 560		 * Ok, it wasn't in the queue.  This must be
 561		 * a fast-pathed signal or we must have been
 562		 * out of queue space.  So zero out the info.
 563		 */
 
 564		info->si_signo = sig;
 565		info->si_errno = 0;
 566		info->si_code = SI_USER;
 567		info->si_pid = 0;
 568		info->si_uid = 0;
 569	}
 570}
 571
 572static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 573			siginfo_t *info)
 574{
 575	int sig = next_signal(pending, mask);
 576
 577	if (sig) {
 578		if (current->notifier) {
 579			if (sigismember(current->notifier_mask, sig)) {
 580				if (!(current->notifier)(current->notifier_data)) {
 581					clear_thread_flag(TIF_SIGPENDING);
 582					return 0;
 583				}
 584			}
 585		}
 586
 587		collect_signal(sig, pending, info);
 588	}
 589
 590	return sig;
 591}
 592
 593/*
 594 * Dequeue a signal and return the element to the caller, which is
 595 * expected to free it.
 596 *
 597 * All callers have to hold the siglock.
 598 */
 599int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 
 600{
 
 601	int signr;
 602
 603	/* We only dequeue private signals from ourselves, we don't let
 604	 * signalfd steal them
 605	 */
 606	signr = __dequeue_signal(&tsk->pending, mask, info);
 
 607	if (!signr) {
 
 608		signr = __dequeue_signal(&tsk->signal->shared_pending,
 609					 mask, info);
 
 610		/*
 611		 * itimer signal ?
 612		 *
 613		 * itimers are process shared and we restart periodic
 614		 * itimers in the signal delivery path to prevent DoS
 615		 * attacks in the high resolution timer case. This is
 616		 * compliant with the old way of self-restarting
 617		 * itimers, as the SIGALRM is a legacy signal and only
 618		 * queued once. Changing the restart behaviour to
 619		 * restart the timer in the signal dequeue path is
 620		 * reducing the timer noise on heavy loaded !highres
 621		 * systems too.
 622		 */
 623		if (unlikely(signr == SIGALRM)) {
 624			struct hrtimer *tmr = &tsk->signal->real_timer;
 625
 626			if (!hrtimer_is_queued(tmr) &&
 627			    tsk->signal->it_real_incr.tv64 != 0) {
 628				hrtimer_forward(tmr, tmr->base->get_time(),
 629						tsk->signal->it_real_incr);
 630				hrtimer_restart(tmr);
 631			}
 632		}
 
 633	}
 634
 635	recalc_sigpending();
 636	if (!signr)
 637		return 0;
 638
 639	if (unlikely(sig_kernel_stop(signr))) {
 640		/*
 641		 * Set a marker that we have dequeued a stop signal.  Our
 642		 * caller might release the siglock and then the pending
 643		 * stop signal it is about to process is no longer in the
 644		 * pending bitmasks, but must still be cleared by a SIGCONT
 645		 * (and overruled by a SIGKILL).  So those cases clear this
 646		 * shared flag after we've set it.  Note that this flag may
 647		 * remain set after the signal we return is ignored or
 648		 * handled.  That doesn't matter because its only purpose
 649		 * is to alert stop-signal processing code when another
 650		 * processor has come along and cleared the flag.
 651		 */
 652		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 653	}
 654	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 
 655		/*
 656		 * Release the siglock to ensure proper locking order
 657		 * of timer locks outside of siglocks.  Note, we leave
 658		 * irqs disabled here, since the posix-timers code is
 659		 * about to disable them again anyway.
 660		 */
 661		spin_unlock(&tsk->sighand->siglock);
 662		do_schedule_next_timer(info);
 663		spin_lock(&tsk->sighand->siglock);
 
 
 
 664	}
 
 665	return signr;
 666}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 667
 668/*
 669 * Tell a process that it has a new active signal..
 670 *
 671 * NOTE! we rely on the previous spin_lock to
 672 * lock interrupts for us! We can only be called with
 673 * "siglock" held, and the local interrupt must
 674 * have been disabled when that got acquired!
 675 *
 676 * No need to set need_resched since signal event passing
 677 * goes through ->blocked
 678 */
 679void signal_wake_up(struct task_struct *t, int resume)
 680{
 681	unsigned int mask;
 682
 683	set_tsk_thread_flag(t, TIF_SIGPENDING);
 684
 685	/*
 686	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
 687	 * case. We don't check t->state here because there is a race with it
 688	 * executing another processor and just now entering stopped state.
 689	 * By using wake_up_state, we ensure the process will wake up and
 690	 * handle its death signal.
 691	 */
 692	mask = TASK_INTERRUPTIBLE;
 693	if (resume)
 694		mask |= TASK_WAKEKILL;
 695	if (!wake_up_state(t, mask))
 696		kick_process(t);
 697}
 698
 699/*
 700 * Remove signals in mask from the pending set and queue.
 701 * Returns 1 if any signals were found.
 702 *
 703 * All callers must be holding the siglock.
 704 *
 705 * This version takes a sigset mask and looks at all signals,
 706 * not just those in the first mask word.
 707 */
 708static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
 709{
 710	struct sigqueue *q, *n;
 711	sigset_t m;
 712
 713	sigandsets(&m, mask, &s->signal);
 714	if (sigisemptyset(&m))
 715		return 0;
 716
 717	sigandnsets(&s->signal, &s->signal, mask);
 718	list_for_each_entry_safe(q, n, &s->list, list) {
 719		if (sigismember(mask, q->info.si_signo)) {
 720			list_del_init(&q->list);
 721			__sigqueue_free(q);
 722		}
 723	}
 724	return 1;
 725}
 726/*
 727 * Remove signals in mask from the pending set and queue.
 728 * Returns 1 if any signals were found.
 729 *
 730 * All callers must be holding the siglock.
 731 */
 732static int rm_from_queue(unsigned long mask, struct sigpending *s)
 733{
 734	struct sigqueue *q, *n;
 735
 736	if (!sigtestsetmask(&s->signal, mask))
 737		return 0;
 738
 739	sigdelsetmask(&s->signal, mask);
 740	list_for_each_entry_safe(q, n, &s->list, list) {
 741		if (q->info.si_signo < SIGRTMIN &&
 742		    (mask & sigmask(q->info.si_signo))) {
 743			list_del_init(&q->list);
 744			__sigqueue_free(q);
 745		}
 746	}
 747	return 1;
 748}
 749
 750static inline int is_si_special(const struct siginfo *info)
 751{
 752	return info <= SEND_SIG_FORCED;
 753}
 754
 755static inline bool si_fromuser(const struct siginfo *info)
 756{
 757	return info == SEND_SIG_NOINFO ||
 758		(!is_si_special(info) && SI_FROMUSER(info));
 759}
 760
 761/*
 762 * called with RCU read lock from check_kill_permission()
 763 */
 764static int kill_ok_by_cred(struct task_struct *t)
 765{
 766	const struct cred *cred = current_cred();
 767	const struct cred *tcred = __task_cred(t);
 768
 769	if (cred->user->user_ns == tcred->user->user_ns &&
 770	    (cred->euid == tcred->suid ||
 771	     cred->euid == tcred->uid ||
 772	     cred->uid  == tcred->suid ||
 773	     cred->uid  == tcred->uid))
 774		return 1;
 775
 776	if (ns_capable(tcred->user->user_ns, CAP_KILL))
 777		return 1;
 778
 779	return 0;
 780}
 781
 782/*
 783 * Bad permissions for sending the signal
 784 * - the caller must hold the RCU read lock
 785 */
 786static int check_kill_permission(int sig, struct siginfo *info,
 787				 struct task_struct *t)
 788{
 789	struct pid *sid;
 790	int error;
 791
 792	if (!valid_signal(sig))
 793		return -EINVAL;
 794
 795	if (!si_fromuser(info))
 796		return 0;
 797
 798	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 799	if (error)
 800		return error;
 801
 802	if (!same_thread_group(current, t) &&
 803	    !kill_ok_by_cred(t)) {
 804		switch (sig) {
 805		case SIGCONT:
 806			sid = task_session(t);
 807			/*
 808			 * We don't return the error if sid == NULL. The
 809			 * task was unhashed, the caller must notice this.
 810			 */
 811			if (!sid || sid == task_session(current))
 812				break;
 813		default:
 814			return -EPERM;
 815		}
 816	}
 817
 818	return security_task_kill(t, info, sig, 0);
 819}
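/*
 * A minimal illustration (userspace view, not from this file) of the
 * SIGCONT carve-out above: within the same session the uid checks are
 * bypassed for SIGCONT only.
 *
 *	kill(pid, SIGTERM);	// may fail with -EPERM across uids
 *	kill(pid, SIGCONT);	// permitted if the sender shares the session
 */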
 820
 821/**
 822 * ptrace_trap_notify - schedule trap to notify ptracer
 823 * @t: tracee wanting to notify tracer
 824 *
 825 * This function schedules a sticky ptrace trap which is cleared on the next
 826 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 827 * ptracer.
 828 *
 829 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 830 * ptracer is listening for events, tracee is woken up so that it can
 831 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 832 * eventually taken without returning to userland after the existing traps
 833 * are finished by PTRACE_CONT.
 834 *
 835 * CONTEXT:
 836 * Must be called with @t->sighand->siglock held.
 837 */
 838static void ptrace_trap_notify(struct task_struct *t)
 839{
 840	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 841	assert_spin_locked(&t->sighand->siglock);
 842
 843	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 844	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 845}
 846
 847/*
 848 * Handle magic process-wide effects of stop/continue signals. Unlike
 849 * the signal actions, these happen immediately at signal-generation
 850 * time regardless of blocking, ignoring, or handling.  This does the
 851 * actual continuing for SIGCONT, but not the actual stopping for stop
 852 * signals. The process stop is done as a signal action for SIG_DFL.
 853 *
 854 * Returns true if the signal should be actually delivered, otherwise
 855 * it should be dropped.
 856 */
 857static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 858{
 859	struct signal_struct *signal = p->signal;
 860	struct task_struct *t;
 861
 862	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
 863		/*
 864		 * The process is in the middle of dying, nothing to do.
 865		 */
 866	} else if (sig_kernel_stop(sig)) {
 867		/*
 868		 * This is a stop signal.  Remove SIGCONT from all queues.
 869		 */
 870		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
 871		t = p;
 872		do {
 873			rm_from_queue(sigmask(SIGCONT), &t->pending);
 874		} while_each_thread(p, t);
 875	} else if (sig == SIGCONT) {
 876		unsigned int why;
 877		/*
 878		 * Remove all stop signals from all queues, wake all threads.
 879		 */
 880		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
 881		t = p;
 882		do {
 883			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 884			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
 885			if (likely(!(t->ptrace & PT_SEIZED)))
 886				wake_up_state(t, __TASK_STOPPED);
 887			else
 888				ptrace_trap_notify(t);
 889		} while_each_thread(p, t);
 890
 891		/*
 892		 * Notify the parent with CLD_CONTINUED if we were stopped.
 893		 *
 894		 * If we were in the middle of a group stop, we pretend it
 895		 * was already finished, and then continued. Since SIGCHLD
 896		 * doesn't queue we report only CLD_STOPPED, as if the next
 897		 * CLD_CONTINUED was dropped.
 898		 */
 899		why = 0;
 900		if (signal->flags & SIGNAL_STOP_STOPPED)
 901			why |= SIGNAL_CLD_CONTINUED;
 902		else if (signal->group_stop_count)
 903			why |= SIGNAL_CLD_STOPPED;
 904
 905		if (why) {
 906			/*
 907			 * The first thread which returns from do_signal_stop()
 908			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 909			 * notify its parent. See get_signal_to_deliver().
 910			 */
 911			signal->flags = why | SIGNAL_STOP_CONTINUED;
 912			signal->group_stop_count = 0;
 913			signal->group_exit_code = 0;
 914		}
 915	}
 916
 917	return !sig_ignored(p, sig, from_ancestor_ns);
 918}
 919
 920/*
 921 * Test if P wants to take SIG.  After we've checked all threads with this,
 922 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 923 * blocking SIG were ruled out because they are not running and already
 924 * have pending signals.  Such threads will dequeue from the shared queue
 925 * as soon as they're available, so putting the signal on the shared queue
 926 * will be equivalent to sending it to one such thread.
 927 */
 928static inline int wants_signal(int sig, struct task_struct *p)
 929{
 930	if (sigismember(&p->blocked, sig))
 931		return 0;
 932	if (p->flags & PF_EXITING)
 933		return 0;
 934	if (sig == SIGKILL)
 935		return 1;
 936	if (task_is_stopped_or_traced(p))
 937		return 0;
 938	return task_curr(p) || !signal_pending(p);
 939}
 940
 941static void complete_signal(int sig, struct task_struct *p, int group)
 942{
 943	struct signal_struct *signal = p->signal;
 944	struct task_struct *t;
 945
 946	/*
 947	 * Now find a thread we can wake up to take the signal off the queue.
 948	 *
 949	 * If the main thread wants the signal, it gets first crack.
 950	 * Probably the least surprising to the average bear.
 951	 */
 952	if (wants_signal(sig, p))
 953		t = p;
 954	else if (!group || thread_group_empty(p))
 955		/*
 956		 * There is just one thread and it does not need to be woken.
 957		 * It will dequeue unblocked signals before it runs again.
 958		 */
 959		return;
 960	else {
 961		/*
 962		 * Otherwise try to find a suitable thread.
 963		 */
 964		t = signal->curr_target;
 965		while (!wants_signal(sig, t)) {
 966			t = next_thread(t);
 967			if (t == signal->curr_target)
 968				/*
 969				 * No thread needs to be woken.
 970				 * Any eligible threads will see
 971				 * the signal in the queue soon.
 972				 */
 973				return;
 974		}
 975		signal->curr_target = t;
 976	}
 977
 978	/*
 979	 * Found a killable thread.  If the signal will be fatal,
 980	 * then start taking the whole group down immediately.
 981	 */
 982	if (sig_fatal(p, sig) &&
 983	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
 984	    !sigismember(&t->real_blocked, sig) &&
 985	    (sig == SIGKILL || !t->ptrace)) {
 986		/*
 987		 * This signal will be fatal to the whole group.
 988		 */
 989		if (!sig_kernel_coredump(sig)) {
 990			/*
 991			 * Start a group exit and wake everybody up.
 992			 * This way we don't have other threads
 993			 * running and doing things after a slower
 994			 * thread has the fatal signal pending.
 995			 */
 996			signal->flags = SIGNAL_GROUP_EXIT;
 997			signal->group_exit_code = sig;
 998			signal->group_stop_count = 0;
 999			t = p;
1000			do {
1001				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1002				sigaddset(&t->pending.signal, SIGKILL);
1003				signal_wake_up(t, 1);
1004			} while_each_thread(p, t);
1005			return;
1006		}
1007	}
1008
1009	/*
1010	 * The signal is already in the shared-pending queue.
1011	 * Tell the chosen thread to wake up and dequeue it.
1012	 */
1013	signal_wake_up(t, sig == SIGKILL);
1014	return;
1015}
1016
1017static inline int legacy_queue(struct sigpending *signals, int sig)
1018{
1019	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1020}
1021
1022static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1023			int group, int from_ancestor_ns)
1024{
1025	struct sigpending *pending;
1026	struct sigqueue *q;
1027	int override_rlimit;
1028
1029	trace_signal_generate(sig, info, t);
1030
1031	assert_spin_locked(&t->sighand->siglock);
1032
1033	if (!prepare_signal(sig, t, from_ancestor_ns))
1034		return 0;
1035
1036	pending = group ? &t->signal->shared_pending : &t->pending;
1037	/*
1038	 * Short-circuit ignored signals and support queuing
1039	 * exactly one non-rt signal, so that we can get more
1040	 * detailed information about the cause of the signal.
1041	 */
1042	if (legacy_queue(pending, sig))
1043		return 0;
1044	/*
1045	 * fast-pathed signals for kernel-internal things like SIGSTOP
1046	 * or SIGKILL.
1047	 */
1048	if (info == SEND_SIG_FORCED)
1049		goto out_set;
1050
1051	/*
1052	 * Real-time signals must be queued if sent by sigqueue, or
1053	 * some other real-time mechanism.  It is implementation
1054	 * defined whether kill() does so.  We attempt to do so, on
1055	 * the principle of least surprise, but since kill is not
1056	 * allowed to fail with EAGAIN when low on memory we just
1057	 * make sure at least one signal gets delivered and don't
1058	 * pass on the info struct.
1059	 */
1060	if (sig < SIGRTMIN)
1061		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1062	else
1063		override_rlimit = 0;
1064
1065	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1066		override_rlimit);
1067	if (q) {
1068		list_add_tail(&q->list, &pending->list);
1069		switch ((unsigned long) info) {
1070		case (unsigned long) SEND_SIG_NOINFO:
1071			q->info.si_signo = sig;
1072			q->info.si_errno = 0;
1073			q->info.si_code = SI_USER;
1074			q->info.si_pid = task_tgid_nr_ns(current,
1075							task_active_pid_ns(t));
1076			q->info.si_uid = current_uid();
1077			break;
1078		case (unsigned long) SEND_SIG_PRIV:
1079			q->info.si_signo = sig;
1080			q->info.si_errno = 0;
1081			q->info.si_code = SI_KERNEL;
1082			q->info.si_pid = 0;
1083			q->info.si_uid = 0;
1084			break;
1085		default:
1086			copy_siginfo(&q->info, info);
1087			if (from_ancestor_ns)
1088				q->info.si_pid = 0;
1089			break;
1090		}
1091	} else if (!is_si_special(info)) {
1092		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1093			/*
1094			 * Queue overflow, abort.  We may abort if the
1095			 * signal was rt and sent by user using something
1096			 * other than kill().
1097			 */
1098			trace_signal_overflow_fail(sig, group, info);
1099			return -EAGAIN;
1100		} else {
1101			/*
1102			 * This is a silent loss of information.  We still
1103			 * send the signal, but the *info bits are lost.
1104			 */
1105			trace_signal_lose_info(sig, group, info);
1106		}
1107	}
1108
1109out_set:
1110	signalfd_notify(t, sig);
1111	sigaddset(&pending->signal, sig);
1112	complete_signal(sig, t, group);
1113	return 0;
1114}
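/*
 * A minimal illustration (userspace view, not from this file) of the
 * legacy_queue() short-circuit above: classic signals coalesce while
 * real-time signals queue.
 *
 *	kill(pid, SIGUSR1);		// becomes pending
 *	kill(pid, SIGUSR1);		// coalesced: still one SIGUSR1
 *
 *	union sigval v1, v2;
 *	sigqueue(pid, SIGRTMIN, v1);	// queued
 *	sigqueue(pid, SIGRTMIN, v2);	// queued as well: both delivered
 */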
1115
1116static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1117			int group)
1118{
1119	int from_ancestor_ns = 0;
1120
1121#ifdef CONFIG_PID_NS
1122	from_ancestor_ns = si_fromuser(info) &&
1123			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1124#endif
1125
1126	return __send_signal(sig, info, t, group, from_ancestor_ns);
1127}
1128
1129static void print_fatal_signal(struct pt_regs *regs, int signr)
1130{
1131	printk("%s/%d: potentially unexpected fatal signal %d.\n",
1132		current->comm, task_pid_nr(current), signr);
1133
1134#if defined(__i386__) && !defined(__arch_um__)
1135	printk("code at %08lx: ", regs->ip);
1136	{
1137		int i;
1138		for (i = 0; i < 16; i++) {
1139			unsigned char insn;
1140
1141			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1142				break;
1143			printk("%02x ", insn);
1144		}
1145	}
1146#endif
1147	printk("\n");
1148	preempt_disable();
1149	show_regs(regs);
1150	preempt_enable();
1151}
1152
1153static int __init setup_print_fatal_signals(char *str)
1154{
1155	get_option (&str, &print_fatal_signals);
1156
1157	return 1;
1158}
1159
1160__setup("print-fatal-signals=", setup_print_fatal_signals);
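/* For example, booting with "print-fatal-signals=1" enables the dump above. */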
1161
1162int
1163__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1164{
1165	return send_signal(sig, info, p, 1);
1166}
1167
1168static int
1169specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1170{
1171	return send_signal(sig, info, t, 0);
1172}
1173
1174int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1175			bool group)
1176{
1177	unsigned long flags;
1178	int ret = -ESRCH;
1179
1180	if (lock_task_sighand(p, &flags)) {
1181		ret = send_signal(sig, info, p, group);
1182		unlock_task_sighand(p, &flags);
1183	}
1184
1185	return ret;
1186}
1187
1188/*
1189 * Force a signal that the process can't ignore: if necessary
1190 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1191 *
1192 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1193 * since we do not want to have a signal handler that was blocked
1194 * be invoked when user space had explicitly blocked it.
1195 *
1196 * We don't want to have recursive SIGSEGV's etc, for example,
1197 * that is why we also clear SIGNAL_UNKILLABLE.
1198 */
1199int
1200force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1201{
1202	unsigned long flags;
1203	int ret, blocked, ignored;
1204	struct k_sigaction *action;
1205
1206	spin_lock_irqsave(&t->sighand->siglock, flags);
1207	action = &t->sighand->action[sig-1];
1208	ignored = action->sa.sa_handler == SIG_IGN;
1209	blocked = sigismember(&t->blocked, sig);
1210	if (blocked || ignored) {
1211		action->sa.sa_handler = SIG_DFL;
1212		if (blocked) {
1213			sigdelset(&t->blocked, sig);
1214			recalc_sigpending_and_wake(t);
1215		}
1216	}
1217	if (action->sa.sa_handler == SIG_DFL)
1218		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1219	ret = specific_send_sig_info(sig, info, t);
1220	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1221
1222	return ret;
1223}
1224
1225/*
1226 * Nuke all other threads in the group.
1227 */
1228int zap_other_threads(struct task_struct *p)
1229{
1230	struct task_struct *t = p;
1231	int count = 0;
1232
1233	p->signal->group_stop_count = 0;
1234
1235	while_each_thread(p, t) {
1236		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1237		count++;
1238
1239		/* Don't bother with already dead threads */
1240		if (t->exit_state)
1241			continue;
1242		sigaddset(&t->pending.signal, SIGKILL);
1243		signal_wake_up(t, 1);
1244	}
1245
1246	return count;
1247}
1248
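/*
 * tsk->sighand is not stable: exec via de_thread() can switch it.  The
 * loop below therefore locks the candidate sighand and re-checks that
 * it is still the task's sighand, retrying otherwise.  The RCU read
 * lock keeps the old sighand_struct from being freed while we test it.
 */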
1249struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1250					   unsigned long *flags)
1251{
1252	struct sighand_struct *sighand;
1253
1254	for (;;) {
1255		local_irq_save(*flags);
1256		rcu_read_lock();
1257		sighand = rcu_dereference(tsk->sighand);
1258		if (unlikely(sighand == NULL)) {
1259			rcu_read_unlock();
1260			local_irq_restore(*flags);
1261			break;
1262		}
1263
1264		spin_lock(&sighand->siglock);
1265		if (likely(sighand == tsk->sighand)) {
1266			rcu_read_unlock();
1267			break;
1268		}
1269		spin_unlock(&sighand->siglock);
1270		rcu_read_unlock();
1271		local_irq_restore(*flags);
1272	}
1273
1274	return sighand;
1275}
1276
1277/*
1278 * send signal info to all the members of a group
1279 */
1280int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1281{
1282	int ret;
1283
1284	rcu_read_lock();
1285	ret = check_kill_permission(sig, info, p);
1286	rcu_read_unlock();
1287
1288	if (!ret && sig)
1289		ret = do_send_sig_info(sig, info, p, true);
1290
1291	return ret;
1292}
1293
1294/*
1295 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1296 * control characters do (^C, ^Z etc)
1297 * - the caller must hold at least a readlock on tasklist_lock
1298 */
1299int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1300{
1301	struct task_struct *p = NULL;
1302	int retval, success;
1303
1304	success = 0;
1305	retval = -ESRCH;
1306	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1307		int err = group_send_sig_info(sig, info, p);
1308		success |= !err;
1309		retval = err;
1310	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1311	return success ? 0 : retval;
1312}
1313
1314int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1315{
1316	int error = -ESRCH;
1317	struct task_struct *p;
1318
1319	rcu_read_lock();
1320retry:
1321	p = pid_task(pid, PIDTYPE_PID);
1322	if (p) {
1323		error = group_send_sig_info(sig, info, p);
1324		if (unlikely(error == -ESRCH))
1325			/*
1326			 * The task was unhashed in between, try again.
1327			 * If it is dead, pid_task() will return NULL,
1328			 * if we race with de_thread() it will find the
1329			 * new leader.
1330			 */
1331			goto retry;
1332	}
1333	rcu_read_unlock();
1334
1335	return error;
1336}
1337
1338int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1339{
1340	int error;
1341	rcu_read_lock();
1342	error = kill_pid_info(sig, info, find_vpid(pid));
1343	rcu_read_unlock();
1344	return error;
1345}
1346
1347/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1348int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1349		      uid_t uid, uid_t euid, u32 secid)
1350{
1351	int ret = -EINVAL;
1352	struct task_struct *p;
1353	const struct cred *pcred;
1354	unsigned long flags;
1355
1356	if (!valid_signal(sig))
1357		return ret;
1358
1359	rcu_read_lock();
1360	p = pid_task(pid, PIDTYPE_PID);
1361	if (!p) {
1362		ret = -ESRCH;
1363		goto out_unlock;
1364	}
1365	pcred = __task_cred(p);
1366	if (si_fromuser(info) &&
1367	    euid != pcred->suid && euid != pcred->uid &&
1368	    uid  != pcred->suid && uid  != pcred->uid) {
1369		ret = -EPERM;
1370		goto out_unlock;
1371	}
1372	ret = security_task_kill(p, info, sig, secid);
1373	if (ret)
1374		goto out_unlock;
1375
1376	if (sig) {
1377		if (lock_task_sighand(p, &flags)) {
1378			ret = __send_signal(sig, info, p, 1, 0);
1379			unlock_task_sighand(p, &flags);
1380		} else
1381			ret = -ESRCH;
1382	}
1383out_unlock:
1384	rcu_read_unlock();
1385	return ret;
1386}
1387EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1388
1389/*
1390 * kill_something_info() interprets pid in interesting ways just like kill(2).
1391 *
1392 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1393 * is probably wrong.  Should make it like BSD or SYSV.
1394 */
1395
1396static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1397{
1398	int ret;
1399
1400	if (pid > 0) {
1401		rcu_read_lock();
1402		ret = kill_pid_info(sig, info, find_vpid(pid));
1403		rcu_read_unlock();
1404		return ret;
1405	}
1406
1407	read_lock(&tasklist_lock);
1408	if (pid != -1) {
1409		ret = __kill_pgrp_info(sig, info,
1410				pid ? find_vpid(-pid) : task_pgrp(current));
1411	} else {
1412		int retval = 0, count = 0;
1413		struct task_struct * p;
1414
1415		for_each_process(p) {
1416			if (task_pid_vnr(p) > 1 &&
1417					!same_thread_group(p, current)) {
1418				int err = group_send_sig_info(sig, info, p);
1419				++count;
1420				if (err != -EPERM)
1421					retval = err;
1422			}
1423		}
1424		ret = count ? retval : -ESRCH;
1425	}
1426	read_unlock(&tasklist_lock);
1427
1428	return ret;
1429}
1430
1431/*
1432 * These are for backward compatibility with the rest of the kernel source.
1433 */
1434
1435int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1436{
1437	/*
1438	 * Make sure legacy kernel users don't send in bad values
1439	 * (normal paths check this in check_kill_permission).
1440	 */
1441	if (!valid_signal(sig))
1442		return -EINVAL;
1443
1444	return do_send_sig_info(sig, info, p, false);
1445}
1446
1447#define __si_special(priv) \
1448	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1449
1450int
1451send_sig(int sig, struct task_struct *p, int priv)
1452{
1453	return send_sig_info(sig, __si_special(priv), p);
1454}
1455
1456void
1457force_sig(int sig, struct task_struct *p)
1458{
1459	force_sig_info(sig, SEND_SIG_PRIV, p);
1460}
1461
1462/*
1463 * When things go south during signal handling, we
1464 * will force a SIGSEGV. And if the signal that caused
1465 * the problem was already a SIGSEGV, we'll want to
1466 * make sure we don't even try to deliver the signal.
1467 */
1468int
1469force_sigsegv(int sig, struct task_struct *p)
1470{
1471	if (sig == SIGSEGV) {
1472		unsigned long flags;
1473		spin_lock_irqsave(&p->sighand->siglock, flags);
1474		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1475		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1476	}
1477	force_sig(SIGSEGV, p);
1478	return 0;
1479}
1480
1481int kill_pgrp(struct pid *pid, int sig, int priv)
1482{
1483	int ret;
1484
1485	read_lock(&tasklist_lock);
1486	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1487	read_unlock(&tasklist_lock);
1488
1489	return ret;
1490}
1491EXPORT_SYMBOL(kill_pgrp);
1492
1493int kill_pid(struct pid *pid, int sig, int priv)
1494{
1495	return kill_pid_info(sig, __si_special(priv), pid);
1496}
1497EXPORT_SYMBOL(kill_pid);
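/*
 * A minimal sketch (illustrative, not from this file), assuming a
 * driver that stashed a counted struct pid reference at open() time:
 *
 *	pid = get_task_pid(current, PIDTYPE_PID);	// at open()
 *	...
 *	kill_pid(pid, SIGIO, 1);	// priv=1: kernel-internal siginfo
 *	...
 *	put_pid(pid);			// at release()
 */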
1498
1499/*
1500 * These functions support sending signals using preallocated sigqueue
1501 * structures.  This is needed "because realtime applications cannot
1502 * afford to lose notifications of asynchronous events, like timer
1503 * expirations or I/O completions".  In the case of POSIX Timers
1504 * we allocate the sigqueue structure from the timer_create.  If this
1505 * allocation fails we are able to report the failure to the application
1506 * with an EAGAIN error.
1507 */
1508struct sigqueue *sigqueue_alloc(void)
1509{
1510	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1511
1512	if (q)
1513		q->flags |= SIGQUEUE_PREALLOC;
1514
1515	return q;
1516}
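/*
 * A minimal sketch of the preallocation pattern described above
 * (illustrative; the real callers live in the posix-timers code):
 *
 *	q = sigqueue_alloc();		// at creation time: may fail
 *	if (!q)
 *		return -EAGAIN;		// report the failure up front
 *	...
 *	send_sigqueue(q, task, group);	// at expiry: no allocation needed
 *	...
 *	sigqueue_free(q);		// at deletion time
 */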
1517
1518void sigqueue_free(struct sigqueue *q)
1519{
1520	unsigned long flags;
1521	spinlock_t *lock = &current->sighand->siglock;
1522
1523	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1524	/*
1525	 * We must hold ->siglock while testing q->list
1526	 * to serialize with collect_signal() or with
1527	 * __exit_signal()->flush_sigqueue().
1528	 */
1529	spin_lock_irqsave(lock, flags);
1530	q->flags &= ~SIGQUEUE_PREALLOC;
1531	/*
1532	 * If it is queued it will be freed when dequeued,
1533	 * like the "regular" sigqueue.
1534	 */
1535	if (!list_empty(&q->list))
1536		q = NULL;
1537	spin_unlock_irqrestore(lock, flags);
1538
1539	if (q)
1540		__sigqueue_free(q);
1541}
1542
1543int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1544{
1545	int sig = q->info.si_signo;
1546	struct sigpending *pending;
1547	unsigned long flags;
1548	int ret;
1549
1550	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1551
1552	ret = -1;
1553	if (!likely(lock_task_sighand(t, &flags)))
1554		goto ret;
1555
1556	ret = 1; /* the signal is ignored */
1557	if (!prepare_signal(sig, t, 0))
1558		goto out;
1559
1560	ret = 0;
1561	if (unlikely(!list_empty(&q->list))) {
1562		/*
1563		 * If an SI_TIMER entry is already queued, just increment
1564		 * the overrun count.
1565		 */
1566		BUG_ON(q->info.si_code != SI_TIMER);
1567		q->info.si_overrun++;
1568		goto out;
1569	}
1570	q->info.si_overrun = 0;
1571
1572	signalfd_notify(t, sig);
1573	pending = group ? &t->signal->shared_pending : &t->pending;
1574	list_add_tail(&q->list, &pending->list);
1575	sigaddset(&pending->signal, sig);
1576	complete_signal(sig, t, group);
1577out:
1578	unlock_task_sighand(t, &flags);
1579ret:
1580	return ret;
1581}
1582
1583/*
1584 * Let a parent know about the death of a child.
1585 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1586 *
1587 * Returns true if our parent ignored us and so we've switched to
1588 * self-reaping.
1589 */
1590bool do_notify_parent(struct task_struct *tsk, int sig)
1591{
1592	struct siginfo info;
1593	unsigned long flags;
1594	struct sighand_struct *psig;
1595	bool autoreap = false;
1596
1597	BUG_ON(sig == -1);
1598
1599 	/* do_notify_parent_cldstop should have been called instead.  */
1600 	BUG_ON(task_is_stopped_or_traced(tsk));
1601
1602	BUG_ON(!tsk->ptrace &&
1603	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1604
1605	info.si_signo = sig;
1606	info.si_errno = 0;
1607	/*
1608	 * We are under tasklist_lock here so our parent is tied to
1609	 * us and cannot exit and release its namespace.
1610	 *
1611	 * The only thing it can do is switch its nsproxy with sys_unshare,
1612	 * but unsharing pid namespaces is not allowed, so we'll always
1613	 * see the relevant namespace.
1614	 *
1615	 * write_lock() currently calls preempt_disable() which is the
1616	 * same as rcu_read_lock(), but according to Oleg it is not
1617	 * correct to rely on this.
1618	 */
1619	rcu_read_lock();
1620	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1621	info.si_uid = __task_cred(tsk)->uid;
1622	rcu_read_unlock();
1623
1624	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1625				tsk->signal->utime));
1626	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1627				tsk->signal->stime));
1628
1629	info.si_status = tsk->exit_code & 0x7f;
1630	if (tsk->exit_code & 0x80)
1631		info.si_code = CLD_DUMPED;
1632	else if (tsk->exit_code & 0x7f)
1633		info.si_code = CLD_KILLED;
1634	else {
1635		info.si_code = CLD_EXITED;
1636		info.si_status = tsk->exit_code >> 8;
1637	}
1638
1639	psig = tsk->parent->sighand;
1640	spin_lock_irqsave(&psig->siglock, flags);
1641	if (!tsk->ptrace && sig == SIGCHLD &&
1642	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1643	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1644		/*
1645		 * We are exiting and our parent doesn't care.  POSIX.1
1646		 * defines special semantics for setting SIGCHLD to SIG_IGN
1647		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1648		 * automatically and not left for our parent's wait4 call.
1649		 * Rather than having the parent do it as a magic kind of
1650		 * signal handler, we just set this to tell do_exit that we
1651		 * can be cleaned up without becoming a zombie.  Note that
1652		 * we still call __wake_up_parent in this case, because a
1653		 * blocked sys_wait4 might now return -ECHILD.
1654		 *
1655		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1656		 * is implementation-defined: we do (if you don't want
1657		 * it, just use SIG_IGN instead).
1658		 */
1659		autoreap = true;
1660		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1661			sig = 0;
1662	}
1663	if (valid_signal(sig) && sig)
1664		__group_send_sig_info(sig, &info, tsk->parent);
1665	__wake_up_parent(tsk, tsk->parent);
1666	spin_unlock_irqrestore(&psig->siglock, flags);
1667
1668	return autoreap;
1669}
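/*
 * The exit_code decoding above mirrors the userspace wait(2) status
 * macros.  For example, a child killed by SIGSEGV with a core dump has
 * exit_code 0x8b, so the parent sees si_code == CLD_DUMPED and
 * si_status == SIGSEGV.
 */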
1670
1671/**
1672 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1673 * @tsk: task reporting the state change
1674 * @for_ptracer: the notification is for ptracer
1675 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1676 *
1677 * Notify @tsk's parent that the stopped/continued state has changed.  If
1678 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1679 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1680 *
1681 * CONTEXT:
1682 * Must be called with tasklist_lock at least read locked.
1683 */
1684static void do_notify_parent_cldstop(struct task_struct *tsk,
1685				     bool for_ptracer, int why)
1686{
1687	struct siginfo info;
1688	unsigned long flags;
1689	struct task_struct *parent;
1690	struct sighand_struct *sighand;
1691
1692	if (for_ptracer) {
1693		parent = tsk->parent;
1694	} else {
1695		tsk = tsk->group_leader;
1696		parent = tsk->real_parent;
1697	}
1698
1699	info.si_signo = SIGCHLD;
1700	info.si_errno = 0;
1701	/*
1702	 * see comment in do_notify_parent() about the following 4 lines
1703	 */
1704	rcu_read_lock();
1705	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1706	info.si_uid = __task_cred(tsk)->uid;
1707	rcu_read_unlock();
1708
1709	info.si_utime = cputime_to_clock_t(tsk->utime);
1710	info.si_stime = cputime_to_clock_t(tsk->stime);
1711
1712 	info.si_code = why;
1713 	switch (why) {
1714 	case CLD_CONTINUED:
1715 		info.si_status = SIGCONT;
1716 		break;
1717 	case CLD_STOPPED:
1718 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1719 		break;
1720 	case CLD_TRAPPED:
1721 		info.si_status = tsk->exit_code & 0x7f;
1722 		break;
1723 	default:
1724 		BUG();
1725 	}
1726
1727	sighand = parent->sighand;
1728	spin_lock_irqsave(&sighand->siglock, flags);
1729	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1730	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1731		__group_send_sig_info(SIGCHLD, &info, parent);
1732	/*
1733	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1734	 */
1735	__wake_up_parent(tsk, parent);
1736	spin_unlock_irqrestore(&sighand->siglock, flags);
1737}
1738
1739static inline int may_ptrace_stop(void)
1740{
1741	if (!likely(current->ptrace))
1742		return 0;
1743	/*
1744	 * Are we in the middle of do_coredump?
1745	 * If so, and our tracer is also part of the coredump, stopping
1746	 * is a deadlock situation and pointless because our tracer
1747	 * is dead, so don't allow us to stop.
1748	 * If SIGKILL was already sent before the caller unlocked
1749	 * ->siglock we must see ->core_state != NULL. Otherwise it
1750	 * is safe to enter schedule().
1751	 */
1752	if (unlikely(current->mm->core_state) &&
1753	    unlikely(current->mm == current->parent->mm))
1754		return 0;
1755
1756	return 1;
1757}
1758
1759/*
1760 * Return non-zero if there is a SIGKILL that should be waking us up.
1761 * Called with the siglock held.
1762 */
1763static int sigkill_pending(struct task_struct *tsk)
1764{
1765	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1766		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1767}
1768
1769/*
1770 * This must be called with current->sighand->siglock held.
1771 *
1772 * This should be the path for all ptrace stops.
1773 * We always set current->last_siginfo while stopped here.
1774 * That makes it a way to test a stopped process for
1775 * being ptrace-stopped vs being job-control-stopped.
1776 *
1777 * If we actually decide not to stop at all because the tracer
1778 * is gone, we keep current->exit_code unless clear_code.
1779 */
1780static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1781	__releases(&current->sighand->siglock)
1782	__acquires(&current->sighand->siglock)
1783{
1784	bool gstop_done = false;
1785
1786	if (arch_ptrace_stop_needed(exit_code, info)) {
1787		/*
1788		 * The arch code has something special to do before a
1789		 * ptrace stop.  This is allowed to block, e.g. for faults
1790		 * on user stack pages.  We can't keep the siglock while
1791		 * calling arch_ptrace_stop, so we must release it now.
1792		 * To preserve proper semantics, we must do this before
1793		 * any signal bookkeeping like checking group_stop_count.
1794		 * Meanwhile, a SIGKILL could come in before we retake the
1795		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1796		 * So after regaining the lock, we must check for SIGKILL.
1797		 */
1798		spin_unlock_irq(&current->sighand->siglock);
1799		arch_ptrace_stop(exit_code, info);
1800		spin_lock_irq(&current->sighand->siglock);
1801		if (sigkill_pending(current))
1802			return;
1803	}
1804
1805	/*
1806	 * We're committing to trapping.  TRACED should be visible before
1807	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1808	 * Also, transition to TRACED and updates to ->jobctl should be
1809	 * atomic with respect to siglock and should be done after the arch
1810	 * hook as siglock is released and regrabbed across it.
1811	 */
1812	set_current_state(TASK_TRACED);
1813
1814	current->last_siginfo = info;
1815	current->exit_code = exit_code;
1816
1817	/*
1818	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1819	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1820	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1821	 * could be clear now.  We act as if SIGCONT is received after
1822	 * TASK_TRACED is entered - ignore it.
1823	 */
1824	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1825		gstop_done = task_participate_group_stop(current);
1826
1827	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1828	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1829	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1830		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1831
1832	/* entering a trap, clear TRAPPING */
1833	task_clear_jobctl_trapping(current);
1834
1835	spin_unlock_irq(&current->sighand->siglock);
1836	read_lock(&tasklist_lock);
1837	if (may_ptrace_stop()) {
1838		/*
1839		 * Notify parents of the stop.
1840		 *
1841		 * While ptraced, there are two parents - the ptracer and
1842		 * the real_parent of the group_leader.  The ptracer should
1843		 * know about every stop while the real parent is only
1844		 * interested in the completion of group stop.  The states
1845		 * for the two don't interact with each other.  Notify
1846		 * separately unless they're gonna be duplicates.
1847		 */
1848		do_notify_parent_cldstop(current, true, why);
1849		if (gstop_done && ptrace_reparented(current))
1850			do_notify_parent_cldstop(current, false, why);
1851
1852		/*
1853		 * Don't want to allow preemption here, because
1854		 * sys_ptrace() needs this task to be inactive.
1855		 *
1856		 * XXX: implement read_unlock_no_resched().
1857		 */
1858		preempt_disable();
1859		read_unlock(&tasklist_lock);
1860		preempt_enable_no_resched();
1861		schedule();
1862	} else {
1863		/*
1864		 * By the time we got the lock, our tracer went away.
1865		 * Don't drop the lock yet, another tracer may come.
1866		 *
1867		 * If @gstop_done, the ptracer went away between group stop
1868		 * completion and here.  During detach, it would have set
1869		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1870		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1871		 * the real parent of the group stop completion is enough.
1872		 */
1873		if (gstop_done)
1874			do_notify_parent_cldstop(current, false, why);
1875
1876		__set_current_state(TASK_RUNNING);
1877		if (clear_code)
1878			current->exit_code = 0;
1879		read_unlock(&tasklist_lock);
1880	}
1881
1882	/*
1883	 * While in TASK_TRACED, we were considered "frozen enough".
1884	 * Now that we woke up, it's crucial if we're supposed to be
1885	 * frozen that we freeze now before running anything substantial.
1886	 */
1887	try_to_freeze();
1888
1889	/*
1890	 * We are back.  Now reacquire the siglock before touching
1891	 * last_siginfo, so that we are sure to have synchronized with
1892	 * any signal-sending on another CPU that wants to examine it.
1893	 */
1894	spin_lock_irq(&current->sighand->siglock);
1895	current->last_siginfo = NULL;
1896
1897	/* LISTENING can be set only during STOP traps, clear it */
1898	current->jobctl &= ~JOBCTL_LISTENING;
1899
1900	/*
1901	 * Queued signals ignored us while we were stopped for tracing.
1902	 * So check for any that we should take before resuming user mode.
1903	 * This sets TIF_SIGPENDING, but never clears it.
1904	 */
1905	recalc_sigpending_tsk(current);
1906}
1907
1908static void ptrace_do_notify(int signr, int exit_code, int why)
1909{
1910	siginfo_t info;
1911
1912	memset(&info, 0, sizeof info);
1913	info.si_signo = signr;
1914	info.si_code = exit_code;
1915	info.si_pid = task_pid_vnr(current);
1916	info.si_uid = current_uid();
1917
1918	/* Let the debugger run.  */
1919	ptrace_stop(exit_code, why, 1, &info);
1920}
1921
1922void ptrace_notify(int exit_code)
1923{
1924	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1925
1926	spin_lock_irq(&current->sighand->siglock);
1927	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1928	spin_unlock_irq(&current->sighand->siglock);
1929}
1930
1931/**
1932 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1933 * @signr: signr causing group stop if initiating
1934 *
1935 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1936 * and participate in it.  If already set, participate in the existing
1937 * group stop.  If participated in a group stop (and thus slept), %true is
1938 * returned with siglock released.
1939 *
1940 * If ptraced, this function doesn't handle stop itself.  Instead,
1941 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1942 * untouched.  The caller must ensure that INTERRUPT trap handling takes
1943 * places afterwards.
1944 * place afterwards.
1945 * CONTEXT:
1946 * Must be called with @current->sighand->siglock held, which is released
1947 * on %true return.
1948 *
1949 * RETURNS:
1950 * %false if group stop is already cancelled or ptrace trap is scheduled.
1951 * %true if participated in group stop.
1952 */
1953static bool do_signal_stop(int signr)
1954	__releases(&current->sighand->siglock)
1955{
1956	struct signal_struct *sig = current->signal;
1957
1958	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1959		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1960		struct task_struct *t;
1961
1962		/* signr will be recorded in task->jobctl for retries */
1963		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1964
1965		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1966		    unlikely(signal_group_exit(sig)))
1967			return false;
1968		/*
1969		 * There is no group stop already in progress.  We must
1970		 * initiate one now.
1971		 *
1972		 * While ptraced, a task may be resumed while group stop is
1973		 * still in effect and then receive a stop signal and
1974		 * initiate another group stop.  This deviates from the
1975		 * usual behavior as two consecutive stop signals can't
1976		 * cause two group stops when !ptraced.  That is why we
1977		 * also check !task_is_stopped(t) below.
1978		 *
1979		 * The condition can be distinguished by testing whether
1980		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
1981		 * group_exit_code in such case.
1982		 *
1983		 * This is not necessary for SIGNAL_STOP_CONTINUED because
1984		 * an intervening stop signal is required to cause two
1985		 * continued events regardless of ptrace.
1986		 */
1987		if (!(sig->flags & SIGNAL_STOP_STOPPED))
1988			sig->group_exit_code = signr;
1989		else
1990			WARN_ON_ONCE(!current->ptrace);
1991
1992		sig->group_stop_count = 0;
1993
1994		if (task_set_jobctl_pending(current, signr | gstop))
1995			sig->group_stop_count++;
1996
1997		for (t = next_thread(current); t != current;
1998		     t = next_thread(t)) {
1999			/*
2000			 * Setting state to TASK_STOPPED for a group
2001			 * stop is always done with the siglock held,
2002			 * so this check has no races.
2003			 */
2004			if (!task_is_stopped(t) &&
2005			    task_set_jobctl_pending(t, signr | gstop)) {
2006				sig->group_stop_count++;
2007				if (likely(!(t->ptrace & PT_SEIZED)))
2008					signal_wake_up(t, 0);
2009				else
2010					ptrace_trap_notify(t);
2011			}
2012		}
2013	}
2014
2015	if (likely(!current->ptrace)) {
2016		int notify = 0;
2017
2018		/*
2019		 * If there are no other threads in the group, or if there
2020		 * is a group stop in progress and we are the last to stop,
2021		 * report to the parent.
2022		 */
2023		if (task_participate_group_stop(current))
2024			notify = CLD_STOPPED;
2025
2026		__set_current_state(TASK_STOPPED);
2027		spin_unlock_irq(&current->sighand->siglock);
2028
2029		/*
2030		 * Notify the parent of the group stop completion.  Because
2031		 * we're not holding either the siglock or tasklist_lock
2032		 * here, ptracer may attach in between; however, this is for
2033		 * group stop and should always be delivered to the real
2034		 * parent of the group leader.  The new ptracer will get
2035		 * its notification when this task transitions into
2036		 * TASK_TRACED.
2037		 */
2038		if (notify) {
2039			read_lock(&tasklist_lock);
2040			do_notify_parent_cldstop(current, false, notify);
2041			read_unlock(&tasklist_lock);
2042		}
2043
2044		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2045		schedule();
2046		return true;
2047	} else {
2048		/*
2049		 * While ptraced, group stop is handled by STOP trap.
2050		 * Schedule it and let the caller deal with it.
2051		 */
2052		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2053		return false;
2054	}
2055}
2056
2057/**
2058 * do_jobctl_trap - take care of ptrace jobctl traps
2059 *
2060 * When PT_SEIZED, it's used for both group stop and explicit
2061 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2062 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2063 * the stop signal; otherwise, %SIGTRAP.
2064 *
2065 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2066 * number as exit_code and no siginfo.
2067 *
2068 * CONTEXT:
2069 * Must be called with @current->sighand->siglock held, which may be
2070 * released and re-acquired before returning with intervening sleep.
2071 */
2072static void do_jobctl_trap(void)
2073{
2074	struct signal_struct *signal = current->signal;
2075	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2076
2077	if (current->ptrace & PT_SEIZED) {
2078		if (!signal->group_stop_count &&
2079		    !(signal->flags & SIGNAL_STOP_STOPPED))
2080			signr = SIGTRAP;
2081		WARN_ON_ONCE(!signr);
2082		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2083				 CLD_STOPPED);
2084	} else {
2085		WARN_ON_ONCE(!signr);
2086		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2087		current->exit_code = 0;
2088	}
2089}
2090
2091static int ptrace_signal(int signr, siginfo_t *info,
2092			 struct pt_regs *regs, void *cookie)
2093{
2094	ptrace_signal_deliver(regs, cookie);
2095	/*
2096	 * We do not check sig_kernel_stop(signr) but set this marker
2097	 * unconditionally because we do not know whether debugger will
2098	 * change signr. This flag has no meaning unless we are going
2099	 * to stop after return from ptrace_stop(). In this case it will
2100	 * be checked in do_signal_stop(), we should only stop if it was
2101	 * not cleared by SIGCONT while we were sleeping. See also the
2102	 * comment in dequeue_signal().
2103	 */
2104	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2105	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2106
2107	/* We're back.  Did the debugger cancel the sig?  */
2108	signr = current->exit_code;
2109	if (signr == 0)
2110		return signr;
2111
2112	current->exit_code = 0;
2113
2114	/*
2115	 * Update the siginfo structure if the signal has
2116	 * changed.  If the debugger wanted something
2117	 * specific in the siginfo structure then it should
2118	 * have updated *info via PTRACE_SETSIGINFO.
2119	 */
2120	if (signr != info->si_signo) {
2121		info->si_signo = signr;
2122		info->si_errno = 0;
2123		info->si_code = SI_USER;
2124		info->si_pid = task_pid_vnr(current->parent);
2125		info->si_uid = task_uid(current->parent);
2126	}
2127
2128	/* If the (new) signal is now blocked, requeue it.  */
2129	if (sigismember(&current->blocked, signr)) {
2130		specific_send_sig_info(signr, info, current);
2131		signr = 0;
2132	}
2133
2134	return signr;
2135}
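/*
 * A minimal sketch (userspace view, not from this file) of how the
 * ->exit_code re-read above lets a debugger replace or cancel the
 * signal it intercepted:
 *
 *	waitpid(pid, &status, 0);		// tracee sits in ptrace_stop()
 *	ptrace(PTRACE_CONT, pid, 0, SIGUSR1);	// deliver SIGUSR1 instead
 *	// ...or pass 0 as the last argument to suppress the signal
 */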
2136
2137int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2138			  struct pt_regs *regs, void *cookie)
2139{
2140	struct sighand_struct *sighand = current->sighand;
2141	struct signal_struct *signal = current->signal;
2142	int signr;
2143
2144relock:
2145	/*
2146	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
2147	 * While in TASK_STOPPED, we were considered "frozen enough".
2148	 * Now that we woke up, it's crucial if we're supposed to be
2149	 * frozen that we freeze now before running anything substantial.
2150	 */
2151	try_to_freeze();
2152
2153	spin_lock_irq(&sighand->siglock);
2154	/*
2155	 * Every stopped thread goes here after wakeup. Check to see if
2156	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2157	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2158	 */
2159	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2160		int why;
2161
2162		if (signal->flags & SIGNAL_CLD_CONTINUED)
2163			why = CLD_CONTINUED;
2164		else
2165			why = CLD_STOPPED;
2166
2167		signal->flags &= ~SIGNAL_CLD_MASK;
2168
2169		spin_unlock_irq(&sighand->siglock);
2170
2171		/*
2172		 * Notify the parent that we're continuing.  This event is
2173		 * always per-process and doesn't make a whole lot of sense
2174		 * for ptracers, who shouldn't consume the state via
2175		 * wait(2) either, but, for backward compatibility, notify
2176		 * the ptracer of the group leader too unless it's gonna be
2177		 * a duplicate.
2178		 */
2179		read_lock(&tasklist_lock);
2180		do_notify_parent_cldstop(current, false, why);
2181
2182		if (ptrace_reparented(current->group_leader))
2183			do_notify_parent_cldstop(current->group_leader,
2184						true, why);
2185		read_unlock(&tasklist_lock);
2186
2187		goto relock;
2188	}
2189
2190	for (;;) {
2191		struct k_sigaction *ka;
2192
2193		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2194		    do_signal_stop(0))
2195			goto relock;
2196
2197		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2198			do_jobctl_trap();
2199			spin_unlock_irq(&sighand->siglock);
2200			goto relock;
2201		}
2202
2203		signr = dequeue_signal(current, &current->blocked, info);
2204
2205		if (!signr)
2206			break; /* will return 0 */
2207
2208		if (unlikely(current->ptrace) && signr != SIGKILL) {
2209			signr = ptrace_signal(signr, info,
2210					      regs, cookie);
2211			if (!signr)
2212				continue;
2213		}
2214
2215		ka = &sighand->action[signr-1];
2216
2217		/* Trace actually delivered signals. */
2218		trace_signal_deliver(signr, info, ka);
2219
2220		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2221			continue;
2222		if (ka->sa.sa_handler != SIG_DFL) {
2223			/* Run the handler.  */
2224			*return_ka = *ka;
2225
2226			if (ka->sa.sa_flags & SA_ONESHOT)
2227				ka->sa.sa_handler = SIG_DFL;
2228
2229			break; /* will return non-zero "signr" value */
2230		}
2231
2232		/*
2233		 * Now we are doing the default action for this signal.
2234		 */
2235		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2236			continue;
2237
2238		/*
2239		 * Global init gets no signals it doesn't want.
2240		 * Container-init gets no signals it doesn't want from same
2241		 * container.
2242		 *
2243		 * Note that if global/container-init sees a sig_kernel_only()
2244		 * signal here, the signal must have been generated internally
2245		 * or must have come from an ancestor namespace. In either
2246		 * case, the signal cannot be dropped.
2247		 */
2248		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2249				!sig_kernel_only(signr))
2250			continue;
2251
2252		if (sig_kernel_stop(signr)) {
2253			/*
2254			 * The default action is to stop all threads in
2255			 * the thread group.  The job control signals
2256			 * do nothing in an orphaned pgrp, but SIGSTOP
2257			 * always works.  Note that siglock needs to be
2258			 * dropped during the call to is_orphaned_pgrp()
2259			 * because of lock ordering with tasklist_lock.
2260			 * This allows an intervening SIGCONT to be posted.
2261			 * We need to check for that and bail out if necessary.
2262			 */
2263			if (signr != SIGSTOP) {
2264				spin_unlock_irq(&sighand->siglock);
2265
2266				/* signals can be posted during this window */
2267
2268				if (is_current_pgrp_orphaned())
2269					goto relock;
2270
2271				spin_lock_irq(&sighand->siglock);
2272			}
2273
2274			if (likely(do_signal_stop(info->si_signo))) {
2275				/* It released the siglock.  */
2276				goto relock;
2277			}
2278
2279			/*
2280			 * We didn't actually stop, due to a race
2281			 * with SIGCONT or something like that.
2282			 */
2283			continue;
2284		}
2285
2286		spin_unlock_irq(&sighand->siglock);
2287
2288		/*
2289		 * Anything else is fatal, maybe with a core dump.
2290		 */
2291		current->flags |= PF_SIGNALED;
2292
2293		if (sig_kernel_coredump(signr)) {
2294			if (print_fatal_signals)
2295				print_fatal_signal(regs, info->si_signo);
2296			/*
2297			 * If it was able to dump core, this kills all
2298			 * other threads in the group and synchronizes with
2299			 * their demise.  If we lost the race with another
2300			 * thread getting here, it set group_exit_code
2301			 * first and our do_group_exit call below will use
2302			 * that value and ignore the one we pass it.
2303			 */
2304			do_coredump(info->si_signo, info->si_signo, regs);
2305		}
2306
2307		/*
2308		 * Death signals, no core dump.
2309		 */
2310		do_group_exit(info->si_signo);
2311		/* NOTREACHED */
2312	}
2313	spin_unlock_irq(&sighand->siglock);
2314	return signr;
2315}
2316
2317/*
2318 * It could be that complete_signal() picked us to notify about the
2319 * group-wide signal. Other threads should be notified now to take
2320 * the shared signals in @which since we will not.
2321 */
2322static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2323{
2324	sigset_t retarget;
2325	struct task_struct *t;
2326
2327	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2328	if (sigisemptyset(&retarget))
2329		return;
2330
2331	t = tsk;
2332	while_each_thread(tsk, t) {
2333		if (t->flags & PF_EXITING)
2334			continue;
2335
2336		if (!has_pending_signals(&retarget, &t->blocked))
2337			continue;
2338		/* Remove the signals this thread can handle. */
2339		sigandsets(&retarget, &retarget, &t->blocked);
2340
2341		if (!signal_pending(t))
2342			signal_wake_up(t, 0);
2343
2344		if (sigisemptyset(&retarget))
2345			break;
2346	}
2347}
2348
2349void exit_signals(struct task_struct *tsk)
2350{
2351	int group_stop = 0;
2352	sigset_t unblocked;
2353
2354	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2355		tsk->flags |= PF_EXITING;
2356		return;
2357	}
2358
2359	spin_lock_irq(&tsk->sighand->siglock);
2360	/*
2361	 * From now this task is not visible for group-wide signals,
2362	 * see wants_signal(), do_signal_stop().
2363	 */
2364	tsk->flags |= PF_EXITING;
2365	if (!signal_pending(tsk))
2366		goto out;
2367
2368	unblocked = tsk->blocked;
2369	signotset(&unblocked);
2370	retarget_shared_pending(tsk, &unblocked);
2371
2372	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2373	    task_participate_group_stop(tsk))
2374		group_stop = CLD_STOPPED;
2375out:
2376	spin_unlock_irq(&tsk->sighand->siglock);
2377
2378	/*
2379	 * If group stop has completed, deliver the notification.  This
2380	 * should always go to the real parent of the group leader.
2381	 */
2382	if (unlikely(group_stop)) {
2383		read_lock(&tasklist_lock);
2384		do_notify_parent_cldstop(tsk, false, group_stop);
2385		read_unlock(&tasklist_lock);
2386	}
2387}
2388
2389EXPORT_SYMBOL(recalc_sigpending);
2390EXPORT_SYMBOL_GPL(dequeue_signal);
2391EXPORT_SYMBOL(flush_signals);
2392EXPORT_SYMBOL(force_sig);
2393EXPORT_SYMBOL(send_sig);
2394EXPORT_SYMBOL(send_sig_info);
2395EXPORT_SYMBOL(sigprocmask);
2396EXPORT_SYMBOL(block_all_signals);
2397EXPORT_SYMBOL(unblock_all_signals);
2398
2399
2400/*
2401 * System call entry points.
2402 */
2403
2404/**
2405 *  sys_restart_syscall - restart a system call
2406 */
2407SYSCALL_DEFINE0(restart_syscall)
2408{
2409	struct restart_block *restart = &current_thread_info()->restart_block;
2410	return restart->fn(restart);
2411}
2412
2413long do_no_restart_syscall(struct restart_block *param)
2414{
2415	return -EINTR;
2416}
2417
2418static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2419{
2420	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2421		sigset_t newblocked;
2422		/* A set of now blocked but previously unblocked signals. */
2423		sigandnsets(&newblocked, newset, &current->blocked);
2424		retarget_shared_pending(tsk, &newblocked);
2425	}
2426	tsk->blocked = *newset;
2427	recalc_sigpending();
2428}
2429
2430/**
2431 * set_current_blocked - change current->blocked mask
2432 * @newset: new mask
2433 *
2434 * It is wrong to change ->blocked directly, this helper should be used
2435 * to ensure the process can't miss a shared signal we are going to block.
2436 */
2437void set_current_blocked(const sigset_t *newset)
2438{
2439	struct task_struct *tsk = current;
2440
2441	spin_lock_irq(&tsk->sighand->siglock);
2442	__set_task_blocked(tsk, newset);
2443	spin_unlock_irq(&tsk->sighand->siglock);
2444}
2445
2446/*
2447 * This is also useful for kernel threads that want to temporarily
2448 * (or permanently) block certain signals.
2449 *
2450 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2451 * interface happily blocks "unblockable" signals like SIGKILL
2452 * and friends.
2453 */
2454int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2455{
2456	struct task_struct *tsk = current;
2457	sigset_t newset;
2458
2459	/* Lockless, only current can change ->blocked, never from irq */
2460	if (oldset)
2461		*oldset = tsk->blocked;
2462
2463	switch (how) {
2464	case SIG_BLOCK:
2465		sigorsets(&newset, &tsk->blocked, set);
2466		break;
2467	case SIG_UNBLOCK:
2468		sigandnsets(&newset, &tsk->blocked, set);
2469		break;
2470	case SIG_SETMASK:
2471		newset = *set;
2472		break;
2473	default:
2474		return -EINVAL;
2475	}
2476
2477	set_current_blocked(&newset);
2478	return 0;
2479}
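/*
 * A minimal sketch (illustrative, not from this file): a kernel thread
 * using this interface to block everything, including, per the NOTE
 * above, normally unblockable signals:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */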
2480
2481/**
2482 *  sys_rt_sigprocmask - change the list of currently blocked signals
2483 *  @how: whether to add, remove, or set signals
2484 *  @nset: new signal mask to apply, or NULL to leave the mask unchanged
2485 *  @oset: previous value of signal mask if non-null
2486 *  @sigsetsize: size of sigset_t type
2487 */
2488SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2489		sigset_t __user *, oset, size_t, sigsetsize)
2490{
2491	sigset_t old_set, new_set;
2492	int error;
2493
2494	/* XXX: Don't preclude handling different sized sigset_t's.  */
2495	if (sigsetsize != sizeof(sigset_t))
2496		return -EINVAL;
2497
2498	old_set = current->blocked;
2499
2500	if (nset) {
2501		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2502			return -EFAULT;
2503		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2504
2505		error = sigprocmask(how, &new_set, NULL);
2506		if (error)
2507			return error;
2508	}
2509
2510	if (oset) {
2511		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2512			return -EFAULT;
2513	}
2514
2515	return 0;
2516}
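/*
 * A minimal sketch (userspace view, not from this file): glibc's
 * sigprocmask(3) ends up here, e.g. shielding a critical region from
 * SIGINT:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	...					// critical region
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */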
2517
2518long do_sigpending(void __user *set, unsigned long sigsetsize)
2519{
2520	long error = -EINVAL;
2521	sigset_t pending;
2522
2523	if (sigsetsize > sizeof(sigset_t))
2524		goto out;
2525
2526	spin_lock_irq(&current->sighand->siglock);
2527	sigorsets(&pending, &current->pending.signal,
2528		  &current->signal->shared_pending.signal);
2529	spin_unlock_irq(&current->sighand->siglock);
2530
2531	/* Outside the lock because only this thread touches it.  */
2532	sigandsets(&pending, &current->blocked, &pending);
2533
2534	error = -EFAULT;
2535	if (!copy_to_user(set, &pending, sigsetsize))
2536		error = 0;
2537
2538out:
2539	return error;
2540}
2541
2542/**
2543 *  sys_rt_sigpending - examine the set of pending signals that have
2544 *			been raised while blocked
2545 *  @set: stores pending signals
2546 *  @sigsetsize: size of sigset_t type (or smaller)
2547 */
2548SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2549{
2550	return do_sigpending(set, sigsetsize);
2551}
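/*
 * Illustrative userspace sketch: sigpending(2) reports signals that are
 * both blocked and pending, matching the sigandsets() in do_sigpending()
 * (handle_deferred_usr1() is a hypothetical helper):
 *
 *	sigset_t pend;
 *
 *	if (sigpending(&pend) == 0 && sigismember(&pend, SIGUSR1))
 *		handle_deferred_usr1();
 */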
2552
2553#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2554
2555int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2556{
2557	int err;
2558
2559	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2560		return -EFAULT;
2561	if (from->si_code < 0)
2562		return __copy_to_user(to, from, sizeof(siginfo_t))
2563			? -EFAULT : 0;
2564	/*
2565	 * If you change siginfo_t structure, please be sure
2566	 * this code is fixed accordingly.
2567	 * Please remember to update the signalfd_copyinfo() function
2568	 * inside fs/signalfd.c too, in case siginfo_t changes.
2569	 * It should never copy any pad contained in the structure
2570	 * to avoid security leaks, but must copy the generic
2571	 * 3 ints plus the relevant union member.
2572	 */
2573	err = __put_user(from->si_signo, &to->si_signo);
2574	err |= __put_user(from->si_errno, &to->si_errno);
2575	err |= __put_user((short)from->si_code, &to->si_code);
2576	switch (from->si_code & __SI_MASK) {
2577	case __SI_KILL:
2578		err |= __put_user(from->si_pid, &to->si_pid);
2579		err |= __put_user(from->si_uid, &to->si_uid);
2580		break;
2581	case __SI_TIMER:
2582		err |= __put_user(from->si_tid, &to->si_tid);
2583		err |= __put_user(from->si_overrun, &to->si_overrun);
2584		err |= __put_user(from->si_ptr, &to->si_ptr);
2585		break;
2586	case __SI_POLL:
2587		err |= __put_user(from->si_band, &to->si_band);
2588		err |= __put_user(from->si_fd, &to->si_fd);
2589		break;
2590	case __SI_FAULT:
2591		err |= __put_user(from->si_addr, &to->si_addr);
2592#ifdef __ARCH_SI_TRAPNO
2593		err |= __put_user(from->si_trapno, &to->si_trapno);
2594#endif
2595#ifdef BUS_MCEERR_AO
2596		/*
2597		 * Other callers might not initialize the si_lsb field,
2598		 * so check explicitly for the right codes here.
2599		 */
2600		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2601			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2602#endif
2603		break;
2604	case __SI_CHLD:
2605		err |= __put_user(from->si_pid, &to->si_pid);
2606		err |= __put_user(from->si_uid, &to->si_uid);
2607		err |= __put_user(from->si_status, &to->si_status);
2608		err |= __put_user(from->si_utime, &to->si_utime);
2609		err |= __put_user(from->si_stime, &to->si_stime);
2610		break;
2611	case __SI_RT: /* This is not generated by the kernel as of now. */
2612	case __SI_MESGQ: /* But this is */
2613		err |= __put_user(from->si_pid, &to->si_pid);
2614		err |= __put_user(from->si_uid, &to->si_uid);
2615		err |= __put_user(from->si_ptr, &to->si_ptr);
2616		break;
2617	default: /* this is just in case for now ... */
2618		err |= __put_user(from->si_pid, &to->si_pid);
2619		err |= __put_user(from->si_uid, &to->si_uid);
2620		break;
2621	}
2622	return err;
2623}
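/*
 * Illustrative userspace sketch: an SA_SIGINFO handler consumes exactly
 * the union members the switch above copied for its si_code class, e.g.
 * the __SI_CHLD fields (reap_child() is a hypothetical helper):
 *
 *	static void on_chld(int sig, siginfo_t *si, void *uctx)
 *	{
 *		reap_child(si->si_pid, si->si_status);
 *	}
 */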
2624
2625#endif
2626
2627/**
2628 *  do_sigtimedwait - wait for queued signals specified in @which
2629 *  @which: queued signals to wait for
2630 *  @info: if non-null, the signal's siginfo is returned here
2631 *  @ts: upper bound on how long the process may be suspended
2632 */
2633int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2634			const struct timespec *ts)
2635{
2636	struct task_struct *tsk = current;
2637	long timeout = MAX_SCHEDULE_TIMEOUT;
2638	sigset_t mask = *which;
2639	int sig;
2640
2641	if (ts) {
2642		if (!timespec_valid(ts))
2643			return -EINVAL;
2644		timeout = timespec_to_jiffies(ts);
2645		/*
2646		 * We can be close to the next tick; add another one
2647		 * to ensure we wait at least as long as asked for.
2648		 */
2649		if (ts->tv_sec || ts->tv_nsec)
2650			timeout++;
2651	}
2652
2653	/*
2654	 * Invert the set of allowed signals to get those we want to block.
2655	 */
2656	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2657	signotset(&mask);
2658
2659	spin_lock_irq(&tsk->sighand->siglock);
2660	sig = dequeue_signal(tsk, &mask, info);
2661	if (!sig && timeout) {
2662		/*
2663		 * None ready; temporarily unblock the signals we're
2664		 * interested in while we sleep, so that we'll be awakened
2665		 * when one arrives. Unblocking is always fine; we can
2666		 * avoid set_current_blocked().
2667		 */
2668		tsk->real_blocked = tsk->blocked;
2669		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2670		recalc_sigpending();
2671		spin_unlock_irq(&tsk->sighand->siglock);
2672
2673		timeout = schedule_timeout_interruptible(timeout);
2674
2675		spin_lock_irq(&tsk->sighand->siglock);
2676		__set_task_blocked(tsk, &tsk->real_blocked);
2677		siginitset(&tsk->real_blocked, 0);
2678		sig = dequeue_signal(tsk, &mask, info);
2679	}
2680	spin_unlock_irq(&tsk->sighand->siglock);
2681
2682	if (sig)
2683		return sig;
2684	return timeout ? -EINTR : -EAGAIN;
2685}
2686
2687/**
2688 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2689 *			in @uthese
2690 *  @uthese: queued signals to wait for
2691 *  @uinfo: if non-null, the signal's siginfo is returned here
2692 *  @uts: upper bound on how long the process may be suspended
2693 *  @sigsetsize: size of sigset_t type
2694 */
2695SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2696		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2697		size_t, sigsetsize)
2698{
2699	sigset_t these;
2700	struct timespec ts;
2701	siginfo_t info;
2702	int ret;
2703
2704	/* XXX: Don't preclude handling different sized sigset_t's.  */
2705	if (sigsetsize != sizeof(sigset_t))
2706		return -EINVAL;
2707
2708	if (copy_from_user(&these, uthese, sizeof(these)))
2709		return -EFAULT;
2710
2711	if (uts) {
2712		if (copy_from_user(&ts, uts, sizeof(ts)))
2713			return -EFAULT;
2714	}
2715
2716	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2717
2718	if (ret > 0 && uinfo) {
2719		if (copy_siginfo_to_user(uinfo, &info))
2720			ret = -EFAULT;
2721	}
2722
2723	return ret;
2724}
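/*
 * Illustrative userspace sketch: wait up to one second for SIGUSR1 with
 * sigtimedwait(3); the signal is blocked first so it stays pending
 * instead of being delivered to a handler:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 */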
2725
2726/**
2727 *  sys_kill - send a signal to a process
2728 *  @pid: the PID of the process
2729 *  @sig: signal to be sent
2730 */
2731SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2732{
2733	struct siginfo info;
2734
2735	info.si_signo = sig;
2736	info.si_errno = 0;
2737	info.si_code = SI_USER;
2738	info.si_pid = task_tgid_vnr(current);
2739	info.si_uid = current_uid();
2740
2741	return kill_something_info(sig, &info, pid);
2742}
2743
2744static int
2745do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2746{
2747	struct task_struct *p;
2748	int error = -ESRCH;
2749
2750	rcu_read_lock();
2751	p = find_task_by_vpid(pid);
2752	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2753		error = check_kill_permission(sig, info, p);
2754		/*
2755		 * The null signal is a permissions and process existence
2756		 * probe.  No signal is actually delivered.
2757		 */
2758		if (!error && sig) {
2759			error = do_send_sig_info(sig, info, p, false);
2760			/*
2761			 * If lock_task_sighand() failed we pretend the task
2762			 * dies after receiving the signal. The window is tiny,
2763			 * and the signal is private anyway.
2764			 */
2765			if (unlikely(error == -ESRCH))
2766				error = 0;
2767		}
2768	}
2769	rcu_read_unlock();
2770
2771	return error;
2772}
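/*
 * Illustrative userspace sketch of the null-signal probe described
 * above - existence and permission are checked, nothing is delivered:
 *
 *	if (kill(pid, 0) == 0)
 *		printf("pid exists and we may signal it\n");
 *	else if (errno == ESRCH)
 *		printf("no such process\n");
 */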
2773
2774static int do_tkill(pid_t tgid, pid_t pid, int sig)
2775{
2776	struct siginfo info;
2777
2778	info.si_signo = sig;
2779	info.si_errno = 0;
2780	info.si_code = SI_TKILL;
2781	info.si_pid = task_tgid_vnr(current);
2782	info.si_uid = current_uid();
2783
2784	return do_send_specific(tgid, pid, sig, &info);
2785}
2786
2787/**
2788 *  sys_tgkill - send signal to one specific thread
2789 *  @tgid: the thread group ID of the thread
2790 *  @pid: the PID of the thread
2791 *  @sig: signal to be sent
2792 *
2793 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2794 *  exists but no longer belongs to the target thread group. This closes
2795 *  the race of a thread exiting and its PID getting reused.
2796 */
2797SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2798{
2799	/* This is only valid for single tasks */
2800	if (pid <= 0 || tgid <= 0)
2801		return -EINVAL;
2802
2803	return do_tkill(tgid, pid, sig);
2804}
2805
2806/**
2807 *  sys_tkill - send signal to one specific task
2808 *  @pid: the PID of the task
2809 *  @sig: signal to be sent
2810 *
2811 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2812 */
2813SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2814{
2815	/* This is only valid for single tasks */
2816	if (pid <= 0)
2817		return -EINVAL;
2818
2819	return do_tkill(0, pid, sig);
2820}
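/*
 * Illustrative userspace sketch: tgkill(2) is the race-free variant to
 * prefer; tkill(2) survives for compatibility only. Assuming tid was
 * obtained via gettid() in the target thread:
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */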
2821
2822/**
2823 *  sys_rt_sigqueueinfo - send signal information to a process
2824 *  @pid: the PID of the process
2825 *  @sig: signal to be sent
2826 *  @uinfo: signal info to be sent
2827 */
2828SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2829		siginfo_t __user *, uinfo)
2830{
2831	siginfo_t info;
2832
2833	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2834		return -EFAULT;
2835
2836	/* Not even root can pretend to send signals from the kernel.
2837	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2838	 */
2839	if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2840		/* We used to allow any < 0 si_code */
2841		WARN_ON_ONCE(info.si_code < 0);
2842		return -EPERM;
2843	}
2844	info.si_signo = sig;
2845
2846	/* POSIX.1b doesn't mention process groups.  */
2847	return kill_proc_info(sig, &info, pid);
2848}
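/*
 * Illustrative userspace sketch: sigqueue(3) is the sanctioned route to
 * this syscall; glibc fills in si_code = SI_QUEUE, which is negative and
 * not SI_TKILL, so it passes the check above:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);
 */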
2849
2850long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2851{
2852	/* This is only valid for single tasks */
2853	if (pid <= 0 || tgid <= 0)
2854		return -EINVAL;
2855
2856	/* Not even root can pretend to send signals from the kernel.
2857	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2858	 */
2859	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
2860		/* We used to allow any < 0 si_code */
2861		WARN_ON_ONCE(info->si_code < 0);
2862		return -EPERM;
2863	}
2864	info->si_signo = sig;
2865
2866	return do_send_specific(tgid, pid, sig, info);
2867}
2868
2869SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2870		siginfo_t __user *, uinfo)
2871{
2872	siginfo_t info;
2873
2874	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2875		return -EFAULT;
2876
2877	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2878}
2879
2880int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2881{
2882	struct task_struct *t = current;
2883	struct k_sigaction *k;
2884	sigset_t mask;
2885
2886	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2887		return -EINVAL;
2888
2889	k = &t->sighand->action[sig-1];
2890
2891	spin_lock_irq(&current->sighand->siglock);
2892	if (oact)
2893		*oact = *k;
2894
2895	if (act) {
2896		sigdelsetmask(&act->sa.sa_mask,
2897			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2898		*k = *act;
2899		/*
2900		 * POSIX 3.3.1.3:
2901		 *  "Setting a signal action to SIG_IGN for a signal that is
2902		 *   pending shall cause the pending signal to be discarded,
2903		 *   whether or not it is blocked."
2904		 *
2905		 *  "Setting a signal action to SIG_DFL for a signal that is
2906		 *   pending and whose default action is to ignore the signal
2907		 *   (for example, SIGCHLD), shall cause the pending signal to
2908		 *   be discarded, whether or not it is blocked"
2909		 */
2910		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2911			sigemptyset(&mask);
2912			sigaddset(&mask, sig);
2913			rm_from_queue_full(&mask, &t->signal->shared_pending);
2914			do {
2915				rm_from_queue_full(&mask, &t->pending);
2916				t = next_thread(t);
2917			} while (t != current);
2918		}
2919	}
2920
2921	spin_unlock_irq(&current->sighand->siglock);
2922	return 0;
2923}
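/*
 * Illustrative userspace sketch of the POSIX rule quoted above: setting
 * SIG_IGN discards a pending instance even while the signal is blocked:
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			(now pending and blocked)
 *	signal(SIGUSR1, SIG_IGN);	(pending instance is discarded)
 */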
2924
2925int
2926do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2927{
2928	stack_t oss;
2929	int error;
2930
2931	oss.ss_sp = (void __user *) current->sas_ss_sp;
2932	oss.ss_size = current->sas_ss_size;
2933	oss.ss_flags = sas_ss_flags(sp);
2934
2935	if (uss) {
2936		void __user *ss_sp;
2937		size_t ss_size;
2938		int ss_flags;
2939
2940		error = -EFAULT;
2941		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2942			goto out;
2943		error = __get_user(ss_sp, &uss->ss_sp) |
2944			__get_user(ss_flags, &uss->ss_flags) |
2945			__get_user(ss_size, &uss->ss_size);
2946		if (error)
2947			goto out;
2948
2949		error = -EPERM;
2950		if (on_sig_stack(sp))
2951			goto out;
2952
2953		error = -EINVAL;
2954		/*
2955		 * Note - this code used to test ss_flags incorrectly:
2956		 *  	  old code may have been written using ss_flags==0
2957		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2958		 *	  way that worked) - this fix preserves that older
2959		 *	  mechanism.
2960		 */
2961		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2962			goto out;
2963
2964		if (ss_flags == SS_DISABLE) {
2965			ss_size = 0;
2966			ss_sp = NULL;
2967		} else {
2968			error = -ENOMEM;
2969			if (ss_size < MINSIGSTKSZ)
2970				goto out;
2971		}
2972
2973		current->sas_ss_sp = (unsigned long) ss_sp;
2974		current->sas_ss_size = ss_size;
2975	}
2976
2977	error = 0;
2978	if (uoss) {
2979		error = -EFAULT;
2980		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2981			goto out;
2982		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2983			__put_user(oss.ss_size, &uoss->ss_size) |
2984			__put_user(oss.ss_flags, &uoss->ss_flags);
2985	}
2986
2987out:
2988	return error;
2989}
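/*
 * Illustrative userspace sketch: install an alternate stack so a
 * SIGSEGV handler can still run after a stack overflow; note the
 * MINSIGSTKSZ floor enforced above:
 *
 *	stack_t ss = {
 *		.ss_sp    = malloc(SIGSTKSZ),
 *		.ss_size  = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 */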
2990
2991#ifdef __ARCH_WANT_SYS_SIGPENDING
2992
2993/**
2994 *  sys_sigpending - examine pending signals
2995 *  @set: where the mask of pending signals is returned
2996 */
2997SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2998{
2999	return do_sigpending(set, sizeof(*set));
3000}
3001
3002#endif
3003
3004#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3005/**
3006 *  sys_sigprocmask - examine and change blocked signals
3007 *  @how: whether to add, remove, or set signals
3008 *  @nset: signals to add or remove (if non-null)
3009 *  @oset: previous value of signal mask if non-null
3010 *
3011 * Some platforms have their own version with special arguments;
3012 * others support only sys_rt_sigprocmask.
3013 */
3014
3015SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3016		old_sigset_t __user *, oset)
3017{
3018	old_sigset_t old_set, new_set;
3019	sigset_t new_blocked;
3020
3021	old_set = current->blocked.sig[0];
3022
3023	if (nset) {
3024		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3025			return -EFAULT;
3026		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
3027
3028		new_blocked = current->blocked;
3029
3030		switch (how) {
3031		case SIG_BLOCK:
3032			sigaddsetmask(&new_blocked, new_set);
3033			break;
3034		case SIG_UNBLOCK:
3035			sigdelsetmask(&new_blocked, new_set);
3036			break;
3037		case SIG_SETMASK:
3038			new_blocked.sig[0] = new_set;
3039			break;
3040		default:
3041			return -EINVAL;
3042		}
3043
3044		set_current_blocked(&new_blocked);
3045	}
3046
3047	if (oset) {
3048		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3049			return -EFAULT;
3050	}
3051
3052	return 0;
3053}
3054#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3055
3056#ifdef __ARCH_WANT_SYS_RT_SIGACTION
3057/**
3058 *  sys_rt_sigaction - alter an action taken by a process
3059 *  @sig: signal to be sent
3060 *  @act: new sigaction
3061 *  @oact: used to save the previous sigaction
3062 *  @sigsetsize: size of sigset_t type
3063 */
3064SYSCALL_DEFINE4(rt_sigaction, int, sig,
3065		const struct sigaction __user *, act,
3066		struct sigaction __user *, oact,
3067		size_t, sigsetsize)
3068{
3069	struct k_sigaction new_sa, old_sa;
3070	int ret = -EINVAL;
3071
3072	/* XXX: Don't preclude handling different sized sigset_t's.  */
3073	if (sigsetsize != sizeof(sigset_t))
3074		goto out;
3075
3076	if (act) {
3077		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3078			return -EFAULT;
3079	}
3080
3081	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3082
3083	if (!ret && oact) {
3084		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3085			return -EFAULT;
3086	}
3087out:
3088	return ret;
3089}
3090#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
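/*
 * Illustrative userspace sketch: glibc's sigaction(3) wrapper marshals
 * struct sigaction and supplies sigsetsize (on_term is a hypothetical
 * handler function):
 *
 *	struct sigaction sa = { .sa_handler = on_term };
 *
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGTERM, &sa, NULL);
 */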
3091
3092#ifdef __ARCH_WANT_SYS_SGETMASK
3093
3094/*
3095 * For backwards compatibility.  Functionality superseded by sigprocmask.
3096 */
3097SYSCALL_DEFINE0(sgetmask)
3098{
3099	/* SMP safe */
3100	return current->blocked.sig[0];
3101}
3102
3103SYSCALL_DEFINE1(ssetmask, int, newmask)
3104{
3105	int old = current->blocked.sig[0];
3106	sigset_t newset;
3107
3108	siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
3109	set_current_blocked(&newset);
3110
3111	return old;
3112}
3113#endif /* __ARCH_WANT_SYS_SGETMASK */
3114
3115#ifdef __ARCH_WANT_SYS_SIGNAL
3116/*
3117 * For backwards compatibility.  Functionality superseded by sigaction.
3118 */
3119SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3120{
3121	struct k_sigaction new_sa, old_sa;
3122	int ret;
3123
3124	new_sa.sa.sa_handler = handler;
3125	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3126	sigemptyset(&new_sa.sa.sa_mask);
3127
3128	ret = do_sigaction(sig, &new_sa, &old_sa);
3129
3130	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3131}
3132#endif /* __ARCH_WANT_SYS_SIGNAL */
3133
3134#ifdef __ARCH_WANT_SYS_PAUSE
3135
3136SYSCALL_DEFINE0(pause)
3137{
3138	while (!signal_pending(current)) {
3139		current->state = TASK_INTERRUPTIBLE;
3140		schedule();
3141	}
3142	return -ERESTARTNOHAND;
3143}
3144
3145#endif
3146
3147#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
3148/**
3149 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
3150 *	until a signal is received
3151 *  @unewset: new signal mask value
3152 *  @sigsetsize: size of sigset_t type
3153 */
3154SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3155{
3156	sigset_t newset;
3157
3158	/* XXX: Don't preclude handling different sized sigset_t's.  */
3159	if (sigsetsize != sizeof(sigset_t))
3160		return -EINVAL;
3161
3162	if (copy_from_user(&newset, unewset, sizeof(newset)))
3163		return -EFAULT;
3164	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3165
3166	current->saved_sigmask = current->blocked;
3167	set_current_blocked(&newset);
3168
3169	current->state = TASK_INTERRUPTIBLE;
3170	schedule();
3171	set_restore_sigmask();
3172	return -ERESTARTNOHAND;
3173}
3174#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
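/*
 * Illustrative userspace sketch: the classic race-free wait that
 * sigsuspend(2) enables - block the signal, test a flag set by its
 * handler, then atomically restore the old mask and sleep (set, old and
 * the volatile sig_atomic_t flag are assumed to be set up elsewhere):
 *
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	while (!flag)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */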
3175
3176__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3177{
3178	return NULL;
3179}
3180
3181void __init signals_init(void)
3182{
3183	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3184}
3185
3186#ifdef CONFIG_KGDB_KDB
3187#include <linux/kdb.h>
3188/*
3189 * kdb_send_sig_info - Allows kdb to send signals without exposing
3190 * signal internals.  This function checks if the required locks are
3191 * available before calling the main signal code, to avoid kdb
3192 * deadlocks.
3193 */
3194void
3195kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3196{
3197	static struct task_struct *kdb_prev_t;
3198	int sig, new_t;
3199	if (!spin_trylock(&t->sighand->siglock)) {
3200		kdb_printf("Can't do kill command now.\n"
3201			   "The sigmask lock is held somewhere else in "
3202			   "the kernel; try again later\n");
3203		return;
3204	}
3205	spin_unlock(&t->sighand->siglock);
3206	new_t = kdb_prev_t != t;
3207	kdb_prev_t = t;
3208	if (t->state != TASK_RUNNING && new_t) {
3209		kdb_printf("Process is not RUNNING, sending a signal "
3210			   "from kdb risks deadlock on the run queue "
3211			   "locks.\n"
3212			   "The signal has _not_ been sent.\n"
3213			   "Reissue the kill command if you want to risk "
3214			   "the deadlock.\n");
3215		return;
3216	}
3217	sig = info->si_signo;
3218	if (send_sig_info(sig, info, t))
3219		kdb_printf("Failed to deliver signal %d to process %d.\n",
3220			   sig, t->pid);
3221	else
3222		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
3223}
3224#endif	/* CONFIG_KGDB_KDB */