   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/kernel/signal.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   8 *
   9 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
  10 *		Changes to use preallocated sigqueue structures
  11 *		to allow signals to be sent reliably.
  12 */
  13
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/init.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/user.h>
  19#include <linux/sched/debug.h>
  20#include <linux/sched/task.h>
  21#include <linux/sched/task_stack.h>
  22#include <linux/sched/cputime.h>
  23#include <linux/file.h>
  24#include <linux/fs.h>
  25#include <linux/proc_fs.h>
  26#include <linux/tty.h>
  27#include <linux/binfmts.h>
  28#include <linux/coredump.h>
  29#include <linux/security.h>
  30#include <linux/syscalls.h>
  31#include <linux/ptrace.h>
  32#include <linux/signal.h>
  33#include <linux/signalfd.h>
  34#include <linux/ratelimit.h>
  35#include <linux/tracehook.h>
  36#include <linux/capability.h>
  37#include <linux/freezer.h>
  38#include <linux/pid_namespace.h>
  39#include <linux/nsproxy.h>
  40#include <linux/user_namespace.h>
  41#include <linux/uprobes.h>
  42#include <linux/compat.h>
  43#include <linux/cn_proc.h>
  44#include <linux/compiler.h>
  45#include <linux/posix-timers.h>
  46#include <linux/livepatch.h>
  47#include <linux/cgroup.h>
  48#include <linux/audit.h>
  49
  50#define CREATE_TRACE_POINTS
  51#include <trace/events/signal.h>
  52
  53#include <asm/param.h>
  54#include <linux/uaccess.h>
  55#include <asm/unistd.h>
  56#include <asm/siginfo.h>
  57#include <asm/cacheflush.h>
  58
  59/*
  60 * SLAB caches for signal bits.
  61 */
  62
  63static struct kmem_cache *sigqueue_cachep;
  64
  65int print_fatal_signals __read_mostly;
  66
  67static void __user *sig_handler(struct task_struct *t, int sig)
  68{
  69	return t->sighand->action[sig - 1].sa.sa_handler;
  70}
  71
  72static inline bool sig_handler_ignored(void __user *handler, int sig)
  73{
  74	/* Is it explicitly or implicitly ignored? */
  75	return handler == SIG_IGN ||
  76	       (handler == SIG_DFL && sig_kernel_ignore(sig));
  77}
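
/*
 * Worked example (illustrative, not part of the original source): with
 * the default disposition, sig_handler_ignored(SIG_DFL, SIGCHLD) is true
 * because sig_kernel_ignore(SIGCHLD) holds -- SIGCHLD, SIGCONT, SIGWINCH
 * and SIGURG are ignored by default -- while the same call for SIGTERM
 * returns false, since its default action is to terminate the process.
 */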
  78
  79static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
  80{
  81	void __user *handler;
  82
  83	handler = sig_handler(t, sig);
  84
  85	/* SIGKILL and SIGSTOP may not be sent to the global init */
  86	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
  87		return true;
  88
  89	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  90	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
  91		return true;
  92
  93	/* Only allow kernel generated signals to this kthread */
  94	if (unlikely((t->flags & PF_KTHREAD) &&
  95		     (handler == SIG_KTHREAD_KERNEL) && !force))
  96		return true;
  97
  98	return sig_handler_ignored(handler, sig);
  99}
 100
 101static bool sig_ignored(struct task_struct *t, int sig, bool force)
 102{
 103	/*
 104	 * Blocked signals are never ignored, since the
 105	 * signal handler may change by the time it is
 106	 * unblocked.
 107	 */
 108	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 109		return false;
 110
 111	/*
  112	 * Tracers may want to know about even ignored signals, unless it
  113	 * is SIGKILL, which can't be reported anyway but can be ignored
  114	 * by a SIGNAL_UNKILLABLE task.
 115	 */
 116	if (t->ptrace && sig != SIGKILL)
 117		return false;
 118
 119	return sig_task_ignored(t, sig, force);
 120}
 121
 122/*
 123 * Re-calculate pending state from the set of locally pending
 124 * signals, globally pending signals, and blocked signals.
 125 */
 126static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 127{
 128	unsigned long ready;
 129	long i;
 130
 131	switch (_NSIG_WORDS) {
 132	default:
 133		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 134			ready |= signal->sig[i] &~ blocked->sig[i];
 135		break;
 136
 137	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 138		ready |= signal->sig[2] &~ blocked->sig[2];
 139		ready |= signal->sig[1] &~ blocked->sig[1];
 140		ready |= signal->sig[0] &~ blocked->sig[0];
 141		break;
 142
 143	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 144		ready |= signal->sig[0] &~ blocked->sig[0];
 145		break;
 146
 147	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 148	}
 149	return ready !=	0;
 150}
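
/*
 * A minimal sketch (hypothetical helper, not in signal.c) of the same
 * computation for the common case of a 64-bit architecture, where
 * _NSIG == 64 and _NSIG_WORDS == 1: a signal is "ready" iff its bit is
 * set in @signal and clear in @blocked.
 */
static inline bool has_pending_signals_word0(const sigset_t *signal,
					     const sigset_t *blocked)
{
	return (signal->sig[0] & ~blocked->sig[0]) != 0;
}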
 151
 152#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 153
 154static bool recalc_sigpending_tsk(struct task_struct *t)
 155{
 156	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
 157	    PENDING(&t->pending, &t->blocked) ||
 158	    PENDING(&t->signal->shared_pending, &t->blocked) ||
 159	    cgroup_task_frozen(t)) {
 160		set_tsk_thread_flag(t, TIF_SIGPENDING);
 161		return true;
 162	}
 163
 164	/*
 165	 * We must never clear the flag in another thread, or in current
 166	 * when it's possible the current syscall is returning -ERESTART*.
  167	 * So we don't clear it here; only callers that know it is safe do so.
 168	 */
 169	return false;
 170}
 171
 172/*
 173 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 174 * This is superfluous when called on current, the wakeup is a harmless no-op.
 175 */
 176void recalc_sigpending_and_wake(struct task_struct *t)
 177{
 178	if (recalc_sigpending_tsk(t))
 179		signal_wake_up(t, 0);
 180}
 181
 182void recalc_sigpending(void)
 183{
 184	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
 185	    !klp_patch_pending(current))
 186		clear_thread_flag(TIF_SIGPENDING);
 187
 188}
 189EXPORT_SYMBOL(recalc_sigpending);
 190
 191void calculate_sigpending(void)
 192{
 193	/* Have any signals or users of TIF_SIGPENDING been delayed
 194	 * until after fork?
 195	 */
 196	spin_lock_irq(&current->sighand->siglock);
 197	set_tsk_thread_flag(current, TIF_SIGPENDING);
 198	recalc_sigpending();
 199	spin_unlock_irq(&current->sighand->siglock);
 200}
 201
 202/* Given the mask, find the first available signal that should be serviced. */
 203
 204#define SYNCHRONOUS_MASK \
 205	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 206	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 207
 208int next_signal(struct sigpending *pending, sigset_t *mask)
 209{
 210	unsigned long i, *s, *m, x;
 211	int sig = 0;
 212
 213	s = pending->signal.sig;
 214	m = mask->sig;
 215
 216	/*
 217	 * Handle the first word specially: it contains the
 218	 * synchronous signals that need to be dequeued first.
 219	 */
 220	x = *s &~ *m;
 221	if (x) {
 222		if (x & SYNCHRONOUS_MASK)
 223			x &= SYNCHRONOUS_MASK;
 224		sig = ffz(~x) + 1;
 225		return sig;
 226	}
 227
 228	switch (_NSIG_WORDS) {
 229	default:
 230		for (i = 1; i < _NSIG_WORDS; ++i) {
 231			x = *++s &~ *++m;
 232			if (!x)
 233				continue;
 234			sig = ffz(~x) + i*_NSIG_BPW + 1;
 235			break;
 236		}
 237		break;
 238
 239	case 2:
 240		x = s[1] &~ m[1];
 241		if (!x)
 242			break;
 243		sig = ffz(~x) + _NSIG_BPW + 1;
 244		break;
 245
 246	case 1:
 247		/* Nothing to do */
 248		break;
 249	}
 250
 251	return sig;
 252}
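
/*
 * Worked example of the dequeue priority above (illustrative values):
 * suppose SIGSEGV (11) and SIGTERM (15) are both pending and unblocked,
 * so the first word is x = sigmask(SIGSEGV) | sigmask(SIGTERM).  Since
 * x & SYNCHRONOUS_MASK is non-zero, x is narrowed to the SIGSEGV bit
 * alone, and ffz(~x) + 1 (the index of the lowest set bit, plus one)
 * yields 11: the synchronous fault signal is serviced before the
 * asynchronous SIGTERM.
 */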
 253
 254static inline void print_dropped_signal(int sig)
 255{
 256	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 257
 258	if (!print_fatal_signals)
 259		return;
 260
 261	if (!__ratelimit(&ratelimit_state))
 262		return;
 263
 264	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 265				current->comm, current->pid, sig);
 266}
 267
 268/**
 269 * task_set_jobctl_pending - set jobctl pending bits
 270 * @task: target task
 271 * @mask: pending bits to set
 272 *
  273 * Set @mask on @task->jobctl.  @mask must be a subset of
  274 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
  275 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
  276 * cleared.  If @task is already being killed or exiting, this function
  277 * becomes a no-op.
 278 *
 279 * CONTEXT:
 280 * Must be called with @task->sighand->siglock held.
 281 *
 282 * RETURNS:
  283 * %true if @mask is set, %false if it became a no-op because @task was dying.
 284 */
 285bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 286{
 287	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 288			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 289	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 290
 291	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 292		return false;
 293
 294	if (mask & JOBCTL_STOP_SIGMASK)
 295		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 296
 297	task->jobctl |= mask;
 298	return true;
 299}
 300
 301/**
 302 * task_clear_jobctl_trapping - clear jobctl trapping bit
 303 * @task: target task
 304 *
 305 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 306 * Clear it and wake up the ptracer.  Note that we don't need any further
 307 * locking.  @task->siglock guarantees that @task->parent points to the
 308 * ptracer.
 309 *
 310 * CONTEXT:
 311 * Must be called with @task->sighand->siglock held.
 312 */
 313void task_clear_jobctl_trapping(struct task_struct *task)
 314{
 315	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 316		task->jobctl &= ~JOBCTL_TRAPPING;
 317		smp_mb();	/* advised by wake_up_bit() */
 318		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 319	}
 320}
 321
 322/**
 323 * task_clear_jobctl_pending - clear jobctl pending bits
 324 * @task: target task
 325 * @mask: pending bits to clear
 326 *
 327 * Clear @mask from @task->jobctl.  @mask must be subset of
 328 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 329 * STOP bits are cleared together.
 330 *
 331 * If clearing of @mask leaves no stop or trap pending, this function calls
 332 * task_clear_jobctl_trapping().
 333 *
 334 * CONTEXT:
 335 * Must be called with @task->sighand->siglock held.
 336 */
 337void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 338{
 339	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 340
 341	if (mask & JOBCTL_STOP_PENDING)
 342		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 343
 344	task->jobctl &= ~mask;
 345
 346	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 347		task_clear_jobctl_trapping(task);
 348}
 349
 350/**
 351 * task_participate_group_stop - participate in a group stop
 352 * @task: task participating in a group stop
 353 *
 354 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 355 * Group stop states are cleared and the group stop count is consumed if
 356 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 357 * stop, the appropriate `SIGNAL_*` flags are set.
 358 *
 359 * CONTEXT:
 360 * Must be called with @task->sighand->siglock held.
 361 *
 362 * RETURNS:
 363 * %true if group stop completion should be notified to the parent, %false
 364 * otherwise.
 365 */
 366static bool task_participate_group_stop(struct task_struct *task)
 367{
 368	struct signal_struct *sig = task->signal;
 369	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 370
 371	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 372
 373	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 374
 375	if (!consume)
 376		return false;
 377
 378	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 379		sig->group_stop_count--;
 380
 381	/*
 382	 * Tell the caller to notify completion iff we are entering into a
 383	 * fresh group stop.  Read comment in do_signal_stop() for details.
 384	 */
 385	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 386		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 387		return true;
 388	}
 389	return false;
 390}
 391
 392void task_join_group_stop(struct task_struct *task)
 393{
 394	/* Have the new thread join an on-going signal group stop */
 395	unsigned long jobctl = current->jobctl;
 396	if (jobctl & JOBCTL_STOP_PENDING) {
 397		struct signal_struct *sig = current->signal;
 398		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
 399		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
 400		if (task_set_jobctl_pending(task, signr | gstop)) {
 401			sig->group_stop_count++;
 402		}
 403	}
 404}
 405
 406/*
 407 * allocate a new signal queue record
 408 * - this may be called without locks if and only if t == current, otherwise an
 409 *   appropriate lock must be held to stop the target task from exiting
 410 */
 411static struct sigqueue *
 412__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 413{
 414	struct sigqueue *q = NULL;
 415	struct user_struct *user;
 416
 417	/*
 418	 * Protect access to @t credentials. This can go away when all
 419	 * callers hold rcu read lock.
 420	 */
 421	rcu_read_lock();
 422	user = get_uid(__task_cred(t)->user);
 423	atomic_inc(&user->sigpending);
 424	rcu_read_unlock();
 425
 426	if (override_rlimit ||
 427	    atomic_read(&user->sigpending) <=
 428			task_rlimit(t, RLIMIT_SIGPENDING)) {
 429		q = kmem_cache_alloc(sigqueue_cachep, flags);
 430	} else {
 431		print_dropped_signal(sig);
 432	}
 433
 434	if (unlikely(q == NULL)) {
 435		atomic_dec(&user->sigpending);
 436		free_uid(user);
 437	} else {
 438		INIT_LIST_HEAD(&q->list);
 439		q->flags = 0;
 440		q->user = user;
 441	}
 442
 443	return q;
 444}
 445
 446static void __sigqueue_free(struct sigqueue *q)
 447{
 448	if (q->flags & SIGQUEUE_PREALLOC)
 449		return;
 450	atomic_dec(&q->user->sigpending);
 451	free_uid(q->user);
 452	kmem_cache_free(sigqueue_cachep, q);
 453}
 454
 455void flush_sigqueue(struct sigpending *queue)
 456{
 457	struct sigqueue *q;
 458
 459	sigemptyset(&queue->signal);
 460	while (!list_empty(&queue->list)) {
  461		q = list_entry(queue->list.next, struct sigqueue, list);
 462		list_del_init(&q->list);
 463		__sigqueue_free(q);
 464	}
 465}
 466
 467/*
 468 * Flush all pending signals for this kthread.
 469 */
 470void flush_signals(struct task_struct *t)
 471{
 472	unsigned long flags;
 473
 474	spin_lock_irqsave(&t->sighand->siglock, flags);
 475	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 476	flush_sigqueue(&t->pending);
 477	flush_sigqueue(&t->signal->shared_pending);
 478	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 479}
 480EXPORT_SYMBOL(flush_signals);
 481
 482#ifdef CONFIG_POSIX_TIMERS
 483static void __flush_itimer_signals(struct sigpending *pending)
 484{
 485	sigset_t signal, retain;
 486	struct sigqueue *q, *n;
 487
 488	signal = pending->signal;
 489	sigemptyset(&retain);
 490
 491	list_for_each_entry_safe(q, n, &pending->list, list) {
 492		int sig = q->info.si_signo;
 493
 494		if (likely(q->info.si_code != SI_TIMER)) {
 495			sigaddset(&retain, sig);
 496		} else {
 497			sigdelset(&signal, sig);
 498			list_del_init(&q->list);
 499			__sigqueue_free(q);
 500		}
 501	}
 502
 503	sigorsets(&pending->signal, &signal, &retain);
 504}
 505
 506void flush_itimer_signals(void)
 507{
 508	struct task_struct *tsk = current;
 509	unsigned long flags;
 510
 511	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 512	__flush_itimer_signals(&tsk->pending);
 513	__flush_itimer_signals(&tsk->signal->shared_pending);
 514	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 515}
 516#endif
 517
 518void ignore_signals(struct task_struct *t)
 519{
 520	int i;
 521
 522	for (i = 0; i < _NSIG; ++i)
 523		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 524
 525	flush_signals(t);
 526}
 527
 528/*
 529 * Flush all handlers for a task.
 530 */
 531
 532void
 533flush_signal_handlers(struct task_struct *t, int force_default)
 534{
 535	int i;
 536	struct k_sigaction *ka = &t->sighand->action[0];
 537	for (i = _NSIG ; i != 0 ; i--) {
 538		if (force_default || ka->sa.sa_handler != SIG_IGN)
 539			ka->sa.sa_handler = SIG_DFL;
 540		ka->sa.sa_flags = 0;
 541#ifdef __ARCH_HAS_SA_RESTORER
 542		ka->sa.sa_restorer = NULL;
 543#endif
 544		sigemptyset(&ka->sa.sa_mask);
 545		ka++;
 546	}
 547}
 548
 549bool unhandled_signal(struct task_struct *tsk, int sig)
 550{
 551	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 552	if (is_global_init(tsk))
 553		return true;
 554
 555	if (handler != SIG_IGN && handler != SIG_DFL)
 556		return false;
 557
 558	/* if ptraced, let the tracer determine */
 559	return !tsk->ptrace;
 560}
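
/*
 * Caller sketch (hypothetical, modeled on the arch page-fault handlers):
 * only log a fault when nobody -- neither a user handler nor a tracer --
 * is going to deal with the signal.
 */
static void report_unhandled_segv(struct task_struct *tsk, unsigned long addr)
{
	if (unhandled_signal(tsk, SIGSEGV) && printk_ratelimit())
		pr_info("%s[%d]: unhandled segfault at %#lx\n",
			tsk->comm, task_pid_nr(tsk), addr);
}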
 561
 562static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
 563			   bool *resched_timer)
 564{
 565	struct sigqueue *q, *first = NULL;
 566
 567	/*
 568	 * Collect the siginfo appropriate to this signal.  Check if
 569	 * there is another siginfo for the same signal.
 570	*/
 571	list_for_each_entry(q, &list->list, list) {
 572		if (q->info.si_signo == sig) {
 573			if (first)
 574				goto still_pending;
 575			first = q;
 576		}
 577	}
 578
 579	sigdelset(&list->signal, sig);
 580
 581	if (first) {
 582still_pending:
 583		list_del_init(&first->list);
 584		copy_siginfo(info, &first->info);
 585
 586		*resched_timer =
 587			(first->flags & SIGQUEUE_PREALLOC) &&
 588			(info->si_code == SI_TIMER) &&
 589			(info->si_sys_private);
 590
 591		__sigqueue_free(first);
 592	} else {
 593		/*
 594		 * Ok, it wasn't in the queue.  This must be
 595		 * a fast-pathed signal or we must have been
 596		 * out of queue space.  So zero out the info.
 597		 */
 598		clear_siginfo(info);
 599		info->si_signo = sig;
 600		info->si_errno = 0;
 601		info->si_code = SI_USER;
 602		info->si_pid = 0;
 603		info->si_uid = 0;
 604	}
 605}
 606
 607static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 608			kernel_siginfo_t *info, bool *resched_timer)
 609{
 610	int sig = next_signal(pending, mask);
 611
 612	if (sig)
 613		collect_signal(sig, pending, info, resched_timer);
 614	return sig;
 615}
 616
 617/*
 618 * Dequeue a signal and return the element to the caller, which is
 619 * expected to free it.
 620 *
 621 * All callers have to hold the siglock.
 622 */
 623int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
 624{
 625	bool resched_timer = false;
 626	int signr;
 627
 628	/* We only dequeue private signals from ourselves, we don't let
 629	 * signalfd steal them
 630	 */
 631	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 632	if (!signr) {
 633		signr = __dequeue_signal(&tsk->signal->shared_pending,
 634					 mask, info, &resched_timer);
 635#ifdef CONFIG_POSIX_TIMERS
 636		/*
 637		 * itimer signal ?
 638		 *
 639		 * itimers are process shared and we restart periodic
 640		 * itimers in the signal delivery path to prevent DoS
 641		 * attacks in the high resolution timer case. This is
 642		 * compliant with the old way of self-restarting
 643		 * itimers, as the SIGALRM is a legacy signal and only
 644		 * queued once. Changing the restart behaviour to
  645		 * restart the timer in the signal dequeue path also
  646		 * reduces the timer noise on heavily loaded !highres
  647		 * systems.
 648		 */
 649		if (unlikely(signr == SIGALRM)) {
 650			struct hrtimer *tmr = &tsk->signal->real_timer;
 651
 652			if (!hrtimer_is_queued(tmr) &&
 653			    tsk->signal->it_real_incr != 0) {
 654				hrtimer_forward(tmr, tmr->base->get_time(),
 655						tsk->signal->it_real_incr);
 656				hrtimer_restart(tmr);
 657			}
 658		}
 659#endif
 660	}
 661
 662	recalc_sigpending();
 663	if (!signr)
 664		return 0;
 665
 666	if (unlikely(sig_kernel_stop(signr))) {
 667		/*
 668		 * Set a marker that we have dequeued a stop signal.  Our
 669		 * caller might release the siglock and then the pending
 670		 * stop signal it is about to process is no longer in the
 671		 * pending bitmasks, but must still be cleared by a SIGCONT
 672		 * (and overruled by a SIGKILL).  So those cases clear this
 673		 * shared flag after we've set it.  Note that this flag may
 674		 * remain set after the signal we return is ignored or
 675		 * handled.  That doesn't matter because its only purpose
 676		 * is to alert stop-signal processing code when another
 677		 * processor has come along and cleared the flag.
 678		 */
 679		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 680	}
 681#ifdef CONFIG_POSIX_TIMERS
 682	if (resched_timer) {
 683		/*
 684		 * Release the siglock to ensure proper locking order
 685		 * of timer locks outside of siglocks.  Note, we leave
 686		 * irqs disabled here, since the posix-timers code is
 687		 * about to disable them again anyway.
 688		 */
 689		spin_unlock(&tsk->sighand->siglock);
 690		posixtimer_rearm(info);
 691		spin_lock(&tsk->sighand->siglock);
 692
 693		/* Don't expose the si_sys_private value to userspace */
 694		info->si_sys_private = 0;
 695	}
 696#endif
 697	return signr;
 698}
 699EXPORT_SYMBOL_GPL(dequeue_signal);
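
/*
 * Caller sketch (hypothetical): @mask names the signals to *skip*, so a
 * caller that will take any pending signal passes the empty set, and the
 * siglock must be held across the call, as noted above.
 */
static int dequeue_any_signal(struct task_struct *tsk, kernel_siginfo_t *info)
{
	sigset_t skip;
	int signr;

	sigemptyset(&skip);		/* exclude nothing */
	spin_lock_irq(&tsk->sighand->siglock);
	signr = dequeue_signal(tsk, &skip, info);
	spin_unlock_irq(&tsk->sighand->siglock);
	return signr;			/* 0 if nothing was pending */
}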
 700
 701static int dequeue_synchronous_signal(kernel_siginfo_t *info)
 702{
 703	struct task_struct *tsk = current;
 704	struct sigpending *pending = &tsk->pending;
 705	struct sigqueue *q, *sync = NULL;
 706
 707	/*
 708	 * Might a synchronous signal be in the queue?
 709	 */
 710	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
 711		return 0;
 712
 713	/*
 714	 * Return the first synchronous signal in the queue.
 715	 */
 716	list_for_each_entry(q, &pending->list, list) {
  717		/* Synchronous signals have a positive si_code */
 718		if ((q->info.si_code > SI_USER) &&
 719		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
 720			sync = q;
 721			goto next;
 722		}
 723	}
 724	return 0;
 725next:
 726	/*
 727	 * Check if there is another siginfo for the same signal.
 728	 */
 729	list_for_each_entry_continue(q, &pending->list, list) {
 730		if (q->info.si_signo == sync->info.si_signo)
 731			goto still_pending;
 732	}
 733
 734	sigdelset(&pending->signal, sync->info.si_signo);
 735	recalc_sigpending();
 736still_pending:
 737	list_del_init(&sync->list);
 738	copy_siginfo(info, &sync->info);
 739	__sigqueue_free(sync);
 740	return info->si_signo;
 741}
 742
 743/*
  744 * Tell a process that it has a new active signal.
 745 *
 746 * NOTE! we rely on the previous spin_lock to
 747 * lock interrupts for us! We can only be called with
 748 * "siglock" held, and the local interrupt must
 749 * have been disabled when that got acquired!
 750 *
 751 * No need to set need_resched since signal event passing
 752 * goes through ->blocked
 753 */
 754void signal_wake_up_state(struct task_struct *t, unsigned int state)
 755{
 756	set_tsk_thread_flag(t, TIF_SIGPENDING);
 757	/*
 758	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 759	 * case. We don't check t->state here because there is a race with it
 760	 * executing another processor and just now entering stopped state.
 761	 * By using wake_up_state, we ensure the process will wake up and
 762	 * handle its death signal.
 763	 */
 764	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 765		kick_process(t);
 766}
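
/*
 * Note: the signal_wake_up(t, resume) and ptrace_signal_wake_up(t, resume)
 * calls used throughout this file are static inline wrappers (declared in
 * include/linux/sched/signal.h) around signal_wake_up_state(); the boolean
 * selects an extra state mask (TASK_WAKEKILL or __TASK_TRACED) so a fatal
 * wakeup can also take a task out of TASK_STOPPED/TASK_TRACED.
 */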
 767
 768/*
 769 * Remove signals in mask from the pending set and queue.
  770 * Any queued siginfo structures for those signals are freed.
 771 *
 772 * All callers must be holding the siglock.
 773 */
 774static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 775{
 776	struct sigqueue *q, *n;
 777	sigset_t m;
 778
 779	sigandsets(&m, mask, &s->signal);
 780	if (sigisemptyset(&m))
 781		return;
 782
 783	sigandnsets(&s->signal, &s->signal, mask);
 784	list_for_each_entry_safe(q, n, &s->list, list) {
 785		if (sigismember(mask, q->info.si_signo)) {
 786			list_del_init(&q->list);
 787			__sigqueue_free(q);
 788		}
 789	}
 790}
 791
 792static inline int is_si_special(const struct kernel_siginfo *info)
 793{
 794	return info <= SEND_SIG_PRIV;
 795}
 796
 797static inline bool si_fromuser(const struct kernel_siginfo *info)
 798{
 799	return info == SEND_SIG_NOINFO ||
 800		(!is_si_special(info) && SI_FROMUSER(info));
 801}
 802
 803/*
 804 * called with RCU read lock from check_kill_permission()
 805 */
 806static bool kill_ok_by_cred(struct task_struct *t)
 807{
 808	const struct cred *cred = current_cred();
 809	const struct cred *tcred = __task_cred(t);
 810
 811	return uid_eq(cred->euid, tcred->suid) ||
 812	       uid_eq(cred->euid, tcred->uid) ||
 813	       uid_eq(cred->uid, tcred->suid) ||
 814	       uid_eq(cred->uid, tcred->uid) ||
 815	       ns_capable(tcred->user_ns, CAP_KILL);
 816}
 817
 818/*
 819 * Bad permissions for sending the signal
 820 * - the caller must hold the RCU read lock
 821 */
 822static int check_kill_permission(int sig, struct kernel_siginfo *info,
 823				 struct task_struct *t)
 824{
 825	struct pid *sid;
 826	int error;
 827
 828	if (!valid_signal(sig))
 829		return -EINVAL;
 830
 831	if (!si_fromuser(info))
 832		return 0;
 833
 834	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 835	if (error)
 836		return error;
 837
 838	if (!same_thread_group(current, t) &&
 839	    !kill_ok_by_cred(t)) {
 840		switch (sig) {
 841		case SIGCONT:
 842			sid = task_session(t);
 843			/*
 844			 * We don't return the error if sid == NULL. The
 845			 * task was unhashed, the caller must notice this.
 846			 */
 847			if (!sid || sid == task_session(current))
 848				break;
 849			/* fall through */
 850		default:
 851			return -EPERM;
 852		}
 853	}
 854
 855	return security_task_kill(t, info, sig, NULL);
 856}
 857
 858/**
 859 * ptrace_trap_notify - schedule trap to notify ptracer
 860 * @t: tracee wanting to notify tracer
 861 *
 862 * This function schedules sticky ptrace trap which is cleared on the next
 863 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 864 * ptracer.
 865 *
 866 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 867 * ptracer is listening for events, tracee is woken up so that it can
 868 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 869 * eventually taken without returning to userland after the existing traps
 870 * are finished by PTRACE_CONT.
 871 *
 872 * CONTEXT:
  873 * Must be called with @t->sighand->siglock held.
 874 */
 875static void ptrace_trap_notify(struct task_struct *t)
 876{
 877	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 878	assert_spin_locked(&t->sighand->siglock);
 879
 880	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 881	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 882}
 883
 884/*
 885 * Handle magic process-wide effects of stop/continue signals. Unlike
 886 * the signal actions, these happen immediately at signal-generation
 887 * time regardless of blocking, ignoring, or handling.  This does the
 888 * actual continuing for SIGCONT, but not the actual stopping for stop
 889 * signals. The process stop is done as a signal action for SIG_DFL.
 890 *
 891 * Returns true if the signal should be actually delivered, otherwise
 892 * it should be dropped.
 893 */
 894static bool prepare_signal(int sig, struct task_struct *p, bool force)
 895{
 896	struct signal_struct *signal = p->signal;
 897	struct task_struct *t;
 898	sigset_t flush;
 899
 900	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
 901		if (!(signal->flags & SIGNAL_GROUP_EXIT))
 902			return sig == SIGKILL;
 903		/*
 904		 * The process is in the middle of dying, nothing to do.
 905		 */
 906	} else if (sig_kernel_stop(sig)) {
 907		/*
 908		 * This is a stop signal.  Remove SIGCONT from all queues.
 909		 */
 910		siginitset(&flush, sigmask(SIGCONT));
 911		flush_sigqueue_mask(&flush, &signal->shared_pending);
 912		for_each_thread(p, t)
 913			flush_sigqueue_mask(&flush, &t->pending);
 914	} else if (sig == SIGCONT) {
 915		unsigned int why;
 916		/*
 917		 * Remove all stop signals from all queues, wake all threads.
 918		 */
 919		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 920		flush_sigqueue_mask(&flush, &signal->shared_pending);
 921		for_each_thread(p, t) {
 922			flush_sigqueue_mask(&flush, &t->pending);
 923			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 924			if (likely(!(t->ptrace & PT_SEIZED)))
 925				wake_up_state(t, __TASK_STOPPED);
 926			else
 927				ptrace_trap_notify(t);
 928		}
 929
 930		/*
 931		 * Notify the parent with CLD_CONTINUED if we were stopped.
 932		 *
 933		 * If we were in the middle of a group stop, we pretend it
 934		 * was already finished, and then continued. Since SIGCHLD
 935		 * doesn't queue we report only CLD_STOPPED, as if the next
 936		 * CLD_CONTINUED was dropped.
 937		 */
 938		why = 0;
 939		if (signal->flags & SIGNAL_STOP_STOPPED)
 940			why |= SIGNAL_CLD_CONTINUED;
 941		else if (signal->group_stop_count)
 942			why |= SIGNAL_CLD_STOPPED;
 943
 944		if (why) {
 945			/*
 946			 * The first thread which returns from do_signal_stop()
 947			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 948			 * notify its parent. See get_signal().
 949			 */
 950			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 951			signal->group_stop_count = 0;
 952			signal->group_exit_code = 0;
 953		}
 954	}
 955
 956	return !sig_ignored(p, sig, force);
 957}
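
/*
 * Behavioral example of the above: kill(pid, SIGCONT) on a stopped job
 * flushes any queued stop signals and wakes the stopped threads at
 * generation time, even if every thread blocks or ignores SIGCONT; only
 * the delivery of SIGCONT itself depends on its disposition.
 */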
 958
 959/*
 960 * Test if P wants to take SIG.  After we've checked all threads with this,
 961 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 962 * blocking SIG were ruled out because they are not running and already
 963 * have pending signals.  Such threads will dequeue from the shared queue
 964 * as soon as they're available, so putting the signal on the shared queue
 965 * will be equivalent to sending it to one such thread.
 966 */
 967static inline bool wants_signal(int sig, struct task_struct *p)
 968{
 969	if (sigismember(&p->blocked, sig))
 970		return false;
 971
 972	if (p->flags & PF_EXITING)
 973		return false;
 974
 975	if (sig == SIGKILL)
 976		return true;
 977
 978	if (task_is_stopped_or_traced(p))
 979		return false;
 980
 981	return task_curr(p) || !signal_pending(p);
 982}
 983
 984static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 985{
 986	struct signal_struct *signal = p->signal;
 987	struct task_struct *t;
 988
 989	/*
 990	 * Now find a thread we can wake up to take the signal off the queue.
 991	 *
 992	 * If the main thread wants the signal, it gets first crack.
 993	 * Probably the least surprising to the average bear.
 994	 */
 995	if (wants_signal(sig, p))
 996		t = p;
 997	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
 998		/*
 999		 * There is just one thread and it does not need to be woken.
1000		 * It will dequeue unblocked signals before it runs again.
1001		 */
1002		return;
1003	else {
1004		/*
1005		 * Otherwise try to find a suitable thread.
1006		 */
1007		t = signal->curr_target;
1008		while (!wants_signal(sig, t)) {
1009			t = next_thread(t);
1010			if (t == signal->curr_target)
1011				/*
1012				 * No thread needs to be woken.
1013				 * Any eligible threads will see
1014				 * the signal in the queue soon.
1015				 */
1016				return;
1017		}
1018		signal->curr_target = t;
1019	}
1020
1021	/*
1022	 * Found a killable thread.  If the signal will be fatal,
1023	 * then start taking the whole group down immediately.
1024	 */
1025	if (sig_fatal(p, sig) &&
1026	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
1027	    !sigismember(&t->real_blocked, sig) &&
1028	    (sig == SIGKILL || !p->ptrace)) {
1029		/*
1030		 * This signal will be fatal to the whole group.
1031		 */
1032		if (!sig_kernel_coredump(sig)) {
1033			/*
1034			 * Start a group exit and wake everybody up.
1035			 * This way we don't have other threads
1036			 * running and doing things after a slower
1037			 * thread has the fatal signal pending.
1038			 */
1039			signal->flags = SIGNAL_GROUP_EXIT;
1040			signal->group_exit_code = sig;
1041			signal->group_stop_count = 0;
1042			t = p;
1043			do {
1044				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1045				sigaddset(&t->pending.signal, SIGKILL);
1046				signal_wake_up(t, 1);
1047			} while_each_thread(p, t);
1048			return;
1049		}
1050	}
1051
1052	/*
1053	 * The signal is already in the shared-pending queue.
1054	 * Tell the chosen thread to wake up and dequeue it.
1055	 */
1056	signal_wake_up(t, sig == SIGKILL);
1057	return;
1058}
1059
1060static inline bool legacy_queue(struct sigpending *signals, int sig)
1061{
1062	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1063}
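
/*
 * Example of the legacy-queue rule: a second SIGCHLD (sig < SIGRTMIN)
 * sent while one is already pending is coalesced into the first and its
 * siginfo is lost, which is why __send_signal() below bails out early
 * with TRACE_SIGNAL_ALREADY_PENDING.  Real-time signals (>= SIGRTMIN)
 * queue individually, subject to RLIMIT_SIGPENDING.
 */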
1064
1065static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1066			enum pid_type type, bool force)
1067{
1068	struct sigpending *pending;
1069	struct sigqueue *q;
1070	int override_rlimit;
1071	int ret = 0, result;
1072
1073	assert_spin_locked(&t->sighand->siglock);
1074
1075	result = TRACE_SIGNAL_IGNORED;
1076	if (!prepare_signal(sig, t, force))
1077		goto ret;
1078
1079	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1080	/*
1081	 * Short-circuit ignored signals and support queuing
1082	 * exactly one non-rt signal, so that we can get more
1083	 * detailed information about the cause of the signal.
1084	 */
1085	result = TRACE_SIGNAL_ALREADY_PENDING;
1086	if (legacy_queue(pending, sig))
1087		goto ret;
1088
1089	result = TRACE_SIGNAL_DELIVERED;
1090	/*
1091	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1092	 */
1093	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1094		goto out_set;
1095
1096	/*
1097	 * Real-time signals must be queued if sent by sigqueue, or
1098	 * some other real-time mechanism.  It is implementation
1099	 * defined whether kill() does so.  We attempt to do so, on
1100	 * the principle of least surprise, but since kill is not
1101	 * allowed to fail with EAGAIN when low on memory we just
1102	 * make sure at least one signal gets delivered and don't
1103	 * pass on the info struct.
1104	 */
1105	if (sig < SIGRTMIN)
1106		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1107	else
1108		override_rlimit = 0;
1109
1110	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1111	if (q) {
1112		list_add_tail(&q->list, &pending->list);
1113		switch ((unsigned long) info) {
1114		case (unsigned long) SEND_SIG_NOINFO:
1115			clear_siginfo(&q->info);
1116			q->info.si_signo = sig;
1117			q->info.si_errno = 0;
1118			q->info.si_code = SI_USER;
1119			q->info.si_pid = task_tgid_nr_ns(current,
1120							task_active_pid_ns(t));
1121			rcu_read_lock();
1122			q->info.si_uid =
1123				from_kuid_munged(task_cred_xxx(t, user_ns),
1124						 current_uid());
1125			rcu_read_unlock();
1126			break;
1127		case (unsigned long) SEND_SIG_PRIV:
1128			clear_siginfo(&q->info);
1129			q->info.si_signo = sig;
1130			q->info.si_errno = 0;
1131			q->info.si_code = SI_KERNEL;
1132			q->info.si_pid = 0;
1133			q->info.si_uid = 0;
1134			break;
1135		default:
1136			copy_siginfo(&q->info, info);
1137			break;
1138		}
1139	} else if (!is_si_special(info) &&
1140		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1141		/*
1142		 * Queue overflow, abort.  We may abort if the
1143		 * signal was rt and sent by user using something
1144		 * other than kill().
1145		 */
1146		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1147		ret = -EAGAIN;
1148		goto ret;
1149	} else {
1150		/*
1151		 * This is a silent loss of information.  We still
1152		 * send the signal, but the *info bits are lost.
1153		 */
1154		result = TRACE_SIGNAL_LOSE_INFO;
1155	}
1156
1157out_set:
1158	signalfd_notify(t, sig);
1159	sigaddset(&pending->signal, sig);
1160
1161	/* Let multiprocess signals appear after on-going forks */
1162	if (type > PIDTYPE_TGID) {
1163		struct multiprocess_signals *delayed;
1164		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1165			sigset_t *signal = &delayed->signal;
1166			/* Can't queue both a stop and a continue signal */
1167			if (sig == SIGCONT)
1168				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1169			else if (sig_kernel_stop(sig))
1170				sigdelset(signal, SIGCONT);
1171			sigaddset(signal, sig);
1172		}
1173	}
1174
1175	complete_signal(sig, t, type);
1176ret:
1177	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1178	return ret;
1179}
1180
1181static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1182{
1183	bool ret = false;
1184	switch (siginfo_layout(info->si_signo, info->si_code)) {
1185	case SIL_KILL:
1186	case SIL_CHLD:
1187	case SIL_RT:
1188		ret = true;
1189		break;
1190	case SIL_TIMER:
1191	case SIL_POLL:
1192	case SIL_FAULT:
1193	case SIL_FAULT_MCEERR:
1194	case SIL_FAULT_BNDERR:
1195	case SIL_FAULT_PKUERR:
1196	case SIL_SYS:
1197		ret = false;
1198		break;
1199	}
1200	return ret;
1201}
1202
1203static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1204			enum pid_type type)
1205{
1206	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1207	bool force = false;
1208
1209	if (info == SEND_SIG_NOINFO) {
1210		/* Force if sent from an ancestor pid namespace */
1211		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1212	} else if (info == SEND_SIG_PRIV) {
1213		/* Don't ignore kernel generated signals */
1214		force = true;
1215	} else if (has_si_pid_and_uid(info)) {
 1216		/* SIGKILL and SIGSTOP are special or have ids */
1217		struct user_namespace *t_user_ns;
1218
1219		rcu_read_lock();
1220		t_user_ns = task_cred_xxx(t, user_ns);
1221		if (current_user_ns() != t_user_ns) {
1222			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1223			info->si_uid = from_kuid_munged(t_user_ns, uid);
1224		}
1225		rcu_read_unlock();
1226
1227		/* A kernel generated signal? */
1228		force = (info->si_code == SI_KERNEL);
1229
1230		/* From an ancestor pid namespace? */
1231		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1232			info->si_pid = 0;
1233			force = true;
1234		}
1235	}
1236	return __send_signal(sig, info, t, type, force);
1237}
1238
1239static void print_fatal_signal(int signr)
1240{
1241	struct pt_regs *regs = signal_pt_regs();
1242	pr_info("potentially unexpected fatal signal %d.\n", signr);
1243
1244#if defined(__i386__) && !defined(__arch_um__)
1245	pr_info("code at %08lx: ", regs->ip);
1246	{
1247		int i;
1248		for (i = 0; i < 16; i++) {
1249			unsigned char insn;
1250
1251			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1252				break;
1253			pr_cont("%02x ", insn);
1254		}
1255	}
1256	pr_cont("\n");
1257#endif
1258	preempt_disable();
1259	show_regs(regs);
1260	preempt_enable();
1261}
1262
1263static int __init setup_print_fatal_signals(char *str)
1264{
1265	get_option (&str, &print_fatal_signals);
1266
1267	return 1;
1268}
1269
1270__setup("print-fatal-signals=", setup_print_fatal_signals);
1271
1272int
1273__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1274{
1275	return send_signal(sig, info, p, PIDTYPE_TGID);
1276}
1277
1278int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1279			enum pid_type type)
1280{
1281	unsigned long flags;
1282	int ret = -ESRCH;
1283
1284	if (lock_task_sighand(p, &flags)) {
1285		ret = send_signal(sig, info, p, type);
1286		unlock_task_sighand(p, &flags);
1287	}
1288
1289	return ret;
1290}
1291
1292/*
1293 * Force a signal that the process can't ignore: if necessary
1294 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1295 *
1296 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1297 * since we do not want to have a signal handler that was blocked
1298 * be invoked when user space had explicitly blocked it.
1299 *
1300 * We don't want to have recursive SIGSEGV's etc, for example,
1301 * that is why we also clear SIGNAL_UNKILLABLE.
1302 */
1303static int
1304force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1305{
1306	unsigned long int flags;
1307	int ret, blocked, ignored;
1308	struct k_sigaction *action;
1309	int sig = info->si_signo;
1310
1311	spin_lock_irqsave(&t->sighand->siglock, flags);
1312	action = &t->sighand->action[sig-1];
1313	ignored = action->sa.sa_handler == SIG_IGN;
1314	blocked = sigismember(&t->blocked, sig);
1315	if (blocked || ignored) {
1316		action->sa.sa_handler = SIG_DFL;
1317		if (blocked) {
1318			sigdelset(&t->blocked, sig);
1319			recalc_sigpending_and_wake(t);
1320		}
1321	}
1322	/*
1323	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1324	 * debugging to leave init killable.
1325	 */
1326	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1327		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1328	ret = send_signal(sig, info, t, PIDTYPE_PID);
1329	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1330
1331	return ret;
1332}
1333
1334int force_sig_info(struct kernel_siginfo *info)
1335{
1336	return force_sig_info_to_task(info, current);
1337}
1338
1339/*
1340 * Nuke all other threads in the group.
1341 */
1342int zap_other_threads(struct task_struct *p)
1343{
1344	struct task_struct *t = p;
1345	int count = 0;
1346
1347	p->signal->group_stop_count = 0;
1348
1349	while_each_thread(p, t) {
1350		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1351		count++;
1352
1353		/* Don't bother with already dead threads */
1354		if (t->exit_state)
1355			continue;
1356		sigaddset(&t->pending.signal, SIGKILL);
1357		signal_wake_up(t, 1);
1358	}
1359
1360	return count;
1361}
1362
1363struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1364					   unsigned long *flags)
1365{
1366	struct sighand_struct *sighand;
1367
1368	rcu_read_lock();
1369	for (;;) {
1370		sighand = rcu_dereference(tsk->sighand);
1371		if (unlikely(sighand == NULL))
1372			break;
1373
1374		/*
1375		 * This sighand can be already freed and even reused, but
1376		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1377		 * initializes ->siglock: this slab can't go away, it has
1378		 * the same object type, ->siglock can't be reinitialized.
1379		 *
1380		 * We need to ensure that tsk->sighand is still the same
1381		 * after we take the lock, we can race with de_thread() or
1382		 * __exit_signal(). In the latter case the next iteration
1383		 * must see ->sighand == NULL.
1384		 */
1385		spin_lock_irqsave(&sighand->siglock, *flags);
1386		if (likely(sighand == tsk->sighand))
1387			break;
1388		spin_unlock_irqrestore(&sighand->siglock, *flags);
1389	}
1390	rcu_read_unlock();
1391
1392	return sighand;
1393}
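
/*
 * Caller sketch (hypothetical): lock_task_sighand(), the wrapper around
 * __lock_task_sighand(), returns NULL once the task's sighand has been
 * detached in release_task(), so the result must always be checked.
 */
static int with_task_sighand(struct task_struct *p)
{
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return -ESRCH;	/* task is being released */
	/* ... p->signal and p->pending may be inspected here ... */
	unlock_task_sighand(p, &flags);
	return 0;
}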
1394
1395/*
1396 * send signal info to all the members of a group
1397 */
1398int group_send_sig_info(int sig, struct kernel_siginfo *info,
1399			struct task_struct *p, enum pid_type type)
1400{
1401	int ret;
1402
1403	rcu_read_lock();
1404	ret = check_kill_permission(sig, info, p);
1405	rcu_read_unlock();
1406
1407	if (!ret && sig)
1408		ret = do_send_sig_info(sig, info, p, type);
1409
1410	return ret;
1411}
1412
1413/*
1414 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1415 * control characters do (^C, ^Z etc)
1416 * - the caller must hold at least a readlock on tasklist_lock
1417 */
1418int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1419{
1420	struct task_struct *p = NULL;
1421	int retval, success;
1422
1423	success = 0;
1424	retval = -ESRCH;
1425	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1426		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1427		success |= !err;
1428		retval = err;
1429	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1430	return success ? 0 : retval;
1431}
1432
1433int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1434{
1435	int error = -ESRCH;
1436	struct task_struct *p;
1437
1438	for (;;) {
1439		rcu_read_lock();
1440		p = pid_task(pid, PIDTYPE_PID);
1441		if (p)
1442			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1443		rcu_read_unlock();
1444		if (likely(!p || error != -ESRCH))
1445			return error;
1446
1447		/*
1448		 * The task was unhashed in between, try again.  If it
1449		 * is dead, pid_task() will return NULL, if we race with
1450		 * de_thread() it will find the new leader.
1451		 */
1452	}
1453}
1454
1455static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1456{
1457	int error;
1458	rcu_read_lock();
1459	error = kill_pid_info(sig, info, find_vpid(pid));
1460	rcu_read_unlock();
1461	return error;
1462}
1463
1464static inline bool kill_as_cred_perm(const struct cred *cred,
1465				     struct task_struct *target)
1466{
1467	const struct cred *pcred = __task_cred(target);
1468
1469	return uid_eq(cred->euid, pcred->suid) ||
1470	       uid_eq(cred->euid, pcred->uid) ||
1471	       uid_eq(cred->uid, pcred->suid) ||
1472	       uid_eq(cred->uid, pcred->uid);
1473}
1474
1475/*
1476 * The usb asyncio usage of siginfo is wrong.  The glibc support
1477 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1478 * AKA after the generic fields:
1479 *	kernel_pid_t	si_pid;
1480 *	kernel_uid32_t	si_uid;
1481 *	sigval_t	si_value;
1482 *
1483 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1484 * after the generic fields is:
1485 *	void __user 	*si_addr;
1486 *
1487 * This is a practical problem when there is a 64bit big endian kernel
 1488 * and a 32bit userspace.  The 32bit address will be encoded in the low
 1489 * 32bits of the pointer.  Those low 32bits will be stored at a higher
 1490 * address than they would appear at in a 32bit pointer.  So userspace
 1491 * will not see the address it was expecting for its completions.
 1492 *
 1493 * There is nothing in the encoding that can allow
 1494 * copy_siginfo_to_user32 to detect this confusion of formats, so
 1495 * handle this by requiring the caller of kill_pid_usb_asyncio to
 1496 * notice when this situation takes place and to store the 32bit
1497 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1498 * parameter.
1499 */
1500int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1501			 struct pid *pid, const struct cred *cred)
1502{
1503	struct kernel_siginfo info;
1504	struct task_struct *p;
1505	unsigned long flags;
1506	int ret = -EINVAL;
1507
1508	clear_siginfo(&info);
1509	info.si_signo = sig;
1510	info.si_errno = errno;
1511	info.si_code = SI_ASYNCIO;
1512	*((sigval_t *)&info.si_pid) = addr;
1513
1514	if (!valid_signal(sig))
1515		return ret;
1516
1517	rcu_read_lock();
1518	p = pid_task(pid, PIDTYPE_PID);
1519	if (!p) {
1520		ret = -ESRCH;
1521		goto out_unlock;
1522	}
1523	if (!kill_as_cred_perm(cred, p)) {
1524		ret = -EPERM;
1525		goto out_unlock;
1526	}
1527	ret = security_task_kill(p, &info, sig, cred);
1528	if (ret)
1529		goto out_unlock;
1530
1531	if (sig) {
1532		if (lock_task_sighand(p, &flags)) {
1533			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1534			unlock_task_sighand(p, &flags);
1535		} else
1536			ret = -ESRCH;
1537	}
1538out_unlock:
1539	rcu_read_unlock();
1540	return ret;
1541}
1542EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1543
1544/*
1545 * kill_something_info() interprets pid in interesting ways just like kill(2).
1546 *
1547 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1548 * is probably wrong.  Should make it like BSD or SYSV.
1549 */
1550
1551static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1552{
1553	int ret;
1554
1555	if (pid > 0) {
1556		rcu_read_lock();
1557		ret = kill_pid_info(sig, info, find_vpid(pid));
1558		rcu_read_unlock();
1559		return ret;
1560	}
1561
1562	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1563	if (pid == INT_MIN)
1564		return -ESRCH;
1565
1566	read_lock(&tasklist_lock);
1567	if (pid != -1) {
1568		ret = __kill_pgrp_info(sig, info,
1569				pid ? find_vpid(-pid) : task_pgrp(current));
1570	} else {
1571		int retval = 0, count = 0;
1572		struct task_struct * p;
1573
1574		for_each_process(p) {
1575			if (task_pid_vnr(p) > 1 &&
1576					!same_thread_group(p, current)) {
1577				int err = group_send_sig_info(sig, info, p,
1578							      PIDTYPE_MAX);
1579				++count;
1580				if (err != -EPERM)
1581					retval = err;
1582			}
1583		}
1584		ret = count ? retval : -ESRCH;
1585	}
1586	read_unlock(&tasklist_lock);
1587
1588	return ret;
1589}
1590
1591/*
1592 * These are for backward compatibility with the rest of the kernel source.
1593 */
1594
1595int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1596{
1597	/*
1598	 * Make sure legacy kernel users don't send in bad values
1599	 * (normal paths check this in check_kill_permission).
1600	 */
1601	if (!valid_signal(sig))
1602		return -EINVAL;
1603
1604	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1605}
1606EXPORT_SYMBOL(send_sig_info);
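
/*
 * Usage sketch (hypothetical driver code): queue a SIGUSR1 carrying a
 * value to a task we hold a reference on.  SI_QUEUE mimics a
 * sigqueue(3)-style sender, so a user handler can read si_value.
 */
static int send_value_to_task(struct task_struct *t, int val)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGUSR1;
	info.si_errno = 0;
	info.si_code  = SI_QUEUE;
	info.si_pid   = 0;
	info.si_uid   = 0;
	info.si_value.sival_int = val;

	return send_sig_info(SIGUSR1, &info, t);
}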
1607
1608#define __si_special(priv) \
1609	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1610
1611int
1612send_sig(int sig, struct task_struct *p, int priv)
1613{
1614	return send_sig_info(sig, __si_special(priv), p);
1615}
1616EXPORT_SYMBOL(send_sig);
1617
1618void force_sig(int sig)
1619{
1620	struct kernel_siginfo info;
1621
1622	clear_siginfo(&info);
1623	info.si_signo = sig;
1624	info.si_errno = 0;
1625	info.si_code = SI_KERNEL;
1626	info.si_pid = 0;
1627	info.si_uid = 0;
1628	force_sig_info(&info);
1629}
1630EXPORT_SYMBOL(force_sig);
1631
1632/*
1633 * When things go south during signal handling, we
1634 * will force a SIGSEGV. And if the signal that caused
1635 * the problem was already a SIGSEGV, we'll want to
 1636 * make sure we don't even try to deliver the signal.
1637 */
1638void force_sigsegv(int sig)
1639{
1640	struct task_struct *p = current;
1641
1642	if (sig == SIGSEGV) {
1643		unsigned long flags;
1644		spin_lock_irqsave(&p->sighand->siglock, flags);
1645		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1646		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1647	}
1648	force_sig(SIGSEGV);
1649}
1650
1651int force_sig_fault_to_task(int sig, int code, void __user *addr
1652	___ARCH_SI_TRAPNO(int trapno)
1653	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1654	, struct task_struct *t)
1655{
1656	struct kernel_siginfo info;
1657
1658	clear_siginfo(&info);
1659	info.si_signo = sig;
1660	info.si_errno = 0;
1661	info.si_code  = code;
1662	info.si_addr  = addr;
1663#ifdef __ARCH_SI_TRAPNO
1664	info.si_trapno = trapno;
1665#endif
1666#ifdef __ia64__
1667	info.si_imm = imm;
1668	info.si_flags = flags;
1669	info.si_isr = isr;
1670#endif
1671	return force_sig_info_to_task(&info, t);
1672}
1673
1674int force_sig_fault(int sig, int code, void __user *addr
1675	___ARCH_SI_TRAPNO(int trapno)
1676	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1677{
1678	return force_sig_fault_to_task(sig, code, addr
1679				       ___ARCH_SI_TRAPNO(trapno)
1680				       ___ARCH_SI_IA64(imm, flags, isr), current);
1681}
1682
1683int send_sig_fault(int sig, int code, void __user *addr
1684	___ARCH_SI_TRAPNO(int trapno)
1685	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1686	, struct task_struct *t)
1687{
1688	struct kernel_siginfo info;
1689
1690	clear_siginfo(&info);
1691	info.si_signo = sig;
1692	info.si_errno = 0;
1693	info.si_code  = code;
1694	info.si_addr  = addr;
1695#ifdef __ARCH_SI_TRAPNO
1696	info.si_trapno = trapno;
1697#endif
1698#ifdef __ia64__
1699	info.si_imm = imm;
1700	info.si_flags = flags;
1701	info.si_isr = isr;
1702#endif
1703	return send_sig_info(info.si_signo, &info, t);
1704}
1705
1706int force_sig_mceerr(int code, void __user *addr, short lsb)
1707{
1708	struct kernel_siginfo info;
1709
1710	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1711	clear_siginfo(&info);
1712	info.si_signo = SIGBUS;
1713	info.si_errno = 0;
1714	info.si_code = code;
1715	info.si_addr = addr;
1716	info.si_addr_lsb = lsb;
1717	return force_sig_info(&info);
1718}
1719
1720int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1721{
1722	struct kernel_siginfo info;
1723
1724	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1725	clear_siginfo(&info);
1726	info.si_signo = SIGBUS;
1727	info.si_errno = 0;
1728	info.si_code = code;
1729	info.si_addr = addr;
1730	info.si_addr_lsb = lsb;
1731	return send_sig_info(info.si_signo, &info, t);
1732}
1733EXPORT_SYMBOL(send_sig_mceerr);
1734
1735int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1736{
1737	struct kernel_siginfo info;
1738
1739	clear_siginfo(&info);
1740	info.si_signo = SIGSEGV;
1741	info.si_errno = 0;
1742	info.si_code  = SEGV_BNDERR;
1743	info.si_addr  = addr;
1744	info.si_lower = lower;
1745	info.si_upper = upper;
1746	return force_sig_info(&info);
1747}
1748
1749#ifdef SEGV_PKUERR
1750int force_sig_pkuerr(void __user *addr, u32 pkey)
1751{
1752	struct kernel_siginfo info;
1753
1754	clear_siginfo(&info);
1755	info.si_signo = SIGSEGV;
1756	info.si_errno = 0;
1757	info.si_code  = SEGV_PKUERR;
1758	info.si_addr  = addr;
1759	info.si_pkey  = pkey;
1760	return force_sig_info(&info);
1761}
1762#endif
1763
 1763
1764/* For the crazy architectures that include trap information in
1765 * the errno field, instead of an actual errno value.
1766 */
1767int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1768{
1769	struct kernel_siginfo info;
1770
1771	clear_siginfo(&info);
1772	info.si_signo = SIGTRAP;
1773	info.si_errno = errno;
1774	info.si_code  = TRAP_HWBKPT;
1775	info.si_addr  = addr;
1776	return force_sig_info(&info);
1777}
1778
1779int kill_pgrp(struct pid *pid, int sig, int priv)
1780{
1781	int ret;
1782
1783	read_lock(&tasklist_lock);
1784	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1785	read_unlock(&tasklist_lock);
1786
1787	return ret;
1788}
1789EXPORT_SYMBOL(kill_pgrp);
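
/*
 * Usage sketch (hypothetical; compare the tty driver's job control):
 * deliver the interactive "^C" to every member of a foreground process
 * group.  priv=1 selects SEND_SIG_PRIV, marking the signal as
 * kernel-generated.
 */
static void interrupt_foreground_pgrp(struct pid *pgrp)
{
	kill_pgrp(pgrp, SIGINT, 1);
}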
1790
1791int kill_pid(struct pid *pid, int sig, int priv)
1792{
1793	return kill_pid_info(sig, __si_special(priv), pid);
1794}
1795EXPORT_SYMBOL(kill_pid);
1796
1797/*
1798 * These functions support sending signals using preallocated sigqueue
1799 * structures.  This is needed "because realtime applications cannot
1800 * afford to lose notifications of asynchronous events, like timer
1801 * expirations or I/O completions".  In the case of POSIX Timers
1802 * we allocate the sigqueue structure from the timer_create.  If this
1803 * allocation fails we are able to report the failure to the application
1804 * with an EAGAIN error.
1805 */
1806struct sigqueue *sigqueue_alloc(void)
1807{
1808	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1809
1810	if (q)
1811		q->flags |= SIGQUEUE_PREALLOC;
1812
1813	return q;
1814}
1815
1816void sigqueue_free(struct sigqueue *q)
1817{
1818	unsigned long flags;
1819	spinlock_t *lock = &current->sighand->siglock;
1820
1821	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1822	/*
1823	 * We must hold ->siglock while testing q->list
1824	 * to serialize with collect_signal() or with
1825	 * __exit_signal()->flush_sigqueue().
1826	 */
1827	spin_lock_irqsave(lock, flags);
1828	q->flags &= ~SIGQUEUE_PREALLOC;
1829	/*
1830	 * If it is queued it will be freed when dequeued,
1831	 * like the "regular" sigqueue.
1832	 */
1833	if (!list_empty(&q->list))
1834		q = NULL;
1835	spin_unlock_irqrestore(lock, flags);
1836
1837	if (q)
1838		__sigqueue_free(q);
1839}
1840
1841int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1842{
1843	int sig = q->info.si_signo;
1844	struct sigpending *pending;
1845	struct task_struct *t;
1846	unsigned long flags;
1847	int ret, result;
1848
1849	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1850
1851	ret = -1;
1852	rcu_read_lock();
1853	t = pid_task(pid, type);
1854	if (!t || !likely(lock_task_sighand(t, &flags)))
1855		goto ret;
1856
1857	ret = 1; /* the signal is ignored */
1858	result = TRACE_SIGNAL_IGNORED;
1859	if (!prepare_signal(sig, t, false))
1860		goto out;
1861
1862	ret = 0;
1863	if (unlikely(!list_empty(&q->list))) {
1864		/*
 1865		 * If an SI_TIMER entry is already queued, just increment
1866		 * the overrun count.
1867		 */
1868		BUG_ON(q->info.si_code != SI_TIMER);
1869		q->info.si_overrun++;
1870		result = TRACE_SIGNAL_ALREADY_PENDING;
1871		goto out;
1872	}
1873	q->info.si_overrun = 0;
1874
1875	signalfd_notify(t, sig);
1876	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1877	list_add_tail(&q->list, &pending->list);
1878	sigaddset(&pending->signal, sig);
1879	complete_signal(sig, t, type);
1880	result = TRACE_SIGNAL_DELIVERED;
1881out:
1882	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1883	unlock_task_sighand(t, &flags);
1884ret:
1885	rcu_read_unlock();
1886	return ret;
1887}
1888
1889static void do_notify_pidfd(struct task_struct *task)
1890{
1891	struct pid *pid;
1892
1893	WARN_ON(task->exit_state == 0);
1894	pid = task_pid(task);
1895	wake_up_all(&pid->wait_pidfd);
1896}
1897
1898/*
1899 * Let a parent know about the death of a child.
1900 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1901 *
1902 * Returns true if our parent ignored us and so we've switched to
1903 * self-reaping.
1904 */
1905bool do_notify_parent(struct task_struct *tsk, int sig)
1906{
1907	struct kernel_siginfo info;
1908	unsigned long flags;
1909	struct sighand_struct *psig;
1910	bool autoreap = false;
1911	u64 utime, stime;
1912
1913	BUG_ON(sig == -1);
1914
1915	/* do_notify_parent_cldstop should have been called instead.  */
1916	BUG_ON(task_is_stopped_or_traced(tsk));
1917
1918	BUG_ON(!tsk->ptrace &&
1919	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1920
1921	/* Wake up all pidfd waiters */
1922	do_notify_pidfd(tsk);
1923
1924	if (sig != SIGCHLD) {
1925		/*
1926		 * This is only possible if parent == real_parent.
1927		 * Check if it has changed security domain.
1928		 */
1929		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1930			sig = SIGCHLD;
1931	}
1932
1933	clear_siginfo(&info);
1934	info.si_signo = sig;
1935	info.si_errno = 0;
1936	/*
1937	 * We are under tasklist_lock here so our parent is tied to
1938	 * us and cannot change.
1939	 *
1940	 * task_active_pid_ns will always return the same pid namespace
1941	 * until a task passes through release_task.
1942	 *
1943	 * write_lock() currently calls preempt_disable() which is the
1944	 * same as rcu_read_lock(), but according to Oleg, it is not
1945	 * correct to rely on this.
1946	 */
1947	rcu_read_lock();
1948	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1949	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1950				       task_uid(tsk));
1951	rcu_read_unlock();
1952
1953	task_cputime(tsk, &utime, &stime);
1954	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1955	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1956
1957	info.si_status = tsk->exit_code & 0x7f;
1958	if (tsk->exit_code & 0x80)
1959		info.si_code = CLD_DUMPED;
1960	else if (tsk->exit_code & 0x7f)
1961		info.si_code = CLD_KILLED;
1962	else {
1963		info.si_code = CLD_EXITED;
1964		info.si_status = tsk->exit_code >> 8;
1965	}
1966
1967	psig = tsk->parent->sighand;
1968	spin_lock_irqsave(&psig->siglock, flags);
1969	if (!tsk->ptrace && sig == SIGCHLD &&
1970	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1971	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1972		/*
1973		 * We are exiting and our parent doesn't care.  POSIX.1
1974		 * defines special semantics for setting SIGCHLD to SIG_IGN
1975		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1976		 * automatically and not left for our parent's wait4 call.
1977		 * Rather than having the parent do it as a magic kind of
1978		 * signal handler, we just set this to tell do_exit that we
1979		 * can be cleaned up without becoming a zombie.  Note that
1980		 * we still call __wake_up_parent in this case, because a
1981		 * blocked sys_wait4 might now return -ECHILD.
1982		 *
1983		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1984		 * is implementation-defined: we do (if you don't want
1985		 * it, just use SIG_IGN instead).
1986		 */
1987		autoreap = true;
1988		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1989			sig = 0;
1990	}
1991	if (valid_signal(sig) && sig)
1992		__group_send_sig_info(sig, &info, tsk->parent);
1993	__wake_up_parent(tsk, tsk->parent);
1994	spin_unlock_irqrestore(&psig->siglock, flags);
1995
1996	return autoreap;
1997}
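/*
 * Illustrative userspace sketch (not part of this file): opting into the
 * autoreap path above.  With SIGCHLD set to SIG_IGN, exiting children are
 * reaped automatically and a blocked wait(2) eventually fails with ECHILD.
 */
#include <signal.h>
#include <stddef.h>
#include <string.h>

static void example_opt_into_autoreap(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_IGN;	/* or keep a handler and set SA_NOCLDWAIT */
	sigaction(SIGCHLD, &sa, NULL);
}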
1998
1999/**
2000 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2001 * @tsk: task reporting the state change
2002 * @for_ptracer: the notification is for ptracer
2003 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2004 *
2005 * Notify @tsk's parent that the stopped/continued state has changed.  If
2006 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2007 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2008 *
2009 * CONTEXT:
2010 * Must be called with tasklist_lock at least read locked.
2011 */
2012static void do_notify_parent_cldstop(struct task_struct *tsk,
2013				     bool for_ptracer, int why)
2014{
2015	struct kernel_siginfo info;
2016	unsigned long flags;
2017	struct task_struct *parent;
2018	struct sighand_struct *sighand;
2019	u64 utime, stime;
2020
2021	if (for_ptracer) {
2022		parent = tsk->parent;
2023	} else {
2024		tsk = tsk->group_leader;
2025		parent = tsk->real_parent;
2026	}
2027
2028	clear_siginfo(&info);
2029	info.si_signo = SIGCHLD;
2030	info.si_errno = 0;
2031	/*
2032	 * see comment in do_notify_parent() about the following 4 lines
2033	 */
2034	rcu_read_lock();
2035	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2036	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2037	rcu_read_unlock();
2038
2039	task_cputime(tsk, &utime, &stime);
2040	info.si_utime = nsec_to_clock_t(utime);
2041	info.si_stime = nsec_to_clock_t(stime);
2042
2043	info.si_code = why;
2044	switch (why) {
2045	case CLD_CONTINUED:
2046		info.si_status = SIGCONT;
2047		break;
2048	case CLD_STOPPED:
2049		info.si_status = tsk->signal->group_exit_code & 0x7f;
2050		break;
2051	case CLD_TRAPPED:
2052		info.si_status = tsk->exit_code & 0x7f;
2053		break;
2054	default:
2055		BUG();
2056	}
2057
2058	sighand = parent->sighand;
2059	spin_lock_irqsave(&sighand->siglock, flags);
2060	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2061	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2062		__group_send_sig_info(SIGCHLD, &info, parent);
2063	/*
2064	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2065	 */
2066	__wake_up_parent(tsk, parent);
2067	spin_unlock_irqrestore(&sighand->siglock, flags);
2068}
2069
2070static inline bool may_ptrace_stop(void)
2071{
2072	if (!likely(current->ptrace))
2073		return false;
2074	/*
2075	 * Are we in the middle of do_coredump?
2076	 * If so and our tracer is also part of the coredump stopping
2077	 * is a deadlock situation, and pointless because our tracer
2078	 * is dead so don't allow us to stop.
2079	 * If SIGKILL was already sent before the caller unlocked
2080	 * ->siglock we must see ->core_state != NULL. Otherwise it
2081	 * is safe to enter schedule().
2082	 *
2083	 * This is almost outdated, a task with the pending SIGKILL can't
2084	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2085	 * after SIGKILL was already dequeued.
2086	 */
2087	if (unlikely(current->mm->core_state) &&
2088	    unlikely(current->mm == current->parent->mm))
2089		return false;
2090
2091	return true;
2092}
2093
2094/*
2095 * Return non-zero if there is a SIGKILL that should be waking us up.
2096 * Called with the siglock held.
2097 */
2098static bool sigkill_pending(struct task_struct *tsk)
2099{
2100	return sigismember(&tsk->pending.signal, SIGKILL) ||
2101	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2102}
2103
2104/*
2105 * This must be called with current->sighand->siglock held.
2106 *
2107 * This should be the path for all ptrace stops.
2108 * We always set current->last_siginfo while stopped here.
2109 * That makes it a way to test a stopped process for
2110 * being ptrace-stopped vs being job-control-stopped.
2111 *
2112 * If we actually decide not to stop at all because the tracer
2113 * is gone, we keep current->exit_code unless clear_code.
2114 */
2115static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2116	__releases(&current->sighand->siglock)
2117	__acquires(&current->sighand->siglock)
2118{
2119	bool gstop_done = false;
2120
2121	if (arch_ptrace_stop_needed(exit_code, info)) {
2122		/*
2123		 * The arch code has something special to do before a
2124		 * ptrace stop.  This is allowed to block, e.g. for faults
2125		 * on user stack pages.  We can't keep the siglock while
2126		 * calling arch_ptrace_stop, so we must release it now.
2127		 * To preserve proper semantics, we must do this before
2128		 * any signal bookkeeping like checking group_stop_count.
2129		 * Meanwhile, a SIGKILL could come in before we retake the
2130		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
2131		 * So after regaining the lock, we must check for SIGKILL.
2132		 */
2133		spin_unlock_irq(&current->sighand->siglock);
2134		arch_ptrace_stop(exit_code, info);
2135		spin_lock_irq(&current->sighand->siglock);
2136		if (sigkill_pending(current))
2137			return;
2138	}
2139
2140	set_special_state(TASK_TRACED);
2141
2142	/*
2143	 * We're committing to trapping.  TRACED should be visible before
2144	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2145	 * Also, transition to TRACED and updates to ->jobctl should be
2146	 * atomic with respect to siglock and should be done after the arch
2147	 * hook as siglock is released and regrabbed across it.
2148	 *
2149	 *     TRACER				    TRACEE
2150	 *
2151	 *     ptrace_attach()
2152	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2153	 *     do_wait()
2154	 *       set_current_state()                smp_wmb();
2155	 *       ptrace_do_wait()
2156	 *         wait_task_stopped()
2157	 *           task_stopped_code()
2158	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2159	 */
2160	smp_wmb();
2161
2162	current->last_siginfo = info;
2163	current->exit_code = exit_code;
2164
2165	/*
2166	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2167	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2168	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2169	 * could be clear now.  We act as if SIGCONT is received after
2170	 * TASK_TRACED is entered - ignore it.
2171	 */
2172	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2173		gstop_done = task_participate_group_stop(current);
2174
2175	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2176	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2177	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2178		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2179
2180	/* entering a trap, clear TRAPPING */
2181	task_clear_jobctl_trapping(current);
2182
2183	spin_unlock_irq(&current->sighand->siglock);
2184	read_lock(&tasklist_lock);
2185	if (may_ptrace_stop()) {
2186		/*
2187		 * Notify parents of the stop.
2188		 *
2189		 * While ptraced, there are two parents - the ptracer and
2190		 * the real_parent of the group_leader.  The ptracer should
2191		 * know about every stop while the real parent is only
2192		 * interested in the completion of group stop.  The states
2193		 * for the two don't interact with each other.  Notify
2194		 * separately unless they're gonna be duplicates.
2195		 */
2196		do_notify_parent_cldstop(current, true, why);
2197		if (gstop_done && ptrace_reparented(current))
2198			do_notify_parent_cldstop(current, false, why);
2199
2200		/*
2201		 * Don't want to allow preemption here, because
2202		 * sys_ptrace() needs this task to be inactive.
2203		 *
2204		 * XXX: implement read_unlock_no_resched().
2205		 */
2206		preempt_disable();
2207		read_unlock(&tasklist_lock);
2208		cgroup_enter_frozen();
2209		preempt_enable_no_resched();
2210		freezable_schedule();
2211		cgroup_leave_frozen(true);
2212	} else {
2213		/*
2214		 * By the time we got the lock, our tracer went away.
2215		 * Don't drop the lock yet, another tracer may come.
2216		 *
2217		 * If @gstop_done, the ptracer went away between group stop
2218		 * completion and here.  During detach, it would have set
2219		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2220		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2221		 * the real parent of the group stop completion is enough.
2222		 */
2223		if (gstop_done)
2224			do_notify_parent_cldstop(current, false, why);
2225
2226		/* tasklist protects us from ptrace_freeze_traced() */
2227		__set_current_state(TASK_RUNNING);
2228		if (clear_code)
2229			current->exit_code = 0;
2230		read_unlock(&tasklist_lock);
2231	}
2232
2233	/*
2234	 * We are back.  Now reacquire the siglock before touching
2235	 * last_siginfo, so that we are sure to have synchronized with
2236	 * any signal-sending on another CPU that wants to examine it.
2237	 */
2238	spin_lock_irq(&current->sighand->siglock);
2239	current->last_siginfo = NULL;
2240
2241	/* LISTENING can be set only during STOP traps, clear it */
2242	current->jobctl &= ~JOBCTL_LISTENING;
2243
2244	/*
2245	 * Queued signals ignored us while we were stopped for tracing.
2246	 * So check for any that we should take before resuming user mode.
2247	 * This sets TIF_SIGPENDING, but never clears it.
2248	 */
2249	recalc_sigpending_tsk(current);
2250}
2251
2252static void ptrace_do_notify(int signr, int exit_code, int why)
2253{
2254	kernel_siginfo_t info;
2255
2256	clear_siginfo(&info);
2257	info.si_signo = signr;
2258	info.si_code = exit_code;
2259	info.si_pid = task_pid_vnr(current);
2260	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2261
2262	/* Let the debugger run.  */
2263	ptrace_stop(exit_code, why, 1, &info);
2264}
2265
2266void ptrace_notify(int exit_code)
2267{
2268	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2269	if (unlikely(current->task_works))
2270		task_work_run();
2271
2272	spin_lock_irq(&current->sighand->siglock);
2273	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2274	spin_unlock_irq(&current->sighand->siglock);
2275}
2276
2277/**
2278 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2279 * @signr: signr causing group stop if initiating
2280 *
2281 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2282 * and participate in it.  If already set, participate in the existing
2283 * group stop.  If participated in a group stop (and thus slept), %true is
2284 * returned with siglock released.
2285 *
2286 * If ptraced, this function doesn't handle stop itself.  Instead,
2287 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2288 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2289 * place afterwards.
2290 *
2291 * CONTEXT:
2292 * Must be called with @current->sighand->siglock held, which is released
2293 * on %true return.
2294 *
2295 * RETURNS:
2296 * %false if group stop is already cancelled or ptrace trap is scheduled.
2297 * %true if participated in group stop.
2298 */
2299static bool do_signal_stop(int signr)
2300	__releases(&current->sighand->siglock)
2301{
2302	struct signal_struct *sig = current->signal;
2303
2304	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2305		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2306		struct task_struct *t;
2307
2308		/* signr will be recorded in task->jobctl for retries */
2309		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2310
2311		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2312		    unlikely(signal_group_exit(sig)))
2313			return false;
2314		/*
2315		 * There is no group stop already in progress.  We must
2316		 * initiate one now.
2317		 *
2318		 * While ptraced, a task may be resumed while group stop is
2319		 * still in effect and then receive a stop signal and
2320		 * initiate another group stop.  This deviates from the
2321		 * usual behavior as two consecutive stop signals can't
2322		 * cause two group stops when !ptraced.  That is why we
2323		 * also check !task_is_stopped(t) below.
2324		 *
2325		 * The condition can be distinguished by testing whether
2326		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2327		 * group_exit_code in such case.
2328		 *
2329		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2330		 * an intervening stop signal is required to cause two
2331		 * continued events regardless of ptrace.
2332		 */
2333		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2334			sig->group_exit_code = signr;
2335
2336		sig->group_stop_count = 0;
2337
2338		if (task_set_jobctl_pending(current, signr | gstop))
2339			sig->group_stop_count++;
2340
2341		t = current;
2342		while_each_thread(current, t) {
2343			/*
2344			 * Setting state to TASK_STOPPED for a group
2345			 * stop is always done with the siglock held,
2346			 * so this check has no races.
2347			 */
2348			if (!task_is_stopped(t) &&
2349			    task_set_jobctl_pending(t, signr | gstop)) {
2350				sig->group_stop_count++;
2351				if (likely(!(t->ptrace & PT_SEIZED)))
2352					signal_wake_up(t, 0);
2353				else
2354					ptrace_trap_notify(t);
2355			}
2356		}
2357	}
2358
2359	if (likely(!current->ptrace)) {
2360		int notify = 0;
2361
2362		/*
2363		 * If there are no other threads in the group, or if there
2364		 * is a group stop in progress and we are the last to stop,
2365		 * report to the parent.
2366		 */
2367		if (task_participate_group_stop(current))
2368			notify = CLD_STOPPED;
2369
2370		set_special_state(TASK_STOPPED);
2371		spin_unlock_irq(&current->sighand->siglock);
2372
2373		/*
2374		 * Notify the parent of the group stop completion.  Because
2375		 * we're not holding either the siglock or tasklist_lock
2376		 * here, ptracer may attach in between; however, this is for
2377		 * group stop and should always be delivered to the real
2378		 * parent of the group leader.  The new ptracer will get
2379		 * its notification when this task transitions into
2380		 * TASK_TRACED.
2381		 */
2382		if (notify) {
2383			read_lock(&tasklist_lock);
2384			do_notify_parent_cldstop(current, false, notify);
2385			read_unlock(&tasklist_lock);
2386		}
2387
2388		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2389		cgroup_enter_frozen();
2390		freezable_schedule();
2391		return true;
2392	} else {
2393		/*
2394		 * While ptraced, group stop is handled by STOP trap.
2395		 * Schedule it and let the caller deal with it.
2396		 */
2397		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2398		return false;
2399	}
2400}
2401
2402/**
2403 * do_jobctl_trap - take care of ptrace jobctl traps
2404 *
2405 * When PT_SEIZED, it's used for both group stop and explicit
2406 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2407 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2408 * the stop signal; otherwise, %SIGTRAP.
2409 *
2410 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2411 * number as exit_code and no siginfo.
2412 *
2413 * CONTEXT:
2414 * Must be called with @current->sighand->siglock held, which may be
2415 * released and re-acquired before returning with intervening sleep.
2416 */
2417static void do_jobctl_trap(void)
2418{
2419	struct signal_struct *signal = current->signal;
2420	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2421
2422	if (current->ptrace & PT_SEIZED) {
2423		if (!signal->group_stop_count &&
2424		    !(signal->flags & SIGNAL_STOP_STOPPED))
2425			signr = SIGTRAP;
2426		WARN_ON_ONCE(!signr);
2427		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2428				 CLD_STOPPED);
2429	} else {
2430		WARN_ON_ONCE(!signr);
2431		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2432		current->exit_code = 0;
2433	}
2434}
2435
2436/**
2437 * do_freezer_trap - handle the freezer jobctl trap
2438 *
2439 * Puts the task into a frozen state, unless the task is about to quit;
2440 * in that case it drops JOBCTL_TRAP_FREEZE.
2441 *
2442 * CONTEXT:
2443 * Must be called with @current->sighand->siglock held,
2444 * which is always released before returning.
2445 */
2446static void do_freezer_trap(void)
2447	__releases(&current->sighand->siglock)
2448{
2449	/*
2450	 * If there are trap bits pending other than JOBCTL_TRAP_FREEZE,
2451	 * make another loop to give them a chance to be handled.
2452	 * In any case, we'll return.
2453	 */
2454	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2455	     JOBCTL_TRAP_FREEZE) {
2456		spin_unlock_irq(&current->sighand->siglock);
2457		return;
2458	}
2459
2460	/*
2461	 * Now we're sure that there is no pending fatal signal and no
2462	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2463	 * immediately (if there is a non-fatal signal pending), and
2464	 * put the task into sleep.
2465	 */
2466	__set_current_state(TASK_INTERRUPTIBLE);
2467	clear_thread_flag(TIF_SIGPENDING);
2468	spin_unlock_irq(&current->sighand->siglock);
2469	cgroup_enter_frozen();
2470	freezable_schedule();
2471}
2472
2473static int ptrace_signal(int signr, kernel_siginfo_t *info)
2474{
2475	/*
2476	 * We do not check sig_kernel_stop(signr) but set this marker
2477	 * unconditionally because we do not know whether debugger will
2478	 * change signr. This flag has no meaning unless we are going
2479	 * to stop after return from ptrace_stop(). In this case it will
2480	 * be checked in do_signal_stop(), we should only stop if it was
2481	 * not cleared by SIGCONT while we were sleeping. See also the
2482	 * comment in dequeue_signal().
2483	 */
2484	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2485	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2486
2487	/* We're back.  Did the debugger cancel the sig?  */
2488	signr = current->exit_code;
2489	if (signr == 0)
2490		return signr;
2491
2492	current->exit_code = 0;
2493
2494	/*
2495	 * Update the siginfo structure if the signal has
2496	 * changed.  If the debugger wanted something
2497	 * specific in the siginfo structure then it should
2498	 * have updated *info via PTRACE_SETSIGINFO.
2499	 */
2500	if (signr != info->si_signo) {
2501		clear_siginfo(info);
2502		info->si_signo = signr;
2503		info->si_errno = 0;
2504		info->si_code = SI_USER;
2505		rcu_read_lock();
2506		info->si_pid = task_pid_vnr(current->parent);
2507		info->si_uid = from_kuid_munged(current_user_ns(),
2508						task_uid(current->parent));
2509		rcu_read_unlock();
2510	}
2511
2512	/* If the (new) signal is now blocked, requeue it.  */
2513	if (sigismember(&current->blocked, signr)) {
2514		send_signal(signr, info, current, PIDTYPE_PID);
2515		signr = 0;
2516	}
2517
2518	return signr;
2519}
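/*
 * Illustrative userspace sketch (not part of this file): the tracer's side
 * of the stop above.  Resuming with data == 0 makes current->exit_code
 * read back as 0 here, i.e. the signal is cancelled; resuming with a
 * different signal number rewrites what the tracee receives.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void example_swallow_stop_signal(pid_t tracee)
{
	int status;

	if (waitpid(tracee, &status, 0) == tracee && WIFSTOPPED(status))
		ptrace(PTRACE_CONT, tracee, 0, 0);	/* deliver no signal */
}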
2520
2521bool get_signal(struct ksignal *ksig)
2522{
2523	struct sighand_struct *sighand = current->sighand;
2524	struct signal_struct *signal = current->signal;
2525	int signr;
2526
2527	if (unlikely(current->task_works))
2528		task_work_run();
2529
2530	if (unlikely(uprobe_deny_signal()))
2531		return false;
2532
2533	/*
2534	 * Do this once, we can't return to user-mode if freezing() == T.
2535	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2536	 * thus do not need another check after return.
2537	 */
2538	try_to_freeze();
2539
2540relock:
2541	spin_lock_irq(&sighand->siglock);
2542	/*
2543	 * Every stopped thread goes here after wakeup. Check to see if
2544	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2545	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2546	 */
2547	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2548		int why;
2549
2550		if (signal->flags & SIGNAL_CLD_CONTINUED)
2551			why = CLD_CONTINUED;
2552		else
2553			why = CLD_STOPPED;
2554
2555		signal->flags &= ~SIGNAL_CLD_MASK;
2556
2557		spin_unlock_irq(&sighand->siglock);
2558
2559		/*
2560		 * Notify the parent that we're continuing.  This event is
2561		 * always per-process and doesn't make a whole lot of sense
2562		 * for ptracers, who shouldn't consume the state via
2563		 * wait(2) either, but, for backward compatibility, notify
2564		 * the ptracer of the group leader too unless it's gonna be
2565		 * a duplicate.
2566		 */
2567		read_lock(&tasklist_lock);
2568		do_notify_parent_cldstop(current, false, why);
2569
2570		if (ptrace_reparented(current->group_leader))
2571			do_notify_parent_cldstop(current->group_leader,
2572						true, why);
2573		read_unlock(&tasklist_lock);
2574
2575		goto relock;
2576	}
2577
2578	/* Has this task already been marked for death? */
2579	if (signal_group_exit(signal)) {
2580		ksig->info.si_signo = signr = SIGKILL;
2581		sigdelset(&current->pending.signal, SIGKILL);
2582		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2583				&sighand->action[SIGKILL - 1]);
2584		recalc_sigpending();
2585		goto fatal;
2586	}
2587
2588	for (;;) {
2589		struct k_sigaction *ka;
2590
2591		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2592		    do_signal_stop(0))
2593			goto relock;
2594
2595		if (unlikely(current->jobctl &
2596			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2597			if (current->jobctl & JOBCTL_TRAP_MASK) {
2598				do_jobctl_trap();
2599				spin_unlock_irq(&sighand->siglock);
2600			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2601				do_freezer_trap();
2602
2603			goto relock;
2604		}
2605
2606		/*
2607		 * If the task is leaving the frozen state, let's update
2608		 * cgroup counters and reset the frozen bit.
2609		 */
2610		if (unlikely(cgroup_task_frozen(current))) {
2611			spin_unlock_irq(&sighand->siglock);
2612			cgroup_leave_frozen(false);
2613			goto relock;
2614		}
2615
2616		/*
2617		 * Signals generated by the execution of an instruction
2618		 * need to be delivered before any other pending signals
2619		 * so that the instruction pointer in the signal stack
2620		 * frame points to the faulting instruction.
2621		 */
2622		signr = dequeue_synchronous_signal(&ksig->info);
2623		if (!signr)
2624			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2625
2626		if (!signr)
2627			break; /* will return 0 */
2628
2629		if (unlikely(current->ptrace) && signr != SIGKILL) {
2630			signr = ptrace_signal(signr, &ksig->info);
2631			if (!signr)
2632				continue;
2633		}
2634
2635		ka = &sighand->action[signr-1];
2636
2637		/* Trace actually delivered signals. */
2638		trace_signal_deliver(signr, &ksig->info, ka);
2639
2640		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2641			continue;
2642		if (ka->sa.sa_handler != SIG_DFL) {
2643			/* Run the handler.  */
2644			ksig->ka = *ka;
2645
2646			if (ka->sa.sa_flags & SA_ONESHOT)
2647				ka->sa.sa_handler = SIG_DFL;
2648
2649			break; /* will return non-zero "signr" value */
2650		}
2651
2652		/*
2653		 * Now we are doing the default action for this signal.
2654		 */
2655		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2656			continue;
2657
2658		/*
2659		 * Global init gets no signals it doesn't want.
2660		 * Container-init gets no signals it doesn't want from same
2661		 * container.
2662		 *
2663		 * Note that if global/container-init sees a sig_kernel_only()
2664		 * signal here, the signal must have been generated internally
2665		 * or must have come from an ancestor namespace. In either
2666		 * case, the signal cannot be dropped.
2667		 */
2668		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2669				!sig_kernel_only(signr))
2670			continue;
2671
2672		if (sig_kernel_stop(signr)) {
2673			/*
2674			 * The default action is to stop all threads in
2675			 * the thread group.  The job control signals
2676			 * do nothing in an orphaned pgrp, but SIGSTOP
2677			 * always works.  Note that siglock needs to be
2678			 * dropped during the call to is_orphaned_pgrp()
2679			 * because of lock ordering with tasklist_lock.
2680			 * This allows an intervening SIGCONT to be posted.
2681			 * We need to check for that and bail out if necessary.
2682			 */
2683			if (signr != SIGSTOP) {
2684				spin_unlock_irq(&sighand->siglock);
2685
2686				/* signals can be posted during this window */
2687
2688				if (is_current_pgrp_orphaned())
2689					goto relock;
2690
2691				spin_lock_irq(&sighand->siglock);
2692			}
2693
2694			if (likely(do_signal_stop(ksig->info.si_signo))) {
2695				/* It released the siglock.  */
2696				goto relock;
2697			}
2698
2699			/*
2700			 * We didn't actually stop, due to a race
2701			 * with SIGCONT or something like that.
2702			 */
2703			continue;
2704		}
2705
2706	fatal:
2707		spin_unlock_irq(&sighand->siglock);
2708		if (unlikely(cgroup_task_frozen(current)))
2709			cgroup_leave_frozen(true);
2710
2711		/*
2712		 * Anything else is fatal, maybe with a core dump.
2713		 */
2714		current->flags |= PF_SIGNALED;
2715
2716		if (sig_kernel_coredump(signr)) {
2717			if (print_fatal_signals)
2718				print_fatal_signal(ksig->info.si_signo);
2719			proc_coredump_connector(current);
2720			/*
2721			 * If it was able to dump core, this kills all
2722			 * other threads in the group and synchronizes with
2723			 * their demise.  If we lost the race with another
2724			 * thread getting here, it set group_exit_code
2725			 * first and our do_group_exit call below will use
2726			 * that value and ignore the one we pass it.
2727			 */
2728			do_coredump(&ksig->info);
2729		}
2730
2731		/*
2732		 * Death signals, no core dump.
2733		 */
2734		do_group_exit(ksig->info.si_signo);
2735		/* NOTREACHED */
2736	}
2737	spin_unlock_irq(&sighand->siglock);
2738
2739	ksig->sig = signr;
2740	return ksig->sig > 0;
2741}
2742
2743/**
2744 * signal_delivered - called after successful signal delivery
2745 * @ksig:		kernel signal struct
2746 * @stepping:		nonzero if debugger single-step or block-step in use
2747 *
2748 * This function should be called when a signal has successfully been
2749 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2750 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2751 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2752 */
2753static void signal_delivered(struct ksignal *ksig, int stepping)
2754{
2755	sigset_t blocked;
2756
2757	/* A signal was successfully delivered, and the
2758	   saved sigmask was stored on the signal frame,
2759	   and will be restored by sigreturn.  So we can
2760	   simply clear the restore sigmask flag.  */
2761	clear_restore_sigmask();
2762
2763	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2764	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2765		sigaddset(&blocked, ksig->sig);
2766	set_current_blocked(&blocked);
2767	tracehook_signal_handler(stepping);
2768}
2769
2770void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2771{
2772	if (failed)
2773		force_sigsegv(ksig->sig);
2774	else
2775		signal_delivered(ksig, stepping);
2776}
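/*
 * Illustrative userspace sketch (not part of this file): what the masking
 * in signal_delivered() means for a handler.  While on_usr1() runs,
 * SIGUSR2 (from sa_mask) is blocked, and SIGUSR1 itself is blocked too
 * because SA_NODEFER is not set.
 */
#include <signal.h>
#include <stddef.h>
#include <string.h>

static void on_usr1(int sig)
{
	(void)sig;	/* handler body elided */
}

static void example_install_usr1_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGUSR2);	/* extra signals to block in the handler */
	sigaction(SIGUSR1, &sa, NULL);
}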
2777
2778/*
2779 * It could be that complete_signal() picked us to notify about the
2780 * group-wide signal. Other threads should be notified now to take
2781 * the shared signals in @which since we will not.
2782 */
2783static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2784{
2785	sigset_t retarget;
2786	struct task_struct *t;
2787
2788	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2789	if (sigisemptyset(&retarget))
2790		return;
2791
2792	t = tsk;
2793	while_each_thread(tsk, t) {
2794		if (t->flags & PF_EXITING)
2795			continue;
2796
2797		if (!has_pending_signals(&retarget, &t->blocked))
2798			continue;
2799		/* Remove the signals this thread can handle. */
2800		sigandsets(&retarget, &retarget, &t->blocked);
2801
2802		if (!signal_pending(t))
2803			signal_wake_up(t, 0);
2804
2805		if (sigisemptyset(&retarget))
2806			break;
2807	}
2808}
2809
2810void exit_signals(struct task_struct *tsk)
2811{
2812	int group_stop = 0;
2813	sigset_t unblocked;
2814
2815	/*
2816	 * @tsk is about to have PF_EXITING set - lock out users which
2817	 * expect stable threadgroup.
2818	 */
2819	cgroup_threadgroup_change_begin(tsk);
2820
2821	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2822		tsk->flags |= PF_EXITING;
2823		cgroup_threadgroup_change_end(tsk);
2824		return;
2825	}
2826
2827	spin_lock_irq(&tsk->sighand->siglock);
2828	/*
2829	 * From now this task is not visible for group-wide signals,
2830	 * see wants_signal(), do_signal_stop().
2831	 */
2832	tsk->flags |= PF_EXITING;
2833
2834	cgroup_threadgroup_change_end(tsk);
2835
2836	if (!signal_pending(tsk))
2837		goto out;
2838
2839	unblocked = tsk->blocked;
2840	signotset(&unblocked);
2841	retarget_shared_pending(tsk, &unblocked);
2842
2843	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2844	    task_participate_group_stop(tsk))
2845		group_stop = CLD_STOPPED;
2846out:
2847	spin_unlock_irq(&tsk->sighand->siglock);
2848
2849	/*
2850	 * If group stop has completed, deliver the notification.  This
2851	 * should always go to the real parent of the group leader.
2852	 */
2853	if (unlikely(group_stop)) {
2854		read_lock(&tasklist_lock);
2855		do_notify_parent_cldstop(tsk, false, group_stop);
2856		read_unlock(&tasklist_lock);
2857	}
2858}
2859
2860/*
2861 * System call entry points.
2862 */
2863
2864/**
2865 *  sys_restart_syscall - restart a system call
2866 */
2867SYSCALL_DEFINE0(restart_syscall)
2868{
2869	struct restart_block *restart = &current->restart_block;
2870	return restart->fn(restart);
2871}
2872
2873long do_no_restart_syscall(struct restart_block *param)
2874{
2875	return -EINTR;
2876}
2877
2878static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2879{
2880	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2881		sigset_t newblocked;
2882		/* A set of now blocked but previously unblocked signals. */
2883		sigandnsets(&newblocked, newset, &current->blocked);
2884		retarget_shared_pending(tsk, &newblocked);
2885	}
2886	tsk->blocked = *newset;
2887	recalc_sigpending();
2888}
2889
2890/**
2891 * set_current_blocked - change current->blocked mask
2892 * @newset: new mask
2893 *
2894 * It is wrong to change ->blocked directly, this helper should be used
2895 * to ensure the process can't miss a shared signal we are going to block.
2896 */
2897void set_current_blocked(sigset_t *newset)
2898{
2899	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2900	__set_current_blocked(newset);
2901}
2902
2903void __set_current_blocked(const sigset_t *newset)
2904{
2905	struct task_struct *tsk = current;
2906
2907	/*
2908	 * In case the signal mask hasn't changed, there is nothing we need
2909	 * to do. The current->blocked shouldn't be modified by another task.
2910	 */
2911	if (sigequalsets(&tsk->blocked, newset))
2912		return;
2913
2914	spin_lock_irq(&tsk->sighand->siglock);
2915	__set_task_blocked(tsk, newset);
2916	spin_unlock_irq(&tsk->sighand->siglock);
2917}
2918
2919/*
2920 * This is also useful for kernel threads that want to temporarily
2921 * (or permanently) block certain signals.
2922 *
2923 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2924 * interface happily blocks "unblockable" signals like SIGKILL
2925 * and friends.
2926 */
2927int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2928{
2929	struct task_struct *tsk = current;
2930	sigset_t newset;
2931
2932	/* Lockless, only current can change ->blocked, never from irq */
2933	if (oldset)
2934		*oldset = tsk->blocked;
2935
2936	switch (how) {
2937	case SIG_BLOCK:
2938		sigorsets(&newset, &tsk->blocked, set);
2939		break;
2940	case SIG_UNBLOCK:
2941		sigandnsets(&newset, &tsk->blocked, set);
2942		break;
2943	case SIG_SETMASK:
2944		newset = *set;
2945		break;
2946	default:
2947		return -EINVAL;
2948	}
2949
2950	__set_current_blocked(&newset);
2951	return 0;
2952}
2953EXPORT_SYMBOL(sigprocmask);
2954
2955/*
2956 * This API helps set app-provided sigmasks.
2957 *
2958 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2959 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2960 *
2961 * Note that it calls set_restore_sigmask() in advance, so it must always
2962 * be paired with restore_saved_sigmask_unless() before returning from the syscall.
2963 */
2964int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2965{
2966	sigset_t kmask;
2967
2968	if (!umask)
2969		return 0;
2970	if (sigsetsize != sizeof(sigset_t))
2971		return -EINVAL;
2972	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2973		return -EFAULT;
2974
2975	set_restore_sigmask();
2976	current->saved_sigmask = current->blocked;
2977	set_current_blocked(&kmask);
2978
2979	return 0;
2980}
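/*
 * Illustrative userspace sketch (not part of this file): ppoll(2) is one
 * caller that passes a sigmask down to the helper above; the kernel
 * installs it for the duration of the wait and restores the saved mask on
 * the way out.
 */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stddef.h>

static int example_wait_interruptible_by_sigint(struct pollfd *fds, nfds_t nfds)
{
	sigset_t mask;

	sigfillset(&mask);
	sigdelset(&mask, SIGINT);	/* only SIGINT may interrupt the wait */
	return ppoll(fds, nfds, NULL, &mask);	/* NULL timeout: block indefinitely */
}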
2981
2982#ifdef CONFIG_COMPAT
2983int set_compat_user_sigmask(const compat_sigset_t __user *umask,
2984			    size_t sigsetsize)
2985{
2986	sigset_t kmask;
2987
2988	if (!umask)
2989		return 0;
2990	if (sigsetsize != sizeof(compat_sigset_t))
2991		return -EINVAL;
2992	if (get_compat_sigset(&kmask, umask))
2993		return -EFAULT;
2994
2995	set_restore_sigmask();
2996	current->saved_sigmask = current->blocked;
2997	set_current_blocked(&kmask);
2998
2999	return 0;
3000}
3001#endif
3002
3003/**
3004 *  sys_rt_sigprocmask - change the list of currently blocked signals
3005 *  @how: whether to add, remove, or set signals
3006 *  @nset: new signal mask to apply, or NULL
3007 *  @oset: previous value of signal mask if non-null
3008 *  @sigsetsize: size of sigset_t type
3009 */
3010SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3011		sigset_t __user *, oset, size_t, sigsetsize)
3012{
3013	sigset_t old_set, new_set;
3014	int error;
3015
3016	/* XXX: Don't preclude handling different sized sigset_t's.  */
3017	if (sigsetsize != sizeof(sigset_t))
3018		return -EINVAL;
3019
3020	old_set = current->blocked;
3021
3022	if (nset) {
3023		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3024			return -EFAULT;
3025		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3026
3027		error = sigprocmask(how, &new_set, NULL);
3028		if (error)
3029			return error;
3030	}
3031
3032	if (oset) {
3033		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3034			return -EFAULT;
3035	}
3036
3037	return 0;
3038}
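/*
 * Illustrative userspace sketch (not part of this file): glibc's
 * sigprocmask(2) lands in the syscall above.  Note that SIGKILL and
 * SIGSTOP are silently dropped from the new mask, matching the
 * sigdelsetmask() call in the handler.
 */
#include <signal.h>

static int example_block_usr1(sigset_t *oldset)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	return sigprocmask(SIG_BLOCK, &set, oldset);	/* oldset may be NULL */
}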
3039
3040#ifdef CONFIG_COMPAT
3041COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3042		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3043{
3044	sigset_t old_set = current->blocked;
3045
3046	/* XXX: Don't preclude handling different sized sigset_t's.  */
3047	if (sigsetsize != sizeof(sigset_t))
3048		return -EINVAL;
3049
3050	if (nset) {
3051		sigset_t new_set;
3052		int error;
3053		if (get_compat_sigset(&new_set, nset))
3054			return -EFAULT;
3055		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3056
3057		error = sigprocmask(how, &new_set, NULL);
3058		if (error)
3059			return error;
3060	}
3061	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3062}
3063#endif
3064
3065static void do_sigpending(sigset_t *set)
3066{
3067	spin_lock_irq(&current->sighand->siglock);
3068	sigorsets(set, &current->pending.signal,
3069		  &current->signal->shared_pending.signal);
3070	spin_unlock_irq(&current->sighand->siglock);
3071
3072	/* Outside the lock because only this thread touches it.  */
3073	sigandsets(set, &current->blocked, set);
3074}
3075
3076/**
3077 *  sys_rt_sigpending - examine a pending signal that has been raised
3078 *			while blocked
3079 *  @uset: stores pending signals
3080 *  @sigsetsize: size of sigset_t type or smaller
3081 */
3082SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3083{
3084	sigset_t set;
3085
3086	if (sigsetsize > sizeof(*uset))
3087		return -EINVAL;
3088
3089	do_sigpending(&set);
3090
3091	if (copy_to_user(uset, &set, sigsetsize))
3092		return -EFAULT;
3093
3094	return 0;
3095}
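/*
 * Illustrative userspace sketch (not part of this file): glibc's
 * sigpending(3) wraps the syscall above; only signals that are both
 * pending and blocked show up in the returned set.
 */
#include <signal.h>

static int example_usr1_is_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) == -1)
		return -1;
	return sigismember(&pending, SIGUSR1);	/* 1 if pending, 0 if not */
}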
3096
3097#ifdef CONFIG_COMPAT
3098COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3099		compat_size_t, sigsetsize)
3100{
3101	sigset_t set;
3102
3103	if (sigsetsize > sizeof(*uset))
3104		return -EINVAL;
3105
3106	do_sigpending(&set);
3107
3108	return put_compat_sigset(uset, &set, sigsetsize);
3109}
3110#endif
3111
3112static const struct {
3113	unsigned char limit, layout;
3114} sig_sicodes[] = {
3115	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3116	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3117	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3118	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3119	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3120#if defined(SIGEMT)
3121	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3122#endif
3123	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3124	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3125	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3126};
3127
3128static bool known_siginfo_layout(unsigned sig, int si_code)
3129{
3130	if (si_code == SI_KERNEL)
3131		return true;
3132	else if (si_code > SI_USER) {
3133		if (sig_specific_sicodes(sig)) {
3134			if (si_code <= sig_sicodes[sig].limit)
3135				return true;
3136		}
3137		else if (si_code <= NSIGPOLL)
3138			return true;
3139	}
3140	else if (si_code >= SI_DETHREAD)
3141		return true;
3142	else if (si_code == SI_ASYNCNL)
3143		return true;
3144	return false;
3145}
3146
3147enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3148{
3149	enum siginfo_layout layout = SIL_KILL;
3150	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3151		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3152		    (si_code <= sig_sicodes[sig].limit)) {
3153			layout = sig_sicodes[sig].layout;
3154			/* Handle the exceptions */
3155			if ((sig == SIGBUS) &&
3156			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3157				layout = SIL_FAULT_MCEERR;
3158			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3159				layout = SIL_FAULT_BNDERR;
3160#ifdef SEGV_PKUERR
3161			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3162				layout = SIL_FAULT_PKUERR;
3163#endif
3164		}
3165		else if (si_code <= NSIGPOLL)
3166			layout = SIL_POLL;
3167	} else {
3168		if (si_code == SI_TIMER)
3169			layout = SIL_TIMER;
3170		else if (si_code == SI_SIGIO)
3171			layout = SIL_POLL;
3172		else if (si_code < 0)
3173			layout = SIL_RT;
3174	}
3175	return layout;
3176}
3177
3178static inline char __user *si_expansion(const siginfo_t __user *info)
3179{
3180	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3181}
3182
3183int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3184{
3185	char __user *expansion = si_expansion(to);
3186	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3187		return -EFAULT;
3188	if (clear_user(expansion, SI_EXPANSION_SIZE))
3189		return -EFAULT;
3190	return 0;
3191}
3192
3193static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3194				       const siginfo_t __user *from)
3195{
3196	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3197		char __user *expansion = si_expansion(from);
3198		char buf[SI_EXPANSION_SIZE];
3199		int i;
3200		/*
3201		 * An unknown si_code might need more than
3202		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3203		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3204		 * will return this data to userspace exactly.
3205		 */
3206		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3207			return -EFAULT;
3208		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3209			if (buf[i] != 0)
3210				return -E2BIG;
3211		}
3212	}
3213	return 0;
3214}
3215
3216static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3217				    const siginfo_t __user *from)
3218{
3219	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3220		return -EFAULT;
3221	to->si_signo = signo;
3222	return post_copy_siginfo_from_user(to, from);
3223}
3224
3225int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3226{
3227	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3228		return -EFAULT;
3229	return post_copy_siginfo_from_user(to, from);
3230}
3231
3232#ifdef CONFIG_COMPAT
3233int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3234			   const struct kernel_siginfo *from)
3235#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3236{
3237	return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3238}
3239int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3240			     const struct kernel_siginfo *from, bool x32_ABI)
3241#endif
3242{
3243	struct compat_siginfo new;
3244	memset(&new, 0, sizeof(new));
3245
3246	new.si_signo = from->si_signo;
3247	new.si_errno = from->si_errno;
3248	new.si_code  = from->si_code;
3249	switch(siginfo_layout(from->si_signo, from->si_code)) {
3250	case SIL_KILL:
3251		new.si_pid = from->si_pid;
3252		new.si_uid = from->si_uid;
3253		break;
3254	case SIL_TIMER:
3255		new.si_tid     = from->si_tid;
3256		new.si_overrun = from->si_overrun;
3257		new.si_int     = from->si_int;
3258		break;
3259	case SIL_POLL:
3260		new.si_band = from->si_band;
3261		new.si_fd   = from->si_fd;
3262		break;
3263	case SIL_FAULT:
3264		new.si_addr = ptr_to_compat(from->si_addr);
3265#ifdef __ARCH_SI_TRAPNO
3266		new.si_trapno = from->si_trapno;
3267#endif
3268		break;
3269	case SIL_FAULT_MCEERR:
3270		new.si_addr = ptr_to_compat(from->si_addr);
3271#ifdef __ARCH_SI_TRAPNO
3272		new.si_trapno = from->si_trapno;
3273#endif
3274		new.si_addr_lsb = from->si_addr_lsb;
3275		break;
3276	case SIL_FAULT_BNDERR:
3277		new.si_addr = ptr_to_compat(from->si_addr);
3278#ifdef __ARCH_SI_TRAPNO
3279		new.si_trapno = from->si_trapno;
3280#endif
3281		new.si_lower = ptr_to_compat(from->si_lower);
3282		new.si_upper = ptr_to_compat(from->si_upper);
3283		break;
3284	case SIL_FAULT_PKUERR:
3285		new.si_addr = ptr_to_compat(from->si_addr);
3286#ifdef __ARCH_SI_TRAPNO
3287		new.si_trapno = from->si_trapno;
3288#endif
3289		new.si_pkey = from->si_pkey;
3290		break;
3291	case SIL_CHLD:
3292		new.si_pid    = from->si_pid;
3293		new.si_uid    = from->si_uid;
3294		new.si_status = from->si_status;
3295#ifdef CONFIG_X86_X32_ABI
3296		if (x32_ABI) {
3297			new._sifields._sigchld_x32._utime = from->si_utime;
3298			new._sifields._sigchld_x32._stime = from->si_stime;
3299		} else
3300#endif
3301		{
3302			new.si_utime = from->si_utime;
3303			new.si_stime = from->si_stime;
3304		}
3305		break;
3306	case SIL_RT:
3307		new.si_pid = from->si_pid;
3308		new.si_uid = from->si_uid;
3309		new.si_int = from->si_int;
3310		break;
3311	case SIL_SYS:
3312		new.si_call_addr = ptr_to_compat(from->si_call_addr);
3313		new.si_syscall   = from->si_syscall;
3314		new.si_arch      = from->si_arch;
3315		break;
3316	}
3317
3318	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3319		return -EFAULT;
3320
3321	return 0;
3322}
3323
3324static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3325					 const struct compat_siginfo *from)
3326{
3327	clear_siginfo(to);
3328	to->si_signo = from->si_signo;
3329	to->si_errno = from->si_errno;
3330	to->si_code  = from->si_code;
3331	switch(siginfo_layout(from->si_signo, from->si_code)) {
3332	case SIL_KILL:
3333		to->si_pid = from->si_pid;
3334		to->si_uid = from->si_uid;
3335		break;
3336	case SIL_TIMER:
3337		to->si_tid     = from->si_tid;
3338		to->si_overrun = from->si_overrun;
3339		to->si_int     = from->si_int;
3340		break;
3341	case SIL_POLL:
3342		to->si_band = from->si_band;
3343		to->si_fd   = from->si_fd;
3344		break;
3345	case SIL_FAULT:
3346		to->si_addr = compat_ptr(from->si_addr);
3347#ifdef __ARCH_SI_TRAPNO
3348		to->si_trapno = from->si_trapno;
3349#endif
3350		break;
3351	case SIL_FAULT_MCEERR:
3352		to->si_addr = compat_ptr(from->si_addr);
3353#ifdef __ARCH_SI_TRAPNO
3354		to->si_trapno = from->si_trapno;
3355#endif
3356		to->si_addr_lsb = from->si_addr_lsb;
3357		break;
3358	case SIL_FAULT_BNDERR:
3359		to->si_addr = compat_ptr(from->si_addr);
3360#ifdef __ARCH_SI_TRAPNO
3361		to->si_trapno = from->si_trapno;
3362#endif
3363		to->si_lower = compat_ptr(from->si_lower);
3364		to->si_upper = compat_ptr(from->si_upper);
3365		break;
3366	case SIL_FAULT_PKUERR:
3367		to->si_addr = compat_ptr(from->si_addr);
3368#ifdef __ARCH_SI_TRAPNO
3369		to->si_trapno = from->si_trapno;
3370#endif
3371		to->si_pkey = from->si_pkey;
3372		break;
3373	case SIL_CHLD:
3374		to->si_pid    = from->si_pid;
3375		to->si_uid    = from->si_uid;
3376		to->si_status = from->si_status;
3377#ifdef CONFIG_X86_X32_ABI
3378		if (in_x32_syscall()) {
3379			to->si_utime = from->_sifields._sigchld_x32._utime;
3380			to->si_stime = from->_sifields._sigchld_x32._stime;
3381		} else
3382#endif
3383		{
3384			to->si_utime = from->si_utime;
3385			to->si_stime = from->si_stime;
3386		}
3387		break;
3388	case SIL_RT:
3389		to->si_pid = from->si_pid;
3390		to->si_uid = from->si_uid;
3391		to->si_int = from->si_int;
3392		break;
3393	case SIL_SYS:
3394		to->si_call_addr = compat_ptr(from->si_call_addr);
3395		to->si_syscall   = from->si_syscall;
3396		to->si_arch      = from->si_arch;
3397		break;
3398	}
3399	return 0;
3400}
3401
3402static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3403				      const struct compat_siginfo __user *ufrom)
3404{
3405	struct compat_siginfo from;
3406
3407	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3408		return -EFAULT;
3409
3410	from.si_signo = signo;
3411	return post_copy_siginfo_from_user32(to, &from);
3412}
3413
3414int copy_siginfo_from_user32(struct kernel_siginfo *to,
3415			     const struct compat_siginfo __user *ufrom)
3416{
3417	struct compat_siginfo from;
3418
3419	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3420		return -EFAULT;
3421
3422	return post_copy_siginfo_from_user32(to, &from);
3423}
3424#endif /* CONFIG_COMPAT */
3425
3426/**
3427 *  do_sigtimedwait - wait for queued signals specified in @which
3428 *  @which: queued signals to wait for
3429 *  @info: if non-null, the signal's siginfo is returned here
3430 *  @ts: upper bound on process time suspension
3431 */
3432static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3433		    const struct timespec64 *ts)
3434{
3435	ktime_t *to = NULL, timeout = KTIME_MAX;
3436	struct task_struct *tsk = current;
3437	sigset_t mask = *which;
3438	int sig, ret = 0;
3439
3440	if (ts) {
3441		if (!timespec64_valid(ts))
3442			return -EINVAL;
3443		timeout = timespec64_to_ktime(*ts);
3444		to = &timeout;
3445	}
3446
3447	/*
3448	 * Invert the set of allowed signals to get those we want to block.
3449	 */
3450	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3451	signotset(&mask);
3452
3453	spin_lock_irq(&tsk->sighand->siglock);
3454	sig = dequeue_signal(tsk, &mask, info);
3455	if (!sig && timeout) {
3456		/*
3457		 * None ready, temporarily unblock those we're interested in
3458		 * while we are sleeping so that we'll be awakened when
3459		 * they arrive. Unblocking is always fine, we can avoid
3460		 * set_current_blocked().
3461		 */
3462		tsk->real_blocked = tsk->blocked;
3463		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3464		recalc_sigpending();
3465		spin_unlock_irq(&tsk->sighand->siglock);
3466
3467		__set_current_state(TASK_INTERRUPTIBLE);
3468		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3469							 HRTIMER_MODE_REL);
3470		spin_lock_irq(&tsk->sighand->siglock);
3471		__set_task_blocked(tsk, &tsk->real_blocked);
3472		sigemptyset(&tsk->real_blocked);
3473		sig = dequeue_signal(tsk, &mask, info);
3474	}
3475	spin_unlock_irq(&tsk->sighand->siglock);
3476
3477	if (sig)
3478		return sig;
3479	return ret ? -EINTR : -EAGAIN;
3480}
3481
3482/**
3483 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3484 *			in @uthese
3485 *  @uthese: queued signals to wait for
3486 *  @uinfo: if non-null, the signal's siginfo is returned here
3487 *  @uts: upper bound on process time suspension
3488 *  @sigsetsize: size of sigset_t type
3489 */
3490SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3491		siginfo_t __user *, uinfo,
3492		const struct __kernel_timespec __user *, uts,
3493		size_t, sigsetsize)
3494{
3495	sigset_t these;
3496	struct timespec64 ts;
3497	kernel_siginfo_t info;
3498	int ret;
3499
3500	/* XXX: Don't preclude handling different sized sigset_t's.  */
3501	if (sigsetsize != sizeof(sigset_t))
3502		return -EINVAL;
3503
3504	if (copy_from_user(&these, uthese, sizeof(these)))
3505		return -EFAULT;
3506
3507	if (uts) {
3508		if (get_timespec64(&ts, uts))
3509			return -EFAULT;
3510	}
3511
3512	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3513
3514	if (ret > 0 && uinfo) {
3515		if (copy_siginfo_to_user(uinfo, &info))
3516			ret = -EFAULT;
3517	}
3518
3519	return ret;
3520}
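/*
 * Illustrative userspace sketch (not part of this file): synchronous
 * signal consumption via glibc's sigtimedwait(3), which wraps the syscall
 * above.  The signal must already be blocked, or it may be delivered
 * asynchronously instead of staying queued for the wait.
 */
#include <signal.h>
#include <stddef.h>
#include <time.h>

static int example_wait_for_usr1(siginfo_t *info)
{
	sigset_t set;
	struct timespec timeout = { 5, 0 };	/* give up after five seconds */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* block before waiting */
	return sigtimedwait(&set, info, &timeout);	/* -1/EAGAIN on timeout */
}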
3521
3522#ifdef CONFIG_COMPAT_32BIT_TIME
3523SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3524		siginfo_t __user *, uinfo,
3525		const struct old_timespec32 __user *, uts,
3526		size_t, sigsetsize)
3527{
3528	sigset_t these;
3529	struct timespec64 ts;
3530	kernel_siginfo_t info;
3531	int ret;
3532
3533	if (sigsetsize != sizeof(sigset_t))
3534		return -EINVAL;
3535
3536	if (copy_from_user(&these, uthese, sizeof(these)))
3537		return -EFAULT;
3538
3539	if (uts) {
3540		if (get_old_timespec32(&ts, uts))
3541			return -EFAULT;
3542	}
3543
3544	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3545
3546	if (ret > 0 && uinfo) {
3547		if (copy_siginfo_to_user(uinfo, &info))
3548			ret = -EFAULT;
3549	}
3550
3551	return ret;
3552}
3553#endif
3554
3555#ifdef CONFIG_COMPAT
3556COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3557		struct compat_siginfo __user *, uinfo,
3558		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3559{
3560	sigset_t s;
3561	struct timespec64 t;
3562	kernel_siginfo_t info;
3563	long ret;
3564
3565	if (sigsetsize != sizeof(sigset_t))
3566		return -EINVAL;
3567
3568	if (get_compat_sigset(&s, uthese))
3569		return -EFAULT;
3570
3571	if (uts) {
3572		if (get_timespec64(&t, uts))
3573			return -EFAULT;
3574	}
3575
3576	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3577
3578	if (ret > 0 && uinfo) {
3579		if (copy_siginfo_to_user32(uinfo, &info))
3580			ret = -EFAULT;
3581	}
3582
3583	return ret;
3584}
3585
3586#ifdef CONFIG_COMPAT_32BIT_TIME
3587COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3588		struct compat_siginfo __user *, uinfo,
3589		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3590{
3591	sigset_t s;
3592	struct timespec64 t;
3593	kernel_siginfo_t info;
3594	long ret;
3595
3596	if (sigsetsize != sizeof(sigset_t))
3597		return -EINVAL;
3598
3599	if (get_compat_sigset(&s, uthese))
3600		return -EFAULT;
3601
3602	if (uts) {
3603		if (get_old_timespec32(&t, uts))
3604			return -EFAULT;
3605	}
3606
3607	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3608
3609	if (ret > 0 && uinfo) {
3610		if (copy_siginfo_to_user32(uinfo, &info))
3611			ret = -EFAULT;
3612	}
3613
3614	return ret;
3615}
3616#endif
3617#endif
3618
3619static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3620{
3621	clear_siginfo(info);
3622	info->si_signo = sig;
3623	info->si_errno = 0;
3624	info->si_code = SI_USER;
3625	info->si_pid = task_tgid_vnr(current);
3626	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3627}
3628
3629/**
3630 *  sys_kill - send a signal to a process
3631 *  @pid: the PID of the process
3632 *  @sig: signal to be sent
3633 */
3634SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3635{
3636	struct kernel_siginfo info;
3637
3638	prepare_kill_siginfo(sig, &info);
3639
3640	return kill_something_info(sig, &info, pid);
3641}
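/*
 * Illustrative userspace sketch (not part of this file): signal 0 as a
 * pure existence/permission probe; as do_send_specific() notes further
 * down, no signal is actually delivered for sig == 0.
 */
#include <errno.h>
#include <signal.h>

static int example_process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;
	return errno == EPERM;	/* it exists, we just may not signal it */
}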
3642
3643/*
3644 * Verify that the signaler and signalee either are in the same pid namespace
3645 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3646 * namespace.
3647 */
3648static bool access_pidfd_pidns(struct pid *pid)
3649{
3650	struct pid_namespace *active = task_active_pid_ns(current);
3651	struct pid_namespace *p = ns_of_pid(pid);
3652
3653	for (;;) {
3654		if (!p)
3655			return false;
3656		if (p == active)
3657			break;
3658		p = p->parent;
3659	}
3660
3661	return true;
3662}
3663
3664static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3665{
3666#ifdef CONFIG_COMPAT
3667	/*
3668	 * Avoid hooking up compat syscalls and instead handle necessary
3669	 * conversions here. Note, this is a stop-gap measure and should not be
3670	 * considered a generic solution.
3671	 */
3672	if (in_compat_syscall())
3673		return copy_siginfo_from_user32(
3674			kinfo, (struct compat_siginfo __user *)info);
3675#endif
3676	return copy_siginfo_from_user(kinfo, info);
3677}
3678
3679static struct pid *pidfd_to_pid(const struct file *file)
3680{
3681	struct pid *pid;
3682
3683	pid = pidfd_pid(file);
3684	if (!IS_ERR(pid))
3685		return pid;
3686
3687	return tgid_pidfd_to_pid(file);
3688}
3689
3690/**
3691 * sys_pidfd_send_signal - Signal a process through a pidfd
3692 * @pidfd:  file descriptor of the process
3693 * @sig:    signal to send
3694 * @info:   signal info
3695 * @flags:  future flags
3696 *
3697 * The syscall currently only signals via PIDTYPE_PID which covers
3698 * kill(<positive-pid>, <signal>). It does not signal threads or process
3699 * groups.
3700 * In order to extend the syscall to threads and process groups the @flags
3701 * argument should be used. In essence, the @flags argument will determine
3702 * what is signaled and not the file descriptor itself. In other words,
3703 * grouping is a property of the flags argument not a property of the file
3704 * descriptor.
3705 *
3706 * Return: 0 on success, negative errno on failure
3707 */
3708SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3709		siginfo_t __user *, info, unsigned int, flags)
3710{
3711	int ret;
3712	struct fd f;
3713	struct pid *pid;
3714	kernel_siginfo_t kinfo;
3715
3716	/* Enforce that flags is 0 until we add an extension. */
3717	if (flags)
3718		return -EINVAL;
3719
3720	f = fdget(pidfd);
3721	if (!f.file)
3722		return -EBADF;
3723
3724	/* Is this a pidfd? */
3725	pid = pidfd_to_pid(f.file);
3726	if (IS_ERR(pid)) {
3727		ret = PTR_ERR(pid);
3728		goto err;
3729	}
3730
3731	ret = -EINVAL;
3732	if (!access_pidfd_pidns(pid))
3733		goto err;
3734
3735	if (info) {
3736		ret = copy_siginfo_from_user_any(&kinfo, info);
3737		if (unlikely(ret))
3738			goto err;
3739
3740		ret = -EINVAL;
3741		if (unlikely(sig != kinfo.si_signo))
3742			goto err;
3743
3744		/* Only allow sending arbitrary signals to yourself. */
3745		ret = -EPERM;
3746		if ((task_pid(current) != pid) &&
3747		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3748			goto err;
3749	} else {
3750		prepare_kill_siginfo(sig, &kinfo);
3751	}
3752
3753	ret = kill_pid_info(sig, &kinfo, pid);
3754
3755err:
3756	fdput(f);
3757	return ret;
3758}
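/*
 * Illustrative userspace sketch (editorial addition, not part of
 * signal.c): reaching this syscall with raw syscall(2), since glibc
 * wrappers for pidfd_open()/pidfd_send_signal() arrived only later.
 * info == NULL and flags == 0 match the checks above.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int pidfd_kill(pid_t pid, int sig)
{
	int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0)
		return -1;
	ret = syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
	close(pidfd);
	return ret;
}
#endif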
3759
3760static int
3761do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3762{
3763	struct task_struct *p;
3764	int error = -ESRCH;
3765
3766	rcu_read_lock();
3767	p = find_task_by_vpid(pid);
3768	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3769		error = check_kill_permission(sig, info, p);
3770		/*
3771		 * The null signal is a permissions and process existence
3772		 * probe.  No signal is actually delivered.
3773		 */
3774		if (!error && sig) {
3775			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3776			/*
3777			 * If lock_task_sighand() failed we pretend the task
3778			 * dies after receiving the signal. The window is tiny,
3779			 * and the signal is private anyway.
3780			 */
3781			if (unlikely(error == -ESRCH))
3782				error = 0;
3783		}
3784	}
3785	rcu_read_unlock();
3786
3787	return error;
3788}
3789
3790static int do_tkill(pid_t tgid, pid_t pid, int sig)
3791{
3792	struct kernel_siginfo info;
3793
3794	clear_siginfo(&info);
3795	info.si_signo = sig;
3796	info.si_errno = 0;
3797	info.si_code = SI_TKILL;
3798	info.si_pid = task_tgid_vnr(current);
3799	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3800
3801	return do_send_specific(tgid, pid, sig, &info);
3802}
3803
3804/**
3805 *  sys_tgkill - send signal to one specific thread
3806 *  @tgid: the thread group ID of the thread
3807 *  @pid: the PID of the thread
3808 *  @sig: signal to be sent
3809 *
3810 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3811 *  exists but no longer belongs to the target process. This method
3812 *  solves the problem of threads exiting and PIDs getting reused.
3813 */
3814SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3815{
3816	/* This is only valid for single tasks */
3817	if (pid <= 0 || tgid <= 0)
3818		return -EINVAL;
3819
3820	return do_tkill(tgid, pid, sig);
3821}
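/*
 * Illustrative userspace sketch (editorial addition, not part of
 * signal.c): directing a signal at one thread. The @tgid check above
 * is what protects callers against tid reuse by another process; glibc
 * only gained a tgkill() wrapper later, hence the raw syscall.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int signal_one_thread(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif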
3822
3823/**
3824 *  sys_tkill - send signal to one specific task
3825 *  @pid: the PID of the task
3826 *  @sig: signal to be sent
3827 *
3828 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3829 */
3830SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3831{
3832	/* This is only valid for single tasks */
3833	if (pid <= 0)
3834		return -EINVAL;
3835
3836	return do_tkill(0, pid, sig);
3837}
3838
3839static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3840{
3841	/* Not even root can pretend to send signals from the kernel.
3842	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3843	 */
3844	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3845	    (task_pid_vnr(current) != pid))
3846		return -EPERM;
3847
3848	/* POSIX.1b doesn't mention process groups.  */
3849	return kill_proc_info(sig, info, pid);
3850}
3851
3852/**
3853 *  sys_rt_sigqueueinfo - send signal information to a process
3854 *  @pid: the PID of the process
3855 *  @sig: signal to be sent
3856 *  @uinfo: signal info to be sent
3857 */
3858SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3859		siginfo_t __user *, uinfo)
3860{
3861	kernel_siginfo_t info;
3862	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3863	if (unlikely(ret))
3864		return ret;
3865	return do_rt_sigqueueinfo(pid, sig, &info);
3866}
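/*
 * Illustrative userspace sketch (editorial addition, not part of
 * signal.c): sigqueue(3) is the usual route into this syscall. glibc
 * fills in the siginfo with si_code = SI_QUEUE, which is negative, so
 * the impersonation check in do_rt_sigqueueinfo() does not fire.
 */
#if 0
#include <signal.h>

static int queue_payload(pid_t pid, int sig, int payload)
{
	union sigval value = { .sival_int = payload };

	return sigqueue(pid, sig, value);	/* receiver sees si_value */
}
#endif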
3867
3868#ifdef CONFIG_COMPAT
3869COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3870			compat_pid_t, pid,
3871			int, sig,
3872			struct compat_siginfo __user *, uinfo)
3873{
3874	kernel_siginfo_t info;
3875	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3876	if (unlikely(ret))
3877		return ret;
3878	return do_rt_sigqueueinfo(pid, sig, &info);
3879}
3880#endif
3881
3882static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3883{
3884	/* This is only valid for single tasks */
3885	if (pid <= 0 || tgid <= 0)
3886		return -EINVAL;
3887
3888	/* Not even root can pretend to send signals from the kernel.
3889	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3890	 */
3891	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3892	    (task_pid_vnr(current) != pid))
3893		return -EPERM;
3894
3895	return do_send_specific(tgid, pid, sig, info);
3896}
3897
3898SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3899		siginfo_t __user *, uinfo)
3900{
3901	kernel_siginfo_t info;
3902	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3903	if (unlikely(ret))
3904		return ret;
3905	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3906}
3907
3908#ifdef CONFIG_COMPAT
3909COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3910			compat_pid_t, tgid,
3911			compat_pid_t, pid,
3912			int, sig,
3913			struct compat_siginfo __user *, uinfo)
3914{
3915	kernel_siginfo_t info;
3916	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3917	if (unlikely(ret))
3918		return ret;
3919	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3920}
3921#endif
3922
3923/*
3924 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3925 */
3926void kernel_sigaction(int sig, __sighandler_t action)
3927{
3928	spin_lock_irq(&current->sighand->siglock);
3929	current->sighand->action[sig - 1].sa.sa_handler = action;
3930	if (action == SIG_IGN) {
3931		sigset_t mask;
3932
3933		sigemptyset(&mask);
3934		sigaddset(&mask, sig);
3935
3936		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3937		flush_sigqueue_mask(&mask, &current->pending);
3938		recalc_sigpending();
3939	}
3940	spin_unlock_irq(&current->sighand->siglock);
3941}
3942EXPORT_SYMBOL(kernel_sigaction);
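/*
 * Illustrative in-kernel sketch (editorial addition, not part of
 * signal.c): kthreads normally reach kernel_sigaction() through the
 * allow_signal()/disallow_signal() helpers from <linux/sched/signal.h>.
 */
#if 0
static int example_kthread(void *unused)
{
	allow_signal(SIGTERM);	/* wraps kernel_sigaction() */

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ);
		if (signal_pending(current))
			flush_signals(current);	/* handled; keep running */
	}
	return 0;
}
#endif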
3943
3944void __weak sigaction_compat_abi(struct k_sigaction *act,
3945		struct k_sigaction *oact)
3946{
3947}
3948
3949int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3950{
3951	struct task_struct *p = current, *t;
3952	struct k_sigaction *k;
3953	sigset_t mask;
3954
3955	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3956		return -EINVAL;
3957
3958	k = &p->sighand->action[sig-1];
3959
3960	spin_lock_irq(&p->sighand->siglock);
3961	if (oact)
3962		*oact = *k;
3963
3964	sigaction_compat_abi(act, oact);
3965
3966	if (act) {
3967		sigdelsetmask(&act->sa.sa_mask,
3968			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3969		*k = *act;
3970		/*
3971		 * POSIX 3.3.1.3:
3972		 *  "Setting a signal action to SIG_IGN for a signal that is
3973		 *   pending shall cause the pending signal to be discarded,
3974		 *   whether or not it is blocked."
3975		 *
3976		 *  "Setting a signal action to SIG_DFL for a signal that is
3977		 *   pending and whose default action is to ignore the signal
3978		 *   (for example, SIGCHLD), shall cause the pending signal to
3979		 *   be discarded, whether or not it is blocked"
3980		 */
3981		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3982			sigemptyset(&mask);
3983			sigaddset(&mask, sig);
3984			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3985			for_each_thread(p, t)
3986				flush_sigqueue_mask(&mask, &t->pending);
3987		}
3988	}
3989
3990	spin_unlock_irq(&p->sighand->siglock);
3991	return 0;
3992}
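/*
 * Illustrative userspace sketch (editorial addition, not part of
 * signal.c): the POSIX discard rule quoted above is observable from
 * userspace -- a pending, blocked signal vanishes once its action
 * becomes SIG_IGN.
 */
#if 0
#include <signal.h>

static void discard_pending_demo(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);			/* pending and blocked */
	signal(SIGUSR1, SIG_IGN);	/* pending SIGUSR1 is discarded */

	sigpending(&pending);		/* no longer contains SIGUSR1 */
}
#endif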
3993
3994static int
3995do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
3996		size_t min_ss_size)
3997{
3998	struct task_struct *t = current;
3999
4000	if (oss) {
4001		memset(oss, 0, sizeof(stack_t));
4002		oss->ss_sp = (void __user *) t->sas_ss_sp;
4003		oss->ss_size = t->sas_ss_size;
4004		oss->ss_flags = sas_ss_flags(sp) |
4005			(current->sas_ss_flags & SS_FLAG_BITS);
4006	}
4007
4008	if (ss) {
4009		void __user *ss_sp = ss->ss_sp;
4010		size_t ss_size = ss->ss_size;
4011		unsigned ss_flags = ss->ss_flags;
4012		int ss_mode;
4013
4014		if (unlikely(on_sig_stack(sp)))
4015			return -EPERM;
4016
4017		ss_mode = ss_flags & ~SS_FLAG_BITS;
4018		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4019				ss_mode != 0))
4020			return -EINVAL;
4021
4021
4022		if (ss_mode == SS_DISABLE) {
4023			ss_size = 0;
4024			ss_sp = NULL;
4025		} else {
4026			if (unlikely(ss_size < min_ss_size))
4027				return -ENOMEM;
4028		}
4029
4030		t->sas_ss_sp = (unsigned long) ss_sp;
4031		t->sas_ss_size = ss_size;
4032		t->sas_ss_flags = ss_flags;
4033	}
4034	return 0;
4035}
4036
4037SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4038{
4039	stack_t new, old;
4040	int err;
4041	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4042		return -EFAULT;
4043	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4044			      current_user_stack_pointer(),
4045			      MINSIGSTKSZ);
4046	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4047		err = -EFAULT;
4048	return err;
4049}
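/*
 * Illustrative userspace sketch (editorial addition, not part of
 * signal.c): installing an alternate stack so a handler can still run
 * after the main stack overflows. SIGSTKSZ satisfies the MINSIGSTKSZ
 * (min_ss_size) check above; error handling is elided.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static void install_altstack(void (*handler)(int))
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};
	struct sigaction sa = {
		.sa_handler = handler,
		.sa_flags = SA_ONSTACK,	/* actually use the new stack */
	};

	sigaltstack(&ss, NULL);
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
}
#endif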
4050
4051int restore_altstack(const stack_t __user *uss)
4052{
4053	stack_t new;
4054	if (copy_from_user(&new, uss, sizeof(stack_t)))
4055		return -EFAULT;
4056	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4057			     MINSIGSTKSZ);
4058	/* squash all but EFAULT for now */
4059	return 0;
4060}
4061
4062int __save_altstack(stack_t __user *uss, unsigned long sp)
4063{
4064	struct task_struct *t = current;
4065	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4066		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4067		__put_user(t->sas_ss_size, &uss->ss_size);
4068	if (err)
4069		return err;
4070	if (t->sas_ss_flags & SS_AUTODISARM)
4071		sas_ss_reset(t);
4072	return 0;
4073}
4074
4075#ifdef CONFIG_COMPAT
4076static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4077				 compat_stack_t __user *uoss_ptr)
4078{
4079	stack_t uss, uoss;
4080	int ret;
4081
4082	if (uss_ptr) {
4083		compat_stack_t uss32;
4084		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4085			return -EFAULT;
4086		uss.ss_sp = compat_ptr(uss32.ss_sp);
4087		uss.ss_flags = uss32.ss_flags;
4088		uss.ss_size = uss32.ss_size;
4089	}
4090	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4091			     compat_user_stack_pointer(),
4092			     COMPAT_MINSIGSTKSZ);
4093	if (ret >= 0 && uoss_ptr)  {
4094		compat_stack_t old;
4095		memset(&old, 0, sizeof(old));
4096		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4097		old.ss_flags = uoss.ss_flags;
4098		old.ss_size = uoss.ss_size;
4099		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4100			ret = -EFAULT;
4101	}
4102	return ret;
4103}
4104
4105COMPAT_SYSCALL_DEFINE2(sigaltstack,
4106			const compat_stack_t __user *, uss_ptr,
4107			compat_stack_t __user *, uoss_ptr)
4108{
4109	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4110}
4111
4112int compat_restore_altstack(const compat_stack_t __user *uss)
4113{
4114	int err = do_compat_sigaltstack(uss, NULL);
4115	/* squash all but -EFAULT for now */
4116	return err == -EFAULT ? err : 0;
4117}
4118
4119int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4120{
4121	int err;
4122	struct task_struct *t = current;
4123	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4124			 &uss->ss_sp) |
4125		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4126		__put_user(t->sas_ss_size, &uss->ss_size);
4127	if (err)
4128		return err;
4129	if (t->sas_ss_flags & SS_AUTODISARM)
4130		sas_ss_reset(t);
4131	return 0;
4132}
4133#endif
4134
4135#ifdef __ARCH_WANT_SYS_SIGPENDING
4136
4137/**
4138 *  sys_sigpending - examine pending signals
4139 *  @uset: where the mask of pending signals is returned
4140 */
4141SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4142{
4143	sigset_t set;
4144
4145	if (sizeof(old_sigset_t) > sizeof(*uset))
4146		return -EINVAL;
4147
4148	do_sigpending(&set);
4149
4150	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4151		return -EFAULT;
4152
4153	return 0;
4154}
4155
4156#ifdef CONFIG_COMPAT
4157COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4158{
4159	sigset_t set;
4160
4161	do_sigpending(&set);
4162
4163	return put_user(set.sig[0], set32);
4164}
4165#endif
4166
4167#endif
4168
4169#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4170/**
4171 *  sys_sigprocmask - examine and change blocked signals
4172 *  @how: whether to add, remove, or set signals
4173 *  @nset: signals to add or remove (if non-null)
4174 *  @oset: previous value of signal mask if non-null
4175 *
4176 * Some platforms have their own version with special arguments;
4177 * others support only sys_rt_sigprocmask.
4178 */
4179
4180SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4181		old_sigset_t __user *, oset)
4182{
4183	old_sigset_t old_set, new_set;
4184	sigset_t new_blocked;
4185
4186	old_set = current->blocked.sig[0];
4187
4188	if (nset) {
4189		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4190			return -EFAULT;
4191
4192		new_blocked = current->blocked;
4193
4194		switch (how) {
4195		case SIG_BLOCK:
4196			sigaddsetmask(&new_blocked, new_set);
4197			break;
4198		case SIG_UNBLOCK:
4199			sigdelsetmask(&new_blocked, new_set);
4200			break;
4201		case SIG_SETMASK:
4202			new_blocked.sig[0] = new_set;
4203			break;
4204		default:
4205			return -EINVAL;
4206		}
4207
4208		set_current_blocked(&new_blocked);
4209	}
4210
4211	if (oset) {
4212		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4213			return -EFAULT;
4214	}
4215
4216	return 0;
4217}
4218#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
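/*
 * Illustrative userspace sketch (editorial addition, not part of
 * signal.c): the three @how modes as seen through the POSIX wrapper.
 * Modern libcs call rt_sigprocmask instead, but the semantics for the
 * first word of the mask are the same as in the legacy path above.
 */
#if 0
#include <signal.h>

static void mask_examples(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	sigprocmask(SIG_BLOCK, &set, &old);	/* add SIGINT to the mask */
	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* remove it again */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore the saved mask */
}
#endif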
4219
4220#ifndef CONFIG_ODD_RT_SIGACTION
4221/**
4222 *  sys_rt_sigaction - alter an action taken by a process
4223 *  @sig: signal to be sent
4224 *  @act: new sigaction
4225 *  @oact: used to save the previous sigaction
4226 *  @sigsetsize: size of sigset_t type
4227 */
4228SYSCALL_DEFINE4(rt_sigaction, int, sig,
4229		const struct sigaction __user *, act,
4230		struct sigaction __user *, oact,
4231		size_t, sigsetsize)
4232{
4233	struct k_sigaction new_sa, old_sa;
4234	int ret;
4235
4236	/* XXX: Don't preclude handling different sized sigset_t's.  */
4237	if (sigsetsize != sizeof(sigset_t))
4238		return -EINVAL;
4239
4240	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4241		return -EFAULT;
4242
4243	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4244	if (ret)
4245		return ret;
4246
4247	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4248		return -EFAULT;
4249
4250	return 0;
4251}
4252#ifdef CONFIG_COMPAT
4253COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4254		const struct compat_sigaction __user *, act,
4255		struct compat_sigaction __user *, oact,
4256		compat_size_t, sigsetsize)
4257{
4258	struct k_sigaction new_ka, old_ka;
4259#ifdef __ARCH_HAS_SA_RESTORER
4260	compat_uptr_t restorer;
4261#endif
4262	int ret;
4263
4264	/* XXX: Don't preclude handling different sized sigset_t's.  */
4265	if (sigsetsize != sizeof(compat_sigset_t))
4266		return -EINVAL;
4267
4268	if (act) {
4269		compat_uptr_t handler;
4270		ret = get_user(handler, &act->sa_handler);
4271		new_ka.sa.sa_handler = compat_ptr(handler);
4272#ifdef __ARCH_HAS_SA_RESTORER
4273		ret |= get_user(restorer, &act->sa_restorer);
4274		new_ka.sa.sa_restorer = compat_ptr(restorer);
4275#endif
4276		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4277		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4278		if (ret)
4279			return -EFAULT;
4280	}
4281
4282	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4283	if (!ret && oact) {
4284		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
4285			       &oact->sa_handler);
4286		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4287					 sizeof(oact->sa_mask));
4288		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4289#ifdef __ARCH_HAS_SA_RESTORER
4290		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4291				&oact->sa_restorer);
4292#endif
4293	}
4294	return ret;
4295}
4296#endif
4297#endif /* !CONFIG_ODD_RT_SIGACTION */
4298
4299#ifdef CONFIG_OLD_SIGACTION
4300SYSCALL_DEFINE3(sigaction, int, sig,
4301		const struct old_sigaction __user *, act,
4302	        struct old_sigaction __user *, oact)
4303{
4304	struct k_sigaction new_ka, old_ka;
4305	int ret;
4306
4307	if (act) {
4308		old_sigset_t mask;
4309		if (!access_ok(act, sizeof(*act)) ||
4310		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4311		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4312		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4313		    __get_user(mask, &act->sa_mask))
4314			return -EFAULT;
4315#ifdef __ARCH_HAS_KA_RESTORER
4316		new_ka.ka_restorer = NULL;
4317#endif
4318		siginitset(&new_ka.sa.sa_mask, mask);
4319	}
4320
4321	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4322
4323	if (!ret && oact) {
4324		if (!access_ok(oact, sizeof(*oact)) ||
4325		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4326		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4327		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4328		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4329			return -EFAULT;
4330	}
4331
4332	return ret;
4333}
4334#endif
4335#ifdef CONFIG_COMPAT_OLD_SIGACTION
4336COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4337		const struct compat_old_sigaction __user *, act,
4338	        struct compat_old_sigaction __user *, oact)
4339{
4340	struct k_sigaction new_ka, old_ka;
4341	int ret;
4342	compat_old_sigset_t mask;
4343	compat_uptr_t handler, restorer;
4344
4345	if (act) {
4346		if (!access_ok(act, sizeof(*act)) ||
4347		    __get_user(handler, &act->sa_handler) ||
4348		    __get_user(restorer, &act->sa_restorer) ||
4349		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4350		    __get_user(mask, &act->sa_mask))
4351			return -EFAULT;
4352
4353#ifdef __ARCH_HAS_KA_RESTORER
4354		new_ka.ka_restorer = NULL;
4355#endif
4356		new_ka.sa.sa_handler = compat_ptr(handler);
4357		new_ka.sa.sa_restorer = compat_ptr(restorer);
4358		siginitset(&new_ka.sa.sa_mask, mask);
4359	}
4360
4361	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4362
4363	if (!ret && oact) {
4364		if (!access_ok(oact, sizeof(*oact)) ||
4365		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4366			       &oact->sa_handler) ||
4367		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4368			       &oact->sa_restorer) ||
4369		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4370		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4371			return -EFAULT;
4372	}
4373	return ret;
4374}
4375#endif
4376
4377#ifdef CONFIG_SGETMASK_SYSCALL
4378
4379/*
4380 * For backwards compatibility.  Functionality superseded by sigprocmask.
4381 */
4382SYSCALL_DEFINE0(sgetmask)
4383{
4384	/* SMP safe */
4385	return current->blocked.sig[0];
4386}
4387
4388SYSCALL_DEFINE1(ssetmask, int, newmask)
4389{
4390	int old = current->blocked.sig[0];
4391	sigset_t newset;
4392
4393	siginitset(&newset, newmask);
4394	set_current_blocked(&newset);
4395
4396	return old;
4397}
4398#endif /* CONFIG_SGETMASK_SYSCALL */
4399
4400#ifdef __ARCH_WANT_SYS_SIGNAL
4401/*
4402 * For backwards compatibility.  Functionality superseded by sigaction.
4403 */
4404SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4405{
4406	struct k_sigaction new_sa, old_sa;
4407	int ret;
4408
4409	new_sa.sa.sa_handler = handler;
4410	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4411	sigemptyset(&new_sa.sa.sa_mask);
4412
4413	ret = do_sigaction(sig, &new_sa, &old_sa);
4414
4415	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4416}
4417#endif /* __ARCH_WANT_SYS_SIGNAL */
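/*
 * Illustrative userspace sketch (editorial addition, not part of
 * signal.c): SA_ONESHOT | SA_NOMASK above gives this legacy syscall its
 * historical System V semantics -- the disposition resets to SIG_DFL on
 * delivery, so old-style code re-installs the handler each time.
 * (glibc's signal() wrapper uses sigaction() with BSD semantics instead.)
 */
#if 0
#include <signal.h>

static void on_int(int sig)
{
	signal(SIGINT, on_int);	/* re-arm the one-shot handler */
	/* ... handle the signal ... */
}
#endif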
4418
4419#ifdef __ARCH_WANT_SYS_PAUSE
4420
4421SYSCALL_DEFINE0(pause)
4422{
4423	while (!signal_pending(current)) {
4424		__set_current_state(TASK_INTERRUPTIBLE);
4425		schedule();
4426	}
4427	return -ERESTARTNOHAND;
4428}
4429
4430#endif
4431
4432static int sigsuspend(sigset_t *set)
4433{
4434	current->saved_sigmask = current->blocked;
4435	set_current_blocked(set);
4436
4437	while (!signal_pending(current)) {
4438		__set_current_state(TASK_INTERRUPTIBLE);
4439		schedule();
4440	}
4441	set_restore_sigmask();
4442	return -ERESTARTNOHAND;
4443}
4444
4445/**
4446 *  sys_rt_sigsuspend - replace the signal mask with the @unewset
4447 *	value until a signal is received
4448 *  @unewset: new signal mask value
4449 *  @sigsetsize: size of sigset_t type
4450 */
4451SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4452{
4453	sigset_t newset;
4454
4455	/* XXX: Don't preclude handling different sized sigset_t's.  */
4456	if (sigsetsize != sizeof(sigset_t))
4457		return -EINVAL;
4458
4459	if (copy_from_user(&newset, unewset, sizeof(newset)))
4460		return -EFAULT;
4461	return sigsuspend(&newset);
4462}
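/*
 * Illustrative userspace sketch (editorial addition, not part of
 * signal.c): the canonical race-free wait that sigsuspend() enables.
 * The mask swap and the sleep happen atomically in the kernel, closing
 * the check-then-wait window that pause() above leaves open.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

static void wait_for_sigusr1(void)
{
	sigset_t block, oldmask, waitmask;

	signal(SIGUSR1, on_usr1);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &oldmask);	/* close the race */

	waitmask = oldmask;
	sigdelset(&waitmask, SIGUSR1);
	while (!got_usr1)
		sigsuspend(&waitmask);	/* atomically unblock and sleep */

	sigprocmask(SIG_SETMASK, &oldmask, NULL);
}
#endif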
4463 
4464#ifdef CONFIG_COMPAT
4465COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4466{
4467	sigset_t newset;
4468
4469	/* XXX: Don't preclude handling different sized sigset_t's.  */
4470	if (sigsetsize != sizeof(sigset_t))
4471		return -EINVAL;
4472
4473	if (get_compat_sigset(&newset, unewset))
4474		return -EFAULT;
4475	return sigsuspend(&newset);
4476}
4477#endif
4478
4479#ifdef CONFIG_OLD_SIGSUSPEND
4480SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4481{
4482	sigset_t blocked;
4483	siginitset(&blocked, mask);
4484	return sigsuspend(&blocked);
4485}
4486#endif
4487#ifdef CONFIG_OLD_SIGSUSPEND3
4488SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4489{
4490	sigset_t blocked;
4491	siginitset(&blocked, mask);
4492	return sigsuspend(&blocked);
4493}
4494#endif
4495
4496__weak const char *arch_vma_name(struct vm_area_struct *vma)
4497{
4498	return NULL;
4499}
4500
4501static inline void siginfo_buildtime_checks(void)
4502{
4503	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4504
4505	/* Verify the offsets in the two siginfos match */
4506#define CHECK_OFFSET(field) \
4507	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4508
4509	/* kill */
4510	CHECK_OFFSET(si_pid);
4511	CHECK_OFFSET(si_uid);
4512
4513	/* timer */
4514	CHECK_OFFSET(si_tid);
4515	CHECK_OFFSET(si_overrun);
4516	CHECK_OFFSET(si_value);
4517
4518	/* rt */
4519	CHECK_OFFSET(si_pid);
4520	CHECK_OFFSET(si_uid);
4521	CHECK_OFFSET(si_value);
4522
4523	/* sigchld */
4524	CHECK_OFFSET(si_pid);
4525	CHECK_OFFSET(si_uid);
4526	CHECK_OFFSET(si_status);
4527	CHECK_OFFSET(si_utime);
4528	CHECK_OFFSET(si_stime);
4529
4530	/* sigfault */
4531	CHECK_OFFSET(si_addr);
4532	CHECK_OFFSET(si_addr_lsb);
4533	CHECK_OFFSET(si_lower);
4534	CHECK_OFFSET(si_upper);
4535	CHECK_OFFSET(si_pkey);
4536
4537	/* sigpoll */
4538	CHECK_OFFSET(si_band);
4539	CHECK_OFFSET(si_fd);
4540
4541	/* sigsys */
4542	CHECK_OFFSET(si_call_addr);
4543	CHECK_OFFSET(si_syscall);
4544	CHECK_OFFSET(si_arch);
4545#undef CHECK_OFFSET
4546
4547	/* usb asyncio */
4548	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4549		     offsetof(struct siginfo, si_addr));
4550	if (sizeof(int) == sizeof(void __user *)) {
4551		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4552			     sizeof(void __user *));
4553	} else {
4554		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4555			      sizeof_field(struct siginfo, si_uid)) !=
4556			     sizeof(void __user *));
4557		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4558			     offsetof(struct siginfo, si_uid));
4559	}
4560#ifdef CONFIG_COMPAT
4561	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4562		     offsetof(struct compat_siginfo, si_addr));
4563	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4564		     sizeof(compat_uptr_t));
4565	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4566		     sizeof_field(struct siginfo, si_pid));
4567#endif
4568}
4569
4570void __init signals_init(void)
4571{
4572	siginfo_buildtime_checks();
4573
4574	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4575}
4576
4577#ifdef CONFIG_KGDB_KDB
4578#include <linux/kdb.h>
4579/*
4580 * kdb_send_sig - Allows kdb to send signals without exposing
4581 * signal internals.  This function checks if the required locks are
4582 * available before calling the main signal code, to avoid kdb
4583 * deadlocks.
4584 */
4585void kdb_send_sig(struct task_struct *t, int sig)
4586{
4587	static struct task_struct *kdb_prev_t;
4588	int new_t, ret;
4589	if (!spin_trylock(&t->sighand->siglock)) {
4590		kdb_printf("Can't do kill command now.\n"
4591			   "The sigmask lock is held somewhere else in "
4592			   "the kernel, try again later\n");
4593		return;
4594	}
4595	new_t = kdb_prev_t != t;
4596	kdb_prev_t = t;
4597	if (t->state != TASK_RUNNING && new_t) {
4598		spin_unlock(&t->sighand->siglock);
4599		kdb_printf("Process is not RUNNING, sending a signal from "
4600			   "kdb risks deadlock\n"
4601			   "on the run queue locks. "
4602			   "The signal has _not_ been sent.\n"
4603			   "Reissue the kill command if you want to risk "
4604			   "the deadlock.\n");
4605		return;
4606	}
4607	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4608	spin_unlock(&t->sighand->siglock);
4609	if (ret)
4610		kdb_printf("Failed to deliver signal %d to process %d.\n",
4611			   sig, t->pid);
4612	else
4613		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4614}
4615#endif	/* CONFIG_KGDB_KDB */
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/kernel/signal.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   8 *
   9 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
  10 *		Changes to use preallocated sigqueue structures
  11 *		to allow signals to be sent reliably.
  12 */
  13
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/init.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/user.h>
  19#include <linux/sched/debug.h>
  20#include <linux/sched/task.h>
  21#include <linux/sched/task_stack.h>
  22#include <linux/sched/cputime.h>
  23#include <linux/file.h>
  24#include <linux/fs.h>
  25#include <linux/mm.h>
  26#include <linux/proc_fs.h>
  27#include <linux/tty.h>
  28#include <linux/binfmts.h>
  29#include <linux/coredump.h>
  30#include <linux/security.h>
  31#include <linux/syscalls.h>
  32#include <linux/ptrace.h>
  33#include <linux/signal.h>
  34#include <linux/signalfd.h>
  35#include <linux/ratelimit.h>
  36#include <linux/task_work.h>
  37#include <linux/capability.h>
  38#include <linux/freezer.h>
  39#include <linux/pid_namespace.h>
  40#include <linux/nsproxy.h>
  41#include <linux/user_namespace.h>
  42#include <linux/uprobes.h>
  43#include <linux/compat.h>
  44#include <linux/cn_proc.h>
  45#include <linux/compiler.h>
  46#include <linux/posix-timers.h>
 
  47#include <linux/cgroup.h>
  48#include <linux/audit.h>
  49#include <linux/sysctl.h>
  50
  51#define CREATE_TRACE_POINTS
  52#include <trace/events/signal.h>
  53
  54#include <asm/param.h>
  55#include <linux/uaccess.h>
  56#include <asm/unistd.h>
  57#include <asm/siginfo.h>
  58#include <asm/cacheflush.h>
  59#include <asm/syscall.h>	/* for syscall_get_* */
  60
  61/*
  62 * SLAB caches for signal bits.
  63 */
  64
  65static struct kmem_cache *sigqueue_cachep;
  66
  67int print_fatal_signals __read_mostly;
  68
  69static void __user *sig_handler(struct task_struct *t, int sig)
  70{
  71	return t->sighand->action[sig - 1].sa.sa_handler;
  72}
  73
  74static inline bool sig_handler_ignored(void __user *handler, int sig)
  75{
  76	/* Is it explicitly or implicitly ignored? */
  77	return handler == SIG_IGN ||
  78	       (handler == SIG_DFL && sig_kernel_ignore(sig));
  79}
  80
  81static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
  82{
  83	void __user *handler;
  84
  85	handler = sig_handler(t, sig);
  86
  87	/* SIGKILL and SIGSTOP may not be sent to the global init */
  88	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
  89		return true;
  90
  91	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  92	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
  93		return true;
  94
  95	/* Only allow kernel generated signals to this kthread */
  96	if (unlikely((t->flags & PF_KTHREAD) &&
  97		     (handler == SIG_KTHREAD_KERNEL) && !force))
  98		return true;
  99
 100	return sig_handler_ignored(handler, sig);
 101}
 102
 103static bool sig_ignored(struct task_struct *t, int sig, bool force)
 104{
 105	/*
 106	 * Blocked signals are never ignored, since the
 107	 * signal handler may change by the time it is
 108	 * unblocked.
 109	 */
 110	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 111		return false;
 112
 113	/*
 114	 * Tracers may want to know about even ignored signal unless it
 115	 * is SIGKILL which can't be reported anyway but can be ignored
 116	 * by SIGNAL_UNKILLABLE task.
 117	 */
 118	if (t->ptrace && sig != SIGKILL)
 119		return false;
 120
 121	return sig_task_ignored(t, sig, force);
 122}
 123
 124/*
 125 * Re-calculate pending state from the set of locally pending
 126 * signals, globally pending signals, and blocked signals.
 127 */
 128static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 129{
 130	unsigned long ready;
 131	long i;
 132
 133	switch (_NSIG_WORDS) {
 134	default:
 135		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 136			ready |= signal->sig[i] &~ blocked->sig[i];
 137		break;
 138
 139	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 140		ready |= signal->sig[2] &~ blocked->sig[2];
 141		ready |= signal->sig[1] &~ blocked->sig[1];
 142		ready |= signal->sig[0] &~ blocked->sig[0];
 143		break;
 144
 145	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 146		ready |= signal->sig[0] &~ blocked->sig[0];
 147		break;
 148
 149	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 150	}
 151	return ready !=	0;
 152}
 153
 154#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 155
 156static bool recalc_sigpending_tsk(struct task_struct *t)
 157{
 158	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
 159	    PENDING(&t->pending, &t->blocked) ||
 160	    PENDING(&t->signal->shared_pending, &t->blocked) ||
 161	    cgroup_task_frozen(t)) {
 162		set_tsk_thread_flag(t, TIF_SIGPENDING);
 163		return true;
 164	}
 165
 166	/*
 167	 * We must never clear the flag in another thread, or in current
 168	 * when it's possible the current syscall is returning -ERESTART*.
 169	 * So we don't clear it here, and only callers who know they should do.
 170	 */
 171	return false;
 172}
 173
 
 
 
 
 
 
 
 
 
 
 174void recalc_sigpending(void)
 175{
 176	if (!recalc_sigpending_tsk(current) && !freezing(current))
 
 177		clear_thread_flag(TIF_SIGPENDING);
 178
 179}
 180EXPORT_SYMBOL(recalc_sigpending);
 181
 182void calculate_sigpending(void)
 183{
 184	/* Have any signals or users of TIF_SIGPENDING been delayed
 185	 * until after fork?
 186	 */
 187	spin_lock_irq(&current->sighand->siglock);
 188	set_tsk_thread_flag(current, TIF_SIGPENDING);
 189	recalc_sigpending();
 190	spin_unlock_irq(&current->sighand->siglock);
 191}
 192
 193/* Given the mask, find the first available signal that should be serviced. */
 194
 195#define SYNCHRONOUS_MASK \
 196	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 197	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 198
 199int next_signal(struct sigpending *pending, sigset_t *mask)
 200{
 201	unsigned long i, *s, *m, x;
 202	int sig = 0;
 203
 204	s = pending->signal.sig;
 205	m = mask->sig;
 206
 207	/*
 208	 * Handle the first word specially: it contains the
 209	 * synchronous signals that need to be dequeued first.
 210	 */
 211	x = *s &~ *m;
 212	if (x) {
 213		if (x & SYNCHRONOUS_MASK)
 214			x &= SYNCHRONOUS_MASK;
 215		sig = ffz(~x) + 1;
 216		return sig;
 217	}
 218
 219	switch (_NSIG_WORDS) {
 220	default:
 221		for (i = 1; i < _NSIG_WORDS; ++i) {
 222			x = *++s &~ *++m;
 223			if (!x)
 224				continue;
 225			sig = ffz(~x) + i*_NSIG_BPW + 1;
 226			break;
 227		}
 228		break;
 229
 230	case 2:
 231		x = s[1] &~ m[1];
 232		if (!x)
 233			break;
 234		sig = ffz(~x) + _NSIG_BPW + 1;
 235		break;
 236
 237	case 1:
 238		/* Nothing to do */
 239		break;
 240	}
 241
 242	return sig;
 243}
 244
 245static inline void print_dropped_signal(int sig)
 246{
 247	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 248
 249	if (!print_fatal_signals)
 250		return;
 251
 252	if (!__ratelimit(&ratelimit_state))
 253		return;
 254
 255	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 256				current->comm, current->pid, sig);
 257}
 258
 259/**
 260 * task_set_jobctl_pending - set jobctl pending bits
 261 * @task: target task
 262 * @mask: pending bits to set
 263 *
 264 * Clear @mask from @task->jobctl.  @mask must be subset of
 265 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 266 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 267 * cleared.  If @task is already being killed or exiting, this function
 268 * becomes noop.
 269 *
 270 * CONTEXT:
 271 * Must be called with @task->sighand->siglock held.
 272 *
 273 * RETURNS:
 274 * %true if @mask is set, %false if made noop because @task was dying.
 275 */
 276bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 277{
 278	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 279			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 280	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 281
 282	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 283		return false;
 284
 285	if (mask & JOBCTL_STOP_SIGMASK)
 286		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 287
 288	task->jobctl |= mask;
 289	return true;
 290}
 291
 292/**
 293 * task_clear_jobctl_trapping - clear jobctl trapping bit
 294 * @task: target task
 295 *
 296 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 297 * Clear it and wake up the ptracer.  Note that we don't need any further
 298 * locking.  @task->siglock guarantees that @task->parent points to the
 299 * ptracer.
 300 *
 301 * CONTEXT:
 302 * Must be called with @task->sighand->siglock held.
 303 */
 304void task_clear_jobctl_trapping(struct task_struct *task)
 305{
 306	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 307		task->jobctl &= ~JOBCTL_TRAPPING;
 308		smp_mb();	/* advised by wake_up_bit() */
 309		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 310	}
 311}
 312
 313/**
 314 * task_clear_jobctl_pending - clear jobctl pending bits
 315 * @task: target task
 316 * @mask: pending bits to clear
 317 *
 318 * Clear @mask from @task->jobctl.  @mask must be subset of
 319 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 320 * STOP bits are cleared together.
 321 *
 322 * If clearing of @mask leaves no stop or trap pending, this function calls
 323 * task_clear_jobctl_trapping().
 324 *
 325 * CONTEXT:
 326 * Must be called with @task->sighand->siglock held.
 327 */
 328void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 329{
 330	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 331
 332	if (mask & JOBCTL_STOP_PENDING)
 333		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 334
 335	task->jobctl &= ~mask;
 336
 337	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 338		task_clear_jobctl_trapping(task);
 339}
 340
 341/**
 342 * task_participate_group_stop - participate in a group stop
 343 * @task: task participating in a group stop
 344 *
 345 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 346 * Group stop states are cleared and the group stop count is consumed if
 347 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 348 * stop, the appropriate `SIGNAL_*` flags are set.
 349 *
 350 * CONTEXT:
 351 * Must be called with @task->sighand->siglock held.
 352 *
 353 * RETURNS:
 354 * %true if group stop completion should be notified to the parent, %false
 355 * otherwise.
 356 */
 357static bool task_participate_group_stop(struct task_struct *task)
 358{
 359	struct signal_struct *sig = task->signal;
 360	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 361
 362	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 363
 364	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 365
 366	if (!consume)
 367		return false;
 368
 369	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 370		sig->group_stop_count--;
 371
 372	/*
 373	 * Tell the caller to notify completion iff we are entering into a
 374	 * fresh group stop.  Read comment in do_signal_stop() for details.
 375	 */
 376	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 377		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 378		return true;
 379	}
 380	return false;
 381}
 382
 383void task_join_group_stop(struct task_struct *task)
 384{
 385	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
 386	struct signal_struct *sig = current->signal;
 387
 388	if (sig->group_stop_count) {
 389		sig->group_stop_count++;
 390		mask |= JOBCTL_STOP_CONSUME;
 391	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
 392		return;
 393
 394	/* Have the new thread join an on-going signal group stop */
 395	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 
 
 
 
 
 
 
 
 396}
 397
 398/*
 399 * allocate a new signal queue record
 400 * - this may be called without locks if and only if t == current, otherwise an
 401 *   appropriate lock must be held to stop the target task from exiting
 402 */
 403static struct sigqueue *
 404__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 405		 int override_rlimit, const unsigned int sigqueue_flags)
 406{
 407	struct sigqueue *q = NULL;
 408	struct ucounts *ucounts;
 409	long sigpending;
 410
 411	/*
 412	 * Protect access to @t credentials. This can go away when all
 413	 * callers hold rcu read lock.
 414	 *
 415	 * NOTE! A pending signal will hold on to the user refcount,
 416	 * and we get/put the refcount only when the sigpending count
 417	 * changes from/to zero.
 418	 */
 419	rcu_read_lock();
 420	ucounts = task_ucounts(t);
 421	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 422	rcu_read_unlock();
 423	if (!sigpending)
 424		return NULL;
 425
 426	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
 427		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
 
 
 428	} else {
 429		print_dropped_signal(sig);
 430	}
 431
 432	if (unlikely(q == NULL)) {
 433		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 
 434	} else {
 435		INIT_LIST_HEAD(&q->list);
 436		q->flags = sigqueue_flags;
 437		q->ucounts = ucounts;
 438	}
 
 439	return q;
 440}
 441
 442static void __sigqueue_free(struct sigqueue *q)
 443{
 444	if (q->flags & SIGQUEUE_PREALLOC)
 445		return;
 446	if (q->ucounts) {
 447		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
 448		q->ucounts = NULL;
 449	}
 450	kmem_cache_free(sigqueue_cachep, q);
 451}
 452
 453void flush_sigqueue(struct sigpending *queue)
 454{
 455	struct sigqueue *q;
 456
 457	sigemptyset(&queue->signal);
 458	while (!list_empty(&queue->list)) {
 459		q = list_entry(queue->list.next, struct sigqueue , list);
 460		list_del_init(&q->list);
 461		__sigqueue_free(q);
 462	}
 463}
 464
 465/*
 466 * Flush all pending signals for this kthread.
 467 */
 468void flush_signals(struct task_struct *t)
 469{
 470	unsigned long flags;
 471
 472	spin_lock_irqsave(&t->sighand->siglock, flags);
 473	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 474	flush_sigqueue(&t->pending);
 475	flush_sigqueue(&t->signal->shared_pending);
 476	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 477}
 478EXPORT_SYMBOL(flush_signals);
 479
 480#ifdef CONFIG_POSIX_TIMERS
 481static void __flush_itimer_signals(struct sigpending *pending)
 482{
 483	sigset_t signal, retain;
 484	struct sigqueue *q, *n;
 485
 486	signal = pending->signal;
 487	sigemptyset(&retain);
 488
 489	list_for_each_entry_safe(q, n, &pending->list, list) {
 490		int sig = q->info.si_signo;
 491
 492		if (likely(q->info.si_code != SI_TIMER)) {
 493			sigaddset(&retain, sig);
 494		} else {
 495			sigdelset(&signal, sig);
 496			list_del_init(&q->list);
 497			__sigqueue_free(q);
 498		}
 499	}
 500
 501	sigorsets(&pending->signal, &signal, &retain);
 502}
 503
 504void flush_itimer_signals(void)
 505{
 506	struct task_struct *tsk = current;
 507	unsigned long flags;
 508
 509	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 510	__flush_itimer_signals(&tsk->pending);
 511	__flush_itimer_signals(&tsk->signal->shared_pending);
 512	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 513}
 514#endif
 515
 516void ignore_signals(struct task_struct *t)
 517{
 518	int i;
 519
 520	for (i = 0; i < _NSIG; ++i)
 521		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 522
 523	flush_signals(t);
 524}
 525
 526/*
 527 * Flush all handlers for a task.
 528 */
 529
 530void
 531flush_signal_handlers(struct task_struct *t, int force_default)
 532{
 533	int i;
 534	struct k_sigaction *ka = &t->sighand->action[0];
 535	for (i = _NSIG ; i != 0 ; i--) {
 536		if (force_default || ka->sa.sa_handler != SIG_IGN)
 537			ka->sa.sa_handler = SIG_DFL;
 538		ka->sa.sa_flags = 0;
 539#ifdef __ARCH_HAS_SA_RESTORER
 540		ka->sa.sa_restorer = NULL;
 541#endif
 542		sigemptyset(&ka->sa.sa_mask);
 543		ka++;
 544	}
 545}
 546
 547bool unhandled_signal(struct task_struct *tsk, int sig)
 548{
 549	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 550	if (is_global_init(tsk))
 551		return true;
 552
 553	if (handler != SIG_IGN && handler != SIG_DFL)
 554		return false;
 555
 556	/* If dying, we handle all new signals by ignoring them */
 557	if (fatal_signal_pending(tsk))
 558		return false;
 559
 560	/* if ptraced, let the tracer determine */
 561	return !tsk->ptrace;
 562}
 563
 564static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
 565			   bool *resched_timer)
 566{
 567	struct sigqueue *q, *first = NULL;
 568
 569	/*
 570	 * Collect the siginfo appropriate to this signal.  Check if
 571	 * there is another siginfo for the same signal.
 572	*/
 573	list_for_each_entry(q, &list->list, list) {
 574		if (q->info.si_signo == sig) {
 575			if (first)
 576				goto still_pending;
 577			first = q;
 578		}
 579	}
 580
 581	sigdelset(&list->signal, sig);
 582
 583	if (first) {
 584still_pending:
 585		list_del_init(&first->list);
 586		copy_siginfo(info, &first->info);
 587
 588		*resched_timer =
 589			(first->flags & SIGQUEUE_PREALLOC) &&
 590			(info->si_code == SI_TIMER) &&
 591			(info->si_sys_private);
 592
 593		__sigqueue_free(first);
 594	} else {
 595		/*
 596		 * Ok, it wasn't in the queue.  This must be
 597		 * a fast-pathed signal or we must have been
 598		 * out of queue space.  So zero out the info.
 599		 */
 600		clear_siginfo(info);
 601		info->si_signo = sig;
 602		info->si_errno = 0;
 603		info->si_code = SI_USER;
 604		info->si_pid = 0;
 605		info->si_uid = 0;
 606	}
 607}
 608
 609static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 610			kernel_siginfo_t *info, bool *resched_timer)
 611{
 612	int sig = next_signal(pending, mask);
 613
 614	if (sig)
 615		collect_signal(sig, pending, info, resched_timer);
 616	return sig;
 617}
 618
 619/*
 620 * Dequeue a signal and return the element to the caller, which is
 621 * expected to free it.
 622 *
 623 * All callers have to hold the siglock.
 624 */
 625int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
 626		   kernel_siginfo_t *info, enum pid_type *type)
 627{
 628	bool resched_timer = false;
 629	int signr;
 630
 631	/* We only dequeue private signals from ourselves, we don't let
 632	 * signalfd steal them
 633	 */
 634	*type = PIDTYPE_PID;
 635	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 636	if (!signr) {
 637		*type = PIDTYPE_TGID;
 638		signr = __dequeue_signal(&tsk->signal->shared_pending,
 639					 mask, info, &resched_timer);
 640#ifdef CONFIG_POSIX_TIMERS
 641		/*
 642		 * itimer signal ?
 643		 *
 644		 * itimers are process shared and we restart periodic
 645		 * itimers in the signal delivery path to prevent DoS
 646		 * attacks in the high resolution timer case. This is
 647		 * compliant with the old way of self-restarting
 648		 * itimers, as the SIGALRM is a legacy signal and only
 649		 * queued once. Changing the restart behaviour to
 650		 * restart the timer in the signal dequeue path is
 651		 * reducing the timer noise on heavy loaded !highres
 652		 * systems too.
 653		 */
 654		if (unlikely(signr == SIGALRM)) {
 655			struct hrtimer *tmr = &tsk->signal->real_timer;
 656
 657			if (!hrtimer_is_queued(tmr) &&
 658			    tsk->signal->it_real_incr != 0) {
 659				hrtimer_forward(tmr, tmr->base->get_time(),
 660						tsk->signal->it_real_incr);
 661				hrtimer_restart(tmr);
 662			}
 663		}
 664#endif
 665	}
 666
 667	recalc_sigpending();
 668	if (!signr)
 669		return 0;
 670
 671	if (unlikely(sig_kernel_stop(signr))) {
 672		/*
 673		 * Set a marker that we have dequeued a stop signal.  Our
 674		 * caller might release the siglock and then the pending
 675		 * stop signal it is about to process is no longer in the
 676		 * pending bitmasks, but must still be cleared by a SIGCONT
 677		 * (and overruled by a SIGKILL).  So those cases clear this
 678		 * shared flag after we've set it.  Note that this flag may
 679		 * remain set after the signal we return is ignored or
 680		 * handled.  That doesn't matter because its only purpose
 681		 * is to alert stop-signal processing code when another
 682		 * processor has come along and cleared the flag.
 683		 */
 684		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 685	}
 686#ifdef CONFIG_POSIX_TIMERS
 687	if (resched_timer) {
 688		/*
 689		 * Release the siglock to ensure proper locking order
 690		 * of timer locks outside of siglocks.  Note, we leave
 691		 * irqs disabled here, since the posix-timers code is
 692		 * about to disable them again anyway.
 693		 */
 694		spin_unlock(&tsk->sighand->siglock);
 695		posixtimer_rearm(info);
 696		spin_lock(&tsk->sighand->siglock);
 697
 698		/* Don't expose the si_sys_private value to userspace */
 699		info->si_sys_private = 0;
 700	}
 701#endif
 702	return signr;
 703}
 704EXPORT_SYMBOL_GPL(dequeue_signal);
 705
 706static int dequeue_synchronous_signal(kernel_siginfo_t *info)
 707{
 708	struct task_struct *tsk = current;
 709	struct sigpending *pending = &tsk->pending;
 710	struct sigqueue *q, *sync = NULL;
 711
 712	/*
 713	 * Might a synchronous signal be in the queue?
 714	 */
 715	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
 716		return 0;
 717
 718	/*
 719	 * Return the first synchronous signal in the queue.
 720	 */
 721	list_for_each_entry(q, &pending->list, list) {
 722		/* Synchronous signals have a positive si_code */
 723		if ((q->info.si_code > SI_USER) &&
 724		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
 725			sync = q;
 726			goto next;
 727		}
 728	}
 729	return 0;
 730next:
 731	/*
 732	 * Check if there is another siginfo for the same signal.
 733	 */
 734	list_for_each_entry_continue(q, &pending->list, list) {
 735		if (q->info.si_signo == sync->info.si_signo)
 736			goto still_pending;
 737	}
 738
 739	sigdelset(&pending->signal, sync->info.si_signo);
 740	recalc_sigpending();
 741still_pending:
 742	list_del_init(&sync->list);
 743	copy_siginfo(info, &sync->info);
 744	__sigqueue_free(sync);
 745	return info->si_signo;
 746}
 747
 748/*
 749 * Tell a process that it has a new active signal..
 750 *
 751 * NOTE! we rely on the previous spin_lock to
 752 * lock interrupts for us! We can only be called with
 753 * "siglock" held, and the local interrupt must
 754 * have been disabled when that got acquired!
 755 *
 756 * No need to set need_resched since signal event passing
 757 * goes through ->blocked
 758 */
 759void signal_wake_up_state(struct task_struct *t, unsigned int state)
 760{
 761	lockdep_assert_held(&t->sighand->siglock);
 762
 763	set_tsk_thread_flag(t, TIF_SIGPENDING);
 764
 765	/*
 766	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 767	 * case. We don't check t->state here because there is a race with it
 768	 * executing another processor and just now entering stopped state.
 769	 * By using wake_up_state, we ensure the process will wake up and
 770	 * handle its death signal.
 771	 */
 772	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 773		kick_process(t);
 774}
 775
 776/*
 777 * Remove signals in mask from the pending set and queue.
 778 * Returns 1 if any signals were found.
 779 *
 780 * All callers must be holding the siglock.
 781 */
 782static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 783{
 784	struct sigqueue *q, *n;
 785	sigset_t m;
 786
 787	sigandsets(&m, mask, &s->signal);
 788	if (sigisemptyset(&m))
 789		return;
 790
 791	sigandnsets(&s->signal, &s->signal, mask);
 792	list_for_each_entry_safe(q, n, &s->list, list) {
 793		if (sigismember(mask, q->info.si_signo)) {
 794			list_del_init(&q->list);
 795			__sigqueue_free(q);
 796		}
 797	}
 798}
 799
 800static inline int is_si_special(const struct kernel_siginfo *info)
 801{
 802	return info <= SEND_SIG_PRIV;
 803}
 804
 805static inline bool si_fromuser(const struct kernel_siginfo *info)
 806{
 807	return info == SEND_SIG_NOINFO ||
 808		(!is_si_special(info) && SI_FROMUSER(info));
 809}
 810
 811/*
 812 * called with RCU read lock from check_kill_permission()
 813 */
 814static bool kill_ok_by_cred(struct task_struct *t)
 815{
 816	const struct cred *cred = current_cred();
 817	const struct cred *tcred = __task_cred(t);
 818
 819	return uid_eq(cred->euid, tcred->suid) ||
 820	       uid_eq(cred->euid, tcred->uid) ||
 821	       uid_eq(cred->uid, tcred->suid) ||
 822	       uid_eq(cred->uid, tcred->uid) ||
 823	       ns_capable(tcred->user_ns, CAP_KILL);
 824}
 825
 826/*
 827 * Bad permissions for sending the signal
 828 * - the caller must hold the RCU read lock
 829 */
 830static int check_kill_permission(int sig, struct kernel_siginfo *info,
 831				 struct task_struct *t)
 832{
 833	struct pid *sid;
 834	int error;
 835
 836	if (!valid_signal(sig))
 837		return -EINVAL;
 838
 839	if (!si_fromuser(info))
 840		return 0;
 841
 842	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 843	if (error)
 844		return error;
 845
 846	if (!same_thread_group(current, t) &&
 847	    !kill_ok_by_cred(t)) {
 848		switch (sig) {
 849		case SIGCONT:
 850			sid = task_session(t);
 851			/*
 852			 * We don't return the error if sid == NULL. The
 853			 * task was unhashed, the caller must notice this.
 854			 */
 855			if (!sid || sid == task_session(current))
 856				break;
 857			fallthrough;
 858		default:
 859			return -EPERM;
 860		}
 861	}
 862
 863	return security_task_kill(t, info, sig, NULL);
 864}
 865
 866/**
 867 * ptrace_trap_notify - schedule trap to notify ptracer
 868 * @t: tracee wanting to notify tracer
 869 *
 870 * This function schedules sticky ptrace trap which is cleared on the next
 871 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 872 * ptracer.
 873 *
 874 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 875 * ptracer is listening for events, tracee is woken up so that it can
 876 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 877 * eventually taken without returning to userland after the existing traps
 878 * are finished by PTRACE_CONT.
 879 *
 880 * CONTEXT:
 881 * Must be called with @task->sighand->siglock held.
 882 */
 883static void ptrace_trap_notify(struct task_struct *t)
 884{
 885	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 886	lockdep_assert_held(&t->sighand->siglock);
 887
 888	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 889	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 890}
 891
 892/*
 893 * Handle magic process-wide effects of stop/continue signals. Unlike
 894 * the signal actions, these happen immediately at signal-generation
 895 * time regardless of blocking, ignoring, or handling.  This does the
 896 * actual continuing for SIGCONT, but not the actual stopping for stop
 897 * signals. The process stop is done as a signal action for SIG_DFL.
 898 *
 899 * Returns true if the signal should be actually delivered, otherwise
 900 * it should be dropped.
 901 */
 902static bool prepare_signal(int sig, struct task_struct *p, bool force)
 903{
 904	struct signal_struct *signal = p->signal;
 905	struct task_struct *t;
 906	sigset_t flush;
 907
 908	if (signal->flags & SIGNAL_GROUP_EXIT) {
 909		if (signal->core_state)
 910			return sig == SIGKILL;
 911		/*
 912		 * The process is in the middle of dying, drop the signal.
 913		 */
 914		return false;
 915	} else if (sig_kernel_stop(sig)) {
 916		/*
 917		 * This is a stop signal.  Remove SIGCONT from all queues.
 918		 */
 919		siginitset(&flush, sigmask(SIGCONT));
 920		flush_sigqueue_mask(&flush, &signal->shared_pending);
 921		for_each_thread(p, t)
 922			flush_sigqueue_mask(&flush, &t->pending);
 923	} else if (sig == SIGCONT) {
 924		unsigned int why;
 925		/*
 926		 * Remove all stop signals from all queues, wake all threads.
 927		 */
 928		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 929		flush_sigqueue_mask(&flush, &signal->shared_pending);
 930		for_each_thread(p, t) {
 931			flush_sigqueue_mask(&flush, &t->pending);
 932			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 933			if (likely(!(t->ptrace & PT_SEIZED))) {
 934				t->jobctl &= ~JOBCTL_STOPPED;
 935				wake_up_state(t, __TASK_STOPPED);
 936			} else
 937				ptrace_trap_notify(t);
 938		}
 939
 940		/*
 941		 * Notify the parent with CLD_CONTINUED if we were stopped.
 942		 *
 943		 * If we were in the middle of a group stop, we pretend it
 944		 * was already finished, and then continued. Since SIGCHLD
 945		 * doesn't queue we report only CLD_STOPPED, as if the next
 946		 * CLD_CONTINUED was dropped.
 947		 */
 948		why = 0;
 949		if (signal->flags & SIGNAL_STOP_STOPPED)
 950			why |= SIGNAL_CLD_CONTINUED;
 951		else if (signal->group_stop_count)
 952			why |= SIGNAL_CLD_STOPPED;
 953
 954		if (why) {
 955			/*
 956			 * The first thread which returns from do_signal_stop()
 957			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 958			 * notify its parent. See get_signal().
 959			 */
 960			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 961			signal->group_stop_count = 0;
 962			signal->group_exit_code = 0;
 963		}
 964	}
 965
 966	return !sig_ignored(p, sig, force);
 967}
 968
 969/*
 970 * Test if P wants to take SIG.  After we've checked all threads with this,
 971 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 972 * blocking SIG were ruled out because they are not running and already
 973 * have pending signals.  Such threads will dequeue from the shared queue
 974 * as soon as they're available, so putting the signal on the shared queue
 975 * will be equivalent to sending it to one such thread.
 976 */
 977static inline bool wants_signal(int sig, struct task_struct *p)
 978{
 979	if (sigismember(&p->blocked, sig))
 980		return false;
 981
 982	if (p->flags & PF_EXITING)
 983		return false;
 984
 985	if (sig == SIGKILL)
 986		return true;
 987
 988	if (task_is_stopped_or_traced(p))
 989		return false;
 990
 991	return task_curr(p) || !task_sigpending(p);
 992}
 993
 994static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 995{
 996	struct signal_struct *signal = p->signal;
 997	struct task_struct *t;
 998
 999	/*
1000	 * Now find a thread we can wake up to take the signal off the queue.
1001	 *
1002	 * Try the suggested task first (may or may not be the main thread).
 
1003	 */
1004	if (wants_signal(sig, p))
1005		t = p;
1006	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1007		/*
1008		 * There is just one thread and it does not need to be woken.
1009		 * It will dequeue unblocked signals before it runs again.
1010		 */
1011		return;
1012	else {
1013		/*
1014		 * Otherwise try to find a suitable thread.
1015		 */
1016		t = signal->curr_target;
1017		while (!wants_signal(sig, t)) {
1018			t = next_thread(t);
1019			if (t == signal->curr_target)
1020				/*
1021				 * No thread needs to be woken.
1022				 * Any eligible threads will see
1023				 * the signal in the queue soon.
1024				 */
1025				return;
1026		}
1027		signal->curr_target = t;
1028	}
1029
1030	/*
1031	 * Found a killable thread.  If the signal will be fatal,
1032	 * then start taking the whole group down immediately.
1033	 */
1034	if (sig_fatal(p, sig) &&
1035	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1036	    !sigismember(&t->real_blocked, sig) &&
1037	    (sig == SIGKILL || !p->ptrace)) {
1038		/*
1039		 * This signal will be fatal to the whole group.
1040		 */
1041		if (!sig_kernel_coredump(sig)) {
1042			/*
1043			 * Start a group exit and wake everybody up.
1044			 * This way we don't have other threads
1045			 * running and doing things after a slower
1046			 * thread has the fatal signal pending.
1047			 */
1048			signal->flags = SIGNAL_GROUP_EXIT;
1049			signal->group_exit_code = sig;
1050			signal->group_stop_count = 0;
1051			__for_each_thread(signal, t) {
1052				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1053				sigaddset(&t->pending.signal, SIGKILL);
1054				signal_wake_up(t, 1);
1055			}
1056			return;
1057		}
1058	}
1059
1060	/*
1061	 * The signal is already in the shared-pending queue.
1062	 * Tell the chosen thread to wake up and dequeue it.
1063	 */
1064	signal_wake_up(t, sig == SIGKILL);
1065	return;
1066}
1067
1068static inline bool legacy_queue(struct sigpending *signals, int sig)
1069{
1070	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1071}
1072
1073static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1074				struct task_struct *t, enum pid_type type, bool force)
1075{
1076	struct sigpending *pending;
1077	struct sigqueue *q;
1078	int override_rlimit;
1079	int ret = 0, result;
1080
1081	lockdep_assert_held(&t->sighand->siglock);
1082
1083	result = TRACE_SIGNAL_IGNORED;
1084	if (!prepare_signal(sig, t, force))
1085		goto ret;
1086
1087	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1088	/*
1089	 * Short-circuit ignored signals and support queuing
1090	 * exactly one non-rt signal, so that we can get more
1091	 * detailed information about the cause of the signal.
1092	 */
1093	result = TRACE_SIGNAL_ALREADY_PENDING;
1094	if (legacy_queue(pending, sig))
1095		goto ret;
1096
1097	result = TRACE_SIGNAL_DELIVERED;
1098	/*
1099	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1100	 */
1101	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1102		goto out_set;
1103
1104	/*
1105	 * Real-time signals must be queued if sent by sigqueue, or
1106	 * some other real-time mechanism.  It is implementation
1107	 * defined whether kill() does so.  We attempt to do so, on
1108	 * the principle of least surprise, but since kill is not
1109	 * allowed to fail with EAGAIN when low on memory we just
1110	 * make sure at least one signal gets delivered and don't
1111	 * pass on the info struct.
1112	 */
1113	if (sig < SIGRTMIN)
1114		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1115	else
1116		override_rlimit = 0;
1117
1118	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1119
1120	if (q) {
1121		list_add_tail(&q->list, &pending->list);
1122		switch ((unsigned long) info) {
1123		case (unsigned long) SEND_SIG_NOINFO:
1124			clear_siginfo(&q->info);
1125			q->info.si_signo = sig;
1126			q->info.si_errno = 0;
1127			q->info.si_code = SI_USER;
1128			q->info.si_pid = task_tgid_nr_ns(current,
1129							task_active_pid_ns(t));
1130			rcu_read_lock();
1131			q->info.si_uid =
1132				from_kuid_munged(task_cred_xxx(t, user_ns),
1133						 current_uid());
1134			rcu_read_unlock();
1135			break;
1136		case (unsigned long) SEND_SIG_PRIV:
1137			clear_siginfo(&q->info);
1138			q->info.si_signo = sig;
1139			q->info.si_errno = 0;
1140			q->info.si_code = SI_KERNEL;
1141			q->info.si_pid = 0;
1142			q->info.si_uid = 0;
1143			break;
1144		default:
1145			copy_siginfo(&q->info, info);
1146			break;
1147		}
1148	} else if (!is_si_special(info) &&
1149		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1150		/*
1151		 * Queue overflow, abort.  We may abort if the
1152		 * signal was rt and sent by user using something
1153		 * other than kill().
1154		 */
1155		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1156		ret = -EAGAIN;
1157		goto ret;
1158	} else {
1159		/*
1160		 * This is a silent loss of information.  We still
1161		 * send the signal, but the *info bits are lost.
1162		 */
1163		result = TRACE_SIGNAL_LOSE_INFO;
1164	}
1165
1166out_set:
1167	signalfd_notify(t, sig);
1168	sigaddset(&pending->signal, sig);
1169
1170	/* Let multiprocess signals appear after ongoing forks */
1171	if (type > PIDTYPE_TGID) {
1172		struct multiprocess_signals *delayed;
1173		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1174			sigset_t *signal = &delayed->signal;
1175			/* Can't queue both a stop and a continue signal */
1176			if (sig == SIGCONT)
1177				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1178			else if (sig_kernel_stop(sig))
1179				sigdelset(signal, SIGCONT);
1180			sigaddset(signal, sig);
1181		}
1182	}
1183
1184	complete_signal(sig, t, type);
1185ret:
1186	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1187	return ret;
1188}
1189
1190static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1191{
1192	bool ret = false;
1193	switch (siginfo_layout(info->si_signo, info->si_code)) {
1194	case SIL_KILL:
1195	case SIL_CHLD:
1196	case SIL_RT:
1197		ret = true;
1198		break;
1199	case SIL_TIMER:
1200	case SIL_POLL:
1201	case SIL_FAULT:
1202	case SIL_FAULT_TRAPNO:
1203	case SIL_FAULT_MCEERR:
1204	case SIL_FAULT_BNDERR:
1205	case SIL_FAULT_PKUERR:
1206	case SIL_FAULT_PERF_EVENT:
1207	case SIL_SYS:
1208		ret = false;
1209		break;
1210	}
1211	return ret;
1212}
1213
1214int send_signal_locked(int sig, struct kernel_siginfo *info,
1215		       struct task_struct *t, enum pid_type type)
1216{
1217	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1218	bool force = false;
1219
1220	if (info == SEND_SIG_NOINFO) {
1221		/* Force if sent from an ancestor pid namespace */
1222		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1223	} else if (info == SEND_SIG_PRIV) {
1224		/* Don't ignore kernel generated signals */
1225		force = true;
1226	} else if (has_si_pid_and_uid(info)) {
1227		/* SIGKILL and SIGSTOP are special or have ids */
1228		struct user_namespace *t_user_ns;
1229
1230		rcu_read_lock();
1231		t_user_ns = task_cred_xxx(t, user_ns);
1232		if (current_user_ns() != t_user_ns) {
1233			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1234			info->si_uid = from_kuid_munged(t_user_ns, uid);
1235		}
1236		rcu_read_unlock();
1237
1238		/* A kernel generated signal? */
1239		force = (info->si_code == SI_KERNEL);
1240
1241		/* From an ancestor pid namespace? */
1242		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1243			info->si_pid = 0;
1244			force = true;
1245		}
1246	}
1247	return __send_signal_locked(sig, info, t, type, force);
1248}
1249
1250static void print_fatal_signal(int signr)
1251{
1252	struct pt_regs *regs = task_pt_regs(current);
1253	struct file *exe_file;
1254
1255	exe_file = get_task_exe_file(current);
1256	if (exe_file) {
1257		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1258			exe_file, current->comm, signr);
1259		fput(exe_file);
1260	} else {
1261		pr_info("%s: potentially unexpected fatal signal %d.\n",
1262			current->comm, signr);
1263	}
1264
1265#if defined(__i386__) && !defined(__arch_um__)
1266	pr_info("code at %08lx: ", regs->ip);
1267	{
1268		int i;
1269		for (i = 0; i < 16; i++) {
1270			unsigned char insn;
1271
1272			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1273				break;
1274			pr_cont("%02x ", insn);
1275		}
1276	}
1277	pr_cont("\n");
1278#endif
1279	preempt_disable();
1280	show_regs(regs);
1281	preempt_enable();
1282}
1283
1284static int __init setup_print_fatal_signals(char *str)
1285{
1286	get_option (&str, &print_fatal_signals);
1287
1288	return 1;
1289}
1290
1291__setup("print-fatal-signals=", setup_print_fatal_signals);
1292
1293int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1294			enum pid_type type)
1295{
1296	unsigned long flags;
1297	int ret = -ESRCH;
1298
1299	if (lock_task_sighand(p, &flags)) {
1300		ret = send_signal_locked(sig, info, p, type);
1301		unlock_task_sighand(p, &flags);
1302	}
1303
1304	return ret;
1305}
1306
1307enum sig_handler {
1308	HANDLER_CURRENT, /* If reachable use the current handler */
1309	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1310	HANDLER_EXIT,	 /* Only visible as the process exit code */
1311};
1312
1313/*
1314 * Force a signal that the process can't ignore: if necessary
1315 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1316 *
1317 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1318 * since we do not want to have a signal handler that was blocked
1319 * be invoked when user space had explicitly blocked it.
1320 *
1321 * We don't want to have recursive SIGSEGV's etc, for example,
1322 * that is why we also clear SIGNAL_UNKILLABLE.
1323 */
1324static int
1325force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1326	enum sig_handler handler)
1327{
1328	unsigned long int flags;
1329	int ret, blocked, ignored;
1330	struct k_sigaction *action;
1331	int sig = info->si_signo;
1332
1333	spin_lock_irqsave(&t->sighand->siglock, flags);
1334	action = &t->sighand->action[sig-1];
1335	ignored = action->sa.sa_handler == SIG_IGN;
1336	blocked = sigismember(&t->blocked, sig);
1337	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1338		action->sa.sa_handler = SIG_DFL;
1339		if (handler == HANDLER_EXIT)
1340			action->sa.sa_flags |= SA_IMMUTABLE;
1341		if (blocked)
1342			sigdelset(&t->blocked, sig);
1343	}
1344	/*
1345	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1346	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1347	 */
1348	if (action->sa.sa_handler == SIG_DFL &&
1349	    (!t->ptrace || (handler == HANDLER_EXIT)))
1350		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1351	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1352	/* This can happen if the signal was already pending and blocked */
1353	if (!task_sigpending(t))
1354		signal_wake_up(t, 0);
1355	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1356
1357	return ret;
1358}
1359
1360int force_sig_info(struct kernel_siginfo *info)
1361{
1362	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1363}
1364
1365/*
1366 * Nuke all other threads in the group.
1367 */
1368int zap_other_threads(struct task_struct *p)
1369{
1370	struct task_struct *t;
1371	int count = 0;
1372
1373	p->signal->group_stop_count = 0;
1374
1375	for_other_threads(p, t) {
1376		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1377		/* Don't require de_thread to wait for the vhost_worker */
1378		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
1379			count++;
1380
1381		/* Don't bother with already dead threads */
1382		if (t->exit_state)
1383			continue;
1384		sigaddset(&t->pending.signal, SIGKILL);
1385		signal_wake_up(t, 1);
1386	}
1387
1388	return count;
1389}
1390
1391struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1392					   unsigned long *flags)
1393{
1394	struct sighand_struct *sighand;
1395
1396	rcu_read_lock();
1397	for (;;) {
1398		sighand = rcu_dereference(tsk->sighand);
1399		if (unlikely(sighand == NULL))
1400			break;
1401
1402		/*
1403		 * This sighand can be already freed and even reused, but
1404		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1405		 * initializes ->siglock: this slab can't go away, it has
1406		 * the same object type, ->siglock can't be reinitialized.
1407		 *
1408		 * We need to ensure that tsk->sighand is still the same
1409		 * after we take the lock, we can race with de_thread() or
1410		 * __exit_signal(). In the latter case the next iteration
1411		 * must see ->sighand == NULL.
1412		 */
1413		spin_lock_irqsave(&sighand->siglock, *flags);
1414		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1415			break;
1416		spin_unlock_irqrestore(&sighand->siglock, *flags);
1417	}
1418	rcu_read_unlock();
1419
1420	return sighand;
1421}
1422
1423#ifdef CONFIG_LOCKDEP
1424void lockdep_assert_task_sighand_held(struct task_struct *task)
1425{
1426	struct sighand_struct *sighand;
1427
1428	rcu_read_lock();
1429	sighand = rcu_dereference(task->sighand);
1430	if (sighand)
1431		lockdep_assert_held(&sighand->siglock);
1432	else
1433		WARN_ON_ONCE(1);
1434	rcu_read_unlock();
1435}
1436#endif
1437
1438/*
1439 * send signal info to all the members of a group
1440 */
1441int group_send_sig_info(int sig, struct kernel_siginfo *info,
1442			struct task_struct *p, enum pid_type type)
1443{
1444	int ret;
1445
1446	rcu_read_lock();
1447	ret = check_kill_permission(sig, info, p);
1448	rcu_read_unlock();
1449
1450	if (!ret && sig)
1451		ret = do_send_sig_info(sig, info, p, type);
1452
1453	return ret;
1454}
1455
1456/*
1457 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1458 * control characters do (^C, ^Z etc)
1459 * - the caller must hold at least a readlock on tasklist_lock
1460 */
1461int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1462{
1463	struct task_struct *p = NULL;
1464	int ret = -ESRCH;
1465
1466	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1467		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1468		/*
1469		 * If group_send_sig_info() succeeds at least once ret
1470		 * becomes 0 and after that the code below has no effect.
1471		 * Otherwise we return the last err or -ESRCH if this
1472		 * process group is empty.
1473		 */
1474		if (ret)
1475			ret = err;
1476	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1477
1478	return ret;
1479}
1480
1481int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1482{
1483	int error = -ESRCH;
1484	struct task_struct *p;
1485
1486	for (;;) {
1487		rcu_read_lock();
1488		p = pid_task(pid, PIDTYPE_PID);
1489		if (p)
1490			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1491		rcu_read_unlock();
1492		if (likely(!p || error != -ESRCH))
1493			return error;
1494
1495		/*
1496		 * The task was unhashed in between, try again.  If it
1497		 * is dead, pid_task() will return NULL; if we race with
1498		 * de_thread() it will find the new leader.
1499		 */
1500	}
1501}
1502
1503static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1504{
1505	int error;
1506	rcu_read_lock();
1507	error = kill_pid_info(sig, info, find_vpid(pid));
1508	rcu_read_unlock();
1509	return error;
1510}
1511
1512static inline bool kill_as_cred_perm(const struct cred *cred,
1513				     struct task_struct *target)
1514{
1515	const struct cred *pcred = __task_cred(target);
1516
1517	return uid_eq(cred->euid, pcred->suid) ||
1518	       uid_eq(cred->euid, pcred->uid) ||
1519	       uid_eq(cred->uid, pcred->suid) ||
1520	       uid_eq(cred->uid, pcred->uid);
1521}
1522
1523/*
1524 * The usb asyncio usage of siginfo is wrong.  The glibc support
1525 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1526 * AKA after the generic fields:
1527 *	kernel_pid_t	si_pid;
1528 *	kernel_uid32_t	si_uid;
1529 *	sigval_t	si_value;
1530 *
1531 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1532 * after the generic fields is:
1533 *	void __user 	*si_addr;
1534 *
1535 * This is a practical problem when there is a 64bit big endian kernel
1536 * and a 32bit userspace, as the 32bit address will be encoded in the
1537 * low 32bits of the pointer.  Those low 32bits will be stored at a
1538 * higher address than a 32bit pointer expects, so userspace will not
1539 * see the address it was expecting for its completions.
1540 *
1541 * There is nothing in the encoding that can allow
1542 * copy_siginfo_to_user32 to detect this confusion of formats, so
1543 * handle this by requiring the caller of kill_pid_usb_asyncio to
1544 * notice when this situation takes place and to store the 32bit
1545 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1546 * parameter.
1547 */
1548int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1549			 struct pid *pid, const struct cred *cred)
1550{
1551	struct kernel_siginfo info;
1552	struct task_struct *p;
1553	unsigned long flags;
1554	int ret = -EINVAL;
1555
1556	if (!valid_signal(sig))
1557		return ret;
1558
1559	clear_siginfo(&info);
1560	info.si_signo = sig;
1561	info.si_errno = errno;
1562	info.si_code = SI_ASYNCIO;
1563	*((sigval_t *)&info.si_pid) = addr;
1564
1565	rcu_read_lock();
1566	p = pid_task(pid, PIDTYPE_PID);
1567	if (!p) {
1568		ret = -ESRCH;
1569		goto out_unlock;
1570	}
1571	if (!kill_as_cred_perm(cred, p)) {
1572		ret = -EPERM;
1573		goto out_unlock;
1574	}
1575	ret = security_task_kill(p, &info, sig, cred);
1576	if (ret)
1577		goto out_unlock;
1578
1579	if (sig) {
1580		if (lock_task_sighand(p, &flags)) {
1581			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1582			unlock_task_sighand(p, &flags);
1583		} else
1584			ret = -ESRCH;
1585	}
1586out_unlock:
1587	rcu_read_unlock();
1588	return ret;
1589}
1590EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
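
/*
 * Sketch of the caller-side fixup described above; a hypothetical
 * compat-aware caller, not a verbatim copy of the USB code:
 *
 *	sigval_t addr;
 *
 *	if (in_compat_syscall())
 *		addr.sival_int = (int)(long)uptr32;	// low 32 bits only
 *	else
 *		addr.sival_ptr = uptr;
 *	kill_pid_usb_asyncio(sig, errno, addr, pid, cred);
 */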
1591
1592/*
1593 * kill_something_info() interprets pid in interesting ways just like kill(2).
1594 *
1595 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1596 * is probably wrong.  Should make it like BSD or SYSV.
1597 */
1598
1599static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1600{
1601	int ret;
1602
1603	if (pid > 0)
1604		return kill_proc_info(sig, info, pid);
1605
1606	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1607	if (pid == INT_MIN)
1608		return -ESRCH;
1609
1610	read_lock(&tasklist_lock);
1611	if (pid != -1) {
1612		ret = __kill_pgrp_info(sig, info,
1613				pid ? find_vpid(-pid) : task_pgrp(current));
1614	} else {
1615		int retval = 0, count = 0;
1616		struct task_struct * p;
1617
1618		for_each_process(p) {
1619			if (task_pid_vnr(p) > 1 &&
1620					!same_thread_group(p, current)) {
1621				int err = group_send_sig_info(sig, info, p,
1622							      PIDTYPE_MAX);
1623				++count;
1624				if (err != -EPERM)
1625					retval = err;
1626			}
1627		}
1628		ret = count ? retval : -ESRCH;
1629	}
1630	read_unlock(&tasklist_lock);
1631
1632	return ret;
1633}
1634
1635/*
1636 * These are for backward compatibility with the rest of the kernel source.
1637 */
1638
1639int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1640{
1641	/*
1642	 * Make sure legacy kernel users don't send in bad values
1643	 * (normal paths check this in check_kill_permission).
1644	 */
1645	if (!valid_signal(sig))
1646		return -EINVAL;
1647
1648	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1649}
1650EXPORT_SYMBOL(send_sig_info);
1651
1652#define __si_special(priv) \
1653	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1654
1655int
1656send_sig(int sig, struct task_struct *p, int priv)
1657{
1658	return send_sig_info(sig, __si_special(priv), p);
1659}
1660EXPORT_SYMBOL(send_sig);
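
/*
 * Typical driver usage, a minimal sketch: deliver SIGIO to a task the
 * driver holds a reference on.  A non-zero priv selects SEND_SIG_PRIV,
 * marking the signal as kernel-generated:
 *
 *	send_sig(SIGIO, task, 1);
 */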
1661
1662void force_sig(int sig)
1663{
1664	struct kernel_siginfo info;
1665
1666	clear_siginfo(&info);
1667	info.si_signo = sig;
1668	info.si_errno = 0;
1669	info.si_code = SI_KERNEL;
1670	info.si_pid = 0;
1671	info.si_uid = 0;
1672	force_sig_info(&info);
1673}
1674EXPORT_SYMBOL(force_sig);
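
/*
 * Typical use, a sketch of the arch fault-handler pattern: force_sig()
 * is for signals the current task must not be able to ignore or block:
 *
 *	if (user_mode(regs))
 *		force_sig(SIGSEGV);	// unblocked; SIG_IGN reset to SIG_DFL
 */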
1675
1676void force_fatal_sig(int sig)
1677{
1678	struct kernel_siginfo info;
1679
1680	clear_siginfo(&info);
1681	info.si_signo = sig;
1682	info.si_errno = 0;
1683	info.si_code = SI_KERNEL;
1684	info.si_pid = 0;
1685	info.si_uid = 0;
1686	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1687}
1688
1689void force_exit_sig(int sig)
1690{
1691	struct kernel_siginfo info;
1692
1693	clear_siginfo(&info);
1694	info.si_signo = sig;
1695	info.si_errno = 0;
1696	info.si_code = SI_KERNEL;
1697	info.si_pid = 0;
1698	info.si_uid = 0;
1699	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1700}
1701
1702/*
1703 * When things go south during signal handling, we
1704 * will force a SIGSEGV. And if the signal that caused
1705 * the problem was already a SIGSEGV, we'll want to
1706 * make sure we don't even try to deliver the signal..
1707 * make sure we don't even try to deliver the signal.
1708void force_sigsegv(int sig)
1709{
1710	if (sig == SIGSEGV)
1711		force_fatal_sig(SIGSEGV);
1712	else
1713		force_sig(SIGSEGV);
1714}
1715
1716int force_sig_fault_to_task(int sig, int code, void __user *addr,
1717			    struct task_struct *t)
1718{
1719	struct kernel_siginfo info;
1720
1721	clear_siginfo(&info);
1722	info.si_signo = sig;
1723	info.si_errno = 0;
1724	info.si_code  = code;
1725	info.si_addr  = addr;
1726	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1727}
1728
1729int force_sig_fault(int sig, int code, void __user *addr)
1730{
1731	return force_sig_fault_to_task(sig, code, addr, current);
1732}
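
/*
 * Sketch of the common architecture pattern (exact names vary per
 * arch): after failing to resolve a user page fault:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_addr);
 */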
1733
1734int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1735{
1736	struct kernel_siginfo info;
1737
1738	clear_siginfo(&info);
1739	info.si_signo = sig;
1740	info.si_errno = 0;
1741	info.si_code  = code;
1742	info.si_addr  = addr;
1743	return send_sig_info(info.si_signo, &info, t);
1744}
1745
1746int force_sig_mceerr(int code, void __user *addr, short lsb)
1747{
1748	struct kernel_siginfo info;
1749
1750	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1751	clear_siginfo(&info);
1752	info.si_signo = SIGBUS;
1753	info.si_errno = 0;
1754	info.si_code = code;
1755	info.si_addr = addr;
1756	info.si_addr_lsb = lsb;
1757	return force_sig_info(&info);
1758}
1759
1760int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1761{
1762	struct kernel_siginfo info;
1763
1764	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1765	clear_siginfo(&info);
1766	info.si_signo = SIGBUS;
1767	info.si_errno = 0;
1768	info.si_code = code;
1769	info.si_addr = addr;
1770	info.si_addr_lsb = lsb;
1771	return send_sig_info(info.si_signo, &info, t);
1772}
1773EXPORT_SYMBOL(send_sig_mceerr);
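
/*
 * A minimal sketch of the memory-failure usage: @lsb is the least
 * significant bit of the corrupted range, e.g. PAGE_SHIFT for a whole
 * poisoned page:
 *
 *	send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr, PAGE_SHIFT, t);
 */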
1774
1775int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1776{
1777	struct kernel_siginfo info;
1778
1779	clear_siginfo(&info);
1780	info.si_signo = SIGSEGV;
1781	info.si_errno = 0;
1782	info.si_code  = SEGV_BNDERR;
1783	info.si_addr  = addr;
1784	info.si_lower = lower;
1785	info.si_upper = upper;
1786	return force_sig_info(&info);
1787}
1788
1789#ifdef SEGV_PKUERR
1790int force_sig_pkuerr(void __user *addr, u32 pkey)
1791{
1792	struct kernel_siginfo info;
1793
1794	clear_siginfo(&info);
1795	info.si_signo = SIGSEGV;
1796	info.si_errno = 0;
1797	info.si_code  = SEGV_PKUERR;
1798	info.si_addr  = addr;
1799	info.si_pkey  = pkey;
1800	return force_sig_info(&info);
1801}
1802#endif
1803
1804int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1805{
1806	struct kernel_siginfo info;
1807
1808	clear_siginfo(&info);
1809	info.si_signo     = SIGTRAP;
1810	info.si_errno     = 0;
1811	info.si_code      = TRAP_PERF;
1812	info.si_addr      = addr;
1813	info.si_perf_data = sig_data;
1814	info.si_perf_type = type;
1815
1816	/*
1817	 * Signals generated by perf events should not terminate the whole
1818	 * process if SIGTRAP is blocked; however, delivering the signal
1819	 * asynchronously is better than not delivering at all. But tell user
1820	 * space if the signal was asynchronous, so it can clearly be
1821	 * distinguished from normal synchronous ones.
1822	 */
1823	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1824				     TRAP_PERF_FLAG_ASYNC :
1825				     0;
1826
1827	return send_sig_info(info.si_signo, &info, current);
1828}
1829
1830/**
1831 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1832 * @syscall: syscall number to send to userland
1833 * @reason: filter-supplied reason code to send to userland (via si_errno)
1834 * @force_coredump: true to trigger a coredump
1835 *
1836 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1837 */
1838int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1839{
1840	struct kernel_siginfo info;
1841
1842	clear_siginfo(&info);
1843	info.si_signo = SIGSYS;
1844	info.si_code = SYS_SECCOMP;
1845	info.si_call_addr = (void __user *)KSTK_EIP(current);
1846	info.si_errno = reason;
1847	info.si_arch = syscall_get_arch(current);
1848	info.si_syscall = syscall;
1849	return force_sig_info_to_task(&info, current,
1850		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1851}
1852
1853/* For the crazy architectures that include trap information in
1854 * the errno field, instead of an actual errno value.
1855 */
1856int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1857{
1858	struct kernel_siginfo info;
1859
1860	clear_siginfo(&info);
1861	info.si_signo = SIGTRAP;
1862	info.si_errno = errno;
1863	info.si_code  = TRAP_HWBKPT;
1864	info.si_addr  = addr;
1865	return force_sig_info(&info);
1866}
1867
1868/* For the rare architectures that include trap information using
1869 * si_trapno.
1870 */
1871int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1872{
1873	struct kernel_siginfo info;
1874
1875	clear_siginfo(&info);
1876	info.si_signo = sig;
1877	info.si_errno = 0;
1878	info.si_code  = code;
1879	info.si_addr  = addr;
1880	info.si_trapno = trapno;
1881	return force_sig_info(&info);
1882}
1883
1884/* For the rare architectures that include trap information using
1885 * si_trapno.
1886 */
1887int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1888			  struct task_struct *t)
1889{
1890	struct kernel_siginfo info;
1891
1892	clear_siginfo(&info);
1893	info.si_signo = sig;
1894	info.si_errno = 0;
1895	info.si_code  = code;
1896	info.si_addr  = addr;
1897	info.si_trapno = trapno;
1898	return send_sig_info(info.si_signo, &info, t);
1899}
1900
1901int kill_pgrp(struct pid *pid, int sig, int priv)
1902{
1903	int ret;
1904
1905	read_lock(&tasklist_lock);
1906	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1907	read_unlock(&tasklist_lock);
1908
1909	return ret;
1910}
1911EXPORT_SYMBOL(kill_pgrp);
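
/*
 * A sketch of the classic consumer: the tty layer delivering ^C to the
 * foreground process group (the real code lives in drivers/tty):
 *
 *	struct pid *pgrp = tty_get_pgrp(tty);
 *
 *	if (pgrp) {
 *		kill_pgrp(pgrp, SIGINT, 1);	// priv: kernel-generated
 *		put_pid(pgrp);
 *	}
 */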
1912
1913int kill_pid(struct pid *pid, int sig, int priv)
1914{
1915	return kill_pid_info(sig, __si_special(priv), pid);
1916}
1917EXPORT_SYMBOL(kill_pid);
1918
1919/*
1920 * These functions support sending signals using preallocated sigqueue
1921 * structures.  This is needed "because realtime applications cannot
1922 * afford to lose notifications of asynchronous events, like timer
1923 * expirations or I/O completions".  In the case of POSIX Timers
1924 * we allocate the sigqueue structure from timer_create().  If this
1925 * allocation fails we are able to report the failure to the application
1926 * with an EAGAIN error.
1927 */
1928struct sigqueue *sigqueue_alloc(void)
1929{
1930	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1931}
1932
1933void sigqueue_free(struct sigqueue *q)
1934{
1935	unsigned long flags;
1936	spinlock_t *lock = &current->sighand->siglock;
1937
1938	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1939	/*
1940	 * We must hold ->siglock while testing q->list
1941	 * to serialize with collect_signal() or with
1942	 * __exit_signal()->flush_sigqueue().
1943	 */
1944	spin_lock_irqsave(lock, flags);
1945	q->flags &= ~SIGQUEUE_PREALLOC;
1946	/*
1947	 * If it is queued it will be freed when dequeued,
1948	 * like the "regular" sigqueue.
1949	 */
1950	if (!list_empty(&q->list))
1951		q = NULL;
1952	spin_unlock_irqrestore(lock, flags);
1953
1954	if (q)
1955		__sigqueue_free(q);
1956}
1957
1958int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1959{
1960	int sig = q->info.si_signo;
1961	struct sigpending *pending;
1962	struct task_struct *t;
1963	unsigned long flags;
1964	int ret, result;
1965
1966	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1967
1968	ret = -1;
1969	rcu_read_lock();
1970
1971	/*
1972	 * This function is used by POSIX timers to deliver a timer signal.
1973	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1974	 * set), the signal must be delivered to the specific thread (queues
1975	 * into t->pending).
1976	 *
1977	 * Where type is not PIDTYPE_PID, signals must be delivered to the
1978	 * process. In this case, prefer to deliver to current if it is in
1979	 * the same thread group as the target process, which avoids
1980	 * unnecessarily waking up a potentially idle task.
1981	 */
1982	t = pid_task(pid, type);
1983	if (!t)
1984		goto ret;
1985	if (type != PIDTYPE_PID && same_thread_group(t, current))
1986		t = current;
1987	if (!likely(lock_task_sighand(t, &flags)))
1988		goto ret;
1989
1990	ret = 1; /* the signal is ignored */
1991	result = TRACE_SIGNAL_IGNORED;
1992	if (!prepare_signal(sig, t, false))
1993		goto out;
1994
1995	ret = 0;
1996	if (unlikely(!list_empty(&q->list))) {
1997		/*
1998		 * If an SI_TIMER entry is already queued, just increment
1999		 * the overrun count.
2000		 */
2001		BUG_ON(q->info.si_code != SI_TIMER);
2002		q->info.si_overrun++;
2003		result = TRACE_SIGNAL_ALREADY_PENDING;
2004		goto out;
2005	}
2006	q->info.si_overrun = 0;
2007
2008	signalfd_notify(t, sig);
2009	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2010	list_add_tail(&q->list, &pending->list);
2011	sigaddset(&pending->signal, sig);
2012	complete_signal(sig, t, type);
2013	result = TRACE_SIGNAL_DELIVERED;
2014out:
2015	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2016	unlock_task_sighand(t, &flags);
2017ret:
2018	rcu_read_unlock();
2019	return ret;
2020}
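
/*
 * Lifecycle sketch for the preallocated path, roughly what the POSIX
 * timer code does (simplified; no locking or error paths shown):
 *
 *	q = sigqueue_alloc();			// at timer_create() time
 *	if (!q)
 *		return -EAGAIN;			// reportable to userspace
 *	...
 *	send_sigqueue(q, pid, type);		// at each expiry, no alloc
 *	...
 *	sigqueue_free(q);			// at timer deletion
 */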
2021
2022static void do_notify_pidfd(struct task_struct *task)
2023{
2024	struct pid *pid;
2025
2026	WARN_ON(task->exit_state == 0);
2027	pid = task_pid(task);
2028	wake_up_all(&pid->wait_pidfd);
2029}
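
/*
 * What this wakeup enables from userspace, a hedged sketch: a pidfd
 * becomes readable once the task has exited, so exit can be consumed
 * with poll(2) instead of SIGCHLD:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns once the task has exited
 */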
2030
2031/*
2032 * Let a parent know about the death of a child.
2033 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2034 *
2035 * Returns true if our parent ignored us and so we've switched to
2036 * self-reaping.
2037 */
2038bool do_notify_parent(struct task_struct *tsk, int sig)
2039{
2040	struct kernel_siginfo info;
2041	unsigned long flags;
2042	struct sighand_struct *psig;
2043	bool autoreap = false;
2044	u64 utime, stime;
2045
2046	WARN_ON_ONCE(sig == -1);
2047
2048	/* do_notify_parent_cldstop should have been called instead.  */
2049	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2050
2051	WARN_ON_ONCE(!tsk->ptrace &&
2052	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2053
2054	/* Wake up all pidfd waiters */
2055	do_notify_pidfd(tsk);
2056
2057	if (sig != SIGCHLD) {
2058		/*
2059		 * This is only possible if parent == real_parent.
2060		 * Check if it has changed security domain.
2061		 */
2062		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2063			sig = SIGCHLD;
2064	}
2065
2066	clear_siginfo(&info);
2067	info.si_signo = sig;
2068	info.si_errno = 0;
2069	/*
2070	 * We are under tasklist_lock here so our parent is tied to
2071	 * us and cannot change.
2072	 *
2073	 * task_active_pid_ns will always return the same pid namespace
2074	 * until a task passes through release_task.
2075	 *
2076	 * write_lock() currently calls preempt_disable() which is the
2077	 * same as rcu_read_lock(), but according to Oleg it is not
2078	 * correct to rely on this.
2079	 */
2080	rcu_read_lock();
2081	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2082	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2083				       task_uid(tsk));
2084	rcu_read_unlock();
2085
2086	task_cputime(tsk, &utime, &stime);
2087	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2088	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2089
2090	info.si_status = tsk->exit_code & 0x7f;
2091	if (tsk->exit_code & 0x80)
2092		info.si_code = CLD_DUMPED;
2093	else if (tsk->exit_code & 0x7f)
2094		info.si_code = CLD_KILLED;
2095	else {
2096		info.si_code = CLD_EXITED;
2097		info.si_status = tsk->exit_code >> 8;
2098	}
2099
2100	psig = tsk->parent->sighand;
2101	spin_lock_irqsave(&psig->siglock, flags);
2102	if (!tsk->ptrace && sig == SIGCHLD &&
2103	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2104	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2105		/*
2106		 * We are exiting and our parent doesn't care.  POSIX.1
2107		 * defines special semantics for setting SIGCHLD to SIG_IGN
2108		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2109		 * automatically and not left for our parent's wait4 call.
2110		 * Rather than having the parent do it as a magic kind of
2111		 * signal handler, we just set this to tell do_exit that we
2112		 * can be cleaned up without becoming a zombie.  Note that
2113		 * we still call __wake_up_parent in this case, because a
2114		 * blocked sys_wait4 might now return -ECHILD.
2115		 *
2116		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2117		 * is implementation-defined: we do (if you don't want
2118		 * it, just use SIG_IGN instead).
2119		 */
2120		autoreap = true;
2121		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2122			sig = 0;
2123	}
2124	/*
2125	 * Send with __send_signal as si_pid and si_uid are in the
2126	 * parent's namespaces.
2127	 */
2128	if (valid_signal(sig) && sig)
2129		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2130	__wake_up_parent(tsk, tsk->parent);
2131	spin_unlock_irqrestore(&psig->siglock, flags);
2132
2133	return autoreap;
2134}
2135
2136/**
2137 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2138 * @tsk: task reporting the state change
2139 * @for_ptracer: the notification is for ptracer
2140 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2141 *
2142 * Notify @tsk's parent that the stopped/continued state has changed.  If
2143 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2144 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2145 *
2146 * CONTEXT:
2147 * Must be called with tasklist_lock at least read locked.
2148 */
2149static void do_notify_parent_cldstop(struct task_struct *tsk,
2150				     bool for_ptracer, int why)
2151{
2152	struct kernel_siginfo info;
2153	unsigned long flags;
2154	struct task_struct *parent;
2155	struct sighand_struct *sighand;
2156	u64 utime, stime;
2157
2158	if (for_ptracer) {
2159		parent = tsk->parent;
2160	} else {
2161		tsk = tsk->group_leader;
2162		parent = tsk->real_parent;
2163	}
2164
2165	clear_siginfo(&info);
2166	info.si_signo = SIGCHLD;
2167	info.si_errno = 0;
2168	/*
2169	 * see comment in do_notify_parent() about the following 4 lines
2170	 */
2171	rcu_read_lock();
2172	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2173	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2174	rcu_read_unlock();
2175
2176	task_cputime(tsk, &utime, &stime);
2177	info.si_utime = nsec_to_clock_t(utime);
2178	info.si_stime = nsec_to_clock_t(stime);
2179
2180	info.si_code = why;
2181	switch (why) {
2182	case CLD_CONTINUED:
2183		info.si_status = SIGCONT;
2184		break;
2185	case CLD_STOPPED:
2186		info.si_status = tsk->signal->group_exit_code & 0x7f;
2187		break;
2188	case CLD_TRAPPED:
2189		info.si_status = tsk->exit_code & 0x7f;
2190		break;
2191	default:
2192		BUG();
2193	}
2194
2195	sighand = parent->sighand;
2196	spin_lock_irqsave(&sighand->siglock, flags);
2197	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2198	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2199		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2200	/*
2201	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2202	 */
2203	__wake_up_parent(tsk, parent);
2204	spin_unlock_irqrestore(&sighand->siglock, flags);
2205}
2206
2207/*
2208 * This must be called with current->sighand->siglock held.
2209 *
2210 * This should be the path for all ptrace stops.
2211 * We always set current->last_siginfo while stopped here.
2212 * That makes it a way to test a stopped process for
2213 * being ptrace-stopped vs being job-control-stopped.
2214 *
2215 * Returns the signal the ptracer requested the code resume
2216 * with.  If the task did not stop because the tracer is gone,
2217 * the original exit_code is returned unchanged.
2218 */
2219static int ptrace_stop(int exit_code, int why, unsigned long message,
2220		       kernel_siginfo_t *info)
2221	__releases(&current->sighand->siglock)
2222	__acquires(&current->sighand->siglock)
2223{
2224	bool gstop_done = false;
2225
2226	if (arch_ptrace_stop_needed()) {
2227		/*
2228		 * The arch code has something special to do before a
2229		 * ptrace stop.  This is allowed to block, e.g. for faults
2230		 * on user stack pages.  We can't keep the siglock while
2231		 * calling arch_ptrace_stop, so we must release it now.
2232		 * To preserve proper semantics, we must do this before
2233		 * any signal bookkeeping like checking group_stop_count.
2234		 */
2235		spin_unlock_irq(&current->sighand->siglock);
2236		arch_ptrace_stop();
2237		spin_lock_irq(&current->sighand->siglock);
2238	}
2239
2240	/*
2241	 * After this point ptrace_signal_wake_up or signal_wake_up
2242	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2243	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2244	 * signals here to prevent ptrace_stop sleeping in schedule.
2245	 */
2246	if (!current->ptrace || __fatal_signal_pending(current))
2247		return exit_code;
2248
2249	set_special_state(TASK_TRACED);
2250	current->jobctl |= JOBCTL_TRACED;
2251
2252	/*
2253	 * We're committing to trapping.  TRACED should be visible before
2254	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2255	 * Also, transition to TRACED and updates to ->jobctl should be
2256	 * atomic with respect to siglock and should be done after the arch
2257	 * hook as siglock is released and regrabbed across it.
2258	 *
2259	 *     TRACER				    TRACEE
2260	 *
2261	 *     ptrace_attach()
2262	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2263	 *     do_wait()
2264	 *       set_current_state()                smp_wmb();
2265	 *       ptrace_do_wait()
2266	 *         wait_task_stopped()
2267	 *           task_stopped_code()
2268	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2269	 */
2270	smp_wmb();
2271
2272	current->ptrace_message = message;
2273	current->last_siginfo = info;
2274	current->exit_code = exit_code;
2275
2276	/*
2277	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2278	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2279	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2280	 * could be clear now.  We act as if SIGCONT is received after
2281	 * TASK_TRACED is entered - ignore it.
2282	 */
2283	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2284		gstop_done = task_participate_group_stop(current);
2285
2286	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2287	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2288	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2289		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2290
2291	/* entering a trap, clear TRAPPING */
2292	task_clear_jobctl_trapping(current);
2293
2294	spin_unlock_irq(&current->sighand->siglock);
2295	read_lock(&tasklist_lock);
2296	/*
2297	 * Notify parents of the stop.
2298	 *
2299	 * While ptraced, there are two parents - the ptracer and
2300	 * the real_parent of the group_leader.  The ptracer should
2301	 * know about every stop while the real parent is only
2302	 * interested in the completion of group stop.  The states
2303	 * for the two don't interact with each other.  Notify
2304	 * separately unless they're gonna be duplicates.
2305	 */
2306	if (current->ptrace)
2307		do_notify_parent_cldstop(current, true, why);
2308	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2309		do_notify_parent_cldstop(current, false, why);
2310
2311	/*
2312	 * The previous do_notify_parent_cldstop() invocation woke the ptracer.
2313	 * On a PREEMPTION kernel this can result in a preemption requirement
2314	 * which will be fulfilled after read_unlock() and the ptracer will be
2315	 * put on the CPU.
2316	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2317	 * this task to wait in schedule(). If this task gets preempted then it
2318	 * remains enqueued on the runqueue. The ptracer will observe this and
2319	 * then sleep for a delay of one HZ tick. In the meantime this task
2320	 * gets scheduled, enters schedule() and will wait for the ptracer.
2321	 *
2322	 * This preemption point is not bad from a correctness point of
2323	 * view but extends the runtime by one HZ tick time due to the
2324	 * ptracer's sleep.  The preempt-disable section ensures that there
2325	 * will be no preemption between unlock and schedule(), thus
2326	 * improving performance since the ptracer will observe that
2327	 * the tracee is scheduled out once it gets on the CPU.
2328	 *
2329	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2330	 * Therefore the task can be preempted after do_notify_parent_cldstop()
2331	 * before unlocking tasklist_lock so there is no benefit in doing this.
2332	 *
2333	 * In fact disabling preemption is harmful on PREEMPT_RT because
2334	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2335	 * with preemption disabled due to the 'sleeping' spinlock
2336	 * substitution of RT.
2337	 */
2338	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2339		preempt_disable();
2340	read_unlock(&tasklist_lock);
2341	cgroup_enter_frozen();
2342	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2343		preempt_enable_no_resched();
2344	schedule();
2345	cgroup_leave_frozen(true);
2346
2347	/*
2348	 * We are back.  Now reacquire the siglock before touching
2349	 * last_siginfo, so that we are sure to have synchronized with
2350	 * any signal-sending on another CPU that wants to examine it.
2351	 */
2352	spin_lock_irq(&current->sighand->siglock);
2353	exit_code = current->exit_code;
2354	current->last_siginfo = NULL;
2355	current->ptrace_message = 0;
2356	current->exit_code = 0;
2357
2358	/* LISTENING can be set only during STOP traps, clear it */
2359	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2360
2361	/*
2362	 * Queued signals ignored us while we were stopped for tracing.
2363	 * So check for any that we should take before resuming user mode.
2364	 * This sets TIF_SIGPENDING, but never clears it.
2365	 */
2366	recalc_sigpending_tsk(current);
2367	return exit_code;
2368}
2369
2370static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2371{
2372	kernel_siginfo_t info;
2373
2374	clear_siginfo(&info);
2375	info.si_signo = signr;
2376	info.si_code = exit_code;
2377	info.si_pid = task_pid_vnr(current);
2378	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2379
2380	/* Let the debugger run.  */
2381	return ptrace_stop(exit_code, why, message, &info);
2382}
2383
2384int ptrace_notify(int exit_code, unsigned long message)
2385{
2386	int signr;
2387
2388	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2389	if (unlikely(task_work_pending(current)))
2390		task_work_run();
2391
2392	spin_lock_irq(&current->sighand->siglock);
2393	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2394	spin_unlock_irq(&current->sighand->siglock);
2395	return signr;
2396}
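
/*
 * A sketch of how event notifications use this, cf. ptrace_event() in
 * include/linux/ptrace.h: the event is packed into bits 8-15 of the
 * SIGTRAP exit code and @message carries the event detail:
 *
 *	ptrace_notify(SIGTRAP | (PTRACE_EVENT_EXEC << 8), old_pid);
 */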
2397
2398/**
2399 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2400 * @signr: signr causing group stop if initiating
2401 *
2402 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2403 * and participate in it.  If already set, participate in the existing
2404 * group stop.  If participated in a group stop (and thus slept), %true is
2405 * returned with siglock released.
2406 *
2407 * If ptraced, this function doesn't handle stop itself.  Instead,
2408 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2409 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2410 * places afterwards.
2411 * place afterwards.
2412 * CONTEXT:
2413 * Must be called with @current->sighand->siglock held, which is released
2414 * on %true return.
2415 *
2416 * RETURNS:
2417 * %false if group stop is already cancelled or ptrace trap is scheduled.
2418 * %true if participated in group stop.
2419 */
2420static bool do_signal_stop(int signr)
2421	__releases(&current->sighand->siglock)
2422{
2423	struct signal_struct *sig = current->signal;
2424
2425	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2426		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2427		struct task_struct *t;
2428
2429		/* signr will be recorded in task->jobctl for retries */
2430		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2431
2432		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2433		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2434		    unlikely(sig->group_exec_task))
2435			return false;
2436		/*
2437		 * There is no group stop already in progress.  We must
2438		 * initiate one now.
2439		 *
2440		 * While ptraced, a task may be resumed while group stop is
2441		 * still in effect and then receive a stop signal and
2442		 * initiate another group stop.  This deviates from the
2443		 * usual behavior as two consecutive stop signals can't
2444		 * cause two group stops when !ptraced.  That is why we
2445		 * also check !task_is_stopped(t) below.
2446		 *
2447		 * The condition can be distinguished by testing whether
2448		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2449		 * group_exit_code in such case.
2450		 *
2451		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2452		 * an intervening stop signal is required to cause two
2453		 * continued events regardless of ptrace.
2454		 */
2455		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2456			sig->group_exit_code = signr;
2457
2458		sig->group_stop_count = 0;
2459		if (task_set_jobctl_pending(current, signr | gstop))
2460			sig->group_stop_count++;
2461
2462		for_other_threads(current, t) {
2463			/*
2464			 * Setting state to TASK_STOPPED for a group
2465			 * stop is always done with the siglock held,
2466			 * so this check has no races.
2467			 */
2468			if (!task_is_stopped(t) &&
2469			    task_set_jobctl_pending(t, signr | gstop)) {
2470				sig->group_stop_count++;
2471				if (likely(!(t->ptrace & PT_SEIZED)))
2472					signal_wake_up(t, 0);
2473				else
2474					ptrace_trap_notify(t);
2475			}
2476		}
2477	}
2478
2479	if (likely(!current->ptrace)) {
2480		int notify = 0;
2481
2482		/*
2483		 * If there are no other threads in the group, or if there
2484		 * is a group stop in progress and we are the last to stop,
2485		 * report to the parent.
2486		 */
2487		if (task_participate_group_stop(current))
2488			notify = CLD_STOPPED;
2489
2490		current->jobctl |= JOBCTL_STOPPED;
2491		set_special_state(TASK_STOPPED);
2492		spin_unlock_irq(&current->sighand->siglock);
2493
2494		/*
2495		 * Notify the parent of the group stop completion.  Because
2496		 * we're not holding either the siglock or tasklist_lock
2497	 * here, the ptracer may attach in between; however, this is for
2498		 * group stop and should always be delivered to the real
2499		 * parent of the group leader.  The new ptracer will get
2500		 * its notification when this task transitions into
2501		 * TASK_TRACED.
2502		 */
2503		if (notify) {
2504			read_lock(&tasklist_lock);
2505			do_notify_parent_cldstop(current, false, notify);
2506			read_unlock(&tasklist_lock);
2507		}
2508
2509		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2510		cgroup_enter_frozen();
2511		schedule();
2512		return true;
2513	} else {
2514		/*
2515		 * While ptraced, group stop is handled by STOP trap.
2516		 * Schedule it and let the caller deal with it.
2517		 */
2518		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2519		return false;
2520	}
2521}
2522
2523/**
2524 * do_jobctl_trap - take care of ptrace jobctl traps
2525 *
2526 * When PT_SEIZED, it's used for both group stop and explicit
2527 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2528 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2529 * the stop signal; otherwise, %SIGTRAP.
2530 *
2531 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2532 * number as exit_code and no siginfo.
2533 *
2534 * CONTEXT:
2535 * Must be called with @current->sighand->siglock held, which may be
2536 * released and re-acquired before returning with intervening sleep.
2537 */
2538static void do_jobctl_trap(void)
2539{
2540	struct signal_struct *signal = current->signal;
2541	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2542
2543	if (current->ptrace & PT_SEIZED) {
2544		if (!signal->group_stop_count &&
2545		    !(signal->flags & SIGNAL_STOP_STOPPED))
2546			signr = SIGTRAP;
2547		WARN_ON_ONCE(!signr);
2548		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2549				 CLD_STOPPED, 0);
2550	} else {
2551		WARN_ON_ONCE(!signr);
2552		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2553	}
2554}
2555
2556/**
2557 * do_freezer_trap - handle the freezer jobctl trap
2558 *
2559 * Puts the task into the frozen state, unless the task is about to quit;
2560 * in that case JOBCTL_TRAP_FREEZE is dropped.
2561 *
2562 * CONTEXT:
2563 * Must be called with @current->sighand->siglock held,
2564 * which is always released before returning.
2565 */
2566static void do_freezer_trap(void)
2567	__releases(&current->sighand->siglock)
2568{
2569	/*
2570	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2571	 * let's make another loop to give it a chance to be handled.
2572	 * In any case, we'll return back.
2573	 */
2574	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2575	     JOBCTL_TRAP_FREEZE) {
2576		spin_unlock_irq(&current->sighand->siglock);
2577		return;
2578	}
2579
2580	/*
2581	 * Now we're sure that there is no pending fatal signal and no
2582	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2583	 * immediately (if there is a non-fatal signal pending), and
2584	 * put the task into sleep.
2585	 */
2586	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2587	clear_thread_flag(TIF_SIGPENDING);
2588	spin_unlock_irq(&current->sighand->siglock);
2589	cgroup_enter_frozen();
2590	schedule();
2591}
2592
2593static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2594{
2595	/*
2596	 * We do not check sig_kernel_stop(signr) but set this marker
2597	 * unconditionally because we do not know whether debugger will
2598	 * change signr. This flag has no meaning unless we are going
2599	 * to stop after return from ptrace_stop(). In this case it will
2600	 * be checked in do_signal_stop(), we should only stop if it was
2601	 * not cleared by SIGCONT while we were sleeping. See also the
2602	 * comment in dequeue_signal().
2603	 */
2604	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2605	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2606
2607	/* We're back.  Did the debugger cancel the sig?  */
2608	if (signr == 0)
2609		return signr;
2610
2611	/*
2612	 * Update the siginfo structure if the signal has
2613	 * changed.  If the debugger wanted something
2614	 * specific in the siginfo structure then it should
2615	 * have updated *info via PTRACE_SETSIGINFO.
2616	 */
2617	if (signr != info->si_signo) {
2618		clear_siginfo(info);
2619		info->si_signo = signr;
2620		info->si_errno = 0;
2621		info->si_code = SI_USER;
2622		rcu_read_lock();
2623		info->si_pid = task_pid_vnr(current->parent);
2624		info->si_uid = from_kuid_munged(current_user_ns(),
2625						task_uid(current->parent));
2626		rcu_read_unlock();
2627	}
2628
2629	/* If the (new) signal is now blocked, requeue it.  */
2630	if (sigismember(&current->blocked, signr) ||
2631	    fatal_signal_pending(current)) {
2632		send_signal_locked(signr, info, current, type);
2633		signr = 0;
2634	}
2635
2636	return signr;
2637}
2638
2639static void hide_si_addr_tag_bits(struct ksignal *ksig)
2640{
2641	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2642	case SIL_FAULT:
2643	case SIL_FAULT_TRAPNO:
2644	case SIL_FAULT_MCEERR:
2645	case SIL_FAULT_BNDERR:
2646	case SIL_FAULT_PKUERR:
2647	case SIL_FAULT_PERF_EVENT:
2648		ksig->info.si_addr = arch_untagged_si_addr(
2649			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2650		break;
2651	case SIL_KILL:
2652	case SIL_TIMER:
2653	case SIL_POLL:
2654	case SIL_CHLD:
2655	case SIL_RT:
2656	case SIL_SYS:
2657		break;
2658	}
2659}
2660
2661bool get_signal(struct ksignal *ksig)
2662{
2663	struct sighand_struct *sighand = current->sighand;
2664	struct signal_struct *signal = current->signal;
2665	int signr;
2666
2667	clear_notify_signal();
2668	if (unlikely(task_work_pending(current)))
2669		task_work_run();
2670
2671	if (!task_sigpending(current))
2672		return false;
2673
2674	if (unlikely(uprobe_deny_signal()))
2675		return false;
2676
2677	/*
2678	 * Do this once, we can't return to user-mode if freezing() == T.
2679	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2680	 * thus do not need another check after return.
2681	 */
2682	try_to_freeze();
2683
2684relock:
2685	spin_lock_irq(&sighand->siglock);
2686
2687	/*
2688	 * Every stopped thread goes here after wakeup. Check to see if
2689	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2690	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2691	 */
2692	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2693		int why;
2694
2695		if (signal->flags & SIGNAL_CLD_CONTINUED)
2696			why = CLD_CONTINUED;
2697		else
2698			why = CLD_STOPPED;
2699
2700		signal->flags &= ~SIGNAL_CLD_MASK;
2701
2702		spin_unlock_irq(&sighand->siglock);
2703
2704		/*
2705		 * Notify the parent that we're continuing.  This event is
2706		 * always per-process and doesn't make a whole lot of sense
2707		 * for ptracers, who shouldn't consume the state via
2708		 * wait(2) either, but, for backward compatibility, notify
2709		 * the ptracer of the group leader too unless it's gonna be
2710		 * a duplicate.
2711		 */
2712		read_lock(&tasklist_lock);
2713		do_notify_parent_cldstop(current, false, why);
2714
2715		if (ptrace_reparented(current->group_leader))
2716			do_notify_parent_cldstop(current->group_leader,
2717						true, why);
2718		read_unlock(&tasklist_lock);
2719
2720		goto relock;
2721	}
2722
2723	for (;;) {
2724		struct k_sigaction *ka;
2725		enum pid_type type;
2726
2727		/* Has this task already been marked for death? */
2728		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2729		     signal->group_exec_task) {
2730			clear_siginfo(&ksig->info);
2731			ksig->info.si_signo = signr = SIGKILL;
2732			sigdelset(&current->pending.signal, SIGKILL);
2733			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2734				&sighand->action[SIGKILL - 1]);
2735			recalc_sigpending();
2736			goto fatal;
2737		}
2738
2739		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2740		    do_signal_stop(0))
2741			goto relock;
2742
2743		if (unlikely(current->jobctl &
2744			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2745			if (current->jobctl & JOBCTL_TRAP_MASK) {
2746				do_jobctl_trap();
2747				spin_unlock_irq(&sighand->siglock);
2748			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2749				do_freezer_trap();
2750
2751			goto relock;
2752		}
2753
2754		/*
2755		 * If the task is leaving the frozen state, let's update
2756		 * cgroup counters and reset the frozen bit.
2757		 */
2758		if (unlikely(cgroup_task_frozen(current))) {
2759			spin_unlock_irq(&sighand->siglock);
2760			cgroup_leave_frozen(false);
2761			goto relock;
2762		}
2763
2764		/*
2765		 * Signals generated by the execution of an instruction
2766		 * need to be delivered before any other pending signals
2767		 * so that the instruction pointer in the signal stack
2768		 * frame points to the faulting instruction.
2769		 */
2770		type = PIDTYPE_PID;
2771		signr = dequeue_synchronous_signal(&ksig->info);
2772		if (!signr)
2773			signr = dequeue_signal(current, &current->blocked,
2774					       &ksig->info, &type);
2775
2776		if (!signr)
2777			break; /* will return 0 */
2778
2779		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2780		    !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2781			signr = ptrace_signal(signr, &ksig->info, type);
2782			if (!signr)
2783				continue;
2784		}
2785
2786		ka = &sighand->action[signr - 1];
2787
2788		/* Trace actually delivered signals. */
2789		trace_signal_deliver(signr, &ksig->info, ka);
2790
2791		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2792			continue;
2793		if (ka->sa.sa_handler != SIG_DFL) {
2794			/* Run the handler.  */
2795			ksig->ka = *ka;
2796
2797			if (ka->sa.sa_flags & SA_ONESHOT)
2798				ka->sa.sa_handler = SIG_DFL;
2799
2800			break; /* will return non-zero "signr" value */
2801		}
2802
2803		/*
2804		 * Now we are doing the default action for this signal.
2805		 */
2806		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2807			continue;
2808
2809		/*
2810		 * Global init gets no signals it doesn't want.
2811		 * Container-init gets no signals it doesn't want from the
2812		 * same container.
2813		 *
2814		 * Note that if global/container-init sees a sig_kernel_only()
2815		 * signal here, the signal must have been generated internally
2816		 * or must have come from an ancestor namespace. In either
2817		 * case, the signal cannot be dropped.
2818		 */
2819		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2820				!sig_kernel_only(signr))
2821			continue;
2822
2823		if (sig_kernel_stop(signr)) {
2824			/*
2825			 * The default action is to stop all threads in
2826			 * the thread group.  The job control signals
2827			 * do nothing in an orphaned pgrp, but SIGSTOP
2828			 * always works.  Note that siglock needs to be
2829			 * dropped during the call to is_orphaned_pgrp()
2830			 * because of lock ordering with tasklist_lock.
2831			 * This allows an intervening SIGCONT to be posted.
2832			 * We need to check for that and bail out if necessary.
2833			 */
2834			if (signr != SIGSTOP) {
2835				spin_unlock_irq(&sighand->siglock);
2836
2837				/* signals can be posted during this window */
2838
2839				if (is_current_pgrp_orphaned())
2840					goto relock;
2841
2842				spin_lock_irq(&sighand->siglock);
2843			}
2844
2845			if (likely(do_signal_stop(ksig->info.si_signo))) {
2846				/* It released the siglock.  */
2847				goto relock;
2848			}
2849
2850			/*
2851			 * We didn't actually stop, due to a race
2852			 * with SIGCONT or something like that.
2853			 */
2854			continue;
2855		}
2856
2857	fatal:
2858		spin_unlock_irq(&sighand->siglock);
2859		if (unlikely(cgroup_task_frozen(current)))
2860			cgroup_leave_frozen(true);
2861
2862		/*
2863		 * Anything else is fatal, maybe with a core dump.
2864		 */
2865		current->flags |= PF_SIGNALED;
2866
2867		if (sig_kernel_coredump(signr)) {
2868			if (print_fatal_signals)
2869				print_fatal_signal(ksig->info.si_signo);
2870			proc_coredump_connector(current);
2871			/*
2872			 * If it was able to dump core, this kills all
2873			 * other threads in the group and synchronizes with
2874			 * their demise.  If we lost the race with another
2875			 * thread getting here, it set group_exit_code
2876			 * first and our do_group_exit call below will use
2877			 * that value and ignore the one we pass it.
2878			 */
2879			do_coredump(&ksig->info);
2880		}
2881
2882		/*
2883		 * PF_USER_WORKER threads will catch and exit on fatal signals
2884		 * themselves. They have cleanup that must be performed, so
2885		 * we cannot call do_exit() on their behalf.
2886		 */
2887		if (current->flags & PF_USER_WORKER)
2888			goto out;
2889
2890		/*
2891		 * Death signals, no core dump.
2892		 */
2893		do_group_exit(ksig->info.si_signo);
2894		/* NOTREACHED */
2895	}
2896	spin_unlock_irq(&sighand->siglock);
2897out:
2898	ksig->sig = signr;
2899
2900	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2901		hide_si_addr_tag_bits(ksig);
2902
2903	return ksig->sig > 0;
2904}
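
/*
 * Illustrative sketch (not part of this file) of how architecture
 * signal-delivery code typically consumes get_signal(); the exact entry
 * point name, handler setup and syscall-restart handling are all
 * arch-specific and vary:
 *
 *	void arch_do_signal(struct pt_regs *regs)	// hypothetical name
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			// Build the user-mode signal frame and arrange
 *			// for the handler to run (arch-specific).
 *			handle_signal(&ksig, regs);
 *			return;
 *		}
 *		// No handler ran: put the saved sigmask back.
 *		restore_saved_sigmask();
 *	}
 */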
2905
2906/**
2907 * signal_delivered - called after signal delivery to update blocked signals
2908 * @ksig:		kernel signal struct
2909 * @stepping:		nonzero if debugger single-step or block-step in use
2910 *
2911 * This function should be called when a signal has successfully been
2912 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2913 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2914 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
2915 */
2916static void signal_delivered(struct ksignal *ksig, int stepping)
2917{
2918	sigset_t blocked;
2919
2920	/* A signal was successfully delivered, and the
2921	   saved sigmask was stored on the signal frame,
2922	   and will be restored by sigreturn.  So we can
2923	   simply clear the restore sigmask flag.  */
2924	clear_restore_sigmask();
2925
2926	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2927	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2928		sigaddset(&blocked, ksig->sig);
2929	set_current_blocked(&blocked);
2930	if (current->sas_ss_flags & SS_AUTODISARM)
2931		sas_ss_reset(current);
2932	if (stepping)
2933		ptrace_notify(SIGTRAP, 0);
2934}
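
/*
 * Userspace view of the masking rules above (illustrative, assumes
 * glibc): with SA_NODEFER clear, SIGUSR1 itself plus everything in
 * sa_mask is blocked while the handler runs:
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_handler = handler;
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);	// also blocked in handler
 *	sa.sa_flags = 0;			// SA_NODEFER not set
 *	sigaction(SIGUSR1, &sa, NULL);
 */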
2935
2936void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2937{
2938	if (failed)
2939		force_sigsegv(ksig->sig);
2940	else
2941		signal_delivered(ksig, stepping);
2942}
2943
2944/*
2945 * It could be that complete_signal() picked us to notify about the
2946 * group-wide signal. Other threads should be notified now to take
2947 * the shared signals in @which since we will not.
2948 */
2949static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2950{
2951	sigset_t retarget;
2952	struct task_struct *t;
2953
2954	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2955	if (sigisemptyset(&retarget))
2956		return;
2957
2958	for_other_threads(tsk, t) {
2959		if (t->flags & PF_EXITING)
2960			continue;
2961
2962		if (!has_pending_signals(&retarget, &t->blocked))
2963			continue;
2964		/* Remove the signals this thread can handle. */
2965		sigandsets(&retarget, &retarget, &t->blocked);
2966
2967		if (!task_sigpending(t))
2968			signal_wake_up(t, 0);
2969
2970		if (sigisemptyset(&retarget))
2971			break;
2972	}
2973}
2974
2975void exit_signals(struct task_struct *tsk)
2976{
2977	int group_stop = 0;
2978	sigset_t unblocked;
2979
2980	/*
2981	 * @tsk is about to have PF_EXITING set - lock out users which
2982	 * expect stable threadgroup.
2983	 */
2984	cgroup_threadgroup_change_begin(tsk);
2985
2986	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2987		sched_mm_cid_exit_signals(tsk);
2988		tsk->flags |= PF_EXITING;
2989		cgroup_threadgroup_change_end(tsk);
2990		return;
2991	}
2992
2993	spin_lock_irq(&tsk->sighand->siglock);
2994	/*
2995	 * From now this task is not visible for group-wide signals,
2996	 * see wants_signal(), do_signal_stop().
2997	 */
2998	sched_mm_cid_exit_signals(tsk);
2999	tsk->flags |= PF_EXITING;
3000
3001	cgroup_threadgroup_change_end(tsk);
3002
3003	if (!task_sigpending(tsk))
3004		goto out;
3005
3006	unblocked = tsk->blocked;
3007	signotset(&unblocked);
3008	retarget_shared_pending(tsk, &unblocked);
3009
3010	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3011	    task_participate_group_stop(tsk))
3012		group_stop = CLD_STOPPED;
3013out:
3014	spin_unlock_irq(&tsk->sighand->siglock);
3015
3016	/*
3017	 * If group stop has completed, deliver the notification.  This
3018	 * should always go to the real parent of the group leader.
3019	 */
3020	if (unlikely(group_stop)) {
3021		read_lock(&tasklist_lock);
3022		do_notify_parent_cldstop(tsk, false, group_stop);
3023		read_unlock(&tasklist_lock);
3024	}
3025}
3026
3027/*
3028 * System call entry points.
3029 */
3030
3031/**
3032 *  sys_restart_syscall - restart a system call
3033 */
3034SYSCALL_DEFINE0(restart_syscall)
3035{
3036	struct restart_block *restart = &current->restart_block;
3037	return restart->fn(restart);
3038}
3039
3040long do_no_restart_syscall(struct restart_block *param)
3041{
3042	return -EINTR;
3043}
3044
3045static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3046{
3047	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3048		sigset_t newblocked;
3049		/* A set of now blocked but previously unblocked signals. */
3050		sigandnsets(&newblocked, newset, &current->blocked);
3051		retarget_shared_pending(tsk, &newblocked);
3052	}
3053	tsk->blocked = *newset;
3054	recalc_sigpending();
3055}
3056
3057/**
3058 * set_current_blocked - change current->blocked mask
3059 * @newset: new mask
3060 *
3061	 * It is wrong to change ->blocked directly; this helper should be used
3062 * to ensure the process can't miss a shared signal we are going to block.
3063 */
3064void set_current_blocked(sigset_t *newset)
3065{
3066	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3067	__set_current_blocked(newset);
3068}
3069
3070void __set_current_blocked(const sigset_t *newset)
3071{
3072	struct task_struct *tsk = current;
3073
3074	/*
3075	 * In case the signal mask hasn't changed, there is nothing we need
3076	 * to do. The current->blocked shouldn't be modified by any other task.
3077	 */
3078	if (sigequalsets(&tsk->blocked, newset))
3079		return;
3080
3081	spin_lock_irq(&tsk->sighand->siglock);
3082	__set_task_blocked(tsk, newset);
3083	spin_unlock_irq(&tsk->sighand->siglock);
3084}
3085
3086/*
3087 * This is also useful for kernel threads that want to temporarily
3088 * (or permanently) block certain signals.
3089 *
3090 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3091 * interface happily blocks "unblockable" signals like SIGKILL
3092 * and friends.
3093 */
3094int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3095{
3096	struct task_struct *tsk = current;
3097	sigset_t newset;
3098
3099	/* Lockless, only current can change ->blocked, never from irq */
3100	if (oldset)
3101		*oldset = tsk->blocked;
3102
3103	switch (how) {
3104	case SIG_BLOCK:
3105		sigorsets(&newset, &tsk->blocked, set);
3106		break;
3107	case SIG_UNBLOCK:
3108		sigandnsets(&newset, &tsk->blocked, set);
3109		break;
3110	case SIG_SETMASK:
3111		newset = *set;
3112		break;
3113	default:
3114		return -EINVAL;
3115	}
3116
3117	__set_current_blocked(&newset);
3118	return 0;
3119}
3120EXPORT_SYMBOL(sigprocmask);
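
/*
 * Minimal in-kernel usage sketch (hypothetical kthread, illustrative
 * only): block everything except SIGTERM.  Unlike userspace, this
 * interface will happily block SIGKILL too if asked.
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigdelset(&all, SIGTERM);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */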
3121
3122/*
3123	 * This API helps set app-provided sigmasks.
3124 *
3125 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3126 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3127 *
3128	 * Note that it does set_restore_sigmask() in advance, so it must always
3129	 * be paired with restore_saved_sigmask_unless() before return from syscall.
3130 */
3131int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3132{
3133	sigset_t kmask;
3134
3135	if (!umask)
3136		return 0;
3137	if (sigsetsize != sizeof(sigset_t))
3138		return -EINVAL;
3139	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3140		return -EFAULT;
3141
3142	set_restore_sigmask();
3143	current->saved_sigmask = current->blocked;
3144	set_current_blocked(&kmask);
3145
3146	return 0;
3147}
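
/*
 * Sketch of the intended pairing, modeled on ppoll-style call sites
 * (which live elsewhere); do_the_wait() is a hypothetical placeholder:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_wait(...);
 *
 *	// Put the saved mask back unless a signal was delivered, in
 *	// which case signal setup has already consumed ->saved_sigmask.
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */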
3148
3149#ifdef CONFIG_COMPAT
3150int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3151			    size_t sigsetsize)
3152{
3153	sigset_t kmask;
3154
3155	if (!umask)
3156		return 0;
3157	if (sigsetsize != sizeof(compat_sigset_t))
3158		return -EINVAL;
3159	if (get_compat_sigset(&kmask, umask))
3160		return -EFAULT;
3161
3162	set_restore_sigmask();
3163	current->saved_sigmask = current->blocked;
3164	set_current_blocked(&kmask);
3165
3166	return 0;
3167}
3168#endif
3169
3170/**
3171 *  sys_rt_sigprocmask - change the list of currently blocked signals
3172 *  @how: whether to add, remove, or set signals
3173 *  @nset: new set of signals to apply according to @how (if non-null)
3174 *  @oset: previous value of signal mask if non-null
3175 *  @sigsetsize: size of sigset_t type
3176 */
3177SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3178		sigset_t __user *, oset, size_t, sigsetsize)
3179{
3180	sigset_t old_set, new_set;
3181	int error;
3182
3183	/* XXX: Don't preclude handling different sized sigset_t's.  */
3184	if (sigsetsize != sizeof(sigset_t))
3185		return -EINVAL;
3186
3187	old_set = current->blocked;
3188
3189	if (nset) {
3190		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3191			return -EFAULT;
3192		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3193
3194		error = sigprocmask(how, &new_set, NULL);
3195		if (error)
3196			return error;
3197	}
3198
3199	if (oset) {
3200		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3201			return -EFAULT;
3202	}
3203
3204	return 0;
3205}
3206
3207#ifdef CONFIG_COMPAT
3208COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3209		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3210{
3211	sigset_t old_set = current->blocked;
3212
3213	/* XXX: Don't preclude handling different sized sigset_t's.  */
3214	if (sigsetsize != sizeof(sigset_t))
3215		return -EINVAL;
3216
3217	if (nset) {
3218		sigset_t new_set;
3219		int error;
3220		if (get_compat_sigset(&new_set, nset))
3221			return -EFAULT;
3222		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3223
3224		error = sigprocmask(how, &new_set, NULL);
3225		if (error)
3226			return error;
3227	}
3228	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3229}
3230#endif
3231
3232static void do_sigpending(sigset_t *set)
3233{
3234	spin_lock_irq(&current->sighand->siglock);
3235	sigorsets(set, &current->pending.signal,
3236		  &current->signal->shared_pending.signal);
3237	spin_unlock_irq(&current->sighand->siglock);
3238
3239	/* Outside the lock because only this thread touches it.  */
3240	sigandsets(set, &current->blocked, set);
3241}
3242
3243/**
3244 *  sys_rt_sigpending - examine pending signals that have been raised
3245 *			while blocked
3246 *  @uset: stores pending signals
3247 *  @sigsetsize: size of sigset_t type or smaller
3248 */
3249SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3250{
3251	sigset_t set;
3252
3253	if (sigsetsize > sizeof(*uset))
3254		return -EINVAL;
3255
3256	do_sigpending(&set);
3257
3258	if (copy_to_user(uset, &set, sigsetsize))
3259		return -EFAULT;
3260
3261	return 0;
3262}
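
/*
 * Userspace view (illustrative, assumes glibc): a signal sent while
 * blocked shows up in the pending set until it is unblocked:
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGINT))
 *		printf("SIGINT is pending\n");
 */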
3263
3264#ifdef CONFIG_COMPAT
3265COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3266		compat_size_t, sigsetsize)
3267{
3268	sigset_t set;
3269
3270	if (sigsetsize > sizeof(*uset))
3271		return -EINVAL;
3272
3273	do_sigpending(&set);
3274
3275	return put_compat_sigset(uset, &set, sigsetsize);
3276}
3277#endif
3278
3279static const struct {
3280	unsigned char limit, layout;
3281} sig_sicodes[] = {
3282	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3283	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3284	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3285	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3286	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3287#if defined(SIGEMT)
3288	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3289#endif
3290	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3291	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3292	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3293};
3294
3295static bool known_siginfo_layout(unsigned sig, int si_code)
3296{
3297	if (si_code == SI_KERNEL)
3298		return true;
3299	else if (si_code > SI_USER) {
3300		if (sig_specific_sicodes(sig)) {
3301			if (si_code <= sig_sicodes[sig].limit)
3302				return true;
3303		}
3304		else if (si_code <= NSIGPOLL)
3305			return true;
3306	}
3307	else if (si_code >= SI_DETHREAD)
3308		return true;
3309	else if (si_code == SI_ASYNCNL)
3310		return true;
3311	return false;
3312}
3313
3314enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3315{
3316	enum siginfo_layout layout = SIL_KILL;
3317	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3318		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3319		    (si_code <= sig_sicodes[sig].limit)) {
3320			layout = sig_sicodes[sig].layout;
3321			/* Handle the exceptions */
3322			if ((sig == SIGBUS) &&
3323			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3324				layout = SIL_FAULT_MCEERR;
3325			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3326				layout = SIL_FAULT_BNDERR;
3327#ifdef SEGV_PKUERR
3328			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3329				layout = SIL_FAULT_PKUERR;
3330#endif
3331			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3332				layout = SIL_FAULT_PERF_EVENT;
3333			else if (IS_ENABLED(CONFIG_SPARC) &&
3334				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3335				layout = SIL_FAULT_TRAPNO;
3336			else if (IS_ENABLED(CONFIG_ALPHA) &&
3337				 ((sig == SIGFPE) ||
3338				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3339				layout = SIL_FAULT_TRAPNO;
3340		}
3341		else if (si_code <= NSIGPOLL)
3342			layout = SIL_POLL;
3343	} else {
3344		if (si_code == SI_TIMER)
3345			layout = SIL_TIMER;
3346		else if (si_code == SI_SIGIO)
3347			layout = SIL_POLL;
3348		else if (si_code < 0)
3349			layout = SIL_RT;
3350	}
3351	return layout;
3352}
3353
3354static inline char __user *si_expansion(const siginfo_t __user *info)
3355{
3356	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3357}
3358
3359int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3360{
3361	char __user *expansion = si_expansion(to);
3362	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3363		return -EFAULT;
3364	if (clear_user(expansion, SI_EXPANSION_SIZE))
3365		return -EFAULT;
3366	return 0;
3367}
3368
3369static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3370				       const siginfo_t __user *from)
3371{
3372	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3373		char __user *expansion = si_expansion(from);
3374		char buf[SI_EXPANSION_SIZE];
3375		int i;
3376		/*
3377		 * An unknown si_code might need more than
3378		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3379		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3380		 * will return this data to userspace exactly.
3381		 */
3382		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3383			return -EFAULT;
3384		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3385			if (buf[i] != 0)
3386				return -E2BIG;
3387		}
3388	}
3389	return 0;
3390}
3391
3392static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3393				    const siginfo_t __user *from)
3394{
3395	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3396		return -EFAULT;
3397	to->si_signo = signo;
3398	return post_copy_siginfo_from_user(to, from);
3399}
3400
3401int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3402{
3403	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3404		return -EFAULT;
3405	return post_copy_siginfo_from_user(to, from);
3406}
3407
3408#ifdef CONFIG_COMPAT
3409/**
3410 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3411 * @to: compat siginfo destination
3412 * @from: kernel siginfo source
3413 *
3414 * Note: This function does not work properly for the SIGCHLD on x32, but
3415 * fortunately it doesn't have to.  The only valid callers for this function are
3416 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3417 * The latter does not care because SIGCHLD will never cause a coredump.
3418 */
3419void copy_siginfo_to_external32(struct compat_siginfo *to,
3420		const struct kernel_siginfo *from)
3421{
3422	memset(to, 0, sizeof(*to));
3423
3424	to->si_signo = from->si_signo;
3425	to->si_errno = from->si_errno;
3426	to->si_code  = from->si_code;
3427	switch (siginfo_layout(from->si_signo, from->si_code)) {
3428	case SIL_KILL:
3429		to->si_pid = from->si_pid;
3430		to->si_uid = from->si_uid;
3431		break;
3432	case SIL_TIMER:
3433		to->si_tid     = from->si_tid;
3434		to->si_overrun = from->si_overrun;
3435		to->si_int     = from->si_int;
3436		break;
3437	case SIL_POLL:
3438		to->si_band = from->si_band;
3439		to->si_fd   = from->si_fd;
3440		break;
3441	case SIL_FAULT:
3442		to->si_addr = ptr_to_compat(from->si_addr);
3443		break;
3444	case SIL_FAULT_TRAPNO:
3445		to->si_addr = ptr_to_compat(from->si_addr);
3446		to->si_trapno = from->si_trapno;
3447		break;
3448	case SIL_FAULT_MCEERR:
3449		to->si_addr = ptr_to_compat(from->si_addr);
3450		to->si_addr_lsb = from->si_addr_lsb;
3451		break;
3452	case SIL_FAULT_BNDERR:
3453		to->si_addr = ptr_to_compat(from->si_addr);
3454		to->si_lower = ptr_to_compat(from->si_lower);
3455		to->si_upper = ptr_to_compat(from->si_upper);
3456		break;
3457	case SIL_FAULT_PKUERR:
3458		to->si_addr = ptr_to_compat(from->si_addr);
3459		to->si_pkey = from->si_pkey;
3460		break;
3461	case SIL_FAULT_PERF_EVENT:
3462		to->si_addr = ptr_to_compat(from->si_addr);
3463		to->si_perf_data = from->si_perf_data;
3464		to->si_perf_type = from->si_perf_type;
3465		to->si_perf_flags = from->si_perf_flags;
3466		break;
3467	case SIL_CHLD:
3468		to->si_pid = from->si_pid;
3469		to->si_uid = from->si_uid;
3470		to->si_status = from->si_status;
3471		to->si_utime = from->si_utime;
3472		to->si_stime = from->si_stime;
3473		break;
3474	case SIL_RT:
3475		to->si_pid = from->si_pid;
3476		to->si_uid = from->si_uid;
3477		to->si_int = from->si_int;
3478		break;
3479	case SIL_SYS:
3480		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3481		to->si_syscall   = from->si_syscall;
3482		to->si_arch      = from->si_arch;
3483		break;
3484	}
3485}
3486
3487int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3488			   const struct kernel_siginfo *from)
3489{
3490	struct compat_siginfo new;
3491
3492	copy_siginfo_to_external32(&new, from);
3493	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3494		return -EFAULT;
3495	return 0;
3496}
3497
3498static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3499					 const struct compat_siginfo *from)
3500{
3501	clear_siginfo(to);
3502	to->si_signo = from->si_signo;
3503	to->si_errno = from->si_errno;
3504	to->si_code  = from->si_code;
3505	switch (siginfo_layout(from->si_signo, from->si_code)) {
3506	case SIL_KILL:
3507		to->si_pid = from->si_pid;
3508		to->si_uid = from->si_uid;
3509		break;
3510	case SIL_TIMER:
3511		to->si_tid     = from->si_tid;
3512		to->si_overrun = from->si_overrun;
3513		to->si_int     = from->si_int;
3514		break;
3515	case SIL_POLL:
3516		to->si_band = from->si_band;
3517		to->si_fd   = from->si_fd;
3518		break;
3519	case SIL_FAULT:
3520		to->si_addr = compat_ptr(from->si_addr);
3521		break;
3522	case SIL_FAULT_TRAPNO:
3523		to->si_addr = compat_ptr(from->si_addr);
3524		to->si_trapno = from->si_trapno;
3525		break;
3526	case SIL_FAULT_MCEERR:
3527		to->si_addr = compat_ptr(from->si_addr);
3528		to->si_addr_lsb = from->si_addr_lsb;
3529		break;
3530	case SIL_FAULT_BNDERR:
3531		to->si_addr = compat_ptr(from->si_addr);
3532		to->si_lower = compat_ptr(from->si_lower);
3533		to->si_upper = compat_ptr(from->si_upper);
3534		break;
3535	case SIL_FAULT_PKUERR:
3536		to->si_addr = compat_ptr(from->si_addr);
3537		to->si_pkey = from->si_pkey;
3538		break;
3539	case SIL_FAULT_PERF_EVENT:
3540		to->si_addr = compat_ptr(from->si_addr);
3541		to->si_perf_data = from->si_perf_data;
3542		to->si_perf_type = from->si_perf_type;
3543		to->si_perf_flags = from->si_perf_flags;
3544		break;
3545	case SIL_CHLD:
3546		to->si_pid    = from->si_pid;
3547		to->si_uid    = from->si_uid;
3548		to->si_status = from->si_status;
3549#ifdef CONFIG_X86_X32_ABI
3550		if (in_x32_syscall()) {
3551			to->si_utime = from->_sifields._sigchld_x32._utime;
3552			to->si_stime = from->_sifields._sigchld_x32._stime;
3553		} else
3554#endif
3555		{
3556			to->si_utime = from->si_utime;
3557			to->si_stime = from->si_stime;
3558		}
3559		break;
3560	case SIL_RT:
3561		to->si_pid = from->si_pid;
3562		to->si_uid = from->si_uid;
3563		to->si_int = from->si_int;
3564		break;
3565	case SIL_SYS:
3566		to->si_call_addr = compat_ptr(from->si_call_addr);
3567		to->si_syscall   = from->si_syscall;
3568		to->si_arch      = from->si_arch;
3569		break;
3570	}
3571	return 0;
3572}
3573
3574static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3575				      const struct compat_siginfo __user *ufrom)
3576{
3577	struct compat_siginfo from;
3578
3579	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3580		return -EFAULT;
3581
3582	from.si_signo = signo;
3583	return post_copy_siginfo_from_user32(to, &from);
3584}
3585
3586int copy_siginfo_from_user32(struct kernel_siginfo *to,
3587			     const struct compat_siginfo __user *ufrom)
3588{
3589	struct compat_siginfo from;
3590
3591	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3592		return -EFAULT;
3593
3594	return post_copy_siginfo_from_user32(to, &from);
3595}
3596#endif /* CONFIG_COMPAT */
3597
3598/**
3599 *  do_sigtimedwait - wait for queued signals specified in @which
3600 *  @which: queued signals to wait for
3601 *  @info: if non-null, the signal's siginfo is returned here
3602 *  @ts: upper bound on process suspension time
3603 */
3604static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3605		    const struct timespec64 *ts)
3606{
3607	ktime_t *to = NULL, timeout = KTIME_MAX;
3608	struct task_struct *tsk = current;
3609	sigset_t mask = *which;
3610	enum pid_type type;
3611	int sig, ret = 0;
3612
3613	if (ts) {
3614		if (!timespec64_valid(ts))
3615			return -EINVAL;
3616		timeout = timespec64_to_ktime(*ts);
3617		to = &timeout;
3618	}
3619
3620	/*
3621	 * Invert the set of allowed signals to get those we want to block.
3622	 */
3623	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3624	signotset(&mask);
3625
3626	spin_lock_irq(&tsk->sighand->siglock);
3627	sig = dequeue_signal(tsk, &mask, info, &type);
3628	if (!sig && timeout) {
3629		/*
3630		 * None ready, temporarily unblock those we're interested in
3631		 * while we are sleeping, so that we'll be awakened when
3632		 * they arrive. Unblocking is always fine, we can avoid
3633		 * set_current_blocked().
3634		 */
3635		tsk->real_blocked = tsk->blocked;
3636		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3637		recalc_sigpending();
3638		spin_unlock_irq(&tsk->sighand->siglock);
3639
3640		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3641		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3642					       HRTIMER_MODE_REL);
3643		spin_lock_irq(&tsk->sighand->siglock);
3644		__set_task_blocked(tsk, &tsk->real_blocked);
3645		sigemptyset(&tsk->real_blocked);
3646		sig = dequeue_signal(tsk, &mask, info, &type);
3647	}
3648	spin_unlock_irq(&tsk->sighand->siglock);
3649
3650	if (sig)
3651		return sig;
3652	return ret ? -EINTR : -EAGAIN;
3653}
3654
3655/**
3656 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3657 *			in @uthese
3658 *  @uthese: queued signals to wait for
3659 *  @uinfo: if non-null, the signal's siginfo is returned here
3660 *  @uts: upper bound on process suspension time
3661 *  @sigsetsize: size of sigset_t type
3662 */
3663SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3664		siginfo_t __user *, uinfo,
3665		const struct __kernel_timespec __user *, uts,
3666		size_t, sigsetsize)
3667{
3668	sigset_t these;
3669	struct timespec64 ts;
3670	kernel_siginfo_t info;
3671	int ret;
3672
3673	/* XXX: Don't preclude handling different sized sigset_t's.  */
3674	if (sigsetsize != sizeof(sigset_t))
3675		return -EINVAL;
3676
3677	if (copy_from_user(&these, uthese, sizeof(these)))
3678		return -EFAULT;
3679
3680	if (uts) {
3681		if (get_timespec64(&ts, uts))
3682			return -EFAULT;
3683	}
3684
3685	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3686
3687	if (ret > 0 && uinfo) {
3688		if (copy_siginfo_to_user(uinfo, &info))
3689			ret = -EFAULT;
3690	}
3691
3692	return ret;
3693}
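
/*
 * Userspace usage sketch (illustrative, assumes glibc): wait up to one
 * second for SIGUSR1.  The signal must be blocked first, or it may be
 * delivered to a handler instead of being picked up here:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("sent by pid %d\n", (int)si.si_pid);
 */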
3694
3695#ifdef CONFIG_COMPAT_32BIT_TIME
3696SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3697		siginfo_t __user *, uinfo,
3698		const struct old_timespec32 __user *, uts,
3699		size_t, sigsetsize)
3700{
3701	sigset_t these;
3702	struct timespec64 ts;
3703	kernel_siginfo_t info;
3704	int ret;
3705
3706	if (sigsetsize != sizeof(sigset_t))
3707		return -EINVAL;
3708
3709	if (copy_from_user(&these, uthese, sizeof(these)))
3710		return -EFAULT;
3711
3712	if (uts) {
3713		if (get_old_timespec32(&ts, uts))
3714			return -EFAULT;
3715	}
3716
3717	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3718
3719	if (ret > 0 && uinfo) {
3720		if (copy_siginfo_to_user(uinfo, &info))
3721			ret = -EFAULT;
3722	}
3723
3724	return ret;
3725}
3726#endif
3727
3728#ifdef CONFIG_COMPAT
3729COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3730		struct compat_siginfo __user *, uinfo,
3731		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3732{
3733	sigset_t s;
3734	struct timespec64 t;
3735	kernel_siginfo_t info;
3736	long ret;
3737
3738	if (sigsetsize != sizeof(sigset_t))
3739		return -EINVAL;
3740
3741	if (get_compat_sigset(&s, uthese))
3742		return -EFAULT;
3743
3744	if (uts) {
3745		if (get_timespec64(&t, uts))
3746			return -EFAULT;
3747	}
3748
3749	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3750
3751	if (ret > 0 && uinfo) {
3752		if (copy_siginfo_to_user32(uinfo, &info))
3753			ret = -EFAULT;
3754	}
3755
3756	return ret;
3757}
3758
3759#ifdef CONFIG_COMPAT_32BIT_TIME
3760COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3761		struct compat_siginfo __user *, uinfo,
3762		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3763{
3764	sigset_t s;
3765	struct timespec64 t;
3766	kernel_siginfo_t info;
3767	long ret;
3768
3769	if (sigsetsize != sizeof(sigset_t))
3770		return -EINVAL;
3771
3772	if (get_compat_sigset(&s, uthese))
3773		return -EFAULT;
3774
3775	if (uts) {
3776		if (get_old_timespec32(&t, uts))
3777			return -EFAULT;
3778	}
3779
3780	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3781
3782	if (ret > 0 && uinfo) {
3783		if (copy_siginfo_to_user32(uinfo, &info))
3784			ret = -EFAULT;
3785	}
3786
3787	return ret;
3788}
3789#endif
3790#endif
3791
3792static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3793{
3794	clear_siginfo(info);
3795	info->si_signo = sig;
3796	info->si_errno = 0;
3797	info->si_code = SI_USER;
3798	info->si_pid = task_tgid_vnr(current);
3799	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3800}
3801
3802/**
3803 *  sys_kill - send a signal to a process
3804 *  @pid: the PID of the process
3805 *  @sig: signal to be sent
3806 */
3807SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3808{
3809	struct kernel_siginfo info;
3810
3811	prepare_kill_siginfo(sig, &info);
3812
3813	return kill_something_info(sig, &info, pid);
3814}
3815
3816/*
3817 * Verify that the signaler and signalee either are in the same pid namespace
3818 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3819 * namespace.
3820 */
3821static bool access_pidfd_pidns(struct pid *pid)
3822{
3823	struct pid_namespace *active = task_active_pid_ns(current);
3824	struct pid_namespace *p = ns_of_pid(pid);
3825
3826	for (;;) {
3827		if (!p)
3828			return false;
3829		if (p == active)
3830			break;
3831		p = p->parent;
3832	}
3833
3834	return true;
3835}
3836
3837static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3838		siginfo_t __user *info)
3839{
3840#ifdef CONFIG_COMPAT
3841	/*
3842	 * Avoid hooking up compat syscalls and instead handle necessary
3843	 * conversions here. Note, this is a stop-gap measure and should not be
3844	 * considered a generic solution.
3845	 */
3846	if (in_compat_syscall())
3847		return copy_siginfo_from_user32(
3848			kinfo, (struct compat_siginfo __user *)info);
3849#endif
3850	return copy_siginfo_from_user(kinfo, info);
3851}
3852
3853static struct pid *pidfd_to_pid(const struct file *file)
3854{
3855	struct pid *pid;
3856
3857	pid = pidfd_pid(file);
3858	if (!IS_ERR(pid))
3859		return pid;
3860
3861	return tgid_pidfd_to_pid(file);
3862}
3863
3864/**
3865 * sys_pidfd_send_signal - Signal a process through a pidfd
3866 * @pidfd:  file descriptor of the process
3867 * @sig:    signal to send
3868 * @info:   signal info
3869 * @flags:  future flags
3870 *
3871 * The syscall currently only signals via PIDTYPE_PID which covers
3872 * kill(<positive-pid>, <signal>). It does not signal threads or process
3873 * groups.
3874 * In order to extend the syscall to threads and process groups the @flags
3875 * argument should be used. In essence, the @flags argument will determine
3876 * what is signaled and not the file descriptor itself. In other words,
3877 * grouping is a property of the flags argument not a property of the file
3878 * descriptor.
3879 *
3880 * Return: 0 on success, negative errno on failure
3881 */
3882SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3883		siginfo_t __user *, info, unsigned int, flags)
3884{
3885	int ret;
3886	struct fd f;
3887	struct pid *pid;
3888	kernel_siginfo_t kinfo;
3889
3890	/* Enforce that flags is set to 0 until we add an extension. */
3891	if (flags)
3892		return -EINVAL;
3893
3894	f = fdget(pidfd);
3895	if (!f.file)
3896		return -EBADF;
3897
3898	/* Is this a pidfd? */
3899	pid = pidfd_to_pid(f.file);
3900	if (IS_ERR(pid)) {
3901		ret = PTR_ERR(pid);
3902		goto err;
3903	}
3904
3905	ret = -EINVAL;
3906	if (!access_pidfd_pidns(pid))
3907		goto err;
3908
3909	if (info) {
3910		ret = copy_siginfo_from_user_any(&kinfo, info);
3911		if (unlikely(ret))
3912			goto err;
3913
3914		ret = -EINVAL;
3915		if (unlikely(sig != kinfo.si_signo))
3916			goto err;
3917
3918		/* Only allow sending arbitrary signals to yourself. */
3919		ret = -EPERM;
3920		if ((task_pid(current) != pid) &&
3921		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3922			goto err;
3923	} else {
3924		prepare_kill_siginfo(sig, &kinfo);
3925	}
3926
3927	ret = kill_pid_info(sig, &kinfo, pid);
3928
3929err:
3930	fdput(f);
3931	return ret;
3932}
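
/*
 * Userspace usage sketch (illustrative; assumes a kernel providing
 * pidfd_open): signal a process through its pidfd, which cannot be
 * confused by PID reuse the way a bare kill(pid, ...) can:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */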
3933
3934static int
3935do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3936{
3937	struct task_struct *p;
3938	int error = -ESRCH;
3939
3940	rcu_read_lock();
3941	p = find_task_by_vpid(pid);
3942	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3943		error = check_kill_permission(sig, info, p);
3944		/*
3945		 * The null signal is a permissions and process existence
3946		 * probe.  No signal is actually delivered.
3947		 */
3948		if (!error && sig) {
3949			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3950			/*
3951			 * If lock_task_sighand() failed we pretend the task
3952			 * dies after receiving the signal. The window is tiny,
3953			 * and the signal is private anyway.
3954			 */
3955			if (unlikely(error == -ESRCH))
3956				error = 0;
3957		}
3958	}
3959	rcu_read_unlock();
3960
3961	return error;
3962}
3963
3964static int do_tkill(pid_t tgid, pid_t pid, int sig)
3965{
3966	struct kernel_siginfo info;
3967
3968	clear_siginfo(&info);
3969	info.si_signo = sig;
3970	info.si_errno = 0;
3971	info.si_code = SI_TKILL;
3972	info.si_pid = task_tgid_vnr(current);
3973	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3974
3975	return do_send_specific(tgid, pid, sig, &info);
3976}
3977
3978/**
3979 *  sys_tgkill - send signal to one specific thread
3980 *  @tgid: the thread group ID of the thread
3981 *  @pid: the PID of the thread
3982 *  @sig: signal to be sent
3983 *
3984 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3985 *  exists but no longer belongs to the target process. This
3986 *  method solves the problem of threads exiting and PIDs getting reused.
3987 */
3988SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3989{
3990	/* This is only valid for single tasks */
3991	if (pid <= 0 || tgid <= 0)
3992		return -EINVAL;
3993
3994	return do_tkill(tgid, pid, sig);
3995}
3996
3997/**
3998 *  sys_tkill - send signal to one specific task
3999 *  @pid: the PID of the task
4000 *  @sig: signal to be sent
4001 *
4002 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4003 */
4004SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4005{
4006	/* This is only valid for single tasks */
4007	if (pid <= 0)
4008		return -EINVAL;
4009
4010	return do_tkill(0, pid, sig);
4011}
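
/*
 * Userspace usage sketch (illustrative): prefer tgkill() over tkill(),
 * since it refuses to fire when the thread ID has been recycled into a
 * different process; common libc pthread_kill() implementations are
 * built on it:
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */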
4012
4013static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4014{
4015	/* Not even root can pretend to send signals from the kernel.
4016	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4017	 */
4018	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4019	    (task_pid_vnr(current) != pid))
4020		return -EPERM;
4021
4022	/* POSIX.1b doesn't mention process groups.  */
4023	return kill_proc_info(sig, info, pid);
4024}
4025
4026/**
4027 *  sys_rt_sigqueueinfo - send signal information to a process
4028 *  @pid: the PID of the thread
4029 *  @sig: signal to be sent
4030 *  @uinfo: signal info to be sent
4031 */
4032SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4033		siginfo_t __user *, uinfo)
4034{
4035	kernel_siginfo_t info;
4036	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4037	if (unlikely(ret))
4038		return ret;
4039	return do_rt_sigqueueinfo(pid, sig, &info);
4040}
4041
4042#ifdef CONFIG_COMPAT
4043COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4044			compat_pid_t, pid,
4045			int, sig,
4046			struct compat_siginfo __user *, uinfo)
4047{
4048	kernel_siginfo_t info;
4049	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4050	if (unlikely(ret))
4051		return ret;
4052	return do_rt_sigqueueinfo(pid, sig, &info);
4053}
4054#endif
4055
4056static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4057{
4058	/* This is only valid for single tasks */
4059	if (pid <= 0 || tgid <= 0)
4060		return -EINVAL;
4061
4062	/* Not even root can pretend to send signals from the kernel.
4063	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4064	 */
4065	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4066	    (task_pid_vnr(current) != pid))
4067		return -EPERM;
4068
4069	return do_send_specific(tgid, pid, sig, info);
4070}
4071
4072SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4073		siginfo_t __user *, uinfo)
4074{
4075	kernel_siginfo_t info;
4076	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4077	if (unlikely(ret))
4078		return ret;
4079	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4080}
4081
4082#ifdef CONFIG_COMPAT
4083COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4084			compat_pid_t, tgid,
4085			compat_pid_t, pid,
4086			int, sig,
4087			struct compat_siginfo __user *, uinfo)
4088{
4089	kernel_siginfo_t info;
4090	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4091	if (unlikely(ret))
4092		return ret;
4093	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4094}
4095#endif
4096
4097/*
4098 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4099 */
4100void kernel_sigaction(int sig, __sighandler_t action)
4101{
4102	spin_lock_irq(&current->sighand->siglock);
4103	current->sighand->action[sig - 1].sa.sa_handler = action;
4104	if (action == SIG_IGN) {
4105		sigset_t mask;
4106
4107		sigemptyset(&mask);
4108		sigaddset(&mask, sig);
4109
4110		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4111		flush_sigqueue_mask(&mask, &current->pending);
4112		recalc_sigpending();
4113	}
4114	spin_unlock_irq(&current->sighand->siglock);
4115}
4116EXPORT_SYMBOL(kernel_sigaction);
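
/*
 * Typical kthread usage goes through the allow_signal()/disallow_signal()
 * wrappers around kernel_sigaction().  Illustrative sketch:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			break;
 *	}
 */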
4117
4118void __weak sigaction_compat_abi(struct k_sigaction *act,
4119		struct k_sigaction *oact)
4120{
4121}
4122
4123int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4124{
4125	struct task_struct *p = current, *t;
4126	struct k_sigaction *k;
4127	sigset_t mask;
4128
4129	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4130		return -EINVAL;
4131
4132	k = &p->sighand->action[sig-1];
4133
4134	spin_lock_irq(&p->sighand->siglock);
4135	if (k->sa.sa_flags & SA_IMMUTABLE) {
4136		spin_unlock_irq(&p->sighand->siglock);
4137		return -EINVAL;
4138	}
4139	if (oact)
4140		*oact = *k;
4141
4142	/*
4143	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4144	 * e.g. by having an architecture use the bit in their uapi.
4145	 */
4146	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4147
4148	/*
4149	 * Clear unknown flag bits in order to allow userspace to detect missing
4150	 * support for flag bits and to allow the kernel to use non-uapi bits
4151	 * internally.
4152	 */
4153	if (act)
4154		act->sa.sa_flags &= UAPI_SA_FLAGS;
4155	if (oact)
4156		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4157
4158	sigaction_compat_abi(act, oact);
4159
4160	if (act) {
4161		sigdelsetmask(&act->sa.sa_mask,
4162			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4163		*k = *act;
4164		/*
4165		 * POSIX 3.3.1.3:
4166		 *  "Setting a signal action to SIG_IGN for a signal that is
4167		 *   pending shall cause the pending signal to be discarded,
4168		 *   whether or not it is blocked."
4169		 *
4170		 *  "Setting a signal action to SIG_DFL for a signal that is
4171		 *   pending and whose default action is to ignore the signal
4172		 *   (for example, SIGCHLD), shall cause the pending signal to
4173		 *   be discarded, whether or not it is blocked"
4174		 */
4175		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4176			sigemptyset(&mask);
4177			sigaddset(&mask, sig);
4178			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4179			for_each_thread(p, t)
4180				flush_sigqueue_mask(&mask, &t->pending);
4181		}
4182	}
4183
4184	spin_unlock_irq(&p->sighand->siglock);
4185	return 0;
4186}
4187
4188#ifdef CONFIG_DYNAMIC_SIGFRAME
4189static inline void sigaltstack_lock(void)
4190	__acquires(&current->sighand->siglock)
4191{
4192	spin_lock_irq(&current->sighand->siglock);
4193}
4194
4195static inline void sigaltstack_unlock(void)
4196	__releases(&current->sighand->siglock)
4197{
4198	spin_unlock_irq(&current->sighand->siglock);
4199}
4200#else
4201static inline void sigaltstack_lock(void) { }
4202static inline void sigaltstack_unlock(void) { }
4203#endif
4204
4205static int
4206	do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4207		size_t min_ss_size)
4208{
4209	struct task_struct *t = current;
4210	int ret = 0;
4211
4212	if (oss) {
4213		memset(oss, 0, sizeof(stack_t));
4214		oss->ss_sp = (void __user *) t->sas_ss_sp;
4215		oss->ss_size = t->sas_ss_size;
4216		oss->ss_flags = sas_ss_flags(sp) |
4217			(current->sas_ss_flags & SS_FLAG_BITS);
4218	}
4219
4220	if (ss) {
4221		void __user *ss_sp = ss->ss_sp;
4222		size_t ss_size = ss->ss_size;
4223		unsigned ss_flags = ss->ss_flags;
4224		int ss_mode;
4225
4226		if (unlikely(on_sig_stack(sp)))
4227			return -EPERM;
4228
4229		ss_mode = ss_flags & ~SS_FLAG_BITS;
4230		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4231				ss_mode != 0))
4232			return -EINVAL;
4233
4234		/*
4235		 * Return before taking any locks if no actual
4236		 * sigaltstack changes were requested.
4237		 */
4238		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4239		    t->sas_ss_size == ss_size &&
4240		    t->sas_ss_flags == ss_flags)
4241			return 0;
4242
4243		sigaltstack_lock();
4244		if (ss_mode == SS_DISABLE) {
4245			ss_size = 0;
4246			ss_sp = NULL;
4247		} else {
4248			if (unlikely(ss_size < min_ss_size))
4249				ret = -ENOMEM;
4250			if (!sigaltstack_size_valid(ss_size))
4251				ret = -ENOMEM;
4252		}
4253		if (!ret) {
4254			t->sas_ss_sp = (unsigned long) ss_sp;
4255			t->sas_ss_size = ss_size;
4256			t->sas_ss_flags = ss_flags;
4257		}
4258		sigaltstack_unlock();
4259	}
4260	return ret;
4261}
4262
4263	SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4264{
4265	stack_t new, old;
4266	int err;
4267	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4268		return -EFAULT;
4269	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4270			      current_user_stack_pointer(),
4271			      MINSIGSTKSZ);
4272	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4273		err = -EFAULT;
4274	return err;
4275}
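
/*
 * Userspace usage sketch (illustrative, assumes glibc): install an
 * alternate stack so a SIGSEGV handler can still run after the main
 * stack overflows; the handler must be registered with SA_ONSTACK:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 */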
4276
4277int restore_altstack(const stack_t __user *uss)
4278{
4279	stack_t new;
4280	if (copy_from_user(&new, uss, sizeof(stack_t)))
4281		return -EFAULT;
4282	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4283			     MINSIGSTKSZ);
4284	/* squash all but EFAULT for now */
4285	return 0;
4286}
4287
4288int __save_altstack(stack_t __user *uss, unsigned long sp)
4289{
4290	struct task_struct *t = current;
4291	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4292		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4293		__put_user(t->sas_ss_size, &uss->ss_size);
4294	return err;
4295}
4296
4297#ifdef CONFIG_COMPAT
4298static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4299				 compat_stack_t __user *uoss_ptr)
4300{
4301	stack_t uss, uoss;
4302	int ret;
4303
4304	if (uss_ptr) {
4305		compat_stack_t uss32;
4306		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4307			return -EFAULT;
4308		uss.ss_sp = compat_ptr(uss32.ss_sp);
4309		uss.ss_flags = uss32.ss_flags;
4310		uss.ss_size = uss32.ss_size;
4311	}
4312	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4313			     compat_user_stack_pointer(),
4314			     COMPAT_MINSIGSTKSZ);
4315	if (ret >= 0 && uoss_ptr)  {
4316		compat_stack_t old;
4317		memset(&old, 0, sizeof(old));
4318		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4319		old.ss_flags = uoss.ss_flags;
4320		old.ss_size = uoss.ss_size;
4321		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4322			ret = -EFAULT;
4323	}
4324	return ret;
4325}
4326
4327COMPAT_SYSCALL_DEFINE2(sigaltstack,
4328			const compat_stack_t __user *, uss_ptr,
4329			compat_stack_t __user *, uoss_ptr)
4330{
4331	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4332}
4333
4334int compat_restore_altstack(const compat_stack_t __user *uss)
4335{
4336	int err = do_compat_sigaltstack(uss, NULL);
4337	/* squash all but -EFAULT for now */
4338	return err == -EFAULT ? err : 0;
4339}
4340
4341int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4342{
4343	int err;
4344	struct task_struct *t = current;
4345	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4346			 &uss->ss_sp) |
4347		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4348		__put_user(t->sas_ss_size, &uss->ss_size);
4349	return err;
4350}
4351#endif
4352
4353#ifdef __ARCH_WANT_SYS_SIGPENDING
4354
4355/**
4356 *  sys_sigpending - examine pending signals
4357 *  @uset: where mask of pending signal is returned
4358 */
4359SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4360{
4361	sigset_t set;
4362
4363	if (sizeof(old_sigset_t) > sizeof(*uset))
4364		return -EINVAL;
4365
4366	do_sigpending(&set);
4367
4368	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4369		return -EFAULT;
4370
4371	return 0;
4372}
4373
4374#ifdef CONFIG_COMPAT
4375COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4376{
4377	sigset_t set;
4378
4379	do_sigpending(&set);
4380
4381	return put_user(set.sig[0], set32);
4382}
4383#endif
4384
4385#endif
4386
4387#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4388/**
4389 *  sys_sigprocmask - examine and change blocked signals
4390 *  @how: whether to add, remove, or set signals
4391 *  @nset: signals to add or remove (if non-null)
4392 *  @oset: previous value of signal mask if non-null
4393 *
4394 * Some platforms have their own version with special arguments;
4395 * others support only sys_rt_sigprocmask.
4396 */
4397
4398SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4399		old_sigset_t __user *, oset)
4400{
4401	old_sigset_t old_set, new_set;
4402	sigset_t new_blocked;
4403
4404	old_set = current->blocked.sig[0];
4405
4406	if (nset) {
4407		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4408			return -EFAULT;
4409
4410		new_blocked = current->blocked;
4411
4412		switch (how) {
4413		case SIG_BLOCK:
4414			sigaddsetmask(&new_blocked, new_set);
4415			break;
4416		case SIG_UNBLOCK:
4417			sigdelsetmask(&new_blocked, new_set);
4418			break;
4419		case SIG_SETMASK:
4420			new_blocked.sig[0] = new_set;
4421			break;
4422		default:
4423			return -EINVAL;
4424		}
4425
4426		set_current_blocked(&new_blocked);
4427	}
4428
4429	if (oset) {
4430		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4431			return -EFAULT;
4432	}
4433
4434	return 0;
4435}
4436#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4437
4438#ifndef CONFIG_ODD_RT_SIGACTION
4439/**
4440 *  sys_rt_sigaction - alter an action taken by a process
4441 *  @sig: signal to be sent
4442 *  @act: new sigaction
4443 *  @oact: used to save the previous sigaction
4444 *  @sigsetsize: size of sigset_t type
4445 */
4446SYSCALL_DEFINE4(rt_sigaction, int, sig,
4447		const struct sigaction __user *, act,
4448		struct sigaction __user *, oact,
4449		size_t, sigsetsize)
4450{
4451	struct k_sigaction new_sa, old_sa;
4452	int ret;
4453
4454	/* XXX: Don't preclude handling different sized sigset_t's.  */
4455	if (sigsetsize != sizeof(sigset_t))
4456		return -EINVAL;
4457
4458	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4459		return -EFAULT;
4460
4461	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4462	if (ret)
4463		return ret;
4464
4465	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4466		return -EFAULT;
4467
4468	return 0;
4469}
4470#ifdef CONFIG_COMPAT
4471COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4472		const struct compat_sigaction __user *, act,
4473		struct compat_sigaction __user *, oact,
4474		compat_size_t, sigsetsize)
4475{
4476	struct k_sigaction new_ka, old_ka;
4477#ifdef __ARCH_HAS_SA_RESTORER
4478	compat_uptr_t restorer;
4479#endif
4480	int ret;
4481
4482	/* XXX: Don't preclude handling different sized sigset_t's.  */
4483	if (sigsetsize != sizeof(compat_sigset_t))
4484		return -EINVAL;
4485
4486	if (act) {
4487		compat_uptr_t handler;
4488		ret = get_user(handler, &act->sa_handler);
4489		new_ka.sa.sa_handler = compat_ptr(handler);
4490#ifdef __ARCH_HAS_SA_RESTORER
4491		ret |= get_user(restorer, &act->sa_restorer);
4492		new_ka.sa.sa_restorer = compat_ptr(restorer);
4493#endif
4494		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4495		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4496		if (ret)
4497			return -EFAULT;
4498	}
4499
4500	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4501	if (!ret && oact) {
4502		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
4503			       &oact->sa_handler);
4504		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4505					 sizeof(oact->sa_mask));
4506		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4507#ifdef __ARCH_HAS_SA_RESTORER
4508		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4509				&oact->sa_restorer);
4510#endif
4511	}
4512	return ret;
4513}
4514#endif
4515#endif /* !CONFIG_ODD_RT_SIGACTION */
4516
4517#ifdef CONFIG_OLD_SIGACTION
4518SYSCALL_DEFINE3(sigaction, int, sig,
4519		const struct old_sigaction __user *, act,
4520	        struct old_sigaction __user *, oact)
4521{
4522	struct k_sigaction new_ka, old_ka;
4523	int ret;
4524
4525	if (act) {
4526		old_sigset_t mask;
4527		if (!access_ok(act, sizeof(*act)) ||
4528		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4529		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4530		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4531		    __get_user(mask, &act->sa_mask))
4532			return -EFAULT;
4533#ifdef __ARCH_HAS_KA_RESTORER
4534		new_ka.ka_restorer = NULL;
4535#endif
4536		siginitset(&new_ka.sa.sa_mask, mask);
4537	}
4538
4539	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4540
4541	if (!ret && oact) {
4542		if (!access_ok(oact, sizeof(*oact)) ||
4543		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4544		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4545		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4546		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4547			return -EFAULT;
4548	}
4549
4550	return ret;
4551}
4552#endif
4553#ifdef CONFIG_COMPAT_OLD_SIGACTION
4554COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4555		const struct compat_old_sigaction __user *, act,
4556	        struct compat_old_sigaction __user *, oact)
4557{
4558	struct k_sigaction new_ka, old_ka;
4559	int ret;
4560	compat_old_sigset_t mask;
4561	compat_uptr_t handler, restorer;
4562
4563	if (act) {
4564		if (!access_ok(act, sizeof(*act)) ||
4565		    __get_user(handler, &act->sa_handler) ||
4566		    __get_user(restorer, &act->sa_restorer) ||
4567		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4568		    __get_user(mask, &act->sa_mask))
4569			return -EFAULT;
4570
4571#ifdef __ARCH_HAS_KA_RESTORER
4572		new_ka.ka_restorer = NULL;
4573#endif
4574		new_ka.sa.sa_handler = compat_ptr(handler);
4575		new_ka.sa.sa_restorer = compat_ptr(restorer);
4576		siginitset(&new_ka.sa.sa_mask, mask);
4577	}
4578
4579	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4580
4581	if (!ret && oact) {
4582		if (!access_ok(oact, sizeof(*oact)) ||
4583		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4584			       &oact->sa_handler) ||
4585		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4586			       &oact->sa_restorer) ||
4587		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4588		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4589			return -EFAULT;
4590	}
4591	return ret;
4592}
4593#endif
4594
4595#ifdef CONFIG_SGETMASK_SYSCALL
4596
4597/*
4598 * For backwards compatibility.  Functionality superseded by sigprocmask.
4599 */
4600SYSCALL_DEFINE0(sgetmask)
4601{
4602	/* SMP safe */
4603	return current->blocked.sig[0];
4604}
4605
4606SYSCALL_DEFINE1(ssetmask, int, newmask)
4607{
4608	int old = current->blocked.sig[0];
4609	sigset_t newset;
4610
4611	siginitset(&newset, newmask);
4612	set_current_blocked(&newset);
4613
4614	return old;
4615}
4616#endif /* CONFIG_SGETMASK_SYSCALL */
4617
4618#ifdef __ARCH_WANT_SYS_SIGNAL
4619/*
4620 * For backwards compatibility.  Functionality superseded by sigaction.
4621 */
4622SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4623{
4624	struct k_sigaction new_sa, old_sa;
4625	int ret;
4626
4627	new_sa.sa.sa_handler = handler;
4628	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4629	sigemptyset(&new_sa.sa.sa_mask);
4630
4631	ret = do_sigaction(sig, &new_sa, &old_sa);
4632
4633	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4634}
4635#endif /* __ARCH_WANT_SYS_SIGNAL */
4636
4637#ifdef __ARCH_WANT_SYS_PAUSE
4638
4639SYSCALL_DEFINE0(pause)
4640{
4641	while (!signal_pending(current)) {
4642		__set_current_state(TASK_INTERRUPTIBLE);
4643		schedule();
4644	}
4645	return -ERESTARTNOHAND;
4646}
4647
4648#endif
4649
4650static int sigsuspend(sigset_t *set)
4651{
4652	current->saved_sigmask = current->blocked;
4653	set_current_blocked(set);
4654
4655	while (!signal_pending(current)) {
4656		__set_current_state(TASK_INTERRUPTIBLE);
4657		schedule();
4658	}
4659	set_restore_sigmask();
4660	return -ERESTARTNOHAND;
4661}
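
/*
 * Userspace view of why this beats a sigprocmask()/pause() pair
 * (illustrative, assumes glibc): the mask swap and the sleep are atomic,
 * so a signal arriving between "unblock" and "wait" cannot be lost:
 *
 *	sigset_t waitmask;
 *
 *	sigemptyset(&waitmask);		// block nothing while waiting
 *	sigsuspend(&waitmask);		// returns -1 with errno == EINTR
 */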
4662
4663/**
4664 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
4665 *	until a signal is received
4666 *  @unewset: new signal mask value
4667 *  @sigsetsize: size of sigset_t type
4668 */
4669SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4670{
4671	sigset_t newset;
4672
4673	/* XXX: Don't preclude handling different sized sigset_t's.  */
4674	if (sigsetsize != sizeof(sigset_t))
4675		return -EINVAL;
4676
4677	if (copy_from_user(&newset, unewset, sizeof(newset)))
4678		return -EFAULT;
4679	return sigsuspend(&newset);
4680}
4681 
4682#ifdef CONFIG_COMPAT
4683COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4684{
4685	sigset_t newset;
4686
4687	/* XXX: Don't preclude handling different sized sigset_t's.  */
4688	if (sigsetsize != sizeof(sigset_t))
4689		return -EINVAL;
4690
4691	if (get_compat_sigset(&newset, unewset))
4692		return -EFAULT;
4693	return sigsuspend(&newset);
4694}
4695#endif
4696
4697#ifdef CONFIG_OLD_SIGSUSPEND
4698SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4699{
4700	sigset_t blocked;
4701	siginitset(&blocked, mask);
4702	return sigsuspend(&blocked);
4703}
4704#endif
4705#ifdef CONFIG_OLD_SIGSUSPEND3
4706SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4707{
4708	sigset_t blocked;
4709	siginitset(&blocked, mask);
4710	return sigsuspend(&blocked);
4711}
4712#endif
4713
4714__weak const char *arch_vma_name(struct vm_area_struct *vma)
4715{
4716	return NULL;
4717}
4718
4719static inline void siginfo_buildtime_checks(void)
4720{
4721	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4722
4723	/* Verify the offsets in the two siginfos match */
4724#define CHECK_OFFSET(field) \
4725	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4726
4727	/* kill */
4728	CHECK_OFFSET(si_pid);
4729	CHECK_OFFSET(si_uid);
4730
4731	/* timer */
4732	CHECK_OFFSET(si_tid);
4733	CHECK_OFFSET(si_overrun);
4734	CHECK_OFFSET(si_value);
4735
4736	/* rt */
4737	CHECK_OFFSET(si_pid);
4738	CHECK_OFFSET(si_uid);
4739	CHECK_OFFSET(si_value);
4740
4741	/* sigchld */
4742	CHECK_OFFSET(si_pid);
4743	CHECK_OFFSET(si_uid);
4744	CHECK_OFFSET(si_status);
4745	CHECK_OFFSET(si_utime);
4746	CHECK_OFFSET(si_stime);
4747
4748	/* sigfault */
4749	CHECK_OFFSET(si_addr);
4750	CHECK_OFFSET(si_trapno);
4751	CHECK_OFFSET(si_addr_lsb);
4752	CHECK_OFFSET(si_lower);
4753	CHECK_OFFSET(si_upper);
4754	CHECK_OFFSET(si_pkey);
4755	CHECK_OFFSET(si_perf_data);
4756	CHECK_OFFSET(si_perf_type);
4757	CHECK_OFFSET(si_perf_flags);
4758
4759	/* sigpoll */
4760	CHECK_OFFSET(si_band);
4761	CHECK_OFFSET(si_fd);
4762
4763	/* sigsys */
4764	CHECK_OFFSET(si_call_addr);
4765	CHECK_OFFSET(si_syscall);
4766	CHECK_OFFSET(si_arch);
4767#undef CHECK_OFFSET
4768
4769	/* usb asyncio */
4770	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4771		     offsetof(struct siginfo, si_addr));
4772	if (sizeof(int) == sizeof(void __user *)) {
4773		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4774			     sizeof(void __user *));
4775	} else {
4776		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4777			      sizeof_field(struct siginfo, si_uid)) !=
4778			     sizeof(void __user *));
4779		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4780			     offsetof(struct siginfo, si_uid));
4781	}
4782#ifdef CONFIG_COMPAT
4783	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4784		     offsetof(struct compat_siginfo, si_addr));
4785	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4786		     sizeof(compat_uptr_t));
4787	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4788		     sizeof_field(struct siginfo, si_pid));
4789#endif
4790}
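
/*
 * The CHECK_OFFSET() pattern above is a general compile-time technique:
 * BUILD_BUG_ON() plus offsetof() proves that two structures stay
 * layout-compatible at zero runtime cost.  Minimal sketch with
 * illustrative types:
 *
 *	struct wire_v1 { u32 id; u64 payload; };
 *	struct wire_v2 { u32 id; u64 payload; };
 *
 *	static inline void wire_checks(void)
 *	{
 *		BUILD_BUG_ON(offsetof(struct wire_v1, payload) !=
 *			     offsetof(struct wire_v2, payload));
 *		BUILD_BUG_ON(sizeof(struct wire_v1) !=
 *			     sizeof(struct wire_v2));
 *	}
 */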
4791
4792#if defined(CONFIG_SYSCTL)
4793static struct ctl_table signal_debug_table[] = {
4794#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4795	{
4796		.procname	= "exception-trace",
4797		.data		= &show_unhandled_signals,
4798		.maxlen		= sizeof(int),
4799		.mode		= 0644,
4800		.proc_handler	= proc_dointvec
4801	},
4802#endif
4803	{ }
4804};
4805
4806static int __init init_signal_sysctls(void)
4807{
4808	register_sysctl_init("debug", signal_debug_table);
4809	return 0;
4810}
4811early_initcall(init_signal_sysctls);
4812#endif /* CONFIG_SYSCTL */
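
/*
 * The table above surfaces show_unhandled_signals as
 * /proc/sys/debug/exception-trace (mode 0644, so writable by root).
 * A userspace toggle might look like this sketch:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int set_exception_trace(int on)
 *	{
 *		int fd = open("/proc/sys/debug/exception-trace", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, on ? "1" : "0", 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */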
4813
4814void __init signals_init(void)
4815{
4816	siginfo_buildtime_checks();
4817
4818	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4819}
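
/*
 * KMEM_CACHE() above creates the slab that sigqueue entries come from:
 * SLAB_PANIC makes boot fail loudly if the cache cannot be created and
 * SLAB_ACCOUNT charges allocations to the allocating task's memcg.
 * Allocation follows the usual slab pattern (sketch):
 *
 *	struct sigqueue *q;
 *
 *	q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
 *	if (q)
 *		kmem_cache_free(sigqueue_cachep, q);
 */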
4820
4821#ifdef CONFIG_KGDB_KDB
4822#include <linux/kdb.h>
4823/*
4824 * kdb_send_sig - Allows kdb to send signals without exposing
4825 * signal internals.  This function checks if the required locks are
4826 * available before calling the main signal code, to avoid kdb
4827 * deadlocks.
4828 */
4829void kdb_send_sig(struct task_struct *t, int sig)
4830{
4831	static struct task_struct *kdb_prev_t;
4832	int new_t, ret;
4833	if (!spin_trylock(&t->sighand->siglock)) {
4834		kdb_printf("Can't do kill command now.\n"
4835			   "The sigmask lock is held somewhere else in "
4836			   "the kernel; try again later\n");
4837		return;
4838	}
4839	new_t = kdb_prev_t != t;
4840	kdb_prev_t = t;
4841	if (!task_is_running(t) && new_t) {
4842		spin_unlock(&t->sighand->siglock);
4843		kdb_printf("Process is not RUNNING; sending a signal from "
4844			   "kdb risks deadlock\n"
4845			   "on the run queue locks. "
4846			   "The signal has _not_ been sent.\n"
4847			   "Reissue the kill command if you want to risk "
4848			   "the deadlock.\n");
4849		return;
4850	}
4851	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4852	spin_unlock(&t->sighand->siglock);
4853	if (ret)
4854		kdb_printf("Failed to deliver signal %d to process %d.\n",
4855			   sig, t->pid);
4856	else
4857		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4858}
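
/*
 * This is the backend of kdb's "kill" command; from the kdb prompt
 * something like "kill -9 1234" should end up here with sig == 9 and
 * t == the task with PID 1234 (see the kdb documentation for the exact
 * syntax).
 */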
4859#endif	/* CONFIG_KGDB_KDB */