// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

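/*
 * Editor's note (illustrative, not in the original source): because the
 * first word is filtered through SYNCHRONOUS_MASK above, a pending
 * synchronous signal wins over a lower-numbered asynchronous one.  E.g.
 * with both SIGUSR1 (10) and SIGSEGV (11) pending and unblocked,
 * next_signal() returns SIGSEGV first, so the fault report is not
 * displaced by an unrelated signal.
 */
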
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for the given task (typically a kthread).
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

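/*
 * Editor's sketch (not in the original source): a typical user of
 * flush_signals() is a kernel thread that opts in to one signal and
 * discards everything still queued.  The function below is hypothetical;
 * allow_signal(), kthread_should_stop() and
 * schedule_timeout_interruptible() are existing kernel APIs.
 */
#if 0	/* example only */
static int example_kthread_fn(void *unused)
{
	allow_signal(SIGKILL);	/* accept SIGKILL, keep ignoring the rest */

	while (!kthread_should_stop()) {
		if (signal_pending(current)) {
			flush_signals(current);	/* drain the queue... */
			break;			/* ...and shut down on SIGKILL */
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif
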
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

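/*
 * Editor's sketch (not in the original source): dequeue_signal() must be
 * called with the siglock held.  A caller draining one unblocked signal
 * for the current task would look roughly like the hypothetical helper
 * below.
 */
#if 0	/* example only */
static int example_dequeue_one(kernel_siginfo_t *info)
{
	enum pid_type type;
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info, &type);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;	/* 0 if nothing deliverable was pending */
}
#endif
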
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the
 * next TRAP_STOP to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGVs etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
	enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand may already have been freed and even reused,
		 * but we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * Send signal info to all the members of a group.
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user 	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than they would appear in a 32bit pointer, so
 * userspace will not see the address it was expecting for its
 * completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

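/*
 * Editor's sketch (not in the original source): a caller completing I/O
 * for a possibly-32bit process from a 64bit kernel would follow the rule
 * above by picking the sigval member explicitly.  All variables here
 * (is_compat, user_ptr, user_ptr32, pid, cred) are hypothetical.
 */
#if 0	/* example only */
	sigval_t addr;

	if (is_compat)				/* recorded at submission time */
		addr.sival_int = (int)(long)user_ptr32;
	else
		addr.sival_ptr = user_ptr;

	kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
#endif
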
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

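/*
 * Editor's sketch (not in the original source): the priv argument selects
 * between SEND_SIG_NOINFO (0, siginfo filled in as if sent from user
 * space) and SEND_SIG_PRIV (non-zero, kernel-generated with SI_KERNEL).
 * The task pointer tsk below is hypothetical.
 */
#if 0	/* example only */
	send_sig(SIGPIPE, tsk, 0);	/* looks like a user-space kill() */
	send_sig(SIGKILL, tsk, 1);	/* kernel-generated, SI_KERNEL */
#endif
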
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

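/*
 * Editor's sketch (not in the original source): architecture fault
 * handlers are the typical callers of force_sig_fault().  On a non-ia64
 * configuration, a page-fault path that found no VMA might do something
 * like the hypothetical helper below.
 */
#if 0	/* example only */
static void example_bad_area(unsigned long address)
{
	/* Deliver a synchronous SIGSEGV carrying the faulting address */
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
}
#endif
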
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

1904int kill_pgrp(struct pid *pid, int sig, int priv)
1905{
1906	int ret;
1907
1908	read_lock(&tasklist_lock);
1909	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1910	read_unlock(&tasklist_lock);
1911
1912	return ret;
1913}
1914EXPORT_SYMBOL(kill_pgrp);
1915
1916int kill_pid(struct pid *pid, int sig, int priv)
1917{
1918	return kill_pid_info(sig, __si_special(priv), pid);
1919}
1920EXPORT_SYMBOL(kill_pid);
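
/*
 * Sketch of a typical in-kernel caller (hypothetical, modelled on the
 * tty hangup path): kernel-generated SIGHUP followed by SIGCONT to a
 * whole process group.
 */
static void example_hangup_pgrp(struct pid *pgrp)
{
	/* priv != 0: send as the kernel, bypassing permission checks */
	kill_pgrp(pgrp, SIGHUP, 1);
	kill_pgrp(pgrp, SIGCONT, 1);
}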
1921
1922/*
1923 * These functions support sending signals using preallocated sigqueue
1924 * structures.  This is needed "because realtime applications cannot
1925 * afford to lose notifications of asynchronous events, like timer
1926 * expirations or I/O completions".  In the case of POSIX Timers
1927 * we allocate the sigqueue structure from timer_create().  If this
1928 * allocation fails we are able to report the failure to the application
1929 * with an EAGAIN error.
1930 */
1931struct sigqueue *sigqueue_alloc(void)
1932{
1933	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1934}
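
/*
 * Minimal sketch of the preallocation pattern described above
 * (hypothetical caller, mirroring what the POSIX timer code does): the
 * allocation happens at setup time so it can fail with EAGAIN there,
 * and expiry-time delivery can then never fail for lack of memory.
 */
static struct sigqueue *example_q;	/* hypothetical timer state */

static int example_timer_create(void)
{
	example_q = sigqueue_alloc();
	if (!example_q)
		return -EAGAIN;		/* reportable to the application */
	example_q->info.si_signo = SIGALRM;
	example_q->info.si_code = SI_TIMER;
	return 0;
}

static void example_timer_expire(struct pid *target)
{
	/* uses the preallocated entry; see send_sigqueue() below */
	send_sigqueue(example_q, target, PIDTYPE_TGID);
}

static void example_timer_delete(void)
{
	sigqueue_free(example_q);
}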
1935
1936void sigqueue_free(struct sigqueue *q)
1937{
1938	unsigned long flags;
1939	spinlock_t *lock = &current->sighand->siglock;
1940
1941	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1942	/*
1943	 * We must hold ->siglock while testing q->list
1944	 * to serialize with collect_signal() or with
1945	 * __exit_signal()->flush_sigqueue().
1946	 */
1947	spin_lock_irqsave(lock, flags);
1948	q->flags &= ~SIGQUEUE_PREALLOC;
1949	/*
1950	 * If it is queued it will be freed when dequeued,
1951	 * like the "regular" sigqueue.
1952	 */
1953	if (!list_empty(&q->list))
1954		q = NULL;
1955	spin_unlock_irqrestore(lock, flags);
1956
1957	if (q)
1958		__sigqueue_free(q);
1959}
1960
1961int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1962{
1963	int sig = q->info.si_signo;
1964	struct sigpending *pending;
1965	struct task_struct *t;
1966	unsigned long flags;
1967	int ret, result;
1968
1969	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1970
1971	ret = -1;
1972	rcu_read_lock();
1973	t = pid_task(pid, type);
1974	if (!t || !likely(lock_task_sighand(t, &flags)))
1975		goto ret;
1976
1977	ret = 1; /* the signal is ignored */
1978	result = TRACE_SIGNAL_IGNORED;
1979	if (!prepare_signal(sig, t, false))
1980		goto out;
1981
1982	ret = 0;
1983	if (unlikely(!list_empty(&q->list))) {
1984		/*
1985		 * If an SI_TIMER entry is already queued, just increment
1986		 * the overrun count.
1987		 */
1988		BUG_ON(q->info.si_code != SI_TIMER);
1989		q->info.si_overrun++;
1990		result = TRACE_SIGNAL_ALREADY_PENDING;
1991		goto out;
1992	}
1993	q->info.si_overrun = 0;
1994
1995	signalfd_notify(t, sig);
1996	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1997	list_add_tail(&q->list, &pending->list);
1998	sigaddset(&pending->signal, sig);
1999	complete_signal(sig, t, type);
2000	result = TRACE_SIGNAL_DELIVERED;
2001out:
2002	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2003	unlock_task_sighand(t, &flags);
2004ret:
2005	rcu_read_unlock();
2006	return ret;
2007}
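
/*
 * Illustrative userspace sketch: because a second expiry only bumps
 * si_overrun on the already-queued entry (see the SI_TIMER case above),
 * a timer signal handler learns about coalesced expirations via
 * timer_getoverrun(2) rather than via extra signals.
 */
#include <signal.h>
#include <time.h>

static timer_t example_timer;	/* hypothetical, created elsewhere */

static void example_timer_sig(int sig, siginfo_t *info, void *uc)
{
	if (info->si_code == SI_TIMER) {
		/* expirations merged while the signal was still pending */
		int overruns = timer_getoverrun(example_timer);
		(void)overruns;
	}
}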
2008
2009static void do_notify_pidfd(struct task_struct *task)
2010{
2011	struct pid *pid;
2012
2013	WARN_ON(task->exit_state == 0);
2014	pid = task_pid(task);
2015	wake_up_all(&pid->wait_pidfd);
2016}
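
/*
 * Illustrative userspace sketch: the wake_up_all() above is what makes a
 * pidfd readable on exit, so polling a pidfd is a race-free way to wait
 * for process death.  Assumes a kernel with pidfd_open (5.3+).
 */
#include <poll.h>
#include <sys/syscall.h>
#include <unistd.h>

static int wait_for_process_exit(pid_t pid)
{
	struct pollfd pfd = { .events = POLLIN };

	pfd.fd = syscall(SYS_pidfd_open, pid, 0);
	if (pfd.fd < 0)
		return -1;
	/* returns once do_notify_pidfd() has woken the pidfd waiters */
	poll(&pfd, 1, -1);
	close(pfd.fd);
	return 0;
}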
2017
2018/*
2019 * Let a parent know about the death of a child.
2020 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2021 *
2022 * Returns true if our parent ignored us and so we've switched to
2023 * self-reaping.
2024 */
2025bool do_notify_parent(struct task_struct *tsk, int sig)
2026{
2027	struct kernel_siginfo info;
2028	unsigned long flags;
2029	struct sighand_struct *psig;
2030	bool autoreap = false;
2031	u64 utime, stime;
2032
2033	WARN_ON_ONCE(sig == -1);
2034
2035	/* do_notify_parent_cldstop should have been called instead.  */
2036	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2037
2038	WARN_ON_ONCE(!tsk->ptrace &&
2039	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2040
2041	/* Wake up all pidfd waiters */
2042	do_notify_pidfd(tsk);
2043
2044	if (sig != SIGCHLD) {
2045		/*
2046		 * This is only possible if parent == real_parent.
2047		 * Check if it has changed security domain.
2048		 */
2049		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2050			sig = SIGCHLD;
2051	}
2052
2053	clear_siginfo(&info);
2054	info.si_signo = sig;
2055	info.si_errno = 0;
2056	/*
2057	 * We are under tasklist_lock here so our parent is tied to
2058	 * us and cannot change.
2059	 *
2060	 * task_active_pid_ns will always return the same pid namespace
2061	 * until a task passes through release_task.
2062	 *
2063	 * write_lock() currently calls preempt_disable() which is the
2064	 * same as rcu_read_lock(), but according to Oleg it is not
2065	 * correct to rely on this.
2066	 */
2067	rcu_read_lock();
2068	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2069	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2070				       task_uid(tsk));
2071	rcu_read_unlock();
2072
2073	task_cputime(tsk, &utime, &stime);
2074	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2075	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2076
2077	info.si_status = tsk->exit_code & 0x7f;
2078	if (tsk->exit_code & 0x80)
2079		info.si_code = CLD_DUMPED;
2080	else if (tsk->exit_code & 0x7f)
2081		info.si_code = CLD_KILLED;
2082	else {
2083		info.si_code = CLD_EXITED;
2084		info.si_status = tsk->exit_code >> 8;
2085	}
2086
2087	psig = tsk->parent->sighand;
2088	spin_lock_irqsave(&psig->siglock, flags);
2089	if (!tsk->ptrace && sig == SIGCHLD &&
2090	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2091	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2092		/*
2093		 * We are exiting and our parent doesn't care.  POSIX.1
2094		 * defines special semantics for setting SIGCHLD to SIG_IGN
2095		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2096		 * automatically and not left for our parent's wait4 call.
2097		 * Rather than having the parent do it as a magic kind of
2098		 * signal handler, we just set this to tell do_exit that we
2099		 * can be cleaned up without becoming a zombie.  Note that
2100		 * we still call __wake_up_parent in this case, because a
2101		 * blocked sys_wait4 might now return -ECHILD.
2102		 *
2103		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2104		 * is implementation-defined: we do (if you don't want
2105		 * it, just use SIG_IGN instead).
2106		 */
2107		autoreap = true;
2108		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2109			sig = 0;
2110	}
2111	/*
2112	 * Send with __send_signal as si_pid and si_uid are in the
2113	 * parent's namespaces.
2114	 */
2115	if (valid_signal(sig) && sig)
2116		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2117	__wake_up_parent(tsk, tsk->parent);
2118	spin_unlock_irqrestore(&psig->siglock, flags);
2119
2120	return autoreap;
2121}
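
/*
 * Illustrative userspace sketch of the POSIX.1 semantics implemented
 * above: with SIGCHLD set to SIG_IGN, children are reaped automatically
 * and never become zombies; a blocked wait(2) fails with ECHILD instead.
 */
#include <signal.h>

static void autoreap_children(void)
{
	struct sigaction sa = { .sa_handler = SIG_IGN };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);
}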
2122
2123/**
2124 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2125 * @tsk: task reporting the state change
2126 * @for_ptracer: the notification is for ptracer
2127 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2128 *
2129 * Notify @tsk's parent that the stopped/continued state has changed.  If
2130 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2131 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2132 *
2133 * CONTEXT:
2134 * Must be called with tasklist_lock at least read locked.
2135 */
2136static void do_notify_parent_cldstop(struct task_struct *tsk,
2137				     bool for_ptracer, int why)
2138{
2139	struct kernel_siginfo info;
2140	unsigned long flags;
2141	struct task_struct *parent;
2142	struct sighand_struct *sighand;
2143	u64 utime, stime;
2144
2145	if (for_ptracer) {
2146		parent = tsk->parent;
2147	} else {
2148		tsk = tsk->group_leader;
2149		parent = tsk->real_parent;
2150	}
2151
2152	clear_siginfo(&info);
2153	info.si_signo = SIGCHLD;
2154	info.si_errno = 0;
2155	/*
2156	 * see comment in do_notify_parent() about the following 4 lines
2157	 */
2158	rcu_read_lock();
2159	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2160	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2161	rcu_read_unlock();
2162
2163	task_cputime(tsk, &utime, &stime);
2164	info.si_utime = nsec_to_clock_t(utime);
2165	info.si_stime = nsec_to_clock_t(stime);
2166
2167	info.si_code = why;
2168	switch (why) {
2169	case CLD_CONTINUED:
2170		info.si_status = SIGCONT;
2171		break;
2172	case CLD_STOPPED:
2173		info.si_status = tsk->signal->group_exit_code & 0x7f;
2174		break;
2175	case CLD_TRAPPED:
2176		info.si_status = tsk->exit_code & 0x7f;
2177		break;
2178	default:
2179		BUG();
2180	}
2181
2182	sighand = parent->sighand;
2183	spin_lock_irqsave(&sighand->siglock, flags);
2184	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2185	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2186		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2187	/*
2188	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2189	 */
2190	__wake_up_parent(tsk, parent);
2191	spin_unlock_irqrestore(&sighand->siglock, flags);
2192}
2193
2194/*
2195 * This must be called with current->sighand->siglock held.
2196 *
2197 * This should be the path for all ptrace stops.
2198 * We always set current->last_siginfo while stopped here.
2199 * That makes it a way to test a stopped process for
2200 * being ptrace-stopped vs being job-control-stopped.
2201 *
2202 * Returns the signal the ptracer requested the code resume
2203 * with.  If the code did not stop because the tracer is gone,
2204 * the stop signal remains unchanged.
2205 */
2206static int ptrace_stop(int exit_code, int why, unsigned long message,
2207		       kernel_siginfo_t *info)
2208	__releases(&current->sighand->siglock)
2209	__acquires(&current->sighand->siglock)
2210{
2211	bool gstop_done = false;
2212
2213	if (arch_ptrace_stop_needed()) {
2214		/*
2215		 * The arch code has something special to do before a
2216		 * ptrace stop.  This is allowed to block, e.g. for faults
2217		 * on user stack pages.  We can't keep the siglock while
2218		 * calling arch_ptrace_stop, so we must release it now.
2219		 * To preserve proper semantics, we must do this before
2220		 * any signal bookkeeping like checking group_stop_count.
2221		 */
2222		spin_unlock_irq(&current->sighand->siglock);
2223		arch_ptrace_stop();
2224		spin_lock_irq(&current->sighand->siglock);
2225	}
2226
2227	/*
2228	 * After this point ptrace_signal_wake_up or signal_wake_up
2229	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2230	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2231	 * signals here to prevent ptrace_stop sleeping in schedule.
2232	 */
2233	if (!current->ptrace || __fatal_signal_pending(current))
2234		return exit_code;
2235
2236	set_special_state(TASK_TRACED);
2237	current->jobctl |= JOBCTL_TRACED;
2238
2239	/*
2240	 * We're committing to trapping.  TRACED should be visible before
2241	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2242	 * Also, transition to TRACED and updates to ->jobctl should be
2243	 * atomic with respect to siglock and should be done after the arch
2244	 * hook as siglock is released and regrabbed across it.
2245	 *
2246	 *     TRACER				    TRACEE
2247	 *
2248	 *     ptrace_attach()
2249	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2250	 *     do_wait()
2251	 *       set_current_state()                smp_wmb();
2252	 *       ptrace_do_wait()
2253	 *         wait_task_stopped()
2254	 *           task_stopped_code()
2255	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2256	 */
2257	smp_wmb();
2258
2259	current->ptrace_message = message;
2260	current->last_siginfo = info;
2261	current->exit_code = exit_code;
2262
2263	/*
2264	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2265	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2266	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2267	 * could be clear now.  We act as if SIGCONT is received after
2268	 * TASK_TRACED is entered - ignore it.
2269	 */
2270	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2271		gstop_done = task_participate_group_stop(current);
2272
2273	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2274	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2275	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2276		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2277
2278	/* entering a trap, clear TRAPPING */
2279	task_clear_jobctl_trapping(current);
2280
2281	spin_unlock_irq(&current->sighand->siglock);
2282	read_lock(&tasklist_lock);
2283	/*
2284	 * Notify parents of the stop.
2285	 *
2286	 * While ptraced, there are two parents - the ptracer and
2287	 * the real_parent of the group_leader.  The ptracer should
2288	 * know about every stop while the real parent is only
2289	 * interested in the completion of group stop.  The states
2290	 * for the two don't interact with each other.  Notify
2291	 * separately unless they're gonna be duplicates.
2292	 */
2293	if (current->ptrace)
2294		do_notify_parent_cldstop(current, true, why);
2295	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2296		do_notify_parent_cldstop(current, false, why);
2297
2298	/*
2299	 * Don't want to allow preemption here, because
2300	 * sys_ptrace() needs this task to be inactive.
2301	 *
2302	 * XXX: implement read_unlock_no_resched().
2303	 */
2304	preempt_disable();
2305	read_unlock(&tasklist_lock);
2306	cgroup_enter_frozen();
2307	preempt_enable_no_resched();
2308	schedule();
2309	cgroup_leave_frozen(true);
2310
2311	/*
2312	 * We are back.  Now reacquire the siglock before touching
2313	 * last_siginfo, so that we are sure to have synchronized with
2314	 * any signal-sending on another CPU that wants to examine it.
2315	 */
2316	spin_lock_irq(&current->sighand->siglock);
2317	exit_code = current->exit_code;
2318	current->last_siginfo = NULL;
2319	current->ptrace_message = 0;
2320	current->exit_code = 0;
2321
2322	/* LISTENING can be set only during STOP traps, clear it */
2323	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2324
2325	/*
2326	 * Queued signals ignored us while we were stopped for tracing.
2327	 * So check for any that we should take before resuming user mode.
2328	 * This sets TIF_SIGPENDING, but never clears it.
2329	 */
2330	recalc_sigpending_tsk(current);
2331	return exit_code;
2332}
2333
2334static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2335{
2336	kernel_siginfo_t info;
2337
2338	clear_siginfo(&info);
2339	info.si_signo = signr;
2340	info.si_code = exit_code;
2341	info.si_pid = task_pid_vnr(current);
2342	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2343
2344	/* Let the debugger run.  */
2345	return ptrace_stop(exit_code, why, message, &info);
2346}
2347
2348int ptrace_notify(int exit_code, unsigned long message)
2349{
2350	int signr;
2351
2352	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2353	if (unlikely(task_work_pending(current)))
2354		task_work_run();
2355
2356	spin_lock_irq(&current->sighand->siglock);
2357	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2358	spin_unlock_irq(&current->sighand->siglock);
2359	return signr;
2360}
2361
2362/**
2363 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2364 * @signr: signr causing group stop if initiating
2365 *
2366 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2367 * and participate in it.  If already set, participate in the existing
2368 * group stop.  If participated in a group stop (and thus slept), %true is
2369 * returned with siglock released.
2370 *
2371 * If ptraced, this function doesn't handle stop itself.  Instead,
2372 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2373 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2374 * place afterwards.
2375 *
2376 * CONTEXT:
2377 * Must be called with @current->sighand->siglock held, which is released
2378 * on %true return.
2379 *
2380 * RETURNS:
2381 * %false if group stop is already cancelled or ptrace trap is scheduled.
2382 * %true if participated in group stop.
2383 */
2384static bool do_signal_stop(int signr)
2385	__releases(&current->sighand->siglock)
2386{
2387	struct signal_struct *sig = current->signal;
2388
2389	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2390		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2391		struct task_struct *t;
2392
2393		/* signr will be recorded in task->jobctl for retries */
2394		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2395
2396		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2397		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2398		    unlikely(sig->group_exec_task))
2399			return false;
2400		/*
2401		 * There is no group stop already in progress.  We must
2402		 * initiate one now.
2403		 *
2404		 * While ptraced, a task may be resumed while group stop is
2405		 * still in effect and then receive a stop signal and
2406		 * initiate another group stop.  This deviates from the
2407		 * usual behavior as two consecutive stop signals can't
2408		 * cause two group stops when !ptraced.  That is why we
2409		 * also check !task_is_stopped(t) below.
2410		 *
2411		 * The condition can be distinguished by testing whether
2412		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2413		 * group_exit_code in such case.
2414		 *
2415		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2416		 * an intervening stop signal is required to cause two
2417		 * continued events regardless of ptrace.
2418		 */
2419		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2420			sig->group_exit_code = signr;
2421
2422		sig->group_stop_count = 0;
2423
2424		if (task_set_jobctl_pending(current, signr | gstop))
2425			sig->group_stop_count++;
2426
2427		t = current;
2428		while_each_thread(current, t) {
2429			/*
2430			 * Setting state to TASK_STOPPED for a group
2431			 * stop is always done with the siglock held,
2432			 * so this check has no races.
2433			 */
2434			if (!task_is_stopped(t) &&
2435			    task_set_jobctl_pending(t, signr | gstop)) {
2436				sig->group_stop_count++;
2437				if (likely(!(t->ptrace & PT_SEIZED)))
2438					signal_wake_up(t, 0);
2439				else
2440					ptrace_trap_notify(t);
2441			}
2442		}
2443	}
2444
2445	if (likely(!current->ptrace)) {
2446		int notify = 0;
2447
2448		/*
2449		 * If there are no other threads in the group, or if there
2450		 * is a group stop in progress and we are the last to stop,
2451		 * report to the parent.
2452		 */
2453		if (task_participate_group_stop(current))
2454			notify = CLD_STOPPED;
2455
2456		current->jobctl |= JOBCTL_STOPPED;
2457		set_special_state(TASK_STOPPED);
2458		spin_unlock_irq(&current->sighand->siglock);
2459
2460		/*
2461		 * Notify the parent of the group stop completion.  Because
2462		 * we're not holding either the siglock or tasklist_lock
2463		 * here, ptracer may attach in between; however, this is for
2464		 * group stop and should always be delivered to the real
2465		 * parent of the group leader.  The new ptracer will get
2466		 * its notification when this task transitions into
2467		 * TASK_TRACED.
2468		 */
2469		if (notify) {
2470			read_lock(&tasklist_lock);
2471			do_notify_parent_cldstop(current, false, notify);
2472			read_unlock(&tasklist_lock);
2473		}
2474
2475		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2476		cgroup_enter_frozen();
2477		schedule();
2478		return true;
2479	} else {
2480		/*
2481		 * While ptraced, group stop is handled by STOP trap.
2482		 * Schedule it and let the caller deal with it.
2483		 */
2484		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2485		return false;
2486	}
2487}
2488
2489/**
2490 * do_jobctl_trap - take care of ptrace jobctl traps
2491 *
2492 * When PT_SEIZED, it's used for both group stop and explicit
2493 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2494 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2495 * the stop signal; otherwise, %SIGTRAP.
2496 *
2497 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2498 * number as exit_code and no siginfo.
2499 *
2500 * CONTEXT:
2501 * Must be called with @current->sighand->siglock held, which may be
2502 * released and re-acquired before returning with intervening sleep.
2503 */
2504static void do_jobctl_trap(void)
2505{
2506	struct signal_struct *signal = current->signal;
2507	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2508
2509	if (current->ptrace & PT_SEIZED) {
2510		if (!signal->group_stop_count &&
2511		    !(signal->flags & SIGNAL_STOP_STOPPED))
2512			signr = SIGTRAP;
2513		WARN_ON_ONCE(!signr);
2514		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2515				 CLD_STOPPED, 0);
2516	} else {
2517		WARN_ON_ONCE(!signr);
2518		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2519	}
2520}
2521
2522/**
2523 * do_freezer_trap - handle the freezer jobctl trap
2524 *
2525 * Puts the task into the frozen state, unless the task is about to quit;
2526 * in that case it drops JOBCTL_TRAP_FREEZE.
2527 *
2528 * CONTEXT:
2529 * Must be called with @current->sighand->siglock held,
2530 * which is always released before returning.
2531 */
2532static void do_freezer_trap(void)
2533	__releases(&current->sighand->siglock)
2534{
2535	/*
2536	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2537	 * let's make another loop to give it a chance to be handled.
2538	 * In any case, we'll return.
2539	 */
2540	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2541	     JOBCTL_TRAP_FREEZE) {
2542		spin_unlock_irq(&current->sighand->siglock);
2543		return;
2544	}
2545
2546	/*
2547	 * Now we're sure that there is no pending fatal signal and no
2548	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2549	 * immediately (if there is a non-fatal signal pending), and
2550	 * put the task into sleep.
2551	 */
2552	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2553	clear_thread_flag(TIF_SIGPENDING);
2554	spin_unlock_irq(&current->sighand->siglock);
2555	cgroup_enter_frozen();
2556	schedule();
2557}
2558
2559static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2560{
2561	/*
2562	 * We do not check sig_kernel_stop(signr) but set this marker
2563	 * unconditionally because we do not know whether debugger will
2564	 * change signr. This flag has no meaning unless we are going
2565	 * to stop after return from ptrace_stop(). In this case it will
2566	 * be checked in do_signal_stop(), we should only stop if it was
2567	 * not cleared by SIGCONT while we were sleeping. See also the
2568	 * comment in dequeue_signal().
2569	 */
2570	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2571	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2572
2573	/* We're back.  Did the debugger cancel the sig?  */
2574	if (signr == 0)
2575		return signr;
2576
2577	/*
2578	 * Update the siginfo structure if the signal has
2579	 * changed.  If the debugger wanted something
2580	 * specific in the siginfo structure then it should
2581	 * have updated *info via PTRACE_SETSIGINFO.
2582	 */
2583	if (signr != info->si_signo) {
2584		clear_siginfo(info);
2585		info->si_signo = signr;
2586		info->si_errno = 0;
2587		info->si_code = SI_USER;
2588		rcu_read_lock();
2589		info->si_pid = task_pid_vnr(current->parent);
2590		info->si_uid = from_kuid_munged(current_user_ns(),
2591						task_uid(current->parent));
2592		rcu_read_unlock();
2593	}
2594
2595	/* If the (new) signal is now blocked, requeue it.  */
2596	if (sigismember(&current->blocked, signr) ||
2597	    fatal_signal_pending(current)) {
2598		send_signal_locked(signr, info, current, type);
2599		signr = 0;
2600	}
2601
2602	return signr;
2603}
2604
2605static void hide_si_addr_tag_bits(struct ksignal *ksig)
2606{
2607	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2608	case SIL_FAULT:
2609	case SIL_FAULT_TRAPNO:
2610	case SIL_FAULT_MCEERR:
2611	case SIL_FAULT_BNDERR:
2612	case SIL_FAULT_PKUERR:
2613	case SIL_FAULT_PERF_EVENT:
2614		ksig->info.si_addr = arch_untagged_si_addr(
2615			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2616		break;
2617	case SIL_KILL:
2618	case SIL_TIMER:
2619	case SIL_POLL:
2620	case SIL_CHLD:
2621	case SIL_RT:
2622	case SIL_SYS:
2623		break;
2624	}
2625}
2626
2627bool get_signal(struct ksignal *ksig)
2628{
2629	struct sighand_struct *sighand = current->sighand;
2630	struct signal_struct *signal = current->signal;
2631	int signr;
2632
2633	clear_notify_signal();
2634	if (unlikely(task_work_pending(current)))
2635		task_work_run();
2636
2637	if (!task_sigpending(current))
2638		return false;
2639
2640	if (unlikely(uprobe_deny_signal()))
2641		return false;
2642
2643	/*
2644	 * Do this once, we can't return to user-mode if freezing() == T.
2645	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2646	 * thus do not need another check after return.
2647	 */
2648	try_to_freeze();
2649
2650relock:
2651	spin_lock_irq(&sighand->siglock);
2652
2653	/*
2654	 * Every stopped thread goes here after wakeup. Check to see if
2655	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2656	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2657	 */
2658	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2659		int why;
2660
2661		if (signal->flags & SIGNAL_CLD_CONTINUED)
2662			why = CLD_CONTINUED;
2663		else
2664			why = CLD_STOPPED;
2665
2666		signal->flags &= ~SIGNAL_CLD_MASK;
2667
2668		spin_unlock_irq(&sighand->siglock);
2669
2670		/*
2671		 * Notify the parent that we're continuing.  This event is
2672		 * always per-process and doesn't make a whole lot of sense
2673		 * for ptracers, who shouldn't consume the state via
2674		 * wait(2) either, but, for backward compatibility, notify
2675		 * the ptracer of the group leader too unless it's gonna be
2676		 * a duplicate.
2677		 */
2678		read_lock(&tasklist_lock);
2679		do_notify_parent_cldstop(current, false, why);
2680
2681		if (ptrace_reparented(current->group_leader))
2682			do_notify_parent_cldstop(current->group_leader,
2683						true, why);
2684		read_unlock(&tasklist_lock);
2685
2686		goto relock;
2687	}
2688
2689	for (;;) {
2690		struct k_sigaction *ka;
2691		enum pid_type type;
2692
2693		/* Has this task already been marked for death? */
2694		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2695		     signal->group_exec_task) {
2696			clear_siginfo(&ksig->info);
2697			ksig->info.si_signo = signr = SIGKILL;
2698			sigdelset(&current->pending.signal, SIGKILL);
2699			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2700				&sighand->action[SIGKILL - 1]);
2701			recalc_sigpending();
2702			goto fatal;
2703		}
2704
2705		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2706		    do_signal_stop(0))
2707			goto relock;
2708
2709		if (unlikely(current->jobctl &
2710			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2711			if (current->jobctl & JOBCTL_TRAP_MASK) {
2712				do_jobctl_trap();
2713				spin_unlock_irq(&sighand->siglock);
2714			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2715				do_freezer_trap();
2716
2717			goto relock;
2718		}
2719
2720		/*
2721		 * If the task is leaving the frozen state, let's update
2722		 * cgroup counters and reset the frozen bit.
2723		 */
2724		if (unlikely(cgroup_task_frozen(current))) {
2725			spin_unlock_irq(&sighand->siglock);
2726			cgroup_leave_frozen(false);
2727			goto relock;
2728		}
2729
2730		/*
2731		 * Signals generated by the execution of an instruction
2732		 * need to be delivered before any other pending signals
2733		 * so that the instruction pointer in the signal stack
2734		 * frame points to the faulting instruction.
2735		 */
2736		type = PIDTYPE_PID;
2737		signr = dequeue_synchronous_signal(&ksig->info);
2738		if (!signr)
2739			signr = dequeue_signal(current, &current->blocked,
2740					       &ksig->info, &type);
2741
2742		if (!signr)
2743			break; /* will return 0 */
2744
2745		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2746		    !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2747			signr = ptrace_signal(signr, &ksig->info, type);
2748			if (!signr)
2749				continue;
2750		}
2751
2752		ka = &sighand->action[signr-1];
2753
2754		/* Trace actually delivered signals. */
2755		trace_signal_deliver(signr, &ksig->info, ka);
2756
2757		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2758			continue;
2759		if (ka->sa.sa_handler != SIG_DFL) {
2760			/* Run the handler.  */
2761			ksig->ka = *ka;
2762
2763			if (ka->sa.sa_flags & SA_ONESHOT)
2764				ka->sa.sa_handler = SIG_DFL;
2765
2766			break; /* will return non-zero "signr" value */
2767		}
2768
2769		/*
2770		 * Now we are doing the default action for this signal.
2771		 */
2772		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2773			continue;
2774
2775		/*
2776		 * Global init gets no signals it doesn't want.
2777		 * Container-init gets no signals it doesn't want from same
2778		 * container.
2779		 *
2780		 * Note that if global/container-init sees a sig_kernel_only()
2781		 * signal here, the signal must have been generated internally
2782		 * or must have come from an ancestor namespace. In either
2783		 * case, the signal cannot be dropped.
2784		 */
2785		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2786				!sig_kernel_only(signr))
2787			continue;
2788
2789		if (sig_kernel_stop(signr)) {
2790			/*
2791			 * The default action is to stop all threads in
2792			 * the thread group.  The job control signals
2793			 * do nothing in an orphaned pgrp, but SIGSTOP
2794			 * always works.  Note that siglock needs to be
2795			 * dropped during the call to is_orphaned_pgrp()
2796			 * because of lock ordering with tasklist_lock.
2797			 * This allows an intervening SIGCONT to be posted.
2798			 * We need to check for that and bail out if necessary.
2799			 */
2800			if (signr != SIGSTOP) {
2801				spin_unlock_irq(&sighand->siglock);
2802
2803				/* signals can be posted during this window */
2804
2805				if (is_current_pgrp_orphaned())
2806					goto relock;
2807
2808				spin_lock_irq(&sighand->siglock);
2809			}
2810
2811			if (likely(do_signal_stop(ksig->info.si_signo))) {
2812				/* It released the siglock.  */
2813				goto relock;
2814			}
2815
2816			/*
2817			 * We didn't actually stop, due to a race
2818			 * with SIGCONT or something like that.
2819			 */
2820			continue;
2821		}
2822
2823	fatal:
2824		spin_unlock_irq(&sighand->siglock);
2825		if (unlikely(cgroup_task_frozen(current)))
2826			cgroup_leave_frozen(true);
2827
2828		/*
2829		 * Anything else is fatal, maybe with a core dump.
2830		 */
2831		current->flags |= PF_SIGNALED;
2832
2833		if (sig_kernel_coredump(signr)) {
2834			if (print_fatal_signals)
2835				print_fatal_signal(ksig->info.si_signo);
2836			proc_coredump_connector(current);
2837			/*
2838			 * If it was able to dump core, this kills all
2839			 * other threads in the group and synchronizes with
2840			 * their demise.  If we lost the race with another
2841			 * thread getting here, it set group_exit_code
2842			 * first and our do_group_exit call below will use
2843			 * that value and ignore the one we pass it.
2844			 */
2845			do_coredump(&ksig->info);
2846		}
2847
2848		/*
2849		 * PF_IO_WORKER threads will catch and exit on fatal signals
2850		 * themselves. They have cleanup that must be performed, so
2851		 * we cannot call do_exit() on their behalf.
2852		 */
2853		if (current->flags & PF_IO_WORKER)
2854			goto out;
2855
2856		/*
2857		 * Death signals, no core dump.
2858		 */
2859		do_group_exit(ksig->info.si_signo);
2860		/* NOTREACHED */
2861	}
2862	spin_unlock_irq(&sighand->siglock);
2863out:
2864	ksig->sig = signr;
2865
2866	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2867		hide_si_addr_tag_bits(ksig);
2868
2869	return ksig->sig > 0;
2870}
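
/*
 * Sketch of how an architecture consumes get_signal() (hypothetical,
 * mirroring the shape of arch/ * /kernel/signal.c): if a signal with a
 * handler is pending, set up the user frame and report the result.
 */
static void example_arch_do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* arch-specific user frame setup would go here */
		int failed = 0;	/* stand-in for the frame-setup result */

		signal_setup_done(failed, &ksig, 0);
	}
}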
2871
2872/**
2873 * signal_delivered - called after signal delivery to update blocked signals
2874 * @ksig:		kernel signal struct
2875 * @stepping:		nonzero if debugger single-step or block-step in use
2876 *
2877 * This function should be called when a signal has successfully been
2878 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2879 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2880 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
2881 */
2882static void signal_delivered(struct ksignal *ksig, int stepping)
2883{
2884	sigset_t blocked;
2885
2886	/* A signal was successfully delivered, and the
2887	   saved sigmask was stored on the signal frame,
2888	   and will be restored by sigreturn.  So we can
2889	   simply clear the restore sigmask flag.  */
2890	clear_restore_sigmask();
2891
2892	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2893	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2894		sigaddset(&blocked, ksig->sig);
2895	set_current_blocked(&blocked);
2896	if (current->sas_ss_flags & SS_AUTODISARM)
2897		sas_ss_reset(current);
2898	if (stepping)
2899		ptrace_notify(SIGTRAP, 0);
2900}
2901
2902void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2903{
2904	if (failed)
2905		force_sigsegv(ksig->sig);
2906	else
2907		signal_delivered(ksig, stepping);
2908}
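
/*
 * Illustrative userspace view of the rule signal_delivered() implements:
 * while the handler runs, everything in sa_mask is blocked, and so is
 * the signal itself unless SA_NODEFER was requested.
 */
#include <signal.h>

static void install_example_handler(void (*fn)(int))
{
	struct sigaction sa = { .sa_handler = fn };

	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGUSR2);	/* blocked while fn() runs */
	sa.sa_flags = 0;	/* no SA_NODEFER: SIGUSR1 blocks itself too */
	sigaction(SIGUSR1, &sa, NULL);
}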
2909
2910/*
2911 * It could be that complete_signal() picked us to notify about the
2912 * group-wide signal. Other threads should be notified now to take
2913 * the shared signals in @which since we will not.
2914 */
2915static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2916{
2917	sigset_t retarget;
2918	struct task_struct *t;
2919
2920	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2921	if (sigisemptyset(&retarget))
2922		return;
2923
2924	t = tsk;
2925	while_each_thread(tsk, t) {
2926		if (t->flags & PF_EXITING)
2927			continue;
2928
2929		if (!has_pending_signals(&retarget, &t->blocked))
2930			continue;
2931		/* Remove the signals this thread can handle. */
2932		sigandsets(&retarget, &retarget, &t->blocked);
2933
2934		if (!task_sigpending(t))
2935			signal_wake_up(t, 0);
2936
2937		if (sigisemptyset(&retarget))
2938			break;
2939	}
2940}
2941
2942void exit_signals(struct task_struct *tsk)
2943{
2944	int group_stop = 0;
2945	sigset_t unblocked;
2946
2947	/*
2948	 * @tsk is about to have PF_EXITING set - lock out users which
2949	 * expect stable threadgroup.
2950	 */
2951	cgroup_threadgroup_change_begin(tsk);
2952
2953	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2954		tsk->flags |= PF_EXITING;
2955		cgroup_threadgroup_change_end(tsk);
2956		return;
2957	}
2958
2959	spin_lock_irq(&tsk->sighand->siglock);
2960	/*
2961	 * From now this task is not visible for group-wide signals,
2962	 * see wants_signal(), do_signal_stop().
2963	 */
2964	tsk->flags |= PF_EXITING;
2965
2966	cgroup_threadgroup_change_end(tsk);
2967
2968	if (!task_sigpending(tsk))
2969		goto out;
2970
2971	unblocked = tsk->blocked;
2972	signotset(&unblocked);
2973	retarget_shared_pending(tsk, &unblocked);
2974
2975	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2976	    task_participate_group_stop(tsk))
2977		group_stop = CLD_STOPPED;
2978out:
2979	spin_unlock_irq(&tsk->sighand->siglock);
2980
2981	/*
2982	 * If group stop has completed, deliver the notification.  This
2983	 * should always go to the real parent of the group leader.
2984	 */
2985	if (unlikely(group_stop)) {
2986		read_lock(&tasklist_lock);
2987		do_notify_parent_cldstop(tsk, false, group_stop);
2988		read_unlock(&tasklist_lock);
2989	}
2990}
2991
2992/*
2993 * System call entry points.
2994 */
2995
2996/**
2997 *  sys_restart_syscall - restart a system call
2998 */
2999SYSCALL_DEFINE0(restart_syscall)
3000{
3001	struct restart_block *restart = &current->restart_block;
3002	return restart->fn(restart);
3003}
3004
3005long do_no_restart_syscall(struct restart_block *param)
3006{
3007	return -EINTR;
3008}
3009
3010static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3011{
3012	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3013		sigset_t newblocked;
3014		/* A set of now blocked but previously unblocked signals. */
3015		sigandnsets(&newblocked, newset, &current->blocked);
3016		retarget_shared_pending(tsk, &newblocked);
3017	}
3018	tsk->blocked = *newset;
3019	recalc_sigpending();
3020}
3021
3022/**
3023 * set_current_blocked - change current->blocked mask
3024 * @newset: new mask
3025 *
3026 * It is wrong to change ->blocked directly, this helper should be used
3027 * to ensure the process can't miss a shared signal we are going to block.
3028 */
3029void set_current_blocked(sigset_t *newset)
3030{
3031	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3032	__set_current_blocked(newset);
3033}
3034
3035void __set_current_blocked(const sigset_t *newset)
3036{
3037	struct task_struct *tsk = current;
3038
3039	/*
3040	 * In case the signal mask hasn't changed, there is nothing we need
3041	 * to do. The current->blocked shouldn't be modified by other task.
3042	 */
3043	if (sigequalsets(&tsk->blocked, newset))
3044		return;
3045
3046	spin_lock_irq(&tsk->sighand->siglock);
3047	__set_task_blocked(tsk, newset);
3048	spin_unlock_irq(&tsk->sighand->siglock);
3049}
3050
3051/*
3052 * This is also useful for kernel threads that want to temporarily
3053 * (or permanently) block certain signals.
3054 *
3055 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3056 * interface happily blocks "unblockable" signals like SIGKILL
3057 * and friends.
3058 */
3059int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3060{
3061	struct task_struct *tsk = current;
3062	sigset_t newset;
3063
3064	/* Lockless, only current can change ->blocked, never from irq */
3065	if (oldset)
3066		*oldset = tsk->blocked;
3067
3068	switch (how) {
3069	case SIG_BLOCK:
3070		sigorsets(&newset, &tsk->blocked, set);
3071		break;
3072	case SIG_UNBLOCK:
3073		sigandnsets(&newset, &tsk->blocked, set);
3074		break;
3075	case SIG_SETMASK:
3076		newset = *set;
3077		break;
3078	default:
3079		return -EINVAL;
3080	}
3081
3082	__set_current_blocked(&newset);
3083	return 0;
3084}
3085EXPORT_SYMBOL(sigprocmask);
3086
3087/*
3088 * This API helps set app-provided sigmasks.
3089 *
3090 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3091 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3092 *
3093 * Note that it does set_restore_sigmask() in advance, so it must be always
3094 * paired with restore_saved_sigmask_unless() before return from syscall.
3095 */
3096int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3097{
3098	sigset_t kmask;
3099
3100	if (!umask)
3101		return 0;
3102	if (sigsetsize != sizeof(sigset_t))
3103		return -EINVAL;
3104	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3105		return -EFAULT;
3106
3107	set_restore_sigmask();
3108	current->saved_sigmask = current->blocked;
3109	set_current_blocked(&kmask);
3110
3111	return 0;
3112}
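
/*
 * Minimal sketch of the required pairing (hypothetical syscall body,
 * following the ppoll/pselect pattern): restore the saved mask on the
 * way out unless the wait was interrupted, in which case the signal
 * must be delivered with the caller-supplied mask still in place and
 * the restore is left to the signal delivery path.
 */
static long example_pwait(const sigset_t __user *umask, size_t sigsetsize)
{
	long ret;

	ret = set_user_sigmask(umask, sigsetsize);
	if (ret)
		return ret;

	ret = -EINTR;	/* stand-in for the actual interruptible wait */

	restore_saved_sigmask_unless(ret == -EINTR);
	return ret;
}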
3113
3114#ifdef CONFIG_COMPAT
3115int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3116			    size_t sigsetsize)
3117{
3118	sigset_t kmask;
3119
3120	if (!umask)
3121		return 0;
3122	if (sigsetsize != sizeof(compat_sigset_t))
3123		return -EINVAL;
3124	if (get_compat_sigset(&kmask, umask))
3125		return -EFAULT;
3126
3127	set_restore_sigmask();
3128	current->saved_sigmask = current->blocked;
3129	set_current_blocked(&kmask);
3130
3131	return 0;
3132}
3133#endif
3134
3135/**
3136 *  sys_rt_sigprocmask - change the list of currently blocked signals
3137 *  @how: whether to add, remove, or set signals
3138 *  @nset: new signal set to apply according to @how, or NULL
3139 *  @oset: previous value of signal mask if non-null
3140 *  @sigsetsize: size of sigset_t type
3141 */
3142SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3143		sigset_t __user *, oset, size_t, sigsetsize)
3144{
3145	sigset_t old_set, new_set;
3146	int error;
3147
3148	/* XXX: Don't preclude handling different sized sigset_t's.  */
3149	if (sigsetsize != sizeof(sigset_t))
3150		return -EINVAL;
3151
3152	old_set = current->blocked;
3153
3154	if (nset) {
3155		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3156			return -EFAULT;
3157		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3158
3159		error = sigprocmask(how, &new_set, NULL);
3160		if (error)
3161			return error;
3162	}
3163
3164	if (oset) {
3165		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3166			return -EFAULT;
3167	}
3168
3169	return 0;
3170}
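
/*
 * Illustrative userspace sketch: glibc's sigprocmask(2) ends up in the
 * syscall above.  The classic pattern blocks a signal around a critical
 * region and then restores the previous mask.
 */
#include <signal.h>

static void with_sigint_blocked(void (*critical)(void))
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigprocmask(SIG_BLOCK, &set, &old);	/* -> rt_sigprocmask() */
	critical();
	sigprocmask(SIG_SETMASK, &old, NULL);
}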
3171
3172#ifdef CONFIG_COMPAT
3173COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3174		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3175{
3176	sigset_t old_set = current->blocked;
3177
3178	/* XXX: Don't preclude handling different sized sigset_t's.  */
3179	if (sigsetsize != sizeof(sigset_t))
3180		return -EINVAL;
3181
3182	if (nset) {
3183		sigset_t new_set;
3184		int error;
3185		if (get_compat_sigset(&new_set, nset))
3186			return -EFAULT;
3187		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3188
3189		error = sigprocmask(how, &new_set, NULL);
3190		if (error)
3191			return error;
3192	}
3193	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3194}
3195#endif
3196
3197static void do_sigpending(sigset_t *set)
3198{
3199	spin_lock_irq(&current->sighand->siglock);
3200	sigorsets(set, &current->pending.signal,
3201		  &current->signal->shared_pending.signal);
3202	spin_unlock_irq(&current->sighand->siglock);
3203
3204	/* Outside the lock because only this thread touches it.  */
3205	sigandsets(set, &current->blocked, set);
3206}
3207
3208/**
3209 *  sys_rt_sigpending - examine a pending signal that has been raised
3210 *			while blocked
3211 *  @uset: stores pending signals
3212 *  @sigsetsize: size of sigset_t type or larger
3213 */
3214SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3215{
3216	sigset_t set;
3217
3218	if (sigsetsize > sizeof(*uset))
3219		return -EINVAL;
3220
3221	do_sigpending(&set);
3222
3223	if (copy_to_user(uset, &set, sigsetsize))
3224		return -EFAULT;
3225
3226	return 0;
3227}
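
/*
 * Illustrative userspace sketch: as do_sigpending() above shows, only
 * signals that are both pending and blocked are reported, which is what
 * sigpending(2) returns after a blocked signal has been raised.
 */
#include <signal.h>

static int is_sigusr1_pending(void)
{
	sigset_t pending;

	sigpending(&pending);	/* -> rt_sigpending() */
	return sigismember(&pending, SIGUSR1);
}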
3228
3229#ifdef CONFIG_COMPAT
3230COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3231		compat_size_t, sigsetsize)
3232{
3233	sigset_t set;
3234
3235	if (sigsetsize > sizeof(*uset))
3236		return -EINVAL;
3237
3238	do_sigpending(&set);
3239
3240	return put_compat_sigset(uset, &set, sigsetsize);
3241}
3242#endif
3243
3244static const struct {
3245	unsigned char limit, layout;
3246} sig_sicodes[] = {
3247	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3248	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3249	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3250	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3251	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3252#if defined(SIGEMT)
3253	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3254#endif
3255	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3256	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3257	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3258};
3259
3260static bool known_siginfo_layout(unsigned sig, int si_code)
3261{
3262	if (si_code == SI_KERNEL)
3263		return true;
3264	else if (si_code > SI_USER) {
3265		if (sig_specific_sicodes(sig)) {
3266			if (si_code <= sig_sicodes[sig].limit)
3267				return true;
3268		}
3269		else if (si_code <= NSIGPOLL)
3270			return true;
3271	}
3272	else if (si_code >= SI_DETHREAD)
3273		return true;
3274	else if (si_code == SI_ASYNCNL)
3275		return true;
3276	return false;
3277}
3278
3279enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3280{
3281	enum siginfo_layout layout = SIL_KILL;
3282	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3283		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3284		    (si_code <= sig_sicodes[sig].limit)) {
3285			layout = sig_sicodes[sig].layout;
3286			/* Handle the exceptions */
3287			if ((sig == SIGBUS) &&
3288			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3289				layout = SIL_FAULT_MCEERR;
3290			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3291				layout = SIL_FAULT_BNDERR;
3292#ifdef SEGV_PKUERR
3293			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3294				layout = SIL_FAULT_PKUERR;
3295#endif
3296			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3297				layout = SIL_FAULT_PERF_EVENT;
3298			else if (IS_ENABLED(CONFIG_SPARC) &&
3299				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3300				layout = SIL_FAULT_TRAPNO;
3301			else if (IS_ENABLED(CONFIG_ALPHA) &&
3302				 ((sig == SIGFPE) ||
3303				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3304				layout = SIL_FAULT_TRAPNO;
3305		}
3306		else if (si_code <= NSIGPOLL)
3307			layout = SIL_POLL;
3308	} else {
3309		if (si_code == SI_TIMER)
3310			layout = SIL_TIMER;
3311		else if (si_code == SI_SIGIO)
3312			layout = SIL_POLL;
3313		else if (si_code < 0)
3314			layout = SIL_RT;
3315	}
3316	return layout;
3317}
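
/*
 * Sketch of a hypothetical consumer of the classification above: the
 * returned layout tells which union members of siginfo are valid, for
 * example whether si_addr may be read at all.
 */
static bool example_layout_has_addr(int sig, int si_code)
{
	switch (siginfo_layout(sig, si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
		return true;	/* si_addr is meaningful */
	default:
		return false;
	}
}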
3318
3319static inline char __user *si_expansion(const siginfo_t __user *info)
3320{
3321	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3322}
3323
3324int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3325{
3326	char __user *expansion = si_expansion(to);
3327	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3328		return -EFAULT;
3329	if (clear_user(expansion, SI_EXPANSION_SIZE))
3330		return -EFAULT;
3331	return 0;
3332}
3333
3334static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3335				       const siginfo_t __user *from)
3336{
3337	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3338		char __user *expansion = si_expansion(from);
3339		char buf[SI_EXPANSION_SIZE];
3340		int i;
3341		/*
3342		 * An unknown si_code might need more than
3343		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3344		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3345		 * will return this data to userspace exactly.
3346		 */
3347		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3348			return -EFAULT;
3349		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3350			if (buf[i] != 0)
3351				return -E2BIG;
3352		}
3353	}
3354	return 0;
3355}
3356
3357static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3358				    const siginfo_t __user *from)
3359{
3360	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3361		return -EFAULT;
3362	to->si_signo = signo;
3363	return post_copy_siginfo_from_user(to, from);
3364}
3365
3366int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3367{
3368	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3369		return -EFAULT;
3370	return post_copy_siginfo_from_user(to, from);
3371}
3372
3373#ifdef CONFIG_COMPAT
3374/**
3375 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3376 * @to: compat siginfo destination
3377 * @from: kernel siginfo source
3378 *
3379 * Note: This function does not work properly for the SIGCHLD on x32, but
3380 * fortunately it doesn't have to.  The only valid callers for this function are
3381 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3382 * The latter does not care because SIGCHLD will never cause a coredump.
3383 */
3384void copy_siginfo_to_external32(struct compat_siginfo *to,
3385		const struct kernel_siginfo *from)
3386{
3387	memset(to, 0, sizeof(*to));
3388
3389	to->si_signo = from->si_signo;
3390	to->si_errno = from->si_errno;
3391	to->si_code  = from->si_code;
3392	switch (siginfo_layout(from->si_signo, from->si_code)) {
3393	case SIL_KILL:
3394		to->si_pid = from->si_pid;
3395		to->si_uid = from->si_uid;
3396		break;
3397	case SIL_TIMER:
3398		to->si_tid     = from->si_tid;
3399		to->si_overrun = from->si_overrun;
3400		to->si_int     = from->si_int;
3401		break;
3402	case SIL_POLL:
3403		to->si_band = from->si_band;
3404		to->si_fd   = from->si_fd;
3405		break;
3406	case SIL_FAULT:
3407		to->si_addr = ptr_to_compat(from->si_addr);
3408		break;
3409	case SIL_FAULT_TRAPNO:
3410		to->si_addr = ptr_to_compat(from->si_addr);
3411		to->si_trapno = from->si_trapno;
3412		break;
3413	case SIL_FAULT_MCEERR:
3414		to->si_addr = ptr_to_compat(from->si_addr);
3415		to->si_addr_lsb = from->si_addr_lsb;
3416		break;
3417	case SIL_FAULT_BNDERR:
3418		to->si_addr = ptr_to_compat(from->si_addr);
3419		to->si_lower = ptr_to_compat(from->si_lower);
3420		to->si_upper = ptr_to_compat(from->si_upper);
3421		break;
3422	case SIL_FAULT_PKUERR:
3423		to->si_addr = ptr_to_compat(from->si_addr);
3424		to->si_pkey = from->si_pkey;
3425		break;
3426	case SIL_FAULT_PERF_EVENT:
3427		to->si_addr = ptr_to_compat(from->si_addr);
3428		to->si_perf_data = from->si_perf_data;
3429		to->si_perf_type = from->si_perf_type;
3430		to->si_perf_flags = from->si_perf_flags;
3431		break;
3432	case SIL_CHLD:
3433		to->si_pid = from->si_pid;
3434		to->si_uid = from->si_uid;
3435		to->si_status = from->si_status;
3436		to->si_utime = from->si_utime;
3437		to->si_stime = from->si_stime;
3438		break;
3439	case SIL_RT:
3440		to->si_pid = from->si_pid;
3441		to->si_uid = from->si_uid;
3442		to->si_int = from->si_int;
3443		break;
3444	case SIL_SYS:
3445		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3446		to->si_syscall   = from->si_syscall;
3447		to->si_arch      = from->si_arch;
3448		break;
3449	}
3450}
3451
3452int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3453			   const struct kernel_siginfo *from)
3454{
3455	struct compat_siginfo new;
3456
3457	copy_siginfo_to_external32(&new, from);
3458	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3459		return -EFAULT;
3460	return 0;
3461}
3462
3463static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3464					 const struct compat_siginfo *from)
3465{
3466	clear_siginfo(to);
3467	to->si_signo = from->si_signo;
3468	to->si_errno = from->si_errno;
3469	to->si_code  = from->si_code;
3470	switch (siginfo_layout(from->si_signo, from->si_code)) {
3471	case SIL_KILL:
3472		to->si_pid = from->si_pid;
3473		to->si_uid = from->si_uid;
3474		break;
3475	case SIL_TIMER:
3476		to->si_tid     = from->si_tid;
3477		to->si_overrun = from->si_overrun;
3478		to->si_int     = from->si_int;
3479		break;
3480	case SIL_POLL:
3481		to->si_band = from->si_band;
3482		to->si_fd   = from->si_fd;
3483		break;
3484	case SIL_FAULT:
3485		to->si_addr = compat_ptr(from->si_addr);
3486		break;
3487	case SIL_FAULT_TRAPNO:
3488		to->si_addr = compat_ptr(from->si_addr);
3489		to->si_trapno = from->si_trapno;
3490		break;
3491	case SIL_FAULT_MCEERR:
3492		to->si_addr = compat_ptr(from->si_addr);
3493		to->si_addr_lsb = from->si_addr_lsb;
3494		break;
3495	case SIL_FAULT_BNDERR:
3496		to->si_addr = compat_ptr(from->si_addr);
3497		to->si_lower = compat_ptr(from->si_lower);
3498		to->si_upper = compat_ptr(from->si_upper);
3499		break;
3500	case SIL_FAULT_PKUERR:
3501		to->si_addr = compat_ptr(from->si_addr);
3502		to->si_pkey = from->si_pkey;
3503		break;
3504	case SIL_FAULT_PERF_EVENT:
3505		to->si_addr = compat_ptr(from->si_addr);
3506		to->si_perf_data = from->si_perf_data;
3507		to->si_perf_type = from->si_perf_type;
3508		to->si_perf_flags = from->si_perf_flags;
3509		break;
3510	case SIL_CHLD:
3511		to->si_pid    = from->si_pid;
3512		to->si_uid    = from->si_uid;
3513		to->si_status = from->si_status;
3514#ifdef CONFIG_X86_X32_ABI
3515		if (in_x32_syscall()) {
3516			to->si_utime = from->_sifields._sigchld_x32._utime;
3517			to->si_stime = from->_sifields._sigchld_x32._stime;
3518		} else
3519#endif
3520		{
3521			to->si_utime = from->si_utime;
3522			to->si_stime = from->si_stime;
3523		}
3524		break;
3525	case SIL_RT:
3526		to->si_pid = from->si_pid;
3527		to->si_uid = from->si_uid;
3528		to->si_int = from->si_int;
3529		break;
3530	case SIL_SYS:
3531		to->si_call_addr = compat_ptr(from->si_call_addr);
3532		to->si_syscall   = from->si_syscall;
3533		to->si_arch      = from->si_arch;
3534		break;
3535	}
3536	return 0;
3537}
3538
3539static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3540				      const struct compat_siginfo __user *ufrom)
3541{
3542	struct compat_siginfo from;
3543
3544	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3545		return -EFAULT;
3546
3547	from.si_signo = signo;
3548	return post_copy_siginfo_from_user32(to, &from);
3549}
3550
3551int copy_siginfo_from_user32(struct kernel_siginfo *to,
3552			     const struct compat_siginfo __user *ufrom)
3553{
3554	struct compat_siginfo from;
3555
3556	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3557		return -EFAULT;
3558
3559	return post_copy_siginfo_from_user32(to, &from);
3560}
3561#endif /* CONFIG_COMPAT */
3562
3563/**
3564 *  do_sigtimedwait - wait for queued signals specified in @which
3565 *  @which: queued signals to wait for
3566 *  @info: if non-null, the signal's siginfo is returned here
3567 *  @ts: upper bound on process time suspension
3568 */
3569static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3570		    const struct timespec64 *ts)
3571{
3572	ktime_t *to = NULL, timeout = KTIME_MAX;
3573	struct task_struct *tsk = current;
3574	sigset_t mask = *which;
3575	enum pid_type type;
3576	int sig, ret = 0;
3577
3578	if (ts) {
3579		if (!timespec64_valid(ts))
3580			return -EINVAL;
3581		timeout = timespec64_to_ktime(*ts);
3582		to = &timeout;
3583	}
3584
3585	/*
3586	 * Invert the set of allowed signals to get those we want to block.
3587	 */
3588	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3589	signotset(&mask);
3590
3591	spin_lock_irq(&tsk->sighand->siglock);
3592	sig = dequeue_signal(tsk, &mask, info, &type);
3593	if (!sig && timeout) {
3594		/*
3595		 * None ready, temporarily unblock those we're interested
3596		 * in while we are sleeping, so that we'll be awakened when
3597		 * they arrive. Unblocking is always fine, we can avoid
3598		 * set_current_blocked().
3599		 */
3600		tsk->real_blocked = tsk->blocked;
3601		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3602		recalc_sigpending();
3603		spin_unlock_irq(&tsk->sighand->siglock);
3604
3605		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3606		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3607					       HRTIMER_MODE_REL);
3608		spin_lock_irq(&tsk->sighand->siglock);
3609		__set_task_blocked(tsk, &tsk->real_blocked);
3610		sigemptyset(&tsk->real_blocked);
3611		sig = dequeue_signal(tsk, &mask, info, &type);
3612	}
3613	spin_unlock_irq(&tsk->sighand->siglock);
3614
3615	if (sig)
3616		return sig;
3617	return ret ? -EINTR : -EAGAIN;
3618}
3619
3620/**
3621 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3622 *			in @uthese
3623 *  @uthese: queued signals to wait for
3624 *  @uinfo: if non-null, the signal's siginfo is returned here
3625 *  @uts: upper bound on process time suspension
3626 *  @sigsetsize: size of sigset_t type
3627 */
3628SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3629		siginfo_t __user *, uinfo,
3630		const struct __kernel_timespec __user *, uts,
3631		size_t, sigsetsize)
3632{
3633	sigset_t these;
3634	struct timespec64 ts;
3635	kernel_siginfo_t info;
3636	int ret;
3637
3638	/* XXX: Don't preclude handling different sized sigset_t's.  */
3639	if (sigsetsize != sizeof(sigset_t))
3640		return -EINVAL;
3641
3642	if (copy_from_user(&these, uthese, sizeof(these)))
3643		return -EFAULT;
3644
3645	if (uts) {
3646		if (get_timespec64(&ts, uts))
3647			return -EFAULT;
3648	}
3649
3650	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3651
3652	if (ret > 0 && uinfo) {
3653		if (copy_siginfo_to_user(uinfo, &info))
3654			ret = -EFAULT;
3655	}
3656
3657	return ret;
3658}
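
/*
 * Illustrative userspace sketch: the canonical rt_sigtimedwait usage is
 * synchronous signal handling - block the signals of interest first,
 * then dequeue them with a timeout instead of installing handlers.
 */
#include <signal.h>
#include <time.h>

static int wait_for_sigusr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 1 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */
	return sigtimedwait(&set, &info, &ts);	/* -> rt_sigtimedwait() */
}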
3659
3660#ifdef CONFIG_COMPAT_32BIT_TIME
3661SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3662		siginfo_t __user *, uinfo,
3663		const struct old_timespec32 __user *, uts,
3664		size_t, sigsetsize)
3665{
3666	sigset_t these;
3667	struct timespec64 ts;
3668	kernel_siginfo_t info;
3669	int ret;
3670
3671	if (sigsetsize != sizeof(sigset_t))
3672		return -EINVAL;
3673
3674	if (copy_from_user(&these, uthese, sizeof(these)))
3675		return -EFAULT;
3676
3677	if (uts) {
3678		if (get_old_timespec32(&ts, uts))
3679			return -EFAULT;
3680	}
3681
3682	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3683
3684	if (ret > 0 && uinfo) {
3685		if (copy_siginfo_to_user(uinfo, &info))
3686			ret = -EFAULT;
3687	}
3688
3689	return ret;
3690}
3691#endif
3692
3693#ifdef CONFIG_COMPAT
3694COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3695		struct compat_siginfo __user *, uinfo,
3696		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3697{
3698	sigset_t s;
3699	struct timespec64 t;
3700	kernel_siginfo_t info;
3701	long ret;
3702
3703	if (sigsetsize != sizeof(sigset_t))
3704		return -EINVAL;
3705
3706	if (get_compat_sigset(&s, uthese))
3707		return -EFAULT;
3708
3709	if (uts) {
3710		if (get_timespec64(&t, uts))
3711			return -EFAULT;
3712	}
3713
3714	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3715
3716	if (ret > 0 && uinfo) {
3717		if (copy_siginfo_to_user32(uinfo, &info))
3718			ret = -EFAULT;
3719	}
3720
3721	return ret;
3722}
3723
3724#ifdef CONFIG_COMPAT_32BIT_TIME
3725COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3726		struct compat_siginfo __user *, uinfo,
3727		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3728{
3729	sigset_t s;
3730	struct timespec64 t;
3731	kernel_siginfo_t info;
3732	long ret;
3733
3734	if (sigsetsize != sizeof(sigset_t))
3735		return -EINVAL;
3736
3737	if (get_compat_sigset(&s, uthese))
3738		return -EFAULT;
3739
3740	if (uts) {
3741		if (get_old_timespec32(&t, uts))
3742			return -EFAULT;
3743	}
3744
3745	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3746
3747	if (ret > 0 && uinfo) {
3748		if (copy_siginfo_to_user32(uinfo, &info))
3749			ret = -EFAULT;
3750	}
3751
3752	return ret;
3753}
3754#endif
3755#endif
3756
3757static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3758{
3759	clear_siginfo(info);
3760	info->si_signo = sig;
3761	info->si_errno = 0;
3762	info->si_code = SI_USER;
3763	info->si_pid = task_tgid_vnr(current);
3764	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3765}
3766
3767/**
3768 *  sys_kill - send a signal to a process
3769 *  @pid: the PID of the process
3770 *  @sig: signal to be sent
3771 */
3772SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3773{
3774	struct kernel_siginfo info;
3775
3776	prepare_kill_siginfo(sig, &info);
3777
3778	return kill_something_info(sig, &info, pid);
3779}
3780
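    /*
     * Illustrative note, not part of the kernel source:
     * kill_something_info() implements the usual kill(2) conventions --
     * pid > 0 signals that process, pid == 0 the caller's process group,
     * pid == -1 every process the caller is permitted to signal, and
     * pid < -1 the process group -pid.  Signal 0 runs only the permission
     * and existence checks:
     *
     *	if (kill(pid, 0) == 0 || errno == EPERM)
     *		;	// the target exists
     */
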
3781/*
3782 * Verify that the signaler and signalee either are in the same pid namespace
3783 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3784 * namespace.
3785 */
3786static bool access_pidfd_pidns(struct pid *pid)
3787{
3788	struct pid_namespace *active = task_active_pid_ns(current);
3789	struct pid_namespace *p = ns_of_pid(pid);
3790
3791	for (;;) {
3792		if (!p)
3793			return false;
3794		if (p == active)
3795			break;
3796		p = p->parent;
3797	}
3798
3799	return true;
3800}
3801
3802static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3803		siginfo_t __user *info)
3804{
3805#ifdef CONFIG_COMPAT
3806	/*
3807	 * Avoid hooking up compat syscalls and instead handle necessary
3808	 * conversions here. Note, this is a stop-gap measure and should not be
3809	 * considered a generic solution.
3810	 */
3811	if (in_compat_syscall())
3812		return copy_siginfo_from_user32(
3813			kinfo, (struct compat_siginfo __user *)info);
3814#endif
3815	return copy_siginfo_from_user(kinfo, info);
3816}
3817
3818static struct pid *pidfd_to_pid(const struct file *file)
3819{
3820	struct pid *pid;
3821
3822	pid = pidfd_pid(file);
3823	if (!IS_ERR(pid))
3824		return pid;
3825
3826	return tgid_pidfd_to_pid(file);
3827}
3828
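    /*
     * Illustrative note, not part of the kernel source: pidfd_pid()
     * resolves the anonymous-inode pidfds returned by pidfd_open(2) or
     * CLONE_PIDFD, while tgid_pidfd_to_pid() covers the older form where
     * the file descriptor refers to a /proc/<pid> directory; both are
     * accepted by pidfd_send_signal(2).
     */
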
3829/**
3830 * sys_pidfd_send_signal - Signal a process through a pidfd
3831 * @pidfd:  file descriptor of the process
3832 * @sig:    signal to send
3833 * @info:   signal info
3834 * @flags:  future flags
3835 *
3836 * The syscall currently only signals via PIDTYPE_PID, which covers
3837 * kill(<positive-pid>, <signal>). It does not signal threads or process
3838 * groups.
3839 * In order to extend the syscall to threads and process groups, the @flags
3840 * argument should be used. In essence, the @flags argument will determine
3841 * what is signaled, not the file descriptor itself. In other words,
3842 * grouping is a property of the flags argument, not a property of the file
3843 * descriptor.
3844 *
3845 * Return: 0 on success, negative errno on failure
3846 */
3847SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3848		siginfo_t __user *, info, unsigned int, flags)
3849{
3850	int ret;
3851	struct fd f;
3852	struct pid *pid;
3853	kernel_siginfo_t kinfo;
3854
3855	/* Enforce that flags are 0 until we add an extension. */
3856	if (flags)
3857		return -EINVAL;
3858
3859	f = fdget(pidfd);
3860	if (!f.file)
3861		return -EBADF;
3862
3863	/* Is this a pidfd? */
3864	pid = pidfd_to_pid(f.file);
3865	if (IS_ERR(pid)) {
3866		ret = PTR_ERR(pid);
3867		goto err;
3868	}
3869
3870	ret = -EINVAL;
3871	if (!access_pidfd_pidns(pid))
3872		goto err;
3873
3874	if (info) {
3875		ret = copy_siginfo_from_user_any(&kinfo, info);
3876		if (unlikely(ret))
3877			goto err;
3878
3879		ret = -EINVAL;
3880		if (unlikely(sig != kinfo.si_signo))
3881			goto err;
3882
3883		/* Only allow sending arbitrary signals to yourself. */
3884		ret = -EPERM;
3885		if ((task_pid(current) != pid) &&
3886		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3887			goto err;
3888	} else {
3889		prepare_kill_siginfo(sig, &kinfo);
3890	}
3891
3892	ret = kill_pid_info(sig, &kinfo, pid);
3893
3894err:
3895	fdput(f);
3896	return ret;
3897}
3898
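    /*
     * Illustrative sketch, not part of the kernel source: glibc has
     * historically provided no wrapper for this syscall, so it is
     * commonly invoked via syscall(2), typically with a pidfd from
     * pidfd_open(2):
     *
     *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
     *	if (pidfd >= 0) {
     *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
     *		close(pidfd);
     *	}
     *
     * With a NULL info the prepare_kill_siginfo() path above is taken and
     * the target observes an ordinary SI_USER signal.
     */
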
3899static int
3900do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3901{
3902	struct task_struct *p;
3903	int error = -ESRCH;
3904
3905	rcu_read_lock();
3906	p = find_task_by_vpid(pid);
3907	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3908		error = check_kill_permission(sig, info, p);
3909		/*
3910		 * The null signal is a permissions and process existence
3911		 * probe.  No signal is actually delivered.
3912		 */
3913		if (!error && sig) {
3914			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3915			/*
3916			 * If lock_task_sighand() failed we pretend the task
3917			 * dies after receiving the signal. The window is tiny,
3918			 * and the signal is private anyway.
3919			 */
3920			if (unlikely(error == -ESRCH))
3921				error = 0;
3922		}
3923	}
3924	rcu_read_unlock();
3925
3926	return error;
3927}
3928
3929static int do_tkill(pid_t tgid, pid_t pid, int sig)
3930{
3931	struct kernel_siginfo info;
3932
3933	clear_siginfo(&info);
3934	info.si_signo = sig;
3935	info.si_errno = 0;
3936	info.si_code = SI_TKILL;
3937	info.si_pid = task_tgid_vnr(current);
3938	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3939
3940	return do_send_specific(tgid, pid, sig, &info);
3941}
3942
3943/**
3944 *  sys_tgkill - send signal to one specific thread
3945 *  @tgid: the thread group ID of the thread
3946 *  @pid: the PID of the thread
3947 *  @sig: signal to be sent
3948 *
3949 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3950 *  exists but no longer belongs to the target process. This check
3951 *  solves the problem of threads exiting and PIDs getting reused.
3952 */
3953SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3954{
3955	/* This is only valid for single tasks */
3956	if (pid <= 0 || tgid <= 0)
3957		return -EINVAL;
3958
3959	return do_tkill(tgid, pid, sig);
3960}
3961
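    /*
     * Illustrative note, not part of the kernel source: tgkill is the
     * syscall behind pthread_kill(3).  Used raw, the thread id comes from
     * gettid(2):
     *
     *	pid_t tid = syscall(SYS_gettid);
     *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
     */
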
3962/**
3963 *  sys_tkill - send signal to one specific task
3964 *  @pid: the PID of the task
3965 *  @sig: signal to be sent
3966 *
3967 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3968 */
3969SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3970{
3971	/* This is only valid for single tasks */
3972	if (pid <= 0)
3973		return -EINVAL;
3974
3975	return do_tkill(0, pid, sig);
3976}
3977
3978static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3979{
3980	/* Not even root can pretend to send signals from the kernel.
3981	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3982	 */
3983	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3984	    (task_pid_vnr(current) != pid))
3985		return -EPERM;
3986
3987	/* POSIX.1b doesn't mention process groups.  */
3988	return kill_proc_info(sig, info, pid);
3989}
3990
3991/**
3992 *  sys_rt_sigqueueinfo - send signal information to a process
3993 *  @pid: the PID of the process
3994 *  @sig: signal to be sent
3995 *  @uinfo: signal info to be sent
3996 */
3997SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3998		siginfo_t __user *, uinfo)
3999{
4000	kernel_siginfo_t info;
4001	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4002	if (unlikely(ret))
4003		return ret;
4004	return do_rt_sigqueueinfo(pid, sig, &info);
4005}
4006
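    /*
     * Illustrative sketch, not part of the kernel source: userspace
     * normally goes through sigqueue(3), which fills in a siginfo with
     * si_code = SI_QUEUE (negative, so it passes the check in
     * do_rt_sigqueueinfo() above) plus one word of payload:
     *
     *	union sigval v = { .sival_int = 42 };
     *	sigqueue(pid, SIGRTMIN, v);	// handler reads si->si_value
     */
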
4007#ifdef CONFIG_COMPAT
4008COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4009			compat_pid_t, pid,
4010			int, sig,
4011			struct compat_siginfo __user *, uinfo)
4012{
4013	kernel_siginfo_t info;
4014	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4015	if (unlikely(ret))
4016		return ret;
4017	return do_rt_sigqueueinfo(pid, sig, &info);
4018}
4019#endif
4020
4021static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4022{
4023	/* This is only valid for single tasks */
4024	if (pid <= 0 || tgid <= 0)
4025		return -EINVAL;
4026
4027	/* Not even root can pretend to send signals from the kernel.
4028	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4029	 */
4030	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4031	    (task_pid_vnr(current) != pid))
4032		return -EPERM;
4033
4034	return do_send_specific(tgid, pid, sig, info);
4035}
4036
4037SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4038		siginfo_t __user *, uinfo)
4039{
4040	kernel_siginfo_t info;
4041	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4042	if (unlikely(ret))
4043		return ret;
4044	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4045}
4046
4047#ifdef CONFIG_COMPAT
4048COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4049			compat_pid_t, tgid,
4050			compat_pid_t, pid,
4051			int, sig,
4052			struct compat_siginfo __user *, uinfo)
4053{
4054	kernel_siginfo_t info;
4055	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4056	if (unlikely(ret))
4057		return ret;
4058	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4059}
4060#endif
4061
4062/*
4063 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4064 */
4065void kernel_sigaction(int sig, __sighandler_t action)
4066{
4067	spin_lock_irq(&current->sighand->siglock);
4068	current->sighand->action[sig - 1].sa.sa_handler = action;
4069	if (action == SIG_IGN) {
4070		sigset_t mask;
4071
4072		sigemptyset(&mask);
4073		sigaddset(&mask, sig);
4074
4075		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4076		flush_sigqueue_mask(&mask, &current->pending);
4077		recalc_sigpending();
4078	}
4079	spin_unlock_irq(&current->sighand->siglock);
4080}
4081EXPORT_SYMBOL(kernel_sigaction);
4082
4083void __weak sigaction_compat_abi(struct k_sigaction *act,
4084		struct k_sigaction *oact)
4085{
4086}
4087
4088int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4089{
4090	struct task_struct *p = current, *t;
4091	struct k_sigaction *k;
4092	sigset_t mask;
4093
4094	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4095		return -EINVAL;
4096
4097	k = &p->sighand->action[sig-1];
4098
4099	spin_lock_irq(&p->sighand->siglock);
4100	if (k->sa.sa_flags & SA_IMMUTABLE) {
4101		spin_unlock_irq(&p->sighand->siglock);
4102		return -EINVAL;
4103	}
4104	if (oact)
4105		*oact = *k;
4106
4107	/*
4108	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4109	 * e.g. by having an architecture use the bit in their uapi.
4110	 */
4111	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4112
4113	/*
4114	 * Clear unknown flag bits in order to allow userspace to detect missing
4115	 * support for flag bits and to allow the kernel to use non-uapi bits
4116	 * internally.
4117	 */
4118	if (act)
4119		act->sa.sa_flags &= UAPI_SA_FLAGS;
4120	if (oact)
4121		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4122
4123	sigaction_compat_abi(act, oact);
4124
4125	if (act) {
4126		sigdelsetmask(&act->sa.sa_mask,
4127			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4128		*k = *act;
4129		/*
4130		 * POSIX 3.3.1.3:
4131		 *  "Setting a signal action to SIG_IGN for a signal that is
4132		 *   pending shall cause the pending signal to be discarded,
4133		 *   whether or not it is blocked."
4134		 *
4135		 *  "Setting a signal action to SIG_DFL for a signal that is
4136		 *   pending and whose default action is to ignore the signal
4137		 *   (for example, SIGCHLD), shall cause the pending signal to
4138		 *   be discarded, whether or not it is blocked"
4139		 */
4140		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4141			sigemptyset(&mask);
4142			sigaddset(&mask, sig);
4143			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4144			for_each_thread(p, t)
4145				flush_sigqueue_mask(&mask, &t->pending);
4146		}
4147	}
4148
4149	spin_unlock_irq(&p->sighand->siglock);
4150	return 0;
4151}
4152
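    /*
     * Illustrative sketch, not part of the kernel source: the userspace
     * counterpart installs an action via sigaction(2); SA_SIGINFO selects
     * the three-argument handler form that receives the siginfo (the
     * handler name here is hypothetical):
     *
     *	static void handler(int sig, siginfo_t *si, void *uctx) { ... }
     *
     *	struct sigaction sa = { 0 };
     *	sa.sa_sigaction = handler;
     *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
     *	sigemptyset(&sa.sa_mask);
     *	sigaction(SIGUSR1, &sa, NULL);
     */
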
4153#ifdef CONFIG_DYNAMIC_SIGFRAME
4154static inline void sigaltstack_lock(void)
4155	__acquires(&current->sighand->siglock)
4156{
4157	spin_lock_irq(&current->sighand->siglock);
4158}
4159
4160static inline void sigaltstack_unlock(void)
4161	__releases(&current->sighand->siglock)
4162{
4163	spin_unlock_irq(&current->sighand->siglock);
4164}
4165#else
4166static inline void sigaltstack_lock(void) { }
4167static inline void sigaltstack_unlock(void) { }
4168#endif
4169
4170static int
4171do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4172		size_t min_ss_size)
4173{
4174	struct task_struct *t = current;
4175	int ret = 0;
4176
4177	if (oss) {
4178		memset(oss, 0, sizeof(stack_t));
4179		oss->ss_sp = (void __user *) t->sas_ss_sp;
4180		oss->ss_size = t->sas_ss_size;
4181		oss->ss_flags = sas_ss_flags(sp) |
4182			(current->sas_ss_flags & SS_FLAG_BITS);
4183	}
4184
4185	if (ss) {
4186		void __user *ss_sp = ss->ss_sp;
4187		size_t ss_size = ss->ss_size;
4188		unsigned ss_flags = ss->ss_flags;
4189		int ss_mode;
4190
4191		if (unlikely(on_sig_stack(sp)))
4192			return -EPERM;
4193
4194		ss_mode = ss_flags & ~SS_FLAG_BITS;
4195		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4196				ss_mode != 0))
4197			return -EINVAL;
4198
4199		/*
4200		 * Return before taking any locks if no actual
4201		 * sigaltstack changes were requested.
4202		 */
4203		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4204		    t->sas_ss_size == ss_size &&
4205		    t->sas_ss_flags == ss_flags)
4206			return 0;
4207
4208		sigaltstack_lock();
4209		if (ss_mode == SS_DISABLE) {
4210			ss_size = 0;
4211			ss_sp = NULL;
4212		} else {
4213			if (unlikely(ss_size < min_ss_size))
4214				ret = -ENOMEM;
4215			if (!sigaltstack_size_valid(ss_size))
4216				ret = -ENOMEM;
4217		}
4218		if (!ret) {
4219			t->sas_ss_sp = (unsigned long) ss_sp;
4220			t->sas_ss_size = ss_size;
4221			t->sas_ss_flags = ss_flags;
4222		}
4223		sigaltstack_unlock();
4224	}
4225	return ret;
4226}
4227
4228SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4229{
4230	stack_t new, old;
4231	int err;
4232	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4233		return -EFAULT;
4234	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4235			      current_user_stack_pointer(),
4236			      MINSIGSTKSZ);
4237	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4238		err = -EFAULT;
4239	return err;
4240}
4241
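    /*
     * Illustrative sketch, not part of the kernel source: a typical use of
     * sigaltstack(2) is giving a SIGSEGV handler its own stack so it can
     * run even after the main stack has overflowed; SA_ONSTACK on the
     * handler selects the alternate stack:
     *
     *	stack_t ss = {
     *		.ss_sp = malloc(SIGSTKSZ),
     *		.ss_size = SIGSTKSZ,
     *		.ss_flags = 0,
     *	};
     *	sigaltstack(&ss, NULL);
     */
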
4242int restore_altstack(const stack_t __user *uss)
4243{
4244	stack_t new;
4245	if (copy_from_user(&new, uss, sizeof(stack_t)))
4246		return -EFAULT;
4247	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4248			     MINSIGSTKSZ);
4249	/* squash all but EFAULT for now */
4250	return 0;
4251}
4252
4253int __save_altstack(stack_t __user *uss, unsigned long sp)
4254{
4255	struct task_struct *t = current;
4256	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4257		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4258		__put_user(t->sas_ss_size, &uss->ss_size);
4259	return err;
4260}
4261
4262#ifdef CONFIG_COMPAT
4263static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4264				 compat_stack_t __user *uoss_ptr)
4265{
4266	stack_t uss, uoss;
4267	int ret;
4268
4269	if (uss_ptr) {
4270		compat_stack_t uss32;
4271		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4272			return -EFAULT;
4273		uss.ss_sp = compat_ptr(uss32.ss_sp);
4274		uss.ss_flags = uss32.ss_flags;
4275		uss.ss_size = uss32.ss_size;
4276	}
4277	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4278			     compat_user_stack_pointer(),
4279			     COMPAT_MINSIGSTKSZ);
4280	if (ret >= 0 && uoss_ptr)  {
4281		compat_stack_t old;
4282		memset(&old, 0, sizeof(old));
4283		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4284		old.ss_flags = uoss.ss_flags;
4285		old.ss_size = uoss.ss_size;
4286		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4287			ret = -EFAULT;
4288	}
4289	return ret;
4290}
4291
4292COMPAT_SYSCALL_DEFINE2(sigaltstack,
4293			const compat_stack_t __user *, uss_ptr,
4294			compat_stack_t __user *, uoss_ptr)
4295{
4296	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4297}
4298
4299int compat_restore_altstack(const compat_stack_t __user *uss)
4300{
4301	int err = do_compat_sigaltstack(uss, NULL);
4302	/* squash all but -EFAULT for now */
4303	return err == -EFAULT ? err : 0;
4304}
4305
4306int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4307{
4308	int err;
4309	struct task_struct *t = current;
4310	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4311			 &uss->ss_sp) |
4312		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4313		__put_user(t->sas_ss_size, &uss->ss_size);
4314	return err;
4315}
4316#endif
4317
4318#ifdef __ARCH_WANT_SYS_SIGPENDING
4319
4320/**
4321 *  sys_sigpending - examine pending signals
4322 *  @uset: where mask of pending signal is returned
4323 */
4324SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4325{
4326	sigset_t set;
4327
4328	if (sizeof(old_sigset_t) > sizeof(*uset))
4329		return -EINVAL;
4330
4331	do_sigpending(&set);
4332
4333	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4334		return -EFAULT;
4335
4336	return 0;
4337}
4338
4339#ifdef CONFIG_COMPAT
4340COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4341{
4342	sigset_t set;
4343
4344	do_sigpending(&set);
4345
4346	return put_user(set.sig[0], set32);
4347}
4348#endif
4349
4350#endif
4351
4352#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4353/**
4354 *  sys_sigprocmask - examine and change blocked signals
4355 *  @how: whether to add, remove, or set signals
4356 *  @nset: signals to add or remove (if non-null)
4357 *  @oset: previous value of signal mask if non-null
4358 *
4359 * Some platforms have their own version with special arguments;
4360 * others support only sys_rt_sigprocmask.
4361 */
4362
4363SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4364		old_sigset_t __user *, oset)
4365{
4366	old_sigset_t old_set, new_set;
4367	sigset_t new_blocked;
4368
4369	old_set = current->blocked.sig[0];
4370
4371	if (nset) {
4372		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4373			return -EFAULT;
4374
4375		new_blocked = current->blocked;
4376
4377		switch (how) {
4378		case SIG_BLOCK:
4379			sigaddsetmask(&new_blocked, new_set);
4380			break;
4381		case SIG_UNBLOCK:
4382			sigdelsetmask(&new_blocked, new_set);
4383			break;
4384		case SIG_SETMASK:
4385			new_blocked.sig[0] = new_set;
4386			break;
4387		default:
4388			return -EINVAL;
4389		}
4390
4391		set_current_blocked(&new_blocked);
4392	}
4393
4394	if (oset) {
4395		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4396			return -EFAULT;
4397	}
4398
4399	return 0;
4400}
4401#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4402
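    /*
     * Illustrative note, not part of the kernel source: the legacy syscall
     * above manipulates only word 0 of the blocked set (signals 1..32).
     * The glibc sigprocmask(3) wrapper uses rt_sigprocmask(2) instead, so
     * real-time signals are covered as well:
     *
     *	sigset_t set, old;
     *	sigemptyset(&set);
     *	sigaddset(&set, SIGINT);
     *	sigprocmask(SIG_BLOCK, &set, &old);
     */
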
4403#ifndef CONFIG_ODD_RT_SIGACTION
4404/**
4405 *  sys_rt_sigaction - alter an action taken by a process
4406 *  @sig: signal whose action is to be changed
4407 *  @act: new sigaction
4408 *  @oact: used to save the previous sigaction
4409 *  @sigsetsize: size of sigset_t type
4410 */
4411SYSCALL_DEFINE4(rt_sigaction, int, sig,
4412		const struct sigaction __user *, act,
4413		struct sigaction __user *, oact,
4414		size_t, sigsetsize)
4415{
4416	struct k_sigaction new_sa, old_sa;
4417	int ret;
4418
4419	/* XXX: Don't preclude handling different sized sigset_t's.  */
4420	if (sigsetsize != sizeof(sigset_t))
4421		return -EINVAL;
4422
4423	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4424		return -EFAULT;
4425
4426	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4427	if (ret)
4428		return ret;
4429
4430	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4431		return -EFAULT;
4432
4433	return 0;
4434}
4435#ifdef CONFIG_COMPAT
4436COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4437		const struct compat_sigaction __user *, act,
4438		struct compat_sigaction __user *, oact,
4439		compat_size_t, sigsetsize)
4440{
4441	struct k_sigaction new_ka, old_ka;
4442#ifdef __ARCH_HAS_SA_RESTORER
4443	compat_uptr_t restorer;
4444#endif
4445	int ret;
4446
4447	/* XXX: Don't preclude handling different sized sigset_t's.  */
4448	if (sigsetsize != sizeof(compat_sigset_t))
4449		return -EINVAL;
4450
4451	if (act) {
4452		compat_uptr_t handler;
4453		ret = get_user(handler, &act->sa_handler);
4454		new_ka.sa.sa_handler = compat_ptr(handler);
4455#ifdef __ARCH_HAS_SA_RESTORER
4456		ret |= get_user(restorer, &act->sa_restorer);
4457		new_ka.sa.sa_restorer = compat_ptr(restorer);
4458#endif
4459		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4460		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4461		if (ret)
4462			return -EFAULT;
4463	}
4464
4465	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4466	if (!ret && oact) {
4467		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
4468			       &oact->sa_handler);
4469		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4470					 sizeof(oact->sa_mask));
4471		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4472#ifdef __ARCH_HAS_SA_RESTORER
4473		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4474				&oact->sa_restorer);
4475#endif
4476	}
4477	return ret;
4478}
4479#endif
4480#endif /* !CONFIG_ODD_RT_SIGACTION */
4481
4482#ifdef CONFIG_OLD_SIGACTION
4483SYSCALL_DEFINE3(sigaction, int, sig,
4484		const struct old_sigaction __user *, act,
4485	        struct old_sigaction __user *, oact)
4486{
4487	struct k_sigaction new_ka, old_ka;
4488	int ret;
4489
4490	if (act) {
4491		old_sigset_t mask;
4492		if (!access_ok(act, sizeof(*act)) ||
4493		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4494		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4495		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4496		    __get_user(mask, &act->sa_mask))
4497			return -EFAULT;
4498#ifdef __ARCH_HAS_KA_RESTORER
4499		new_ka.ka_restorer = NULL;
4500#endif
4501		siginitset(&new_ka.sa.sa_mask, mask);
4502	}
4503
4504	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4505
4506	if (!ret && oact) {
4507		if (!access_ok(oact, sizeof(*oact)) ||
4508		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4509		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4510		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4511		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4512			return -EFAULT;
4513	}
4514
4515	return ret;
4516}
4517#endif
4518#ifdef CONFIG_COMPAT_OLD_SIGACTION
4519COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4520		const struct compat_old_sigaction __user *, act,
4521	        struct compat_old_sigaction __user *, oact)
4522{
4523	struct k_sigaction new_ka, old_ka;
4524	int ret;
4525	compat_old_sigset_t mask;
4526	compat_uptr_t handler, restorer;
4527
4528	if (act) {
4529		if (!access_ok(act, sizeof(*act)) ||
4530		    __get_user(handler, &act->sa_handler) ||
4531		    __get_user(restorer, &act->sa_restorer) ||
4532		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4533		    __get_user(mask, &act->sa_mask))
4534			return -EFAULT;
4535
4536#ifdef __ARCH_HAS_KA_RESTORER
4537		new_ka.ka_restorer = NULL;
4538#endif
4539		new_ka.sa.sa_handler = compat_ptr(handler);
4540		new_ka.sa.sa_restorer = compat_ptr(restorer);
4541		siginitset(&new_ka.sa.sa_mask, mask);
4542	}
4543
4544	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4545
4546	if (!ret && oact) {
4547		if (!access_ok(oact, sizeof(*oact)) ||
4548		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4549			       &oact->sa_handler) ||
4550		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4551			       &oact->sa_restorer) ||
4552		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4553		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4554			return -EFAULT;
4555	}
4556	return ret;
4557}
4558#endif
4559
4560#ifdef CONFIG_SGETMASK_SYSCALL
4561
4562/*
4563 * For backwards compatibility.  Functionality superseded by sigprocmask.
4564 */
4565SYSCALL_DEFINE0(sgetmask)
4566{
4567	/* SMP safe */
4568	return current->blocked.sig[0];
4569}
4570
4571SYSCALL_DEFINE1(ssetmask, int, newmask)
4572{
4573	int old = current->blocked.sig[0];
4574	sigset_t newset;
4575
4576	siginitset(&newset, newmask);
4577	set_current_blocked(&newset);
4578
4579	return old;
4580}
4581#endif /* CONFIG_SGETMASK_SYSCALL */
4582
4583#ifdef __ARCH_WANT_SYS_SIGNAL
4584/*
4585 * For backwards compatibility.  Functionality superseded by sigaction.
4586 */
4587SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4588{
4589	struct k_sigaction new_sa, old_sa;
4590	int ret;
4591
4592	new_sa.sa.sa_handler = handler;
4593	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4594	sigemptyset(&new_sa.sa.sa_mask);
4595
4596	ret = do_sigaction(sig, &new_sa, &old_sa);
4597
4598	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4599}
4600#endif /* __ARCH_WANT_SYS_SIGNAL */
4601
4602#ifdef __ARCH_WANT_SYS_PAUSE
4603
4604SYSCALL_DEFINE0(pause)
4605{
4606	while (!signal_pending(current)) {
4607		__set_current_state(TASK_INTERRUPTIBLE);
4608		schedule();
4609	}
4610	return -ERESTARTNOHAND;
4611}
4612
4613#endif
4614
4615static int sigsuspend(sigset_t *set)
4616{
4617	current->saved_sigmask = current->blocked;
4618	set_current_blocked(set);
4619
4620	while (!signal_pending(current)) {
4621		__set_current_state(TASK_INTERRUPTIBLE);
4622		schedule();
4623	}
4624	set_restore_sigmask();
4625	return -ERESTARTNOHAND;
4626}
4627
4628/**
4629 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
4630 *	until a signal is received
4631 *  @unewset: new signal mask value
4632 *  @sigsetsize: size of sigset_t type
4633 */
4634SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4635{
4636	sigset_t newset;
4637
4638	/* XXX: Don't preclude handling different sized sigset_t's.  */
4639	if (sigsetsize != sizeof(sigset_t))
4640		return -EINVAL;
4641
4642	if (copy_from_user(&newset, unewset, sizeof(newset)))
4643		return -EFAULT;
4644	return sigsuspend(&newset);
4645}
4646 
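    /*
     * Illustrative sketch, not part of the kernel source: the value of
     * sigsuspend(2) is the atomic "swap mask and sleep", which closes the
     * check-then-wait race that pause(2) leaves open.  got_usr1 is a
     * hypothetical volatile sig_atomic_t flag set by the handler:
     *
     *	sigset_t block, old;
     *	sigemptyset(&block);
     *	sigaddset(&block, SIGUSR1);
     *	sigprocmask(SIG_BLOCK, &block, &old);
     *	while (!got_usr1)
     *		sigsuspend(&old);	// handler runs with the old mask
     *	sigprocmask(SIG_SETMASK, &old, NULL);
     */
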
4647#ifdef CONFIG_COMPAT
4648COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4649{
4650	sigset_t newset;
4651
4652	/* XXX: Don't preclude handling different sized sigset_t's.  */
4653	if (sigsetsize != sizeof(sigset_t))
4654		return -EINVAL;
4655
4656	if (get_compat_sigset(&newset, unewset))
4657		return -EFAULT;
4658	return sigsuspend(&newset);
4659}
4660#endif
4661
4662#ifdef CONFIG_OLD_SIGSUSPEND
4663SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4664{
4665	sigset_t blocked;
4666	siginitset(&blocked, mask);
4667	return sigsuspend(&blocked);
4668}
4669#endif
4670#ifdef CONFIG_OLD_SIGSUSPEND3
4671SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4672{
4673	sigset_t blocked;
4674	siginitset(&blocked, mask);
4675	return sigsuspend(&blocked);
4676}
4677#endif
4678
4679__weak const char *arch_vma_name(struct vm_area_struct *vma)
4680{
4681	return NULL;
4682}
4683
4684static inline void siginfo_buildtime_checks(void)
4685{
4686	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4687
4688	/* Verify the offsets in the two siginfos match */
4689#define CHECK_OFFSET(field) \
4690	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4691
4692	/* kill */
4693	CHECK_OFFSET(si_pid);
4694	CHECK_OFFSET(si_uid);
4695
4696	/* timer */
4697	CHECK_OFFSET(si_tid);
4698	CHECK_OFFSET(si_overrun);
4699	CHECK_OFFSET(si_value);
4700
4701	/* rt */
4702	CHECK_OFFSET(si_pid);
4703	CHECK_OFFSET(si_uid);
4704	CHECK_OFFSET(si_value);
4705
4706	/* sigchld */
4707	CHECK_OFFSET(si_pid);
4708	CHECK_OFFSET(si_uid);
4709	CHECK_OFFSET(si_status);
4710	CHECK_OFFSET(si_utime);
4711	CHECK_OFFSET(si_stime);
4712
4713	/* sigfault */
4714	CHECK_OFFSET(si_addr);
4715	CHECK_OFFSET(si_trapno);
4716	CHECK_OFFSET(si_addr_lsb);
4717	CHECK_OFFSET(si_lower);
4718	CHECK_OFFSET(si_upper);
4719	CHECK_OFFSET(si_pkey);
4720	CHECK_OFFSET(si_perf_data);
4721	CHECK_OFFSET(si_perf_type);
4722	CHECK_OFFSET(si_perf_flags);
4723
4724	/* sigpoll */
4725	CHECK_OFFSET(si_band);
4726	CHECK_OFFSET(si_fd);
4727
4728	/* sigsys */
4729	CHECK_OFFSET(si_call_addr);
4730	CHECK_OFFSET(si_syscall);
4731	CHECK_OFFSET(si_arch);
4732#undef CHECK_OFFSET
4733
4734	/* usb asyncio */
4735	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4736		     offsetof(struct siginfo, si_addr));
4737	if (sizeof(int) == sizeof(void __user *)) {
4738		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4739			     sizeof(void __user *));
4740	} else {
4741		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4742			      sizeof_field(struct siginfo, si_uid)) !=
4743			     sizeof(void __user *));
4744		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4745			     offsetof(struct siginfo, si_uid));
4746	}
4747#ifdef CONFIG_COMPAT
4748	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4749		     offsetof(struct compat_siginfo, si_addr));
4750	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4751		     sizeof(compat_uptr_t));
4752	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4753		     sizeof_field(struct siginfo, si_pid));
4754#endif
4755}
4756
4757void __init signals_init(void)
4758{
4759	siginfo_buildtime_checks();
4760
4761	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4762}
4763
4764#ifdef CONFIG_KGDB_KDB
4765#include <linux/kdb.h>
4766/*
4767 * kdb_send_sig - Allows kdb to send signals without exposing
4768 * signal internals.  This function checks if the required locks are
4769 * available before calling the main signal code, to avoid kdb
4770 * deadlocks.
4771 */
4772void kdb_send_sig(struct task_struct *t, int sig)
4773{
4774	static struct task_struct *kdb_prev_t;
4775	int new_t, ret;
4776	if (!spin_trylock(&t->sighand->siglock)) {
4777		kdb_printf("Can't do kill command now.\n"
4778			   "The sigmask lock is held somewhere else in "
4779			   "kernel, try again later\n");
4780		return;
4781	}
4782	new_t = kdb_prev_t != t;
4783	kdb_prev_t = t;
4784	if (!task_is_running(t) && new_t) {
4785		spin_unlock(&t->sighand->siglock);
4786		kdb_printf("Process is not RUNNING, sending a signal from "
4787			   "kdb risks deadlock\n"
4788			   "on the run queue locks. "
4789			   "The signal has _not_ been sent.\n"
4790			   "Reissue the kill command if you want to risk "
4791			   "the deadlock.\n");
4792		return;
4793	}
4794	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4795	spin_unlock(&t->sighand->siglock);
4796	if (ret)
4797		kdb_printf("Fail to deliver Signal %d to process %d.\n",
4798			   sig, t->pid);
4799	else
4800		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4801}
4802#endif	/* CONFIG_KGDB_KDB */
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/kernel/signal.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   8 *
   9 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
  10 *		Changes to use preallocated sigqueue structures
  11 *		to allow signals to be sent reliably.
  12 */
  13
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/init.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/user.h>
  19#include <linux/sched/debug.h>
  20#include <linux/sched/task.h>
  21#include <linux/sched/task_stack.h>
  22#include <linux/sched/cputime.h>
  23#include <linux/file.h>
  24#include <linux/fs.h>
  25#include <linux/mm.h>
  26#include <linux/proc_fs.h>
  27#include <linux/tty.h>
  28#include <linux/binfmts.h>
  29#include <linux/coredump.h>
  30#include <linux/security.h>
  31#include <linux/syscalls.h>
  32#include <linux/ptrace.h>
  33#include <linux/signal.h>
  34#include <linux/signalfd.h>
  35#include <linux/ratelimit.h>
  36#include <linux/task_work.h>
  37#include <linux/capability.h>
  38#include <linux/freezer.h>
  39#include <linux/pid_namespace.h>
  40#include <linux/nsproxy.h>
  41#include <linux/user_namespace.h>
  42#include <linux/uprobes.h>
  43#include <linux/compat.h>
  44#include <linux/cn_proc.h>
  45#include <linux/compiler.h>
  46#include <linux/posix-timers.h>
  47#include <linux/cgroup.h>
  48#include <linux/audit.h>
  49#include <linux/sysctl.h>
  50#include <uapi/linux/pidfd.h>
  51
  52#define CREATE_TRACE_POINTS
  53#include <trace/events/signal.h>
  54
  55#include <asm/param.h>
  56#include <linux/uaccess.h>
  57#include <asm/unistd.h>
  58#include <asm/siginfo.h>
  59#include <asm/cacheflush.h>
  60#include <asm/syscall.h>	/* for syscall_get_* */
  61
  62#include "time/posix-timers.h"
  63
  64/*
  65 * SLAB caches for signal bits.
  66 */
  67
  68static struct kmem_cache *sigqueue_cachep;
  69
  70int print_fatal_signals __read_mostly;
  71
  72static void __user *sig_handler(struct task_struct *t, int sig)
  73{
  74	return t->sighand->action[sig - 1].sa.sa_handler;
  75}
  76
  77static inline bool sig_handler_ignored(void __user *handler, int sig)
  78{
  79	/* Is it explicitly or implicitly ignored? */
  80	return handler == SIG_IGN ||
  81	       (handler == SIG_DFL && sig_kernel_ignore(sig));
  82}
  83
  84static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
  85{
  86	void __user *handler;
  87
  88	handler = sig_handler(t, sig);
  89
  90	/* SIGKILL and SIGSTOP may not be sent to the global init */
  91	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
  92		return true;
  93
  94	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  95	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
  96		return true;
  97
  98	/* Only allow kernel generated signals to this kthread */
  99	if (unlikely((t->flags & PF_KTHREAD) &&
 100		     (handler == SIG_KTHREAD_KERNEL) && !force))
 101		return true;
 102
 103	return sig_handler_ignored(handler, sig);
 104}
 105
 106static bool sig_ignored(struct task_struct *t, int sig, bool force)
 107{
 108	/*
 109	 * Blocked signals are never ignored, since the
 110	 * signal handler may change by the time it is
 111	 * unblocked.
 112	 */
 113	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 114		return false;
 115
 116	/*
 117	 * Tracers may want to know about even ignored signal unless it
 118	 * is SIGKILL which can't be reported anyway but can be ignored
 119	 * by SIGNAL_UNKILLABLE task.
 120	 */
 121	if (t->ptrace && sig != SIGKILL)
 122		return false;
 123
 124	return sig_task_ignored(t, sig, force);
 125}
 126
 127/*
 128 * Re-calculate pending state from the set of locally pending
 129 * signals, globally pending signals, and blocked signals.
 130 */
 131static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 132{
 133	unsigned long ready;
 134	long i;
 135
 136	switch (_NSIG_WORDS) {
 137	default:
 138		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 139			ready |= signal->sig[i] &~ blocked->sig[i];
 140		break;
 141
 142	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 143		ready |= signal->sig[2] &~ blocked->sig[2];
 144		ready |= signal->sig[1] &~ blocked->sig[1];
 145		ready |= signal->sig[0] &~ blocked->sig[0];
 146		break;
 147
 148	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 149		ready |= signal->sig[0] &~ blocked->sig[0];
 150		break;
 151
 152	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 153	}
 154	return ready !=	0;
 155}
 156
 157#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 158
 159static bool recalc_sigpending_tsk(struct task_struct *t)
 160{
 161	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
 162	    PENDING(&t->pending, &t->blocked) ||
 163	    PENDING(&t->signal->shared_pending, &t->blocked) ||
 164	    cgroup_task_frozen(t)) {
 165		set_tsk_thread_flag(t, TIF_SIGPENDING);
 166		return true;
 167	}
 168
 169	/*
 170	 * We must never clear the flag in another thread, or in current
 171	 * when it's possible the current syscall is returning -ERESTART*.
 172	 * So we don't clear it here, and only callers who know they should do.
 173	 */
 174	return false;
 175}
 176
 
 
 
 
 
 
 
 
 
 
 177void recalc_sigpending(void)
 178{
 179	if (!recalc_sigpending_tsk(current) && !freezing(current))
 180		clear_thread_flag(TIF_SIGPENDING);
 181
 182}
 183EXPORT_SYMBOL(recalc_sigpending);
 184
 185void calculate_sigpending(void)
 186{
 187	/* Have any signals or users of TIF_SIGPENDING been delayed
 188	 * until after fork?
 189	 */
 190	spin_lock_irq(&current->sighand->siglock);
 191	set_tsk_thread_flag(current, TIF_SIGPENDING);
 192	recalc_sigpending();
 193	spin_unlock_irq(&current->sighand->siglock);
 194}
 195
 196/* Given the mask, find the first available signal that should be serviced. */
 197
 198#define SYNCHRONOUS_MASK \
 199	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 200	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 201
 202int next_signal(struct sigpending *pending, sigset_t *mask)
 203{
 204	unsigned long i, *s, *m, x;
 205	int sig = 0;
 206
 207	s = pending->signal.sig;
 208	m = mask->sig;
 209
 210	/*
 211	 * Handle the first word specially: it contains the
 212	 * synchronous signals that need to be dequeued first.
 213	 */
 214	x = *s &~ *m;
 215	if (x) {
 216		if (x & SYNCHRONOUS_MASK)
 217			x &= SYNCHRONOUS_MASK;
 218		sig = ffz(~x) + 1;
 219		return sig;
 220	}
 221
 222	switch (_NSIG_WORDS) {
 223	default:
 224		for (i = 1; i < _NSIG_WORDS; ++i) {
 225			x = *++s &~ *++m;
 226			if (!x)
 227				continue;
 228			sig = ffz(~x) + i*_NSIG_BPW + 1;
 229			break;
 230		}
 231		break;
 232
 233	case 2:
 234		x = s[1] &~ m[1];
 235		if (!x)
 236			break;
 237		sig = ffz(~x) + _NSIG_BPW + 1;
 238		break;
 239
 240	case 1:
 241		/* Nothing to do */
 242		break;
 243	}
 244
 245	return sig;
 246}
 247
 248static inline void print_dropped_signal(int sig)
 249{
 250	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 251
 252	if (!print_fatal_signals)
 253		return;
 254
 255	if (!__ratelimit(&ratelimit_state))
 256		return;
 257
 258	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 259				current->comm, current->pid, sig);
 260}
 261
 262/**
 263 * task_set_jobctl_pending - set jobctl pending bits
 264 * @task: target task
 265 * @mask: pending bits to set
 266 *
 267 * Clear @mask from @task->jobctl.  @mask must be subset of
 268 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 269 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 270 * cleared.  If @task is already being killed or exiting, this function
 271 * becomes noop.
 272 *
 273 * CONTEXT:
 274 * Must be called with @task->sighand->siglock held.
 275 *
 276 * RETURNS:
 277 * %true if @mask is set, %false if made noop because @task was dying.
 278 */
 279bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 280{
 281	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 282			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 283	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 284
 285	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 286		return false;
 287
 288	if (mask & JOBCTL_STOP_SIGMASK)
 289		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 290
 291	task->jobctl |= mask;
 292	return true;
 293}
 294
 295/**
 296 * task_clear_jobctl_trapping - clear jobctl trapping bit
 297 * @task: target task
 298 *
 299 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 300 * Clear it and wake up the ptracer.  Note that we don't need any further
 301 * locking.  @task->siglock guarantees that @task->parent points to the
 302 * ptracer.
 303 *
 304 * CONTEXT:
 305 * Must be called with @task->sighand->siglock held.
 306 */
 307void task_clear_jobctl_trapping(struct task_struct *task)
 308{
 309	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 310		task->jobctl &= ~JOBCTL_TRAPPING;
 311		smp_mb();	/* advised by wake_up_bit() */
 312		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 313	}
 314}
 315
 316/**
 317 * task_clear_jobctl_pending - clear jobctl pending bits
 318 * @task: target task
 319 * @mask: pending bits to clear
 320 *
 321 * Clear @mask from @task->jobctl.  @mask must be subset of
 322 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 323 * STOP bits are cleared together.
 324 *
 325 * If clearing of @mask leaves no stop or trap pending, this function calls
 326 * task_clear_jobctl_trapping().
 327 *
 328 * CONTEXT:
 329 * Must be called with @task->sighand->siglock held.
 330 */
 331void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 332{
 333	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 334
 335	if (mask & JOBCTL_STOP_PENDING)
 336		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 337
 338	task->jobctl &= ~mask;
 339
 340	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 341		task_clear_jobctl_trapping(task);
 342}
 343
 344/**
 345 * task_participate_group_stop - participate in a group stop
 346 * @task: task participating in a group stop
 347 *
 348 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 349 * Group stop states are cleared and the group stop count is consumed if
 350 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 351 * stop, the appropriate `SIGNAL_*` flags are set.
 352 *
 353 * CONTEXT:
 354 * Must be called with @task->sighand->siglock held.
 355 *
 356 * RETURNS:
 357 * %true if group stop completion should be notified to the parent, %false
 358 * otherwise.
 359 */
 360static bool task_participate_group_stop(struct task_struct *task)
 361{
 362	struct signal_struct *sig = task->signal;
 363	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 364
 365	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 366
 367	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 368
 369	if (!consume)
 370		return false;
 371
 372	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 373		sig->group_stop_count--;
 374
 375	/*
 376	 * Tell the caller to notify completion iff we are entering into a
 377	 * fresh group stop.  Read comment in do_signal_stop() for details.
 378	 */
 379	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 380		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 381		return true;
 382	}
 383	return false;
 384}
 385
 386void task_join_group_stop(struct task_struct *task)
 387{
 388	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
 389	struct signal_struct *sig = current->signal;
 390
 391	if (sig->group_stop_count) {
 392		sig->group_stop_count++;
 393		mask |= JOBCTL_STOP_CONSUME;
 394	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
 395		return;
 396
 397	/* Have the new thread join an on-going signal group stop */
 398	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 399}
 400
 401static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
 402				       int override_rlimit)
 
 
 
 
 
 
 403{
 404	struct ucounts *ucounts;
 
 405	long sigpending;
 406
 407	/*
 408	 * Protect access to @t credentials. This can go away when all
 409	 * callers hold rcu read lock.
 410	 *
 411	 * NOTE! A pending signal will hold on to the user refcount,
 412	 * and we get/put the refcount only when the sigpending count
 413	 * changes from/to zero.
 414	 */
 415	rcu_read_lock();
 416	ucounts = task_ucounts(t);
 417	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
 418					    override_rlimit);
 419	rcu_read_unlock();
 420	if (!sigpending)
 421		return NULL;
 422
 423	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
 424		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 
 425		print_dropped_signal(sig);
 426		return NULL;
 427	}
 428
 429	return ucounts;
 430}
 431
 432static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
 433			    const unsigned int sigqueue_flags)
 434{
 435	INIT_LIST_HEAD(&q->list);
 436	q->flags = sigqueue_flags;
 437	q->ucounts = ucounts;
 438}
 439
 440/*
 441 * allocate a new signal queue record
 442 * - this may be called without locks if and only if t == current, otherwise an
 443 *   appropriate lock must be held to stop the target task from exiting
 444 */
 445static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 446				       int override_rlimit)
 447{
 448	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
 449	struct sigqueue *q;
 450
 451	if (!ucounts)
 452		return NULL;
 453
 454	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
 455	if (!q) {
 456		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 457		return NULL;
 
 
 
 458	}
 459
 460	__sigqueue_init(q, ucounts, 0);
 461	return q;
 462}
 463
 464static void __sigqueue_free(struct sigqueue *q)
 465{
 466	if (q->flags & SIGQUEUE_PREALLOC) {
 467		posixtimer_sigqueue_putref(q);
 468		return;
 469	}
 470	if (q->ucounts) {
 471		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
 472		q->ucounts = NULL;
 473	}
 474	kmem_cache_free(sigqueue_cachep, q);
 475}
 476
 477void flush_sigqueue(struct sigpending *queue)
 478{
 479	struct sigqueue *q;
 480
 481	sigemptyset(&queue->signal);
 482	while (!list_empty(&queue->list)) {
 483		q = list_entry(queue->list.next, struct sigqueue , list);
 484		list_del_init(&q->list);
 485		__sigqueue_free(q);
 486	}
 487}
 488
 489/*
 490 * Flush all pending signals for this kthread.
 491 */
 492void flush_signals(struct task_struct *t)
 493{
 494	unsigned long flags;
 495
 496	spin_lock_irqsave(&t->sighand->siglock, flags);
 497	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 498	flush_sigqueue(&t->pending);
 499	flush_sigqueue(&t->signal->shared_pending);
 500	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 501}
 502EXPORT_SYMBOL(flush_signals);
 503
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 504void ignore_signals(struct task_struct *t)
 505{
 506	int i;
 507
 508	for (i = 0; i < _NSIG; ++i)
 509		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 510
 511	flush_signals(t);
 512}
 513
 514/*
 515 * Flush all handlers for a task.
 516 */
 517
 518void
 519flush_signal_handlers(struct task_struct *t, int force_default)
 520{
 521	int i;
 522	struct k_sigaction *ka = &t->sighand->action[0];
 523	for (i = _NSIG ; i != 0 ; i--) {
 524		if (force_default || ka->sa.sa_handler != SIG_IGN)
 525			ka->sa.sa_handler = SIG_DFL;
 526		ka->sa.sa_flags = 0;
 527#ifdef __ARCH_HAS_SA_RESTORER
 528		ka->sa.sa_restorer = NULL;
 529#endif
 530		sigemptyset(&ka->sa.sa_mask);
 531		ka++;
 532	}
 533}
 534
 535bool unhandled_signal(struct task_struct *tsk, int sig)
 536{
 537	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 538	if (is_global_init(tsk))
 539		return true;
 540
 541	if (handler != SIG_IGN && handler != SIG_DFL)
 542		return false;
 543
 544	/* If dying, we handle all new signals by ignoring them */
 545	if (fatal_signal_pending(tsk))
 546		return false;
 547
 548	/* if ptraced, let the tracer determine */
 549	return !tsk->ptrace;
 550}
 551
 552static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
 553			   struct sigqueue **timer_sigq)
 554{
 555	struct sigqueue *q, *first = NULL;
 556
 557	/*
 558	 * Collect the siginfo appropriate to this signal.  Check if
 559	 * there is another siginfo for the same signal.
 560	*/
 561	list_for_each_entry(q, &list->list, list) {
 562		if (q->info.si_signo == sig) {
 563			if (first)
 564				goto still_pending;
 565			first = q;
 566		}
 567	}
 568
 569	sigdelset(&list->signal, sig);
 570
 571	if (first) {
 572still_pending:
 573		list_del_init(&first->list);
 574		copy_siginfo(info, &first->info);
 575
 576		/*
 577		 * posix-timer signals are preallocated and freed when the last
 578		 * reference count is dropped in posixtimer_deliver_signal() or
 579		 * immediately on timer deletion when the signal is not pending.
 580		 * Spare the extra round through __sigqueue_free() which is
 581		 * ignoring preallocated signals.
 582		 */
 583		if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
 584			*timer_sigq = first;
 585		else
 586			__sigqueue_free(first);
 587	} else {
 588		/*
 589		 * Ok, it wasn't in the queue.  This must be
 590		 * a fast-pathed signal or we must have been
 591		 * out of queue space.  So zero out the info.
 592		 */
 593		clear_siginfo(info);
 594		info->si_signo = sig;
 595		info->si_errno = 0;
 596		info->si_code = SI_USER;
 597		info->si_pid = 0;
 598		info->si_uid = 0;
 599	}
 600}
 601
 602static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 603			    kernel_siginfo_t *info, struct sigqueue **timer_sigq)
 604{
 605	int sig = next_signal(pending, mask);
 606
 607	if (sig)
 608		collect_signal(sig, pending, info, timer_sigq);
 609	return sig;
 610}
 611
 612/*
 613 * Try to dequeue a signal. If a deliverable signal is found fill in the
 614 * caller provided siginfo and return the signal number. Otherwise return
 615 * 0.
 
 616 */
 617int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
 
 618{
 619	struct task_struct *tsk = current;
 620	struct sigqueue *timer_sigq;
 621	int signr;
 622
 623	lockdep_assert_held(&tsk->sighand->siglock);
 624
 625again:
 626	*type = PIDTYPE_PID;
 627	timer_sigq = NULL;
 628	signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq);
 629	if (!signr) {
 630		*type = PIDTYPE_TGID;
 631		signr = __dequeue_signal(&tsk->signal->shared_pending,
 632					 mask, info, &timer_sigq);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 633
 634		if (unlikely(signr == SIGALRM))
 635			posixtimer_rearm_itimer(tsk);
 
 
 
 
 
 
 636	}
 637
 638	recalc_sigpending();
 639	if (!signr)
 640		return 0;
 641
 642	if (unlikely(sig_kernel_stop(signr))) {
 643		/*
 644		 * Set a marker that we have dequeued a stop signal.  Our
 645		 * caller might release the siglock and then the pending
 646		 * stop signal it is about to process is no longer in the
 647		 * pending bitmasks, but must still be cleared by a SIGCONT
 648		 * (and overruled by a SIGKILL).  So those cases clear this
 649		 * shared flag after we've set it.  Note that this flag may
 650		 * remain set after the signal we return is ignored or
 651		 * handled.  That doesn't matter because its only purpose
 652		 * is to alert stop-signal processing code when another
 653		 * processor has come along and cleared the flag.
 654		 */
 655		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 656	}
 
 
 
 
 
 
 
 
 
 
 
 657
 658	if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) {
 659		if (!posixtimer_deliver_signal(info, timer_sigq))
 660			goto again;
 661	}
 662
 663	return signr;
 664}
 665EXPORT_SYMBOL_GPL(dequeue_signal);
 666
 667static int dequeue_synchronous_signal(kernel_siginfo_t *info)
 668{
 669	struct task_struct *tsk = current;
 670	struct sigpending *pending = &tsk->pending;
 671	struct sigqueue *q, *sync = NULL;
 672
 673	/*
 674	 * Might a synchronous signal be in the queue?
 675	 */
 676	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
 677		return 0;
 678
 679	/*
 680	 * Return the first synchronous signal in the queue.
 681	 */
 682	list_for_each_entry(q, &pending->list, list) {
 683		/* Synchronous signals have a positive si_code */
 684		if ((q->info.si_code > SI_USER) &&
 685		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
 686			sync = q;
 687			goto next;
 688		}
 689	}
 690	return 0;
 691next:
 692	/*
 693	 * Check if there is another siginfo for the same signal.
 694	 */
 695	list_for_each_entry_continue(q, &pending->list, list) {
 696		if (q->info.si_signo == sync->info.si_signo)
 697			goto still_pending;
 698	}
 699
 700	sigdelset(&pending->signal, sync->info.si_signo);
 701	recalc_sigpending();
 702still_pending:
 703	list_del_init(&sync->list);
 704	copy_siginfo(info, &sync->info);
 705	__sigqueue_free(sync);
 706	return info->si_signo;
 707}
 708
 709/*
 710 * Tell a process that it has a new active signal..
 711 *
 712 * NOTE! we rely on the previous spin_lock to
 713 * lock interrupts for us! We can only be called with
 714 * "siglock" held, and the local interrupt must
 715 * have been disabled when that got acquired!
 716 *
 717 * No need to set need_resched since signal event passing
 718 * goes through ->blocked
 719 */
 720void signal_wake_up_state(struct task_struct *t, unsigned int state)
 721{
 722	lockdep_assert_held(&t->sighand->siglock);
 723
 724	set_tsk_thread_flag(t, TIF_SIGPENDING);
 725
 726	/*
 727	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 728	 * case. We don't check t->state here because there is a race with it
 729	 * executing another processor and just now entering stopped state.
 730	 * By using wake_up_state, we ensure the process will wake up and
 731	 * handle its death signal.
 732	 */
 733	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 734		kick_process(t);
 735}
 736
 737static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);
 738
 739static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
 740{
 741	if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
 742		__sigqueue_free(q);
 743	else
 744		posixtimer_sig_ignore(tsk, q);
 745}
 746
 747/* Remove signals in mask from the pending set and queue. */
 748static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s)
 749{
 750	struct sigqueue *q, *n;
 751	sigset_t m;
 752
 753	lockdep_assert_held(&p->sighand->siglock);
 754
 755	sigandsets(&m, mask, &s->signal);
 756	if (sigisemptyset(&m))
 757		return;
 758
 759	sigandnsets(&s->signal, &s->signal, mask);
 760	list_for_each_entry_safe(q, n, &s->list, list) {
 761		if (sigismember(mask, q->info.si_signo)) {
 762			list_del_init(&q->list);
 763			sigqueue_free_ignored(p, q);
 764		}
 765	}
 766}
 767
 768static inline int is_si_special(const struct kernel_siginfo *info)
 769{
 770	return info <= SEND_SIG_PRIV;
 771}
 772
 773static inline bool si_fromuser(const struct kernel_siginfo *info)
 774{
 775	return info == SEND_SIG_NOINFO ||
 776		(!is_si_special(info) && SI_FROMUSER(info));
 777}
 778
 779/*
 780 * called with RCU read lock from check_kill_permission()
 781 */
 782static bool kill_ok_by_cred(struct task_struct *t)
 783{
 784	const struct cred *cred = current_cred();
 785	const struct cred *tcred = __task_cred(t);
 786
 787	return uid_eq(cred->euid, tcred->suid) ||
 788	       uid_eq(cred->euid, tcred->uid) ||
 789	       uid_eq(cred->uid, tcred->suid) ||
 790	       uid_eq(cred->uid, tcred->uid) ||
 791	       ns_capable(tcred->user_ns, CAP_KILL);
 792}
 793
 794/*
 795 * Bad permissions for sending the signal
 796 * - the caller must hold the RCU read lock
 797 */
 798static int check_kill_permission(int sig, struct kernel_siginfo *info,
 799				 struct task_struct *t)
 800{
 801	struct pid *sid;
 802	int error;
 803
 804	if (!valid_signal(sig))
 805		return -EINVAL;
 806
 807	if (!si_fromuser(info))
 808		return 0;
 809
 810	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 811	if (error)
 812		return error;
 813
 814	if (!same_thread_group(current, t) &&
 815	    !kill_ok_by_cred(t)) {
 816		switch (sig) {
 817		case SIGCONT:
 818			sid = task_session(t);
 819			/*
 820			 * We don't return the error if sid == NULL. The
 821			 * task was unhashed, the caller must notice this.
 822			 */
 823			if (!sid || sid == task_session(current))
 824				break;
 825			fallthrough;
 826		default:
 827			return -EPERM;
 828		}
 829	}
 830
 831	return security_task_kill(t, info, sig, NULL);
 832}
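
/*
 * The SIGCONT carve-out above has a visible userspace effect; a
 * minimal sketch ("other_uid_pid" is hypothetical): a process may
 * continue a stopped task in its own session even when the uid checks
 * in kill_ok_by_cred() would fail, while other signals still get
 * -EPERM:
 *
 *	kill(other_uid_pid, SIGCONT);	// succeeds: same session
 *	kill(other_uid_pid, SIGTERM);	// fails with EPERM
 */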
 833
 834/**
 835 * ptrace_trap_notify - schedule trap to notify ptracer
 836 * @t: tracee wanting to notify tracer
 837 *
 838 * This function schedules sticky ptrace trap which is cleared on the next
 839 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 840 * ptracer.
 841 *
 842 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 843 * ptracer is listening for events, tracee is woken up so that it can
 844 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 845 * eventually taken without returning to userland after the existing traps
 846 * are finished by PTRACE_CONT.
 847 *
 848 * CONTEXT:
 849 * Must be called with @task->sighand->siglock held.
 850 */
 851static void ptrace_trap_notify(struct task_struct *t)
 852{
 853	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 854	lockdep_assert_held(&t->sighand->siglock);
 855
 856	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 857	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 858}
 859
 860/*
 861 * Handle magic process-wide effects of stop/continue signals. Unlike
 862 * the signal actions, these happen immediately at signal-generation
 863 * time regardless of blocking, ignoring, or handling.  This does the
 864 * actual continuing for SIGCONT, but not the actual stopping for stop
 865 * signals. The process stop is done as a signal action for SIG_DFL.
 866 *
 867 * Returns true if the signal should be actually delivered, otherwise
 868 * it should be dropped.
 869 */
 870static bool prepare_signal(int sig, struct task_struct *p, bool force)
 871{
 872	struct signal_struct *signal = p->signal;
 873	struct task_struct *t;
 874	sigset_t flush;
 875
 876	if (signal->flags & SIGNAL_GROUP_EXIT) {
 877		if (signal->core_state)
 878			return sig == SIGKILL;
 879		/*
 880		 * The process is in the middle of dying, drop the signal.
 881		 */
 882		return false;
 883	} else if (sig_kernel_stop(sig)) {
 884		/*
 885		 * This is a stop signal.  Remove SIGCONT from all queues.
 886		 */
 887		siginitset(&flush, sigmask(SIGCONT));
 888		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
 889		for_each_thread(p, t)
 890			flush_sigqueue_mask(p, &flush, &t->pending);
 891	} else if (sig == SIGCONT) {
 892		unsigned int why;
 893		/*
 894		 * Remove all stop signals from all queues, wake all threads.
 895		 */
 896		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 897		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
 898		for_each_thread(p, t) {
 899			flush_sigqueue_mask(p, &flush, &t->pending);
 900			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 901			if (likely(!(t->ptrace & PT_SEIZED))) {
 902				t->jobctl &= ~JOBCTL_STOPPED;
 903				wake_up_state(t, __TASK_STOPPED);
 904			} else
 905				ptrace_trap_notify(t);
 906		}
 907
 908		/*
 909		 * Notify the parent with CLD_CONTINUED if we were stopped.
 910		 *
 911		 * If we were in the middle of a group stop, we pretend it
 912		 * was already finished, and then continued. Since SIGCHLD
 913		 * doesn't queue we report only CLD_STOPPED, as if the next
 914		 * CLD_CONTINUED was dropped.
 915		 */
 916		why = 0;
 917		if (signal->flags & SIGNAL_STOP_STOPPED)
 918			why |= SIGNAL_CLD_CONTINUED;
 919		else if (signal->group_stop_count)
 920			why |= SIGNAL_CLD_STOPPED;
 921
 922		if (why) {
 923			/*
 924			 * The first thread which returns from do_signal_stop()
 925			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 926			 * notify its parent. See get_signal().
 927			 */
 928			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 929			signal->group_stop_count = 0;
 930			signal->group_exit_code = 0;
 931		}
 932	}
 933
 934	return !sig_ignored(p, sig, force);
 935}
 936
 937/*
 938 * Test if P wants to take SIG.  After we've checked all threads with this,
 939 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 940 * blocking SIG were ruled out because they are not running and already
 941 * have pending signals.  Such threads will dequeue from the shared queue
 942 * as soon as they're available, so putting the signal on the shared queue
 943 * will be equivalent to sending it to one such thread.
 944 */
 945static inline bool wants_signal(int sig, struct task_struct *p)
 946{
 947	if (sigismember(&p->blocked, sig))
 948		return false;
 949
 950	if (p->flags & PF_EXITING)
 951		return false;
 952
 953	if (sig == SIGKILL)
 954		return true;
 955
 956	if (task_is_stopped_or_traced(p))
 957		return false;
 958
 959	return task_curr(p) || !task_sigpending(p);
 960}
 961
 962static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 963{
 964	struct signal_struct *signal = p->signal;
 965	struct task_struct *t;
 966
 967	/*
 968	 * Now find a thread we can wake up to take the signal off the queue.
 969	 *
 970	 * Try the suggested task first (may or may not be the main thread).
 971	 */
 972	if (wants_signal(sig, p))
 973		t = p;
 974	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
 975		/*
 976		 * There is just one thread and it does not need to be woken.
 977		 * It will dequeue unblocked signals before it runs again.
 978		 */
 979		return;
 980	else {
 981		/*
 982		 * Otherwise try to find a suitable thread.
 983		 */
 984		t = signal->curr_target;
 985		while (!wants_signal(sig, t)) {
 986			t = next_thread(t);
 987			if (t == signal->curr_target)
 988				/*
 989				 * No thread needs to be woken.
 990				 * Any eligible threads will see
 991				 * the signal in the queue soon.
 992				 */
 993				return;
 994		}
 995		signal->curr_target = t;
 996	}
 997
 998	/*
 999	 * Found a killable thread.  If the signal will be fatal,
1000	 * then start taking the whole group down immediately.
1001	 */
1002	if (sig_fatal(p, sig) &&
1003	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1004	    !sigismember(&t->real_blocked, sig) &&
1005	    (sig == SIGKILL || !p->ptrace)) {
1006		/*
1007		 * This signal will be fatal to the whole group.
1008		 */
1009		if (!sig_kernel_coredump(sig)) {
1010			/*
1011			 * Start a group exit and wake everybody up.
1012			 * This way we don't have other threads
1013			 * running and doing things after a slower
1014			 * thread has the fatal signal pending.
1015			 */
1016			signal->flags = SIGNAL_GROUP_EXIT;
1017			signal->group_exit_code = sig;
1018			signal->group_stop_count = 0;
1019			__for_each_thread(signal, t) {
1020				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1021				sigaddset(&t->pending.signal, SIGKILL);
1022				signal_wake_up(t, 1);
1023			}
1024			return;
1025		}
1026	}
1027
1028	/*
1029	 * The signal is already in the shared-pending queue.
1030	 * Tell the chosen thread to wake up and dequeue it.
1031	 */
1032	signal_wake_up(t, sig == SIGKILL);
1033	return;
1034}
1035
1036static inline bool legacy_queue(struct sigpending *signals, int sig)
1037{
1038	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1039}
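
/*
 * Userspace-visible effect of legacy_queue(), as a minimal sketch:
 * while blocked, repeated legacy signals coalesce into one pending
 * instance, whereas real-time signals accumulate:
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigaddset(&set, SIGRTMIN);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	for (int i = 0; i < 3; i++) {
 *		kill(getpid(), SIGUSR1);	// delivered once
 *		kill(getpid(), SIGRTMIN);	// delivered three times
 *	}
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);
 */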
1040
1041static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1042				struct task_struct *t, enum pid_type type, bool force)
1043{
1044	struct sigpending *pending;
1045	struct sigqueue *q;
1046	int override_rlimit;
1047	int ret = 0, result;
1048
1049	lockdep_assert_held(&t->sighand->siglock);
1050
1051	result = TRACE_SIGNAL_IGNORED;
1052	if (!prepare_signal(sig, t, force))
1053		goto ret;
1054
1055	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1056	/*
1057	 * Short-circuit ignored signals and support queuing
1058	 * exactly one non-rt signal, so that we can get more
1059	 * detailed information about the cause of the signal.
1060	 */
1061	result = TRACE_SIGNAL_ALREADY_PENDING;
1062	if (legacy_queue(pending, sig))
1063		goto ret;
1064
1065	result = TRACE_SIGNAL_DELIVERED;
1066	/*
1067	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1068	 */
1069	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1070		goto out_set;
1071
1072	/*
1073	 * Real-time signals must be queued if sent by sigqueue, or
1074	 * some other real-time mechanism.  It is implementation
1075	 * defined whether kill() does so.  We attempt to do so, on
1076	 * the principle of least surprise, but since kill is not
1077	 * allowed to fail with EAGAIN when low on memory we just
1078	 * make sure at least one signal gets delivered and don't
1079	 * pass on the info struct.
1080	 */
1081	if (sig < SIGRTMIN)
1082		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1083	else
1084		override_rlimit = 0;
1085
1086	q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1087
1088	if (q) {
1089		list_add_tail(&q->list, &pending->list);
1090		switch ((unsigned long) info) {
1091		case (unsigned long) SEND_SIG_NOINFO:
1092			clear_siginfo(&q->info);
1093			q->info.si_signo = sig;
1094			q->info.si_errno = 0;
1095			q->info.si_code = SI_USER;
1096			q->info.si_pid = task_tgid_nr_ns(current,
1097							task_active_pid_ns(t));
1098			rcu_read_lock();
1099			q->info.si_uid =
1100				from_kuid_munged(task_cred_xxx(t, user_ns),
1101						 current_uid());
1102			rcu_read_unlock();
1103			break;
1104		case (unsigned long) SEND_SIG_PRIV:
1105			clear_siginfo(&q->info);
1106			q->info.si_signo = sig;
1107			q->info.si_errno = 0;
1108			q->info.si_code = SI_KERNEL;
1109			q->info.si_pid = 0;
1110			q->info.si_uid = 0;
1111			break;
1112		default:
1113			copy_siginfo(&q->info, info);
1114			break;
1115		}
1116	} else if (!is_si_special(info) &&
1117		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1118		/*
1119		 * Queue overflow, abort.  We may abort if the
1120		 * signal was rt and sent by user using something
1121		 * other than kill().
1122		 */
1123		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1124		ret = -EAGAIN;
1125		goto ret;
1126	} else {
1127		/*
1128		 * This is a silent loss of information.  We still
1129		 * send the signal, but the *info bits are lost.
1130		 */
1131		result = TRACE_SIGNAL_LOSE_INFO;
1132	}
1133
1134out_set:
1135	signalfd_notify(t, sig);
1136	sigaddset(&pending->signal, sig);
1137
1138	/* Let multiprocess signals appear after on-going forks */
1139	if (type > PIDTYPE_TGID) {
1140		struct multiprocess_signals *delayed;
1141		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1142			sigset_t *signal = &delayed->signal;
1143			/* Can't queue both a stop and a continue signal */
1144			if (sig == SIGCONT)
1145				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1146			else if (sig_kernel_stop(sig))
1147				sigdelset(signal, SIGCONT);
1148			sigaddset(signal, sig);
1149		}
1150	}
1151
1152	complete_signal(sig, t, type);
1153ret:
1154	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1155	return ret;
1156}
1157
1158static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1159{
1160	bool ret = false;
1161	switch (siginfo_layout(info->si_signo, info->si_code)) {
1162	case SIL_KILL:
1163	case SIL_CHLD:
1164	case SIL_RT:
1165		ret = true;
1166		break;
1167	case SIL_TIMER:
1168	case SIL_POLL:
1169	case SIL_FAULT:
1170	case SIL_FAULT_TRAPNO:
1171	case SIL_FAULT_MCEERR:
1172	case SIL_FAULT_BNDERR:
1173	case SIL_FAULT_PKUERR:
1174	case SIL_FAULT_PERF_EVENT:
1175	case SIL_SYS:
1176		ret = false;
1177		break;
1178	}
1179	return ret;
1180}
1181
1182int send_signal_locked(int sig, struct kernel_siginfo *info,
1183		       struct task_struct *t, enum pid_type type)
1184{
1185	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1186	bool force = false;
1187
1188	if (info == SEND_SIG_NOINFO) {
1189		/* Force if sent from an ancestor pid namespace */
1190		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1191	} else if (info == SEND_SIG_PRIV) {
1192		/* Don't ignore kernel generated signals */
1193		force = true;
1194	} else if (has_si_pid_and_uid(info)) {
1195		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
1196		struct user_namespace *t_user_ns;
1197
1198		rcu_read_lock();
1199		t_user_ns = task_cred_xxx(t, user_ns);
1200		if (current_user_ns() != t_user_ns) {
1201			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1202			info->si_uid = from_kuid_munged(t_user_ns, uid);
1203		}
1204		rcu_read_unlock();
1205
1206		/* A kernel generated signal? */
1207		force = (info->si_code == SI_KERNEL);
1208
1209		/* From an ancestor pid namespace? */
1210		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1211			info->si_pid = 0;
1212			force = true;
1213		}
1214	}
1215	return __send_signal_locked(sig, info, t, type, force);
1216}
1217
1218static void print_fatal_signal(int signr)
1219{
1220	struct pt_regs *regs = task_pt_regs(current);
1221	struct file *exe_file;
1222
1223	exe_file = get_task_exe_file(current);
1224	if (exe_file) {
1225		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1226			exe_file, current->comm, signr);
1227		fput(exe_file);
1228	} else {
1229		pr_info("%s: potentially unexpected fatal signal %d.\n",
1230			current->comm, signr);
1231	}
1232
1233#if defined(__i386__) && !defined(__arch_um__)
1234	pr_info("code at %08lx: ", regs->ip);
1235	{
1236		int i;
1237		for (i = 0; i < 16; i++) {
1238			unsigned char insn;
1239
1240			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1241				break;
1242			pr_cont("%02x ", insn);
1243		}
1244	}
1245	pr_cont("\n");
1246#endif
1247	preempt_disable();
1248	show_regs(regs);
1249	preempt_enable();
1250}
1251
1252static int __init setup_print_fatal_signals(char *str)
1253{
1254	get_option (&str, &print_fatal_signals);
1255
1256	return 1;
1257}
1258
1259__setup("print-fatal-signals=", setup_print_fatal_signals);
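
/*
 * Usage sketch for the knob registered above: get_option() parses a
 * plain integer, so booting with
 *
 *	print-fatal-signals=1
 *
 * on the kernel command line enables the print_fatal_signal() output,
 * as does "echo 1 > /proc/sys/kernel/print-fatal-signals" at runtime.
 */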
1260
1261int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1262			enum pid_type type)
1263{
1264	unsigned long flags;
1265	int ret = -ESRCH;
1266
1267	if (lock_task_sighand(p, &flags)) {
1268		ret = send_signal_locked(sig, info, p, type);
1269		unlock_task_sighand(p, &flags);
1270	}
1271
1272	return ret;
1273}
1274
1275enum sig_handler {
1276	HANDLER_CURRENT, /* If reachable use the current handler */
1277	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1278	HANDLER_EXIT,	 /* Only visible as the process exit code */
1279};
1280
1281/*
1282 * Force a signal that the process can't ignore: if necessary
1283 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1284 *
1285 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1286 * since we do not want to have a signal handler that was blocked
1287 * be invoked when user space had explicitly blocked it.
1288 *
1289 * We don't want to have recursive SIGSEGV's etc, for example,
1290 * that is why we also clear SIGNAL_UNKILLABLE.
1291 */
1292static int
1293force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1294	enum sig_handler handler)
1295{
1296	unsigned long int flags;
1297	int ret, blocked, ignored;
1298	struct k_sigaction *action;
1299	int sig = info->si_signo;
1300
1301	spin_lock_irqsave(&t->sighand->siglock, flags);
1302	action = &t->sighand->action[sig-1];
1303	ignored = action->sa.sa_handler == SIG_IGN;
1304	blocked = sigismember(&t->blocked, sig);
1305	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1306		action->sa.sa_handler = SIG_DFL;
1307		if (handler == HANDLER_EXIT)
1308			action->sa.sa_flags |= SA_IMMUTABLE;
1309		if (blocked)
1310			sigdelset(&t->blocked, sig);
1311	}
1312	/*
1313	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1314	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1315	 */
1316	if (action->sa.sa_handler == SIG_DFL &&
1317	    (!t->ptrace || (handler == HANDLER_EXIT)))
1318		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1319	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1320	/* This can happen if the signal was already pending and blocked */
1321	if (!task_sigpending(t))
1322		signal_wake_up(t, 0);
1323	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1324
1325	return ret;
1326}
1327
1328int force_sig_info(struct kernel_siginfo *info)
1329{
1330	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1331}
1332
1333/*
1334 * Nuke all other threads in the group.
1335 */
1336int zap_other_threads(struct task_struct *p)
1337{
1338	struct task_struct *t;
1339	int count = 0;
1340
1341	p->signal->group_stop_count = 0;
1342
1343	for_other_threads(p, t) {
1344		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1345		count++;
1346
1347		/* Don't bother with already dead threads */
1348		if (t->exit_state)
1349			continue;
1350		sigaddset(&t->pending.signal, SIGKILL);
1351		signal_wake_up(t, 1);
1352	}
1353
1354	return count;
1355}
1356
1357struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1358					   unsigned long *flags)
1359{
1360	struct sighand_struct *sighand;
1361
1362	rcu_read_lock();
1363	for (;;) {
1364		sighand = rcu_dereference(tsk->sighand);
1365		if (unlikely(sighand == NULL))
1366			break;
1367
1368		/*
1369		 * This sighand can be already freed and even reused, but
1370		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1371		 * initializes ->siglock: this slab can't go away, it has
1372		 * the same object type, ->siglock can't be reinitialized.
1373		 *
1374		 * We need to ensure that tsk->sighand is still the same
1375		 * after we take the lock, we can race with de_thread() or
1376		 * __exit_signal(). In the latter case the next iteration
1377		 * must see ->sighand == NULL.
1378		 */
1379		spin_lock_irqsave(&sighand->siglock, *flags);
1380		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1381			break;
1382		spin_unlock_irqrestore(&sighand->siglock, *flags);
1383	}
1384	rcu_read_unlock();
1385
1386	return sighand;
1387}
1388
1389#ifdef CONFIG_LOCKDEP
1390void lockdep_assert_task_sighand_held(struct task_struct *task)
1391{
1392	struct sighand_struct *sighand;
1393
1394	rcu_read_lock();
1395	sighand = rcu_dereference(task->sighand);
1396	if (sighand)
1397		lockdep_assert_held(&sighand->siglock);
1398	else
1399		WARN_ON_ONCE(1);
1400	rcu_read_unlock();
1401}
1402#endif
1403
1404/*
1405 * send signal info to all the members of a thread group or to the
1406 * individual thread if type == PIDTYPE_PID.
1407 */
1408int group_send_sig_info(int sig, struct kernel_siginfo *info,
1409			struct task_struct *p, enum pid_type type)
1410{
1411	int ret;
1412
1413	rcu_read_lock();
1414	ret = check_kill_permission(sig, info, p);
1415	rcu_read_unlock();
1416
1417	if (!ret && sig)
1418		ret = do_send_sig_info(sig, info, p, type);
1419
1420	return ret;
1421}
1422
1423/*
1424 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1425 * control characters do (^C, ^Z etc)
1426 * - the caller must hold at least a readlock on tasklist_lock
1427 */
1428int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1429{
1430	struct task_struct *p = NULL;
1431	int ret = -ESRCH;
1432
1433	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1434		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1435		/*
1436		 * If group_send_sig_info() succeeds at least once ret
1437		 * becomes 0 and after that the code below has no effect.
1438		 * Otherwise we return the last err or -ESRCH if this
1439		 * process group is empty.
1440		 */
1441		if (ret)
1442			ret = err;
1443	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1444
1445	return ret;
1446}
1447
1448static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
1449				struct pid *pid, enum pid_type type)
1450{
1451	int error = -ESRCH;
1452	struct task_struct *p;
1453
1454	for (;;) {
1455		rcu_read_lock();
1456		p = pid_task(pid, PIDTYPE_PID);
1457		if (p)
1458			error = group_send_sig_info(sig, info, p, type);
1459		rcu_read_unlock();
1460		if (likely(!p || error != -ESRCH))
1461			return error;
1462		/*
1463		 * The task was unhashed in between, try again.  If it
1464		 * is dead, pid_task() will return NULL, if we race with
1465		 * de_thread() it will find the new leader.
1466		 */
1467	}
1468}
1469
1470int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1471{
1472	return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
1473}
1474
1475static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1476{
1477	int error;
1478	rcu_read_lock();
1479	error = kill_pid_info(sig, info, find_vpid(pid));
1480	rcu_read_unlock();
1481	return error;
1482}
1483
1484static inline bool kill_as_cred_perm(const struct cred *cred,
1485				     struct task_struct *target)
1486{
1487	const struct cred *pcred = __task_cred(target);
1488
1489	return uid_eq(cred->euid, pcred->suid) ||
1490	       uid_eq(cred->euid, pcred->uid) ||
1491	       uid_eq(cred->uid, pcred->suid) ||
1492	       uid_eq(cred->uid, pcred->uid);
1493}
1494
1495/*
1496 * The usb asyncio usage of siginfo is wrong.  The glibc support
1497 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1498 * AKA after the generic fields:
1499 *	kernel_pid_t	si_pid;
1500 *	kernel_uid32_t	si_uid;
1501 *	sigval_t	si_value;
1502 *
1503 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1504 * after the generic fields is:
1505 *	void __user 	*si_addr;
1506 *
1507 * This is a practical problem when there is a 64bit big endian kernel
1508 * and a 32bit userspace, as the 32bit address will be encoded in the
1509 * low 32bits of the pointer.  Those low 32bits will be stored at a
1510 * higher address than they would appear at in a 32bit pointer, so
1511 * userspace will not see the address it was expecting for its completions.
1512 *
1513 * There is nothing in the encoding that can allow
1514 * copy_siginfo_to_user32 to detect this confusion of formats, so
1515 * handle this by requiring the caller of kill_pid_usb_asyncio to
1516 * notice when this situation takes place and to store the 32bit
1517 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
1518 * parameter.
1519 */
1520int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1521			 struct pid *pid, const struct cred *cred)
1522{
1523	struct kernel_siginfo info;
1524	struct task_struct *p;
1525	unsigned long flags;
1526	int ret = -EINVAL;
1527
1528	if (!valid_signal(sig))
1529		return ret;
1530
1531	clear_siginfo(&info);
1532	info.si_signo = sig;
1533	info.si_errno = errno;
1534	info.si_code = SI_ASYNCIO;
1535	*((sigval_t *)&info.si_pid) = addr;
1536
1537	rcu_read_lock();
1538	p = pid_task(pid, PIDTYPE_PID);
1539	if (!p) {
1540		ret = -ESRCH;
1541		goto out_unlock;
1542	}
1543	if (!kill_as_cred_perm(cred, p)) {
1544		ret = -EPERM;
1545		goto out_unlock;
1546	}
1547	ret = security_task_kill(p, &info, sig, cred);
1548	if (ret)
1549		goto out_unlock;
1550
1551	if (sig) {
1552		if (lock_task_sighand(p, &flags)) {
1553			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1554			unlock_task_sighand(p, &flags);
1555		} else
1556			ret = -ESRCH;
1557	}
1558out_unlock:
1559	rcu_read_unlock();
1560	return ret;
1561}
1562EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
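
/*
 * Caller-side sketch of the workaround described above ("compat_task",
 * "ctx32" and "ctx" are hypothetical): for a 32bit caller the 32bit
 * pointer value goes into sival_int, not sival_ptr:
 *
 *	sigval_t addr;
 *
 *	if (compat_task)
 *		addr.sival_int = (int)(unsigned long)ctx32;
 *	else
 *		addr.sival_ptr = ctx;
 *	kill_pid_usb_asyncio(sig, err, addr, pid, cred);
 */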
1563
1564/*
1565 * kill_something_info() interprets pid in interesting ways just like kill(2).
1566 *
1567 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1568 * is probably wrong.  Should make it like BSD or SYSV.
1569 */
1570
1571static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1572{
1573	int ret;
1574
1575	if (pid > 0)
1576		return kill_proc_info(sig, info, pid);
1577
1578	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1579	if (pid == INT_MIN)
1580		return -ESRCH;
1581
1582	read_lock(&tasklist_lock);
1583	if (pid != -1) {
1584		ret = __kill_pgrp_info(sig, info,
1585				pid ? find_vpid(-pid) : task_pgrp(current));
1586	} else {
1587		int retval = 0, count = 0;
1588		struct task_struct * p;
1589
1590		for_each_process(p) {
1591			if (task_pid_vnr(p) > 1 &&
1592					!same_thread_group(p, current)) {
1593				int err = group_send_sig_info(sig, info, p,
1594							      PIDTYPE_MAX);
1595				++count;
1596				if (err != -EPERM)
1597					retval = err;
1598			}
1599		}
1600		ret = count ? retval : -ESRCH;
1601	}
1602	read_unlock(&tasklist_lock);
1603
1604	return ret;
1605}
1606
1607/*
1608 * These are for backward compatibility with the rest of the kernel source.
1609 */
1610
1611int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1612{
1613	/*
1614	 * Make sure legacy kernel users don't send in bad values
1615	 * (normal paths check this in check_kill_permission).
1616	 */
1617	if (!valid_signal(sig))
1618		return -EINVAL;
1619
1620	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1621}
1622EXPORT_SYMBOL(send_sig_info);
1623
1624#define __si_special(priv) \
1625	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1626
1627int
1628send_sig(int sig, struct task_struct *p, int priv)
1629{
1630	return send_sig_info(sig, __si_special(priv), p);
1631}
1632EXPORT_SYMBOL(send_sig);
1633
1634void force_sig(int sig)
1635{
1636	struct kernel_siginfo info;
1637
1638	clear_siginfo(&info);
1639	info.si_signo = sig;
1640	info.si_errno = 0;
1641	info.si_code = SI_KERNEL;
1642	info.si_pid = 0;
1643	info.si_uid = 0;
1644	force_sig_info(&info);
1645}
1646EXPORT_SYMBOL(force_sig);
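
/*
 * Typical use (illustrative; the condition is hypothetical):
 * architecture fault handlers fall back to force_sig() when a fault
 * taken in user context cannot be fixed up:
 *
 *	if (!fault_recoverable)
 *		force_sig(SIGSEGV);
 *
 * Unlike send_sig(), the signal cannot remain blocked or ignored:
 * force_sig_info_to_task() above resets SIG_IGN to SIG_DFL and
 * unblocks the signal first.
 */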
1647
1648void force_fatal_sig(int sig)
1649{
1650	struct kernel_siginfo info;
1651
1652	clear_siginfo(&info);
1653	info.si_signo = sig;
1654	info.si_errno = 0;
1655	info.si_code = SI_KERNEL;
1656	info.si_pid = 0;
1657	info.si_uid = 0;
1658	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1659}
1660
1661void force_exit_sig(int sig)
1662{
1663	struct kernel_siginfo info;
1664
1665	clear_siginfo(&info);
1666	info.si_signo = sig;
1667	info.si_errno = 0;
1668	info.si_code = SI_KERNEL;
1669	info.si_pid = 0;
1670	info.si_uid = 0;
1671	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1672}
1673
1674/*
1675 * When things go south during signal handling, we
1676 * will force a SIGSEGV. And if the signal that caused
1677 * the problem was already a SIGSEGV, we'll want to
1678 * make sure we don't even try to deliver the signal..
1679 */
1680void force_sigsegv(int sig)
1681{
1682	if (sig == SIGSEGV)
1683		force_fatal_sig(SIGSEGV);
1684	else
1685		force_sig(SIGSEGV);
1686}
1687
1688int force_sig_fault_to_task(int sig, int code, void __user *addr,
1689			    struct task_struct *t)
1690{
1691	struct kernel_siginfo info;
1692
1693	clear_siginfo(&info);
1694	info.si_signo = sig;
1695	info.si_errno = 0;
1696	info.si_code  = code;
1697	info.si_addr  = addr;
1698	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1699}
1700
1701int force_sig_fault(int sig, int code, void __user *addr)
1702{
1703	return force_sig_fault_to_task(sig, code, addr, current);
1704}
1705
1706int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1707{
1708	struct kernel_siginfo info;
1709
1710	clear_siginfo(&info);
1711	info.si_signo = sig;
1712	info.si_errno = 0;
1713	info.si_code  = code;
1714	info.si_addr  = addr;
1715	return send_sig_info(info.si_signo, &info, t);
1716}
1717
1718int force_sig_mceerr(int code, void __user *addr, short lsb)
1719{
1720	struct kernel_siginfo info;
1721
1722	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1723	clear_siginfo(&info);
1724	info.si_signo = SIGBUS;
1725	info.si_errno = 0;
1726	info.si_code = code;
1727	info.si_addr = addr;
1728	info.si_addr_lsb = lsb;
1729	return force_sig_info(&info);
1730}
1731
1732int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1733{
1734	struct kernel_siginfo info;
1735
1736	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1737	clear_siginfo(&info);
1738	info.si_signo = SIGBUS;
1739	info.si_errno = 0;
1740	info.si_code = code;
1741	info.si_addr = addr;
1742	info.si_addr_lsb = lsb;
1743	return send_sig_info(info.si_signo, &info, t);
1744}
1745EXPORT_SYMBOL(send_sig_mceerr);
1746
1747int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1748{
1749	struct kernel_siginfo info;
1750
1751	clear_siginfo(&info);
1752	info.si_signo = SIGSEGV;
1753	info.si_errno = 0;
1754	info.si_code  = SEGV_BNDERR;
1755	info.si_addr  = addr;
1756	info.si_lower = lower;
1757	info.si_upper = upper;
1758	return force_sig_info(&info);
1759}
1760
1761#ifdef SEGV_PKUERR
1762int force_sig_pkuerr(void __user *addr, u32 pkey)
1763{
1764	struct kernel_siginfo info;
1765
1766	clear_siginfo(&info);
1767	info.si_signo = SIGSEGV;
1768	info.si_errno = 0;
1769	info.si_code  = SEGV_PKUERR;
1770	info.si_addr  = addr;
1771	info.si_pkey  = pkey;
1772	return force_sig_info(&info);
1773}
1774#endif
1775
1776int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1777{
1778	struct kernel_siginfo info;
1779
1780	clear_siginfo(&info);
1781	info.si_signo     = SIGTRAP;
1782	info.si_errno     = 0;
1783	info.si_code      = TRAP_PERF;
1784	info.si_addr      = addr;
1785	info.si_perf_data = sig_data;
1786	info.si_perf_type = type;
1787
1788	/*
1789	 * Signals generated by perf events should not terminate the whole
1790	 * process if SIGTRAP is blocked, however, delivering the signal
1791	 * asynchronously is better than not delivering at all. But tell user
1792	 * space if the signal was asynchronous, so it can clearly be
1793	 * distinguished from normal synchronous ones.
1794	 */
1795	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1796				     TRAP_PERF_FLAG_ASYNC :
1797				     0;
1798
1799	return send_sig_info(info.si_signo, &info, current);
1800}
1801
1802/**
1803 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1804 * @syscall: syscall number to send to userland
1805 * @reason: filter-supplied reason code to send to userland (via si_errno)
1806 * @force_coredump: true to trigger a coredump
1807 *
1808 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1809 */
1810int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1811{
1812	struct kernel_siginfo info;
1813
1814	clear_siginfo(&info);
1815	info.si_signo = SIGSYS;
1816	info.si_code = SYS_SECCOMP;
1817	info.si_call_addr = (void __user *)KSTK_EIP(current);
1818	info.si_errno = reason;
1819	info.si_arch = syscall_get_arch(current);
1820	info.si_syscall = syscall;
1821	return force_sig_info_to_task(&info, current,
1822		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1823}
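
/*
 * What userspace sees from the SIGSYS generated above, as a sketch of
 * an in-process emulation handler (installed with SA_SIGINFO; the
 * seccomp filter returned SECCOMP_RET_TRAP with @reason as its data):
 *
 *	static void sigsys_handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		int nr     = si->si_syscall;	// @syscall above
 *		int reason = si->si_errno;	// @reason above
 *
 *		// emulate syscall nr here, using si->si_arch to
 *		// disambiguate the ABI
 *	}
 */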
1824
1825/* For the crazy architectures that include trap information in
1826 * the errno field, instead of an actual errno value.
1827 */
1828int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1829{
1830	struct kernel_siginfo info;
1831
1832	clear_siginfo(&info);
1833	info.si_signo = SIGTRAP;
1834	info.si_errno = errno;
1835	info.si_code  = TRAP_HWBKPT;
1836	info.si_addr  = addr;
1837	return force_sig_info(&info);
1838}
1839
1840/* For the rare architectures that include trap information using
1841 * si_trapno.
1842 */
1843int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1844{
1845	struct kernel_siginfo info;
1846
1847	clear_siginfo(&info);
1848	info.si_signo = sig;
1849	info.si_errno = 0;
1850	info.si_code  = code;
1851	info.si_addr  = addr;
1852	info.si_trapno = trapno;
1853	return force_sig_info(&info);
1854}
1855
1856/* For the rare architectures that include trap information using
1857 * si_trapno.
1858 */
1859int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1860			  struct task_struct *t)
1861{
1862	struct kernel_siginfo info;
1863
1864	clear_siginfo(&info);
1865	info.si_signo = sig;
1866	info.si_errno = 0;
1867	info.si_code  = code;
1868	info.si_addr  = addr;
1869	info.si_trapno = trapno;
1870	return send_sig_info(info.si_signo, &info, t);
1871}
1872
1873static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1874{
1875	int ret;
1876	read_lock(&tasklist_lock);
1877	ret = __kill_pgrp_info(sig, info, pgrp);
1878	read_unlock(&tasklist_lock);
1879	return ret;
1880}
1881
1882int kill_pgrp(struct pid *pid, int sig, int priv)
1883{
1884	return kill_pgrp_info(sig, __si_special(priv), pid);
1885}
1886EXPORT_SYMBOL(kill_pgrp);
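
/*
 * Example use (illustrative): tty job control signals the foreground
 * process group through a refcounted struct pid, roughly:
 *
 *	struct pid *pgrp = tty_get_pgrp(tty);
 *
 *	if (pgrp) {
 *		kill_pgrp(pgrp, SIGHUP, 1);	// priv=1 -> SEND_SIG_PRIV
 *		put_pid(pgrp);
 *	}
 */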
1887
1888int kill_pid(struct pid *pid, int sig, int priv)
1889{
1890	return kill_pid_info(sig, __si_special(priv), pid);
1891}
1892EXPORT_SYMBOL(kill_pid);
1893
1894#ifdef CONFIG_POSIX_TIMERS
1895/*
1896 * These functions handle POSIX timer signals. POSIX timers use
1897 * preallocated sigqueue structs for sending signals.
1898 */
1899static void __flush_itimer_signals(struct sigpending *pending)
1900{
1901	sigset_t signal, retain;
1902	struct sigqueue *q, *n;
1903
1904	signal = pending->signal;
1905	sigemptyset(&retain);
1906
1907	list_for_each_entry_safe(q, n, &pending->list, list) {
1908		int sig = q->info.si_signo;
1909
1910		if (likely(q->info.si_code != SI_TIMER)) {
1911			sigaddset(&retain, sig);
1912		} else {
1913			sigdelset(&signal, sig);
1914			list_del_init(&q->list);
1915			__sigqueue_free(q);
1916		}
1917	}
1918
1919	sigorsets(&pending->signal, &signal, &retain);
1920}
1921
1922void flush_itimer_signals(void)
1923{
1924	struct task_struct *tsk = current;
1925
1926	guard(spinlock_irqsave)(&tsk->sighand->siglock);
1927	__flush_itimer_signals(&tsk->pending);
1928	__flush_itimer_signals(&tsk->signal->shared_pending);
1929}
1930
1931bool posixtimer_init_sigqueue(struct sigqueue *q)
1932{
1933	struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);
1934
1935	if (!ucounts)
1936		return false;
1937	clear_siginfo(&q->info);
1938	__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
1939	return true;
1940}
1941
1942static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
1943{
1944	struct sigpending *pending;
1945	int sig = q->info.si_signo;
1946
1947	signalfd_notify(t, sig);
1948	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1949	list_add_tail(&q->list, &pending->list);
1950	sigaddset(&pending->signal, sig);
1951	complete_signal(sig, t, type);
1952}
1953
1954/*
1955 * This function is used by POSIX timers to deliver a timer signal.
1956 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1957 * set), the signal must be delivered to the specific thread (queues
1958 * into t->pending).
1959 *
1960 * Where type is not PIDTYPE_PID, signals must be delivered to the
1961 * process. In this case, prefer to deliver to current if it is in
1962 * the same thread group as the target process and its sighand is
1963 * stable, which avoids unnecessarily waking up a potentially idle task.
1964 */
1965static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
1966{
1967	struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
1968
1969	if (t && tmr->it_pid_type != PIDTYPE_PID &&
1970	    same_thread_group(t, current) && !current->exit_state)
1971		t = current;
1972	return t;
1973}
1974
1975void posixtimer_send_sigqueue(struct k_itimer *tmr)
1976{
1977	struct sigqueue *q = &tmr->sigq;
1978	int sig = q->info.si_signo;
1979	struct task_struct *t;
1980	unsigned long flags;
1981	int result;
1982
1983	guard(rcu)();
1984
1985	t = posixtimer_get_target(tmr);
1986	if (!t)
1987		return;
1988
1989	if (!likely(lock_task_sighand(t, &flags)))
1990		return;
1991
1992	/*
1993	 * Update @tmr::sigqueue_seq for posix timer signals with sighand
1994	 * locked to prevent a race against dequeue_signal().
1995	 */
1996	tmr->it_sigqueue_seq = tmr->it_signal_seq;
1997
1998	/*
1999	 * Set the signal delivery status under sighand lock, so that the
2000	 * ignored signal handling can distinguish between a periodic and a
2001	 * non-periodic timer.
2002	 */
2003	tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
2004
2005	if (!prepare_signal(sig, t, false)) {
2006		result = TRACE_SIGNAL_IGNORED;
2007
2008		if (!list_empty(&q->list)) {
2009			/*
2010			 * The signal was ignored and blocked. The timer
2011			 * expiry queued it because blocked signals are
2012			 * queued independent of the ignored state.
2013			 *
2014			 * The unblocking set SIGPENDING, but the signal
2015			 * was not yet dequeued from the pending list.
2016			 * So prepare_signal() sees unblocked and ignored,
2017			 * which ends up here. Leave it queued like a
2018			 * regular signal.
2019			 *
2020			 * The same happens when the task group is exiting
2021			 * and the signal is already queued.
2022			 * prepare_signal() treats SIGNAL_GROUP_EXIT as
2023			 * ignored independent of its queued state. This
2024			 * gets cleaned up in __exit_signal().
2025			 */
2026			goto out;
2027		}
2028
2029		/* Periodic timers with SIG_IGN are queued on the ignored list */
2030		if (tmr->it_sig_periodic) {
2031			/*
2032			 * Already queued means the timer was rearmed after
2033			 * the previous expiry got it on the ignore list.
2034			 * Nothing to do for that case.
2035			 */
2036			if (hlist_unhashed(&tmr->ignored_list)) {
2037				/*
2038				 * Take a signal reference and queue it on
2039				 * the ignored list.
2040				 */
2041				posixtimer_sigqueue_getref(q);
2042				posixtimer_sig_ignore(t, q);
2043			}
2044		} else if (!hlist_unhashed(&tmr->ignored_list)) {
2045			/*
2046			 * Covers the case where a timer was periodic and
2047			 * then the signal was ignored. Later it was rearmed
2048			 * as a oneshot timer. The previous signal is invalid
2049			 * now, and this oneshot signal has to be dropped.
2050			 * Remove it from the ignored list and drop the
2051			 * reference count as the signal is no longer
2052			 * queued.
2053			 */
2054			hlist_del_init(&tmr->ignored_list);
2055			posixtimer_putref(tmr);
2056		}
2057		goto out;
2058	}
2059
2060	if (unlikely(!list_empty(&q->list))) {
2061		/* This holds a reference count already */
2062		result = TRACE_SIGNAL_ALREADY_PENDING;
2063		goto out;
2064	}
2065
2066	/*
2067	 * If the signal is on the ignore list, it got blocked after it was
2068	 * ignored earlier. But nothing lifted the ignore. Move it back to
2069	 * the pending list to be consistent with the regular signal
2070	 * handling. This already holds a reference count.
2071	 *
2072	 * If it's not on the ignore list acquire a reference count.
2073	 */
2074	if (likely(hlist_unhashed(&tmr->ignored_list)))
2075		posixtimer_sigqueue_getref(q);
2076	else
2077		hlist_del_init(&tmr->ignored_list);
2078
2079	posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
2080	result = TRACE_SIGNAL_DELIVERED;
2081out:
2082	trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
2083	unlock_task_sighand(t, &flags);
2084}
2085
2086static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
2087{
2088	struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
2089
2090	/*
2091	 * If the timer is marked deleted already or the signal originates
2092	 * from a non-periodic timer, then just drop the reference
2093	 * count. Otherwise queue it on the ignored list.
2094	 */
2095	if (tmr->it_signal && tmr->it_sig_periodic)
2096		hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers);
2097	else
2098		posixtimer_putref(tmr);
2099}
2100
2101static void posixtimer_sig_unignore(struct task_struct *tsk, int sig)
2102{
2103	struct hlist_head *head = &tsk->signal->ignored_posix_timers;
2104	struct hlist_node *tmp;
2105	struct k_itimer *tmr;
2106
2107	if (likely(hlist_empty(head)))
2108		return;
2109
2110	/*
2111	 * Rearming a timer with sighand lock held is not possible due to
2112	 * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and
2113	 * let the signal delivery path deal with it whether it needs to be
2114	 * rearmed or not. This cannot be decided here w/o dropping sighand
2115	 * lock and creating a loop retry horror show.
2116	 */
2117	hlist_for_each_entry_safe(tmr, tmp, head, ignored_list) {
2118		struct task_struct *target;
2119
2120		/*
2121		 * tmr::sigq.info.si_signo is immutable, so accessing it
2122		 * without holding tmr::it_lock is safe.
2123		 */
2124		if (tmr->sigq.info.si_signo != sig)
2125			continue;
2126
2127		hlist_del_init(&tmr->ignored_list);
2128
2129		/* This should never happen and leaks a reference count */
2130		if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list)))
2131			continue;
2132
2133		/*
2134		 * Get the target for the signal. If target is a thread and
2135		 * has exited by now, drop the reference count.
2136		 */
2137		guard(rcu)();
2138		target = posixtimer_get_target(tmr);
2139		if (target)
2140			posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type);
2141		else
2142			posixtimer_putref(tmr);
2143	}
2144}
2145#else /* CONFIG_POSIX_TIMERS */
2146static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
2147static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { }
2148#endif /* !CONFIG_POSIX_TIMERS */
2149
2150void do_notify_pidfd(struct task_struct *task)
2151{
2152	struct pid *pid = task_pid(task);
2153
2154	WARN_ON(task->exit_state == 0);
2155
2156	__wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
2157			poll_to_key(EPOLLIN | EPOLLRDNORM));
2158}
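
/*
 * The wakeup above is what completes a pidfd poll. Minimal userspace
 * sketch (error handling omitted):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns once the target has exited
 */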
2159
2160/*
2161 * Let a parent know about the death of a child.
2162 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2163 *
2164 * Returns true if our parent ignored us and so we've switched to
2165 * self-reaping.
2166 */
2167bool do_notify_parent(struct task_struct *tsk, int sig)
2168{
2169	struct kernel_siginfo info;
2170	unsigned long flags;
2171	struct sighand_struct *psig;
2172	bool autoreap = false;
2173	u64 utime, stime;
2174
2175	WARN_ON_ONCE(sig == -1);
2176
2177	/* do_notify_parent_cldstop should have been called instead.  */
2178	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2179
2180	WARN_ON_ONCE(!tsk->ptrace &&
2181	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2182	/*
2183	 * tsk is a group leader and has no threads, wake up the
2184	 * non-PIDFD_THREAD waiters.
2185	 */
2186	if (thread_group_empty(tsk))
2187		do_notify_pidfd(tsk);
2188
2189	if (sig != SIGCHLD) {
2190		/*
2191		 * This is only possible if parent == real_parent.
2192		 * Check if it has changed security domain.
2193		 */
2194		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2195			sig = SIGCHLD;
2196	}
2197
2198	clear_siginfo(&info);
2199	info.si_signo = sig;
2200	info.si_errno = 0;
2201	/*
2202	 * We are under tasklist_lock here so our parent is tied to
2203	 * us and cannot change.
2204	 *
2205	 * task_active_pid_ns will always return the same pid namespace
2206	 * until a task passes through release_task.
2207	 *
2208	 * write_lock() currently calls preempt_disable() which is the
2209	 * same as rcu_read_lock(), but according to Oleg it is not
2210	 * correct to rely on this.
2211	 */
2212	rcu_read_lock();
2213	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2214	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2215				       task_uid(tsk));
2216	rcu_read_unlock();
2217
2218	task_cputime(tsk, &utime, &stime);
2219	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2220	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2221
2222	info.si_status = tsk->exit_code & 0x7f;
2223	if (tsk->exit_code & 0x80)
2224		info.si_code = CLD_DUMPED;
2225	else if (tsk->exit_code & 0x7f)
2226		info.si_code = CLD_KILLED;
2227	else {
2228		info.si_code = CLD_EXITED;
2229		info.si_status = tsk->exit_code >> 8;
2230	}
2231
2232	psig = tsk->parent->sighand;
2233	spin_lock_irqsave(&psig->siglock, flags);
2234	if (!tsk->ptrace && sig == SIGCHLD &&
2235	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2236	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2237		/*
2238		 * We are exiting and our parent doesn't care.  POSIX.1
2239		 * defines special semantics for setting SIGCHLD to SIG_IGN
2240		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2241		 * automatically and not left for our parent's wait4 call.
2242		 * Rather than having the parent do it as a magic kind of
2243		 * signal handler, we just set this to tell do_exit that we
2244		 * can be cleaned up without becoming a zombie.  Note that
2245		 * we still call __wake_up_parent in this case, because a
2246		 * blocked sys_wait4 might now return -ECHILD.
2247		 *
2248		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2249		 * is implementation-defined: we do (if you don't want
2250		 * it, just use SIG_IGN instead).
2251		 */
2252		autoreap = true;
2253		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2254			sig = 0;
2255	}
2256	/*
2257	 * Send with __send_signal as si_pid and si_uid are in the
2258	 * parent's namespaces.
2259	 */
2260	if (valid_signal(sig) && sig)
2261		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2262	__wake_up_parent(tsk, tsk->parent);
2263	spin_unlock_irqrestore(&psig->siglock, flags);
2264
2265	return autoreap;
2266}
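
/*
 * The autoreap path above implements the POSIX.1 semantics visible in
 * this userspace sketch: with SIGCHLD set to SIG_IGN, children never
 * become zombies and wait() eventually fails with ECHILD:
 *
 *	signal(SIGCHLD, SIG_IGN);
 *	if (fork() == 0)
 *		_exit(0);		// reaped automatically
 *	while (wait(NULL) > 0)
 *		;			// ends with errno == ECHILD
 */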
2267
2268/**
2269 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2270 * @tsk: task reporting the state change
2271 * @for_ptracer: the notification is for ptracer
2272 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2273 *
2274 * Notify @tsk's parent that the stopped/continued state has changed.  If
2275 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2276 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2277 *
2278 * CONTEXT:
2279 * Must be called with tasklist_lock at least read locked.
2280 */
2281static void do_notify_parent_cldstop(struct task_struct *tsk,
2282				     bool for_ptracer, int why)
2283{
2284	struct kernel_siginfo info;
2285	unsigned long flags;
2286	struct task_struct *parent;
2287	struct sighand_struct *sighand;
2288	u64 utime, stime;
2289
2290	if (for_ptracer) {
2291		parent = tsk->parent;
2292	} else {
2293		tsk = tsk->group_leader;
2294		parent = tsk->real_parent;
2295	}
2296
2297	clear_siginfo(&info);
2298	info.si_signo = SIGCHLD;
2299	info.si_errno = 0;
2300	/*
2301	 * see comment in do_notify_parent() about the following 4 lines
2302	 */
2303	rcu_read_lock();
2304	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2305	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2306	rcu_read_unlock();
2307
2308	task_cputime(tsk, &utime, &stime);
2309	info.si_utime = nsec_to_clock_t(utime);
2310	info.si_stime = nsec_to_clock_t(stime);
2311
2312	info.si_code = why;
2313	switch (why) {
2314	case CLD_CONTINUED:
2315		info.si_status = SIGCONT;
2316		break;
2317	case CLD_STOPPED:
2318		info.si_status = tsk->signal->group_exit_code & 0x7f;
2319		break;
2320	case CLD_TRAPPED:
2321		info.si_status = tsk->exit_code & 0x7f;
2322		break;
2323	default:
2324		BUG();
2325	}
2326
2327	sighand = parent->sighand;
2328	spin_lock_irqsave(&sighand->siglock, flags);
2329	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2330	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2331		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2332	/*
2333	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2334	 */
2335	__wake_up_parent(tsk, parent);
2336	spin_unlock_irqrestore(&sighand->siglock, flags);
2337}
2338
2339/*
2340 * This must be called with current->sighand->siglock held.
2341 *
2342 * This should be the path for all ptrace stops.
2343 * We always set current->last_siginfo while stopped here.
2344 * That makes it a way to test a stopped process for
2345 * being ptrace-stopped vs being job-control-stopped.
2346 *
2347 * Returns the signal the ptracer requested the code resume
2348 * with.  If the code did not stop because the tracer is gone,
2349 * the stop signal remains unchanged.
2350 */
2351static int ptrace_stop(int exit_code, int why, unsigned long message,
2352		       kernel_siginfo_t *info)
2353	__releases(&current->sighand->siglock)
2354	__acquires(&current->sighand->siglock)
2355{
2356	bool gstop_done = false;
2357
2358	if (arch_ptrace_stop_needed()) {
2359		/*
2360		 * The arch code has something special to do before a
2361		 * ptrace stop.  This is allowed to block, e.g. for faults
2362		 * on user stack pages.  We can't keep the siglock while
2363		 * calling arch_ptrace_stop, so we must release it now.
2364		 * To preserve proper semantics, we must do this before
2365		 * any signal bookkeeping like checking group_stop_count.
2366		 */
2367		spin_unlock_irq(&current->sighand->siglock);
2368		arch_ptrace_stop();
2369		spin_lock_irq(&current->sighand->siglock);
2370	}
2371
2372	/*
2373	 * After this point ptrace_signal_wake_up or signal_wake_up
2374	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2375	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2376	 * signals here to prevent ptrace_stop sleeping in schedule.
2377	 */
2378	if (!current->ptrace || __fatal_signal_pending(current))
2379		return exit_code;
2380
2381	set_special_state(TASK_TRACED);
2382	current->jobctl |= JOBCTL_TRACED;
2383
2384	/*
2385	 * We're committing to trapping.  TRACED should be visible before
2386	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2387	 * Also, transition to TRACED and updates to ->jobctl should be
2388	 * atomic with respect to siglock and should be done after the arch
2389	 * hook as siglock is released and regrabbed across it.
2390	 *
2391	 *     TRACER				    TRACEE
2392	 *
2393	 *     ptrace_attach()
2394	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2395	 *     do_wait()
2396	 *       set_current_state()                smp_wmb();
2397	 *       ptrace_do_wait()
2398	 *         wait_task_stopped()
2399	 *           task_stopped_code()
2400	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2401	 */
2402	smp_wmb();
2403
2404	current->ptrace_message = message;
2405	current->last_siginfo = info;
2406	current->exit_code = exit_code;
2407
2408	/*
2409	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2410	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2411	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2412	 * could be clear now.  We act as if SIGCONT is received after
2413	 * TASK_TRACED is entered - ignore it.
2414	 */
2415	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2416		gstop_done = task_participate_group_stop(current);
2417
2418	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2419	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2420	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2421		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2422
2423	/* entering a trap, clear TRAPPING */
2424	task_clear_jobctl_trapping(current);
2425
2426	spin_unlock_irq(&current->sighand->siglock);
2427	read_lock(&tasklist_lock);
2428	/*
2429	 * Notify parents of the stop.
2430	 *
2431	 * While ptraced, there are two parents - the ptracer and
2432	 * the real_parent of the group_leader.  The ptracer should
2433	 * know about every stop while the real parent is only
2434	 * interested in the completion of group stop.  The states
2435	 * for the two don't interact with each other.  Notify
2436	 * separately unless they're gonna be duplicates.
2437	 */
2438	if (current->ptrace)
2439		do_notify_parent_cldstop(current, true, why);
2440	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2441		do_notify_parent_cldstop(current, false, why);
2442
2443	/*
2444	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
2445	 * On a PREEMPTION kernel this can result in a preemption requirement
2446	 * which will be fulfilled after read_unlock() and the ptracer will be
2447	 * put on the CPU.
2448	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2449	 * this task to wait in schedule(). If this task gets preempted then it
2450	 * remains enqueued on the runqueue. The ptracer will observe this and
2451	 * then sleep for a delay of one HZ tick. In the meantime this task
2452	 * gets scheduled, enters schedule() and will wait for the ptracer.
2453	 *
2454	 * This preemption point is not bad from a correctness point of
2455	 * view but extends the runtime by one HZ tick time due to the
2456	 * ptracer's sleep.  The preempt-disable section ensures that there
2457	 * will be no preemption between unlock and schedule(), which
2458	 * improves the performance since the ptracer will observe that
2459	 * the tracee is scheduled out once it gets on the CPU.
2460	 *
2461	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2462	 * Therefore the task can be preempted after do_notify_parent_cldstop()
2463	 * before unlocking tasklist_lock so there is no benefit in doing this.
2464	 *
2465	 * In fact disabling preemption is harmful on PREEMPT_RT because
2466	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2467	 * with preemption disabled due to the 'sleeping' spinlock
2468	 * substitution of RT.
2469	 */
2470	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2471		preempt_disable();
2472	read_unlock(&tasklist_lock);
2473	cgroup_enter_frozen();
2474	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2475		preempt_enable_no_resched();
2476	schedule();
2477	cgroup_leave_frozen(true);
2478
2479	/*
2480	 * We are back.  Now reacquire the siglock before touching
2481	 * last_siginfo, so that we are sure to have synchronized with
2482	 * any signal-sending on another CPU that wants to examine it.
2483	 */
2484	spin_lock_irq(&current->sighand->siglock);
2485	exit_code = current->exit_code;
2486	current->last_siginfo = NULL;
2487	current->ptrace_message = 0;
2488	current->exit_code = 0;
2489
2490	/* LISTENING can be set only during STOP traps, clear it */
2491	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2492
2493	/*
2494	 * Queued signals ignored us while we were stopped for tracing.
2495	 * So check for any that we should take before resuming user mode.
2496	 * This sets TIF_SIGPENDING, but never clears it.
2497	 */
2498	recalc_sigpending_tsk(current);
2499	return exit_code;
2500}
2501
2502static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2503{
2504	kernel_siginfo_t info;
2505
2506	clear_siginfo(&info);
2507	info.si_signo = signr;
2508	info.si_code = exit_code;
2509	info.si_pid = task_pid_vnr(current);
2510	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2511
2512	/* Let the debugger run.  */
2513	return ptrace_stop(exit_code, why, message, &info);
2514}
2515
2516int ptrace_notify(int exit_code, unsigned long message)
2517{
2518	int signr;
2519
2520	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2521	if (unlikely(task_work_pending(current)))
2522		task_work_run();
2523
2524	spin_lock_irq(&current->sighand->siglock);
2525	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2526	spin_unlock_irq(&current->sighand->siglock);
2527	return signr;
2528}
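
/*
 * Callers encode the event in exit_code as checked by the BUG_ON
 * above: low byte SIGTRAP, next byte the PTRACE_EVENT_* number. For
 * example, the exec path reports (illustratively):
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP, old_pid);
 *
 * and the tracer sees a wait status for which
 * (status >> 8) == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)).
 */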
2529
2530/**
2531 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2532 * @signr: signr causing group stop if initiating
2533 *
2534 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2535 * and participate in it.  If already set, participate in the existing
2536 * group stop.  If participated in a group stop (and thus slept), %true is
2537 * returned with siglock released.
2538 *
2539 * If ptraced, this function doesn't handle stop itself.  Instead,
2540 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2541 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2542 * places afterwards.
2543 * place afterwards.
2544 * CONTEXT:
2545 * Must be called with @current->sighand->siglock held, which is released
2546 * on %true return.
2547 *
2548 * RETURNS:
2549 * %false if group stop is already cancelled or ptrace trap is scheduled.
2550 * %true if participated in group stop.
2551 */
2552static bool do_signal_stop(int signr)
2553	__releases(&current->sighand->siglock)
2554{
2555	struct signal_struct *sig = current->signal;
2556
2557	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2558		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2559		struct task_struct *t;
2560
2561		/* signr will be recorded in task->jobctl for retries */
2562		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2563
2564		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2565		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2566		    unlikely(sig->group_exec_task))
2567			return false;
2568		/*
2569		 * There is no group stop already in progress.  We must
2570		 * initiate one now.
2571		 *
2572		 * While ptraced, a task may be resumed while group stop is
2573		 * still in effect and then receive a stop signal and
2574		 * initiate another group stop.  This deviates from the
2575		 * usual behavior as two consecutive stop signals can't
2576		 * cause two group stops when !ptraced.  That is why we
2577		 * also check !task_is_stopped(t) below.
2578		 *
2579		 * The condition can be distinguished by testing whether
2580		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2581		 * group_exit_code in such case.
2582		 *
2583		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2584		 * an intervening stop signal is required to cause two
2585		 * continued events regardless of ptrace.
2586		 */
2587		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2588			sig->group_exit_code = signr;
2589
2590		sig->group_stop_count = 0;
2591		if (task_set_jobctl_pending(current, signr | gstop))
2592			sig->group_stop_count++;
2593
2594		for_other_threads(current, t) {
2595			/*
2596			 * Setting state to TASK_STOPPED for a group
2597			 * stop is always done with the siglock held,
2598			 * so this check has no races.
2599			 */
2600			if (!task_is_stopped(t) &&
2601			    task_set_jobctl_pending(t, signr | gstop)) {
2602				sig->group_stop_count++;
2603				if (likely(!(t->ptrace & PT_SEIZED)))
2604					signal_wake_up(t, 0);
2605				else
2606					ptrace_trap_notify(t);
2607			}
2608		}
2609	}
2610
2611	if (likely(!current->ptrace)) {
2612		int notify = 0;
2613
2614		/*
2615		 * If there are no other threads in the group, or if there
2616		 * is a group stop in progress and we are the last to stop,
2617		 * report to the parent.
2618		 */
2619		if (task_participate_group_stop(current))
2620			notify = CLD_STOPPED;
2621
2622		current->jobctl |= JOBCTL_STOPPED;
2623		set_special_state(TASK_STOPPED);
2624		spin_unlock_irq(&current->sighand->siglock);
2625
2626		/*
2627		 * Notify the parent of the group stop completion.  Because
2628		 * we're not holding either the siglock or tasklist_lock
2629	 * here, the ptracer may attach in between; however, this is for
2630		 * group stop and should always be delivered to the real
2631		 * parent of the group leader.  The new ptracer will get
2632		 * its notification when this task transitions into
2633		 * TASK_TRACED.
2634		 */
2635		if (notify) {
2636			read_lock(&tasklist_lock);
2637			do_notify_parent_cldstop(current, false, notify);
2638			read_unlock(&tasklist_lock);
2639		}
2640
2641		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2642		cgroup_enter_frozen();
2643		schedule();
2644		return true;
2645	} else {
2646		/*
2647		 * While ptraced, group stop is handled by STOP trap.
2648		 * Schedule it and let the caller deal with it.
2649		 */
2650		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2651		return false;
2652	}
2653}
2654
2655/**
2656 * do_jobctl_trap - take care of ptrace jobctl traps
2657 *
2658 * When PT_SEIZED, it's used for both group stop and explicit
2659 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2660 * accompanying siginfo.  If stopped, the lower eight bits of exit_code contain
2661 * the stop signal; otherwise, %SIGTRAP.
2662 *
2663 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2664 * number as exit_code and no siginfo.
2665 *
2666 * CONTEXT:
2667 * Must be called with @current->sighand->siglock held, which may be
2668 * released and re-acquired before returning with intervening sleep.
2669 */
2670static void do_jobctl_trap(void)
2671{
2672	struct signal_struct *signal = current->signal;
2673	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2674
2675	if (current->ptrace & PT_SEIZED) {
2676		if (!signal->group_stop_count &&
2677		    !(signal->flags & SIGNAL_STOP_STOPPED))
2678			signr = SIGTRAP;
2679		WARN_ON_ONCE(!signr);
2680		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2681				 CLD_STOPPED, 0);
2682	} else {
2683		WARN_ON_ONCE(!signr);
2684		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2685	}
2686}
2687
2688/**
2689 * do_freezer_trap - handle the freezer jobctl trap
2690 *
2691 * Puts the task into a frozen state, unless the task is about to quit.
2692 * In that case it drops JOBCTL_TRAP_FREEZE instead.
2693 *
2694 * CONTEXT:
2695 * Must be called with @current->sighand->siglock held,
2696 * which is always released before returning.
2697 */
2698static void do_freezer_trap(void)
2699	__releases(&current->sighand->siglock)
2700{
2701	/*
2702	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2703	 * let's make another loop to give it a chance to be handled.
2704	 * In any case, we'll come back here.
2705	 */
2706	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2707	     JOBCTL_TRAP_FREEZE) {
2708		spin_unlock_irq(&current->sighand->siglock);
2709		return;
2710	}
2711
2712	/*
2713	 * Now we're sure that there is no pending fatal signal and no
2714	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2715	 * immediately (if there is a non-fatal signal pending), and
2716	 * put the task into sleep.
2717	 */
2718	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2719	clear_thread_flag(TIF_SIGPENDING);
2720	spin_unlock_irq(&current->sighand->siglock);
2721	cgroup_enter_frozen();
2722	schedule();
2723
2724	/*
2725	 * We could've been woken by task_work, run it to clear
2726	 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
2727	 */
2728	clear_notify_signal();
2729	if (unlikely(task_work_pending(current)))
2730		task_work_run();
2731}
2732
2733static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2734{
2735	/*
2736	 * We do not check sig_kernel_stop(signr) but set this marker
2737	 * unconditionally because we do not know whether debugger will
2738	 * change signr. This flag has no meaning unless we are going
2739	 * to stop after return from ptrace_stop(). In this case it will
2740	 * be checked in do_signal_stop(), we should only stop if it was
2741	 * not cleared by SIGCONT while we were sleeping. See also the
2742	 * comment in dequeue_signal().
2743	 */
2744	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2745	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2746
2747	/* We're back.  Did the debugger cancel the sig?  */
2748	if (signr == 0)
2749		return signr;
2750
2751	/*
2752	 * Update the siginfo structure if the signal has
2753	 * changed.  If the debugger wanted something
2754	 * specific in the siginfo structure then it should
2755	 * have updated *info via PTRACE_SETSIGINFO.
2756	 */
2757	if (signr != info->si_signo) {
2758		clear_siginfo(info);
2759		info->si_signo = signr;
2760		info->si_errno = 0;
2761		info->si_code = SI_USER;
2762		rcu_read_lock();
2763		info->si_pid = task_pid_vnr(current->parent);
2764		info->si_uid = from_kuid_munged(current_user_ns(),
2765						task_uid(current->parent));
2766		rcu_read_unlock();
2767	}
2768
2769	/* If the (new) signal is now blocked, requeue it.  */
2770	if (sigismember(&current->blocked, signr) ||
2771	    fatal_signal_pending(current)) {
2772		send_signal_locked(signr, info, current, type);
2773		signr = 0;
2774	}
2775
2776	return signr;
2777}
2778
2779static void hide_si_addr_tag_bits(struct ksignal *ksig)
2780{
2781	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2782	case SIL_FAULT:
2783	case SIL_FAULT_TRAPNO:
2784	case SIL_FAULT_MCEERR:
2785	case SIL_FAULT_BNDERR:
2786	case SIL_FAULT_PKUERR:
2787	case SIL_FAULT_PERF_EVENT:
2788		ksig->info.si_addr = arch_untagged_si_addr(
2789			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2790		break;
2791	case SIL_KILL:
2792	case SIL_TIMER:
2793	case SIL_POLL:
2794	case SIL_CHLD:
2795	case SIL_RT:
2796	case SIL_SYS:
2797		break;
2798	}
2799}
2800
2801bool get_signal(struct ksignal *ksig)
2802{
2803	struct sighand_struct *sighand = current->sighand;
2804	struct signal_struct *signal = current->signal;
2805	int signr;
2806
2807	clear_notify_signal();
2808	if (unlikely(task_work_pending(current)))
2809		task_work_run();
2810
2811	if (!task_sigpending(current))
2812		return false;
2813
2814	if (unlikely(uprobe_deny_signal()))
2815		return false;
2816
2817	/*
2818	 * Do this once, we can't return to user-mode if freezing() == T.
2819	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2820	 * thus do not need another check after return.
2821	 */
2822	try_to_freeze();
2823
2824relock:
2825	spin_lock_irq(&sighand->siglock);
2826
2827	/*
2828	 * Every stopped thread goes here after wakeup. Check to see if
2829	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2830	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2831	 */
2832	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2833		int why;
2834
2835		if (signal->flags & SIGNAL_CLD_CONTINUED)
2836			why = CLD_CONTINUED;
2837		else
2838			why = CLD_STOPPED;
2839
2840		signal->flags &= ~SIGNAL_CLD_MASK;
2841
2842		spin_unlock_irq(&sighand->siglock);
2843
2844		/*
2845		 * Notify the parent that we're continuing.  This event is
2846 * always per-process and doesn't make a whole lot of sense
2847		 * for ptracers, who shouldn't consume the state via
2848		 * wait(2) either, but, for backward compatibility, notify
2849 * the ptracer of the group leader too unless it would be
2850		 * a duplicate.
2851		 */
2852		read_lock(&tasklist_lock);
2853		do_notify_parent_cldstop(current, false, why);
2854
2855		if (ptrace_reparented(current->group_leader))
2856			do_notify_parent_cldstop(current->group_leader,
2857						true, why);
2858		read_unlock(&tasklist_lock);
2859
2860		goto relock;
2861	}
2862
2863	for (;;) {
2864		struct k_sigaction *ka;
2865		enum pid_type type;
2866
2867		/* Has this task already been marked for death? */
2868		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2869		     signal->group_exec_task) {
2870			signr = SIGKILL;
2871			sigdelset(&current->pending.signal, SIGKILL);
2872			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2873					     &sighand->action[SIGKILL-1]);
2874			recalc_sigpending();
2875			/*
2876			 * implies do_group_exit() or return to PF_USER_WORKER,
2877			 * no need to initialize ksig->info/etc.
2878			 */
2879			goto fatal;
2880		}
2881
2882		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2883		    do_signal_stop(0))
2884			goto relock;
2885
2886		if (unlikely(current->jobctl &
2887			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2888			if (current->jobctl & JOBCTL_TRAP_MASK) {
2889				do_jobctl_trap();
2890				spin_unlock_irq(&sighand->siglock);
2891			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2892				do_freezer_trap();
2893
2894			goto relock;
2895		}
2896
2897		/*
2898		 * If the task is leaving the frozen state, let's update
2899		 * cgroup counters and reset the frozen bit.
2900		 */
2901		if (unlikely(cgroup_task_frozen(current))) {
2902			spin_unlock_irq(&sighand->siglock);
2903			cgroup_leave_frozen(false);
2904			goto relock;
2905		}
2906
2907		/*
2908		 * Signals generated by the execution of an instruction
2909		 * need to be delivered before any other pending signals
2910		 * so that the instruction pointer in the signal stack
2911		 * frame points to the faulting instruction.
2912		 */
2913		type = PIDTYPE_PID;
2914		signr = dequeue_synchronous_signal(&ksig->info);
2915		if (!signr)
2916			signr = dequeue_signal(&current->blocked, &ksig->info, &type);
2917
2918		if (!signr)
2919			break; /* will return 0 */
2920
2921		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2922		    !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2923			signr = ptrace_signal(signr, &ksig->info, type);
2924			if (!signr)
2925				continue;
2926		}
2927
2928		ka = &sighand->action[signr-1];
2929
2930		/* Trace actually delivered signals. */
2931		trace_signal_deliver(signr, &ksig->info, ka);
2932
2933		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2934			continue;
2935		if (ka->sa.sa_handler != SIG_DFL) {
2936			/* Run the handler.  */
2937			ksig->ka = *ka;
2938
2939			if (ka->sa.sa_flags & SA_ONESHOT)
2940				ka->sa.sa_handler = SIG_DFL;
2941
2942			break; /* will return non-zero "signr" value */
2943		}
2944
2945		/*
2946		 * Now we are doing the default action for this signal.
2947		 */
2948		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2949			continue;
2950
2951		/*
2952		 * Global init gets no signals it doesn't want.
2953		 * Container-init gets no signals it doesn't want from same
2954		 * container.
2955		 *
2956		 * Note that if global/container-init sees a sig_kernel_only()
2957		 * signal here, the signal must have been generated internally
2958		 * or must have come from an ancestor namespace. In either
2959		 * case, the signal cannot be dropped.
2960		 */
2961		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2962				!sig_kernel_only(signr))
2963			continue;
2964
2965		if (sig_kernel_stop(signr)) {
2966			/*
2967			 * The default action is to stop all threads in
2968			 * the thread group.  The job control signals
2969			 * do nothing in an orphaned pgrp, but SIGSTOP
2970			 * always works.  Note that siglock needs to be
2971			 * dropped during the call to is_orphaned_pgrp()
2972			 * because of lock ordering with tasklist_lock.
2973			 * This allows an intervening SIGCONT to be posted.
2974			 * We need to check for that and bail out if necessary.
2975			 */
2976			if (signr != SIGSTOP) {
2977				spin_unlock_irq(&sighand->siglock);
2978
2979				/* signals can be posted during this window */
2980
2981				if (is_current_pgrp_orphaned())
2982					goto relock;
2983
2984				spin_lock_irq(&sighand->siglock);
2985			}
2986
2987			if (likely(do_signal_stop(signr))) {
2988				/* It released the siglock.  */
2989				goto relock;
2990			}
2991
2992			/*
2993			 * We didn't actually stop, due to a race
2994			 * with SIGCONT or something like that.
2995			 */
2996			continue;
2997		}
2998
2999	fatal:
3000		spin_unlock_irq(&sighand->siglock);
3001		if (unlikely(cgroup_task_frozen(current)))
3002			cgroup_leave_frozen(true);
3003
3004		/*
3005		 * Anything else is fatal, maybe with a core dump.
3006		 */
3007		current->flags |= PF_SIGNALED;
3008
3009		if (sig_kernel_coredump(signr)) {
3010			if (print_fatal_signals)
3011				print_fatal_signal(signr);
3012			proc_coredump_connector(current);
3013			/*
3014			 * If it was able to dump core, this kills all
3015			 * other threads in the group and synchronizes with
3016			 * their demise.  If we lost the race with another
3017			 * thread getting here, it set group_exit_code
3018			 * first and our do_group_exit call below will use
3019			 * that value and ignore the one we pass it.
3020			 */
3021			do_coredump(&ksig->info);
3022		}
3023
3024		/*
3025		 * PF_USER_WORKER threads will catch and exit on fatal signals
3026		 * themselves. They have cleanup that must be performed, so we
3027		 * cannot call do_exit() on their behalf. Note that ksig won't
3028 * be properly initialized, so PF_USER_WORKERs shouldn't use it.
3029		 */
3030		if (current->flags & PF_USER_WORKER)
3031			goto out;
3032
3033		/*
3034		 * Death signals, no core dump.
3035		 */
3036		do_group_exit(signr);
3037		/* NOTREACHED */
3038	}
3039	spin_unlock_irq(&sighand->siglock);
3040
3041	ksig->sig = signr;
3042
3043	if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
3044		hide_si_addr_tag_bits(ksig);
3045out:
3046	return signr > 0;
3047}
3048
3049/**
3050 * signal_delivered - called after signal delivery to update blocked signals
3051 * @ksig:		kernel signal struct
3052 * @stepping:		nonzero if debugger single-step or block-step in use
3053 *
3054 * This function should be called when a signal has successfully been
3055 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
3056 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
3057 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
3058 */
3059static void signal_delivered(struct ksignal *ksig, int stepping)
3060{
3061	sigset_t blocked;
3062
3063	/* A signal was successfully delivered, and the saved sigmask was
3064	 * stored on the signal frame; it will be restored by sigreturn.
3065	 * So we can simply clear the restore sigmask flag.
3066	 */
3067	clear_restore_sigmask();
3068
3069	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
3070	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
3071		sigaddset(&blocked, ksig->sig);
3072	set_current_blocked(&blocked);
3073	if (current->sas_ss_flags & SS_AUTODISARM)
3074		sas_ss_reset(current);
3075	if (stepping)
3076		ptrace_notify(SIGTRAP, 0);
3077}
3078
3079void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
3080{
3081	if (failed)
3082		force_sigsegv(ksig->sig);
3083	else
3084		signal_delivered(ksig, stepping);
3085}
3086
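/*
 * Example: a minimal sketch of how architecture entry code consumes
 * get_signal() and signal_setup_done(), modeled loosely on the per-arch
 * signal.c files.  setup_rt_frame() is a stand-in for the arch-specific
 * frame builder and TIF_SINGLESTEP for the arch's step flag; both names
 * are assumptions, not guaranteed to exist on every architecture.
 */
#if 0
static void arch_do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* A handler must run: build the user-space signal frame. */
		int failed = setup_rt_frame(&ksig, regs);

		/* Forces SIGSEGV on failure; otherwise updates the blocked
		 * mask and notifies a single-stepping debugger. */
		signal_setup_done(failed, &ksig,
				  test_thread_flag(TIF_SINGLESTEP));
		return;
	}

	/* No handler ran: restore a temporarily installed sigmask, if any. */
	restore_saved_sigmask();
}
#endif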
3087/*
3088 * It could be that complete_signal() picked us to notify about the
3089 * group-wide signal. Other threads should be notified now to take
3090 * the shared signals in @which since we will not.
3091 */
3092static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
3093{
3094	sigset_t retarget;
3095	struct task_struct *t;
3096
3097	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
3098	if (sigisemptyset(&retarget))
3099		return;
3100
3101	for_other_threads(tsk, t) {
3102		if (t->flags & PF_EXITING)
3103			continue;
3104
3105		if (!has_pending_signals(&retarget, &t->blocked))
3106			continue;
3107		/* Remove the signals this thread can handle. */
3108		sigandsets(&retarget, &retarget, &t->blocked);
3109
3110		if (!task_sigpending(t))
3111			signal_wake_up(t, 0);
3112
3113		if (sigisemptyset(&retarget))
3114			break;
3115	}
3116}
3117
3118void exit_signals(struct task_struct *tsk)
3119{
3120	int group_stop = 0;
3121	sigset_t unblocked;
3122
3123	/*
3124	 * @tsk is about to have PF_EXITING set - lock out users which
3125	 * expect stable threadgroup.
3126	 */
3127	cgroup_threadgroup_change_begin(tsk);
3128
3129	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3130		sched_mm_cid_exit_signals(tsk);
3131		tsk->flags |= PF_EXITING;
3132		cgroup_threadgroup_change_end(tsk);
3133		return;
3134	}
3135
3136	spin_lock_irq(&tsk->sighand->siglock);
3137	/*
3138	 * From now this task is not visible for group-wide signals,
3139	 * see wants_signal(), do_signal_stop().
3140	 */
3141	sched_mm_cid_exit_signals(tsk);
3142	tsk->flags |= PF_EXITING;
3143
3144	cgroup_threadgroup_change_end(tsk);
3145
3146	if (!task_sigpending(tsk))
3147		goto out;
3148
3149	unblocked = tsk->blocked;
3150	signotset(&unblocked);
3151	retarget_shared_pending(tsk, &unblocked);
3152
3153	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3154	    task_participate_group_stop(tsk))
3155		group_stop = CLD_STOPPED;
3156out:
3157	spin_unlock_irq(&tsk->sighand->siglock);
3158
3159	/*
3160	 * If group stop has completed, deliver the notification.  This
3161	 * should always go to the real parent of the group leader.
3162	 */
3163	if (unlikely(group_stop)) {
3164		read_lock(&tasklist_lock);
3165		do_notify_parent_cldstop(tsk, false, group_stop);
3166		read_unlock(&tasklist_lock);
3167	}
3168}
3169
3170/*
3171 * System call entry points.
3172 */
3173
3174/**
3175 *  sys_restart_syscall - restart a system call
3176 */
3177SYSCALL_DEFINE0(restart_syscall)
3178{
3179	struct restart_block *restart = &current->restart_block;
3180	return restart->fn(restart);
3181}
3182
3183long do_no_restart_syscall(struct restart_block *param)
3184{
3185	return -EINTR;
3186}
3187
3188static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3189{
3190	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3191		sigset_t newblocked;
3192		/* A set of now blocked but previously unblocked signals. */
3193		sigandnsets(&newblocked, newset, &current->blocked);
3194		retarget_shared_pending(tsk, &newblocked);
3195	}
3196	tsk->blocked = *newset;
3197	recalc_sigpending();
3198}
3199
3200/**
3201 * set_current_blocked - change current->blocked mask
3202 * @newset: new mask
3203 *
3204 * It is wrong to change ->blocked directly, this helper should be used
3205 * to ensure the process can't miss a shared signal we are going to block.
3206 */
3207void set_current_blocked(sigset_t *newset)
3208{
3209	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3210	__set_current_blocked(newset);
3211}
3212
3213void __set_current_blocked(const sigset_t *newset)
3214{
3215	struct task_struct *tsk = current;
3216
3217	/*
3218	 * In case the signal mask hasn't changed, there is nothing we need
3219	 * to do. current->blocked shouldn't be modified by any other task.
3220	 */
3221	if (sigequalsets(&tsk->blocked, newset))
3222		return;
3223
3224	spin_lock_irq(&tsk->sighand->siglock);
3225	__set_task_blocked(tsk, newset);
3226	spin_unlock_irq(&tsk->sighand->siglock);
3227}
3228
3229/*
3230 * This is also useful for kernel threads that want to temporarily
3231 * (or permanently) block certain signals.
3232 *
3233 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3234 * interface happily blocks "unblockable" signals like SIGKILL
3235 * and friends.
3236 */
3237int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3238{
3239	struct task_struct *tsk = current;
3240	sigset_t newset;
3241
3242	/* Lockless, only current can change ->blocked, never from irq */
3243	if (oldset)
3244		*oldset = tsk->blocked;
3245
3246	switch (how) {
3247	case SIG_BLOCK:
3248		sigorsets(&newset, &tsk->blocked, set);
3249		break;
3250	case SIG_UNBLOCK:
3251		sigandnsets(&newset, &tsk->blocked, set);
3252		break;
3253	case SIG_SETMASK:
3254		newset = *set;
3255		break;
3256	default:
3257		return -EINVAL;
3258	}
3259
3260	__set_current_blocked(&newset);
3261	return 0;
3262}
3263EXPORT_SYMBOL(sigprocmask);
3264
3265/*
3266 * This API helps set app-provided sigmasks.
3267 *
3268 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3269 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3270 *
3271 * Note that it does set_restore_sigmask() in advance, so it must always be
3272 * paired with restore_saved_sigmask_unless() before returning from the syscall.
3273 */
3274int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3275{
3276	sigset_t kmask;
3277
3278	if (!umask)
3279		return 0;
3280	if (sigsetsize != sizeof(sigset_t))
3281		return -EINVAL;
3282	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3283		return -EFAULT;
3284
3285	set_restore_sigmask();
3286	current->saved_sigmask = current->blocked;
3287	set_current_blocked(&kmask);
3288
3289	return 0;
3290}
3291
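/*
 * Example: the userspace side of the saved-sigmask handling above, as a
 * minimal sketch.  ppoll(2) passes a temporary mask through this path;
 * the kernel installs it for the duration of the wait and reinstates the
 * previous mask on return.  Assumes glibc with _GNU_SOURCE for ppoll().
 */
#if 0
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
	sigset_t during_wait;

	/* Block nothing while sleeping: any signal may interrupt the
	 * wait, yet the old mask is back in force after ppoll() returns. */
	sigemptyset(&during_wait);

	if (ppoll(&pfd, 1, NULL, &during_wait) < 0)
		perror("ppoll");	/* EINTR if a signal arrived */
	return 0;
}
#endif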
3292#ifdef CONFIG_COMPAT
3293int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3294			    size_t sigsetsize)
3295{
3296	sigset_t kmask;
3297
3298	if (!umask)
3299		return 0;
3300	if (sigsetsize != sizeof(compat_sigset_t))
3301		return -EINVAL;
3302	if (get_compat_sigset(&kmask, umask))
3303		return -EFAULT;
3304
3305	set_restore_sigmask();
3306	current->saved_sigmask = current->blocked;
3307	set_current_blocked(&kmask);
3308
3309	return 0;
3310}
3311#endif
3312
3313/**
3314 *  sys_rt_sigprocmask - change the list of currently blocked signals
3315 *  @how: whether to add, remove, or set signals
3316 *  @nset: new set of signals to block (if non-null)
3317 *  @oset: previous value of signal mask if non-null
3318 *  @sigsetsize: size of sigset_t type
3319 */
3320SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3321		sigset_t __user *, oset, size_t, sigsetsize)
3322{
3323	sigset_t old_set, new_set;
3324	int error;
3325
3326	/* XXX: Don't preclude handling different sized sigset_t's.  */
3327	if (sigsetsize != sizeof(sigset_t))
3328		return -EINVAL;
3329
3330	old_set = current->blocked;
3331
3332	if (nset) {
3333		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3334			return -EFAULT;
3335		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3336
3337		error = sigprocmask(how, &new_set, NULL);
3338		if (error)
3339			return error;
3340	}
3341
3342	if (oset) {
3343		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3344			return -EFAULT;
3345	}
3346
3347	return 0;
3348}
3349
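/*
 * Example: a minimal userspace sketch of rt_sigprocmask(2) via the glibc
 * sigprocmask(3) wrapper, blocking SIGINT around a critical section so
 * it stays pending instead of being delivered mid-way.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	/* SIG_BLOCK ORs @block into the current mask; @old saves it. */
	if (sigprocmask(SIG_BLOCK, &block, &old) < 0) {
		perror("sigprocmask");
		return 1;
	}

	/* ... critical section: a SIGINT raised here stays pending ... */

	/* Restore the old mask; a pending SIGINT is delivered now. */
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}
#endif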
3350#ifdef CONFIG_COMPAT
3351COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3352		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3353{
3354	sigset_t old_set = current->blocked;
3355
3356	/* XXX: Don't preclude handling different sized sigset_t's.  */
3357	if (sigsetsize != sizeof(sigset_t))
3358		return -EINVAL;
3359
3360	if (nset) {
3361		sigset_t new_set;
3362		int error;
3363		if (get_compat_sigset(&new_set, nset))
3364			return -EFAULT;
3365		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3366
3367		error = sigprocmask(how, &new_set, NULL);
3368		if (error)
3369			return error;
3370	}
3371	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3372}
3373#endif
3374
3375static void do_sigpending(sigset_t *set)
3376{
3377	spin_lock_irq(&current->sighand->siglock);
3378	sigorsets(set, &current->pending.signal,
3379		  &current->signal->shared_pending.signal);
3380	spin_unlock_irq(&current->sighand->siglock);
3381
3382	/* Outside the lock because only this thread touches it.  */
3383	sigandsets(set, &current->blocked, set);
3384}
3385
3386/**
3387 *  sys_rt_sigpending - examine pending signals that have been raised
3388 *			while blocked
3389 *  @uset: stores pending signals
3390 *  @sigsetsize: size of sigset_t type or smaller
3391 */
3392SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3393{
3394	sigset_t set;
3395
3396	if (sigsetsize > sizeof(*uset))
3397		return -EINVAL;
3398
3399	do_sigpending(&set);
3400
3401	if (copy_to_user(uset, &set, sigsetsize))
3402		return -EFAULT;
3403
3404	return 0;
3405}
3406
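/*
 * Example: a small userspace sketch of rt_sigpending(2) through the
 * glibc sigpending(3) wrapper, observing a blocked-and-raised SIGINT.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGINT);		/* queued, not delivered: it is blocked */

	sigpending(&pending);
	if (sigismember(&pending, SIGINT))
		printf("SIGINT is pending while blocked\n");
	return 0;
}
#endif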
3407#ifdef CONFIG_COMPAT
3408COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3409		compat_size_t, sigsetsize)
3410{
3411	sigset_t set;
3412
3413	if (sigsetsize > sizeof(*uset))
3414		return -EINVAL;
3415
3416	do_sigpending(&set);
3417
3418	return put_compat_sigset(uset, &set, sigsetsize);
3419}
3420#endif
3421
3422static const struct {
3423	unsigned char limit, layout;
3424} sig_sicodes[] = {
3425	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3426	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3427	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3428	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3429	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3430#if defined(SIGEMT)
3431	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3432#endif
3433	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3434	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3435	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3436};
3437
3438static bool known_siginfo_layout(unsigned sig, int si_code)
3439{
3440	if (si_code == SI_KERNEL)
3441		return true;
3442	else if (si_code > SI_USER) {
3443		if (sig_specific_sicodes(sig)) {
3444			if (si_code <= sig_sicodes[sig].limit)
3445				return true;
3446		}
3447		else if (si_code <= NSIGPOLL)
3448			return true;
3449	}
3450	else if (si_code >= SI_DETHREAD)
3451		return true;
3452	else if (si_code == SI_ASYNCNL)
3453		return true;
3454	return false;
3455}
3456
3457enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3458{
3459	enum siginfo_layout layout = SIL_KILL;
3460	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3461		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3462		    (si_code <= sig_sicodes[sig].limit)) {
3463			layout = sig_sicodes[sig].layout;
3464			/* Handle the exceptions */
3465			if ((sig == SIGBUS) &&
3466			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3467				layout = SIL_FAULT_MCEERR;
3468			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3469				layout = SIL_FAULT_BNDERR;
3470#ifdef SEGV_PKUERR
3471			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3472				layout = SIL_FAULT_PKUERR;
3473#endif
3474			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3475				layout = SIL_FAULT_PERF_EVENT;
3476			else if (IS_ENABLED(CONFIG_SPARC) &&
3477				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3478				layout = SIL_FAULT_TRAPNO;
3479			else if (IS_ENABLED(CONFIG_ALPHA) &&
3480				 ((sig == SIGFPE) ||
3481				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3482				layout = SIL_FAULT_TRAPNO;
3483		}
3484		else if (si_code <= NSIGPOLL)
3485			layout = SIL_POLL;
3486	} else {
3487		if (si_code == SI_TIMER)
3488			layout = SIL_TIMER;
3489		else if (si_code == SI_SIGIO)
3490			layout = SIL_POLL;
3491		else if (si_code < 0)
3492			layout = SIL_RT;
3493	}
3494	return layout;
3495}
3496
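/*
 * Example: what the SIL_FAULT layout means to userspace, as a hedged
 * sketch.  A handler installed with SA_SIGINFO receives the faulting
 * address in si_addr for fault-layout signals such as SIGSEGV.  Note
 * that fprintf() is not async-signal-safe; it is tolerable here only
 * because the process exits immediately afterwards.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* SIL_FAULT layout: si_addr is the faulting address. */
	fprintf(stderr, "SIGSEGV at %p (si_code=%d)\n",
		info->si_addr, info->si_code);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)8 = 42;	/* fault on purpose */
	return 0;
}
#endif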
3497static inline char __user *si_expansion(const siginfo_t __user *info)
3498{
3499	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3500}
3501
3502int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3503{
3504	char __user *expansion = si_expansion(to);
3505	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3506		return -EFAULT;
3507	if (clear_user(expansion, SI_EXPANSION_SIZE))
3508		return -EFAULT;
3509	return 0;
3510}
3511
3512static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3513				       const siginfo_t __user *from)
3514{
3515	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3516		char __user *expansion = si_expansion(from);
3517		char buf[SI_EXPANSION_SIZE];
3518		int i;
3519		/*
3520		 * An unknown si_code might need more than
3521		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3522		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3523		 * will return this data to userspace exactly.
3524		 */
3525		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3526			return -EFAULT;
3527		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3528			if (buf[i] != 0)
3529				return -E2BIG;
3530		}
3531	}
3532	return 0;
3533}
3534
3535static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3536				    const siginfo_t __user *from)
3537{
3538	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3539		return -EFAULT;
3540	to->si_signo = signo;
3541	return post_copy_siginfo_from_user(to, from);
3542}
3543
3544int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3545{
3546	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3547		return -EFAULT;
3548	return post_copy_siginfo_from_user(to, from);
3549}
3550
3551#ifdef CONFIG_COMPAT
3552/**
3553 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3554 * @to: compat siginfo destination
3555 * @from: kernel siginfo source
3556 *
3557 * Note: This function does not work properly for SIGCHLD on x32, but
3558 * fortunately it doesn't have to.  The only valid callers for this function are
3559 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3560 * The latter does not care because SIGCHLD will never cause a coredump.
3561 */
3562void copy_siginfo_to_external32(struct compat_siginfo *to,
3563		const struct kernel_siginfo *from)
3564{
3565	memset(to, 0, sizeof(*to));
3566
3567	to->si_signo = from->si_signo;
3568	to->si_errno = from->si_errno;
3569	to->si_code  = from->si_code;
3570	switch (siginfo_layout(from->si_signo, from->si_code)) {
3571	case SIL_KILL:
3572		to->si_pid = from->si_pid;
3573		to->si_uid = from->si_uid;
3574		break;
3575	case SIL_TIMER:
3576		to->si_tid     = from->si_tid;
3577		to->si_overrun = from->si_overrun;
3578		to->si_int     = from->si_int;
3579		break;
3580	case SIL_POLL:
3581		to->si_band = from->si_band;
3582		to->si_fd   = from->si_fd;
3583		break;
3584	case SIL_FAULT:
3585		to->si_addr = ptr_to_compat(from->si_addr);
3586		break;
3587	case SIL_FAULT_TRAPNO:
3588		to->si_addr = ptr_to_compat(from->si_addr);
3589		to->si_trapno = from->si_trapno;
3590		break;
3591	case SIL_FAULT_MCEERR:
3592		to->si_addr = ptr_to_compat(from->si_addr);
3593		to->si_addr_lsb = from->si_addr_lsb;
3594		break;
3595	case SIL_FAULT_BNDERR:
3596		to->si_addr = ptr_to_compat(from->si_addr);
3597		to->si_lower = ptr_to_compat(from->si_lower);
3598		to->si_upper = ptr_to_compat(from->si_upper);
3599		break;
3600	case SIL_FAULT_PKUERR:
3601		to->si_addr = ptr_to_compat(from->si_addr);
3602		to->si_pkey = from->si_pkey;
3603		break;
3604	case SIL_FAULT_PERF_EVENT:
3605		to->si_addr = ptr_to_compat(from->si_addr);
3606		to->si_perf_data = from->si_perf_data;
3607		to->si_perf_type = from->si_perf_type;
3608		to->si_perf_flags = from->si_perf_flags;
3609		break;
3610	case SIL_CHLD:
3611		to->si_pid = from->si_pid;
3612		to->si_uid = from->si_uid;
3613		to->si_status = from->si_status;
3614		to->si_utime = from->si_utime;
3615		to->si_stime = from->si_stime;
3616		break;
3617	case SIL_RT:
3618		to->si_pid = from->si_pid;
3619		to->si_uid = from->si_uid;
3620		to->si_int = from->si_int;
3621		break;
3622	case SIL_SYS:
3623		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3624		to->si_syscall   = from->si_syscall;
3625		to->si_arch      = from->si_arch;
3626		break;
3627	}
3628}
3629
3630int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3631			   const struct kernel_siginfo *from)
3632{
3633	struct compat_siginfo new;
3634
3635	copy_siginfo_to_external32(&new, from);
3636	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3637		return -EFAULT;
3638	return 0;
3639}
3640
3641static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3642					 const struct compat_siginfo *from)
3643{
3644	clear_siginfo(to);
3645	to->si_signo = from->si_signo;
3646	to->si_errno = from->si_errno;
3647	to->si_code  = from->si_code;
3648	switch (siginfo_layout(from->si_signo, from->si_code)) {
3649	case SIL_KILL:
3650		to->si_pid = from->si_pid;
3651		to->si_uid = from->si_uid;
3652		break;
3653	case SIL_TIMER:
3654		to->si_tid     = from->si_tid;
3655		to->si_overrun = from->si_overrun;
3656		to->si_int     = from->si_int;
3657		break;
3658	case SIL_POLL:
3659		to->si_band = from->si_band;
3660		to->si_fd   = from->si_fd;
3661		break;
3662	case SIL_FAULT:
3663		to->si_addr = compat_ptr(from->si_addr);
3664		break;
3665	case SIL_FAULT_TRAPNO:
3666		to->si_addr = compat_ptr(from->si_addr);
3667		to->si_trapno = from->si_trapno;
3668		break;
3669	case SIL_FAULT_MCEERR:
3670		to->si_addr = compat_ptr(from->si_addr);
3671		to->si_addr_lsb = from->si_addr_lsb;
3672		break;
3673	case SIL_FAULT_BNDERR:
3674		to->si_addr = compat_ptr(from->si_addr);
3675		to->si_lower = compat_ptr(from->si_lower);
3676		to->si_upper = compat_ptr(from->si_upper);
3677		break;
3678	case SIL_FAULT_PKUERR:
3679		to->si_addr = compat_ptr(from->si_addr);
3680		to->si_pkey = from->si_pkey;
3681		break;
3682	case SIL_FAULT_PERF_EVENT:
3683		to->si_addr = compat_ptr(from->si_addr);
3684		to->si_perf_data = from->si_perf_data;
3685		to->si_perf_type = from->si_perf_type;
3686		to->si_perf_flags = from->si_perf_flags;
3687		break;
3688	case SIL_CHLD:
3689		to->si_pid    = from->si_pid;
3690		to->si_uid    = from->si_uid;
3691		to->si_status = from->si_status;
3692#ifdef CONFIG_X86_X32_ABI
3693		if (in_x32_syscall()) {
3694			to->si_utime = from->_sifields._sigchld_x32._utime;
3695			to->si_stime = from->_sifields._sigchld_x32._stime;
3696		} else
3697#endif
3698		{
3699			to->si_utime = from->si_utime;
3700			to->si_stime = from->si_stime;
3701		}
3702		break;
3703	case SIL_RT:
3704		to->si_pid = from->si_pid;
3705		to->si_uid = from->si_uid;
3706		to->si_int = from->si_int;
3707		break;
3708	case SIL_SYS:
3709		to->si_call_addr = compat_ptr(from->si_call_addr);
3710		to->si_syscall   = from->si_syscall;
3711		to->si_arch      = from->si_arch;
3712		break;
3713	}
3714	return 0;
3715}
3716
3717static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3718				      const struct compat_siginfo __user *ufrom)
3719{
3720	struct compat_siginfo from;
3721
3722	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3723		return -EFAULT;
3724
3725	from.si_signo = signo;
3726	return post_copy_siginfo_from_user32(to, &from);
3727}
3728
3729int copy_siginfo_from_user32(struct kernel_siginfo *to,
3730			     const struct compat_siginfo __user *ufrom)
3731{
3732	struct compat_siginfo from;
3733
3734	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3735		return -EFAULT;
3736
3737	return post_copy_siginfo_from_user32(to, &from);
3738}
3739#endif /* CONFIG_COMPAT */
3740
3741/**
3742 *  do_sigtimedwait - wait for queued signals specified in @which
3743 *  @which: queued signals to wait for
3744 *  @info: if non-null, the signal's siginfo is returned here
3745 *  @ts: upper bound on process time suspension
3746 */
3747static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3748		    const struct timespec64 *ts)
3749{
3750	ktime_t *to = NULL, timeout = KTIME_MAX;
3751	struct task_struct *tsk = current;
3752	sigset_t mask = *which;
3753	enum pid_type type;
3754	int sig, ret = 0;
3755
3756	if (ts) {
3757		if (!timespec64_valid(ts))
3758			return -EINVAL;
3759		timeout = timespec64_to_ktime(*ts);
3760		to = &timeout;
3761	}
3762
3763	/*
3764	 * Invert the set of allowed signals to get those we want to block.
3765	 */
3766	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3767	signotset(&mask);
3768
3769	spin_lock_irq(&tsk->sighand->siglock);
3770	sig = dequeue_signal(&mask, info, &type);
3771	if (!sig && timeout) {
3772		/*
3773		 * None ready, temporarily unblock those we're interested in
3774		 * while we are sleeping so that we'll be awakened when
3775		 * they arrive. Unblocking is always fine, we can avoid
3776		 * set_current_blocked().
3777		 */
3778		tsk->real_blocked = tsk->blocked;
3779		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3780		recalc_sigpending();
3781		spin_unlock_irq(&tsk->sighand->siglock);
3782
3783		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3784		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3785					       HRTIMER_MODE_REL);
3786		spin_lock_irq(&tsk->sighand->siglock);
3787		__set_task_blocked(tsk, &tsk->real_blocked);
3788		sigemptyset(&tsk->real_blocked);
3789		sig = dequeue_signal(&mask, info, &type);
3790	}
3791	spin_unlock_irq(&tsk->sighand->siglock);
3792
3793	if (sig)
3794		return sig;
3795	return ret ? -EINTR : -EAGAIN;
3796}
3797
3798/**
3799 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3800 *			in @uthese
3801 *  @uthese: queued signals to wait for
3802 *  @uinfo: if non-null, the signal's siginfo is returned here
3803 *  @uts: upper bound on process time suspension
3804 *  @sigsetsize: size of sigset_t type
3805 */
3806SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3807		siginfo_t __user *, uinfo,
3808		const struct __kernel_timespec __user *, uts,
3809		size_t, sigsetsize)
3810{
3811	sigset_t these;
3812	struct timespec64 ts;
3813	kernel_siginfo_t info;
3814	int ret;
3815
3816	/* XXX: Don't preclude handling different sized sigset_t's.  */
3817	if (sigsetsize != sizeof(sigset_t))
3818		return -EINVAL;
3819
3820	if (copy_from_user(&these, uthese, sizeof(these)))
3821		return -EFAULT;
3822
3823	if (uts) {
3824		if (get_timespec64(&ts, uts))
3825			return -EFAULT;
3826	}
3827
3828	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3829
3830	if (ret > 0 && uinfo) {
3831		if (copy_siginfo_to_user(uinfo, &info))
3832			ret = -EFAULT;
3833	}
3834
3835	return ret;
3836}
3837
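/*
 * Example: a minimal userspace sketch of rt_sigtimedwait(2) via the
 * glibc sigtimedwait(3) wrapper.  The waited-for signals should already
 * be blocked, matching the mask inversion done in do_sigtimedwait().
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* block before waiting */

	sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0)
		perror("sigtimedwait");	/* EAGAIN on timeout */
	else
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}
#endif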
3838#ifdef CONFIG_COMPAT_32BIT_TIME
3839SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3840		siginfo_t __user *, uinfo,
3841		const struct old_timespec32 __user *, uts,
3842		size_t, sigsetsize)
3843{
3844	sigset_t these;
3845	struct timespec64 ts;
3846	kernel_siginfo_t info;
3847	int ret;
3848
3849	if (sigsetsize != sizeof(sigset_t))
3850		return -EINVAL;
3851
3852	if (copy_from_user(&these, uthese, sizeof(these)))
3853		return -EFAULT;
3854
3855	if (uts) {
3856		if (get_old_timespec32(&ts, uts))
3857			return -EFAULT;
3858	}
3859
3860	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3861
3862	if (ret > 0 && uinfo) {
3863		if (copy_siginfo_to_user(uinfo, &info))
3864			ret = -EFAULT;
3865	}
3866
3867	return ret;
3868}
3869#endif
3870
3871#ifdef CONFIG_COMPAT
3872COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3873		struct compat_siginfo __user *, uinfo,
3874		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3875{
3876	sigset_t s;
3877	struct timespec64 t;
3878	kernel_siginfo_t info;
3879	long ret;
3880
3881	if (sigsetsize != sizeof(sigset_t))
3882		return -EINVAL;
3883
3884	if (get_compat_sigset(&s, uthese))
3885		return -EFAULT;
3886
3887	if (uts) {
3888		if (get_timespec64(&t, uts))
3889			return -EFAULT;
3890	}
3891
3892	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3893
3894	if (ret > 0 && uinfo) {
3895		if (copy_siginfo_to_user32(uinfo, &info))
3896			ret = -EFAULT;
3897	}
3898
3899	return ret;
3900}
3901
3902#ifdef CONFIG_COMPAT_32BIT_TIME
3903COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3904		struct compat_siginfo __user *, uinfo,
3905		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3906{
3907	sigset_t s;
3908	struct timespec64 t;
3909	kernel_siginfo_t info;
3910	long ret;
3911
3912	if (sigsetsize != sizeof(sigset_t))
3913		return -EINVAL;
3914
3915	if (get_compat_sigset(&s, uthese))
3916		return -EFAULT;
3917
3918	if (uts) {
3919		if (get_old_timespec32(&t, uts))
3920			return -EFAULT;
3921	}
3922
3923	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3924
3925	if (ret > 0 && uinfo) {
3926		if (copy_siginfo_to_user32(uinfo, &info))
3927			ret = -EFAULT;
3928	}
3929
3930	return ret;
3931}
3932#endif
3933#endif
3934
3935static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
3936				 enum pid_type type)
3937{
3938	clear_siginfo(info);
3939	info->si_signo = sig;
3940	info->si_errno = 0;
3941	info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
3942	info->si_pid = task_tgid_vnr(current);
3943	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3944}
3945
3946/**
3947 *  sys_kill - send a signal to a process
3948 *  @pid: the PID of the process
3949 *  @sig: signal to be sent
3950 */
3951SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3952{
3953	struct kernel_siginfo info;
3954
3955	prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);
3956
3957	return kill_something_info(sig, &info, pid);
3958}
3959
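/*
 * Example: the "null signal" probe from userspace, a minimal sketch.
 * kill(pid, 0) performs only the permission and existence checks, with
 * nothing delivered, as described at do_send_specific() below.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>

/* Returns nonzero if @pid exists (delivery permission aside). */
static int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists and we may signal it */
	return errno == EPERM;		/* exists, but we may not */
}

int main(void)
{
	printf("pid 1 exists: %d\n", process_exists(1));
	return 0;
}
#endif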
3960/*
3961 * Verify that the signaler and signalee either are in the same pid namespace
3962 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3963 * namespace.
3964 */
3965static bool access_pidfd_pidns(struct pid *pid)
3966{
3967	struct pid_namespace *active = task_active_pid_ns(current);
3968	struct pid_namespace *p = ns_of_pid(pid);
3969
3970	for (;;) {
3971		if (!p)
3972			return false;
3973		if (p == active)
3974			break;
3975		p = p->parent;
3976	}
3977
3978	return true;
3979}
3980
3981static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3982		siginfo_t __user *info)
3983{
3984#ifdef CONFIG_COMPAT
3985	/*
3986	 * Avoid hooking up compat syscalls and instead handle necessary
3987	 * conversions here. Note, this is a stop-gap measure and should not be
3988	 * considered a generic solution.
3989	 */
3990	if (in_compat_syscall())
3991		return copy_siginfo_from_user32(
3992			kinfo, (struct compat_siginfo __user *)info);
3993#endif
3994	return copy_siginfo_from_user(kinfo, info);
3995}
3996
3997static struct pid *pidfd_to_pid(const struct file *file)
3998{
3999	struct pid *pid;
4000
4001	pid = pidfd_pid(file);
4002	if (!IS_ERR(pid))
4003		return pid;
4004
4005	return tgid_pidfd_to_pid(file);
4006}
4007
4008#define PIDFD_SEND_SIGNAL_FLAGS                            \
4009	(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
4010	 PIDFD_SIGNAL_PROCESS_GROUP)
4011
4012/**
4013 * sys_pidfd_send_signal - Signal a process through a pidfd
4014 * @pidfd:  file descriptor of the process
4015 * @sig:    signal to send
4016 * @info:   signal info
4017 * @flags:  future flags
4018 *
4019 * Send the signal to the thread group or to the individual thread depending
4020 * on PIDFD_THREAD.
4021 * In the future, an extension to @flags may be used to override the default
4022 * scope of @pidfd.
4023 *
4024 * Return: 0 on success, negative errno on failure
4025 */
4026SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
4027		siginfo_t __user *, info, unsigned int, flags)
4028{
4029	int ret;
4030	struct pid *pid;
4031	kernel_siginfo_t kinfo;
4032	enum pid_type type;
4033
4034	/* Reject any flags beyond the supported signal-scope flags. */
4035	if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
4036		return -EINVAL;
4037
4038	/* Ensure that only a single signal scope determining flag is set. */
4039	if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
4040		return -EINVAL;
4041
4042	CLASS(fd, f)(pidfd);
4043	if (fd_empty(f))
4044		return -EBADF;
4045
4046	/* Is this a pidfd? */
4047	pid = pidfd_to_pid(fd_file(f));
4048	if (IS_ERR(pid))
4049		return PTR_ERR(pid);
4050
4051	if (!access_pidfd_pidns(pid))
4052		return -EINVAL;
4053
4054	switch (flags) {
4055	case 0:
4056		/* Infer scope from the type of pidfd. */
4057		if (fd_file(f)->f_flags & PIDFD_THREAD)
4058			type = PIDTYPE_PID;
4059		else
4060			type = PIDTYPE_TGID;
4061		break;
4062	case PIDFD_SIGNAL_THREAD:
4063		type = PIDTYPE_PID;
4064		break;
4065	case PIDFD_SIGNAL_THREAD_GROUP:
4066		type = PIDTYPE_TGID;
4067		break;
4068	case PIDFD_SIGNAL_PROCESS_GROUP:
4069		type = PIDTYPE_PGID;
4070		break;
4071	}
4072
4073	if (info) {
4074		ret = copy_siginfo_from_user_any(&kinfo, info);
4075		if (unlikely(ret))
4076			return ret;
4077
4078		if (unlikely(sig != kinfo.si_signo))
4079			return -EINVAL;
4080
4081		/* Only allow sending arbitrary signals to yourself. */
4082		if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
4083		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
4084			return -EPERM;
4085	} else {
4086		prepare_kill_siginfo(sig, &kinfo, type);
4087	}
4088
4089	if (type == PIDTYPE_PGID)
4090		return kill_pgrp_info(sig, &kinfo, pid);
4091	else
4092		return kill_pid_info_type(sig, &kinfo, pid, type);
4093}
4094
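/*
 * Example: a hedged userspace sketch of pidfd_send_signal(2), invoked
 * via syscall(2) because libc wrappers may be absent.  Assumes a kernel
 * with pidfd_open(2); SYS_pidfd_open and SYS_pidfd_send_signal come
 * from <sys/syscall.h> on reasonably recent systems.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	pid_t pid = (pid_t)atoi(argv[1]);
	int pidfd;

	/* A pidfd is a stable handle: no PID-reuse races afterwards. */
	pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* NULL info: the kernel fills in kill(2)-style siginfo. */
	if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		return 1;
	}
	close(pidfd);
	return 0;
}
#endif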
4095static int
4096do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
4097{
4098	struct task_struct *p;
4099	int error = -ESRCH;
4100
4101	rcu_read_lock();
4102	p = find_task_by_vpid(pid);
4103	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
4104		error = check_kill_permission(sig, info, p);
4105		/*
4106		 * The null signal is a permissions and process existence
4107		 * probe.  No signal is actually delivered.
4108		 */
4109		if (!error && sig) {
4110			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
4111			/*
4112			 * If lock_task_sighand() failed we pretend the task
4113			 * dies after receiving the signal. The window is tiny,
4114			 * and the signal is private anyway.
4115			 */
4116			if (unlikely(error == -ESRCH))
4117				error = 0;
4118		}
4119	}
4120	rcu_read_unlock();
4121
4122	return error;
4123}
4124
4125static int do_tkill(pid_t tgid, pid_t pid, int sig)
4126{
4127	struct kernel_siginfo info;
4128
4129	prepare_kill_siginfo(sig, &info, PIDTYPE_PID);
4130
4131	return do_send_specific(tgid, pid, sig, &info);
4132}
4133
4134/**
4135 *  sys_tgkill - send signal to one specific thread
4136 *  @tgid: the thread group ID of the thread
4137 *  @pid: the PID of the thread
4138 *  @sig: signal to be sent
4139 *
4140 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
4141 *  exists but no longer belongs to the target process. This
4142 *  method solves the problem of threads exiting and PIDs getting reused.
4143 */
4144SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
4145{
4146	/* This is only valid for single tasks */
4147	if (pid <= 0 || tgid <= 0)
4148		return -EINVAL;
4149
4150	return do_tkill(tgid, pid, sig);
4151}
4152
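/*
 * Example: a minimal userspace sketch of tgkill(2) targeting the calling
 * thread itself, via syscall(2) since older libcs lack wrappers for
 * tgkill() and gettid().
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = syscall(SYS_gettid);

	signal(SIGUSR1, SIG_IGN);	/* keep the example alive */

	/* The tgid check makes a stale, reused TID fail with ESRCH. */
	if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) < 0)
		perror("tgkill");
	return 0;
}
#endif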
4153/**
4154 *  sys_tkill - send signal to one specific task
4155 *  @pid: the PID of the task
4156 *  @sig: signal to be sent
4157 *
4158 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4159 */
4160SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4161{
4162	/* This is only valid for single tasks */
4163	if (pid <= 0)
4164		return -EINVAL;
4165
4166	return do_tkill(0, pid, sig);
4167}
4168
4169static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4170{
4171	/* Not even root can pretend to send signals from the kernel.
4172	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4173	 */
4174	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4175	    (task_pid_vnr(current) != pid))
4176		return -EPERM;
4177
4178	/* POSIX.1b doesn't mention process groups.  */
4179	return kill_proc_info(sig, info, pid);
4180}
4181
4182/**
4183 *  sys_rt_sigqueueinfo - send signal information to a process
4184 *  @pid: the PID of the thread
4185 *  @sig: signal to be sent
4186 *  @uinfo: signal info to be sent
4187 */
4188SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4189		siginfo_t __user *, uinfo)
4190{
4191	kernel_siginfo_t info;
4192	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4193	if (unlikely(ret))
4194		return ret;
4195	return do_rt_sigqueueinfo(pid, sig, &info);
4196}
4197
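/*
 * Example: a minimal userspace sketch of rt_sigqueueinfo(2) through
 * glibc's sigqueue(3), which sets si_code to SI_QUEUE and carries the
 * payload in si_value.  The signal is collected synchronously with
 * sigwaitinfo() to keep the example handler-free.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	union sigval value = { .sival_int = 42 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	/* Queue SIGUSR1 to ourselves with an integer payload. */
	sigqueue(getpid(), SIGUSR1, value);

	sigwaitinfo(&set, &info);
	printf("si_code=%d si_int=%d\n",
	       info.si_code, info.si_value.sival_int);
	return 0;
}
#endif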
4198#ifdef CONFIG_COMPAT
4199COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4200			compat_pid_t, pid,
4201			int, sig,
4202			struct compat_siginfo __user *, uinfo)
4203{
4204	kernel_siginfo_t info;
4205	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4206	if (unlikely(ret))
4207		return ret;
4208	return do_rt_sigqueueinfo(pid, sig, &info);
4209}
4210#endif
4211
4212static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4213{
4214	/* This is only valid for single tasks */
4215	if (pid <= 0 || tgid <= 0)
4216		return -EINVAL;
4217
4218	/* Not even root can pretend to send signals from the kernel.
4219	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4220	 */
4221	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4222	    (task_pid_vnr(current) != pid))
4223		return -EPERM;
4224
4225	return do_send_specific(tgid, pid, sig, info);
4226}
4227
4228SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4229		siginfo_t __user *, uinfo)
4230{
4231	kernel_siginfo_t info;
4232	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4233	if (unlikely(ret))
4234		return ret;
4235	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4236}
4237
4238#ifdef CONFIG_COMPAT
4239COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4240			compat_pid_t, tgid,
4241			compat_pid_t, pid,
4242			int, sig,
4243			struct compat_siginfo __user *, uinfo)
4244{
4245	kernel_siginfo_t info;
4246	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4247	if (unlikely(ret))
4248		return ret;
4249	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4250}
4251#endif
4252
4253/*
4254 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4255 */
4256void kernel_sigaction(int sig, __sighandler_t action)
4257{
4258	spin_lock_irq(&current->sighand->siglock);
4259	current->sighand->action[sig - 1].sa.sa_handler = action;
4260	if (action == SIG_IGN) {
4261		sigset_t mask;
4262
4263		sigemptyset(&mask);
4264		sigaddset(&mask, sig);
4265
4266		flush_sigqueue_mask(current, &mask, &current->signal->shared_pending);
4267		flush_sigqueue_mask(current, &mask, &current->pending);
4268		recalc_sigpending();
4269	}
4270	spin_unlock_irq(&current->sighand->siglock);
4271}
4272EXPORT_SYMBOL(kernel_sigaction);
4273
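/*
 * Example: a hedged in-kernel sketch of the usual consumers of
 * kernel_sigaction(): the allow_signal()/disallow_signal() wrappers in
 * include/linux/signal.h.  my_kthread() is a hypothetical thread
 * function, not an existing symbol.
 */
#if 0
static int my_kthread(void *data)
{
	allow_signal(SIGTERM);	/* kthreads ignore signals by default */

	while (!kthread_should_stop()) {
		if (signal_pending(current)) {
			/* React, then clear it ourselves: nothing will
			 * deliver signals to a kthread on our behalf. */
			flush_signals(current);
			break;
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif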
4274void __weak sigaction_compat_abi(struct k_sigaction *act,
4275		struct k_sigaction *oact)
4276{
4277}
4278
4279int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4280{
4281	struct task_struct *p = current, *t;
4282	struct k_sigaction *k;
4283	sigset_t mask;
4284
4285	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4286		return -EINVAL;
4287
4288	k = &p->sighand->action[sig-1];
4289
4290	spin_lock_irq(&p->sighand->siglock);
4291	if (k->sa.sa_flags & SA_IMMUTABLE) {
4292		spin_unlock_irq(&p->sighand->siglock);
4293		return -EINVAL;
4294	}
4295	if (oact)
4296		*oact = *k;
4297
4298	/*
4299	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4300	 * e.g. by having an architecture use the bit in their uapi.
4301	 */
4302	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4303
4304	/*
4305	 * Clear unknown flag bits in order to allow userspace to detect missing
4306	 * support for flag bits and to allow the kernel to use non-uapi bits
4307	 * internally.
4308	 */
4309	if (act)
4310		act->sa.sa_flags &= UAPI_SA_FLAGS;
4311	if (oact)
4312		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4313
4314	sigaction_compat_abi(act, oact);
4315
4316	if (act) {
4317		bool was_ignored = k->sa.sa_handler == SIG_IGN;
4318
4319		sigdelsetmask(&act->sa.sa_mask,
4320			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4321		*k = *act;
4322		/*
4323		 * POSIX 3.3.1.3:
4324		 *  "Setting a signal action to SIG_IGN for a signal that is
4325		 *   pending shall cause the pending signal to be discarded,
4326		 *   whether or not it is blocked."
4327		 *
4328		 *  "Setting a signal action to SIG_DFL for a signal that is
4329		 *   pending and whose default action is to ignore the signal
4330		 *   (for example, SIGCHLD), shall cause the pending signal to
4331		 *   be discarded, whether or not it is blocked"
4332		 */
4333		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4334			sigemptyset(&mask);
4335			sigaddset(&mask, sig);
4336			flush_sigqueue_mask(p, &mask, &p->signal->shared_pending);
4337			for_each_thread(p, t)
4338				flush_sigqueue_mask(p, &mask, &t->pending);
4339		} else if (was_ignored) {
4340			posixtimer_sig_unignore(p, sig);
4341		}
4342	}
4343
4344	spin_unlock_irq(&p->sighand->siglock);
4345	return 0;
4346}
4347
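/*
 * Example: a minimal userspace sketch of rt_sigaction(2) via the glibc
 * sigaction(3) wrapper, installing a SIGINT handler with SA_RESTART.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigint;

static void on_sigint(int sig)
{
	got_sigint = 1;		/* only async-signal-safe work here */
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_handler = on_sigint;
	sigemptyset(&sa.sa_mask);	/* extra signals to block in handler */
	sa.sa_flags = SA_RESTART;	/* restart slow syscalls after handler */
	if (sigaction(SIGINT, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}

	pause();			/* pause(2) is never restarted */
	if (got_sigint)
		printf("caught SIGINT\n");
	return 0;
}
#endif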
4348#ifdef CONFIG_DYNAMIC_SIGFRAME
4349static inline void sigaltstack_lock(void)
4350	__acquires(&current->sighand->siglock)
4351{
4352	spin_lock_irq(&current->sighand->siglock);
4353}
4354
4355static inline void sigaltstack_unlock(void)
4356	__releases(&current->sighand->siglock)
4357{
4358	spin_unlock_irq(&current->sighand->siglock);
4359}
4360#else
4361static inline void sigaltstack_lock(void) { }
4362static inline void sigaltstack_unlock(void) { }
4363#endif
4364
4365static int
4366do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4367		size_t min_ss_size)
4368{
4369	struct task_struct *t = current;
4370	int ret = 0;
4371
4372	if (oss) {
4373		memset(oss, 0, sizeof(stack_t));
4374		oss->ss_sp = (void __user *) t->sas_ss_sp;
4375		oss->ss_size = t->sas_ss_size;
4376		oss->ss_flags = sas_ss_flags(sp) |
4377			(current->sas_ss_flags & SS_FLAG_BITS);
4378	}
4379
4380	if (ss) {
4381		void __user *ss_sp = ss->ss_sp;
4382		size_t ss_size = ss->ss_size;
4383		unsigned int ss_flags = ss->ss_flags;
4384		int ss_mode;
4385
4386		if (unlikely(on_sig_stack(sp)))
4387			return -EPERM;
4388
4389		ss_mode = ss_flags & ~SS_FLAG_BITS;
4390		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4391				ss_mode != 0))
4392			return -EINVAL;
4393
4394		/*
4395		 * Return before taking any locks if no actual
4396		 * sigaltstack changes were requested.
4397		 */
4398		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4399		    t->sas_ss_size == ss_size &&
4400		    t->sas_ss_flags == ss_flags)
4401			return 0;
4402
4403		sigaltstack_lock();
4404		if (ss_mode == SS_DISABLE) {
4405			ss_size = 0;
4406			ss_sp = NULL;
4407		} else {
4408			if (unlikely(ss_size < min_ss_size))
4409				ret = -ENOMEM;
4410			if (!sigaltstack_size_valid(ss_size))
4411				ret = -ENOMEM;
4412		}
4413		if (!ret) {
4414			t->sas_ss_sp = (unsigned long) ss_sp;
4415			t->sas_ss_size = ss_size;
4416			t->sas_ss_flags = ss_flags;
4417		}
4418		sigaltstack_unlock();
4419	}
4420	return ret;
4421}
4422
4423SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4424{
4425	stack_t new, old;
4426	int err;
4427	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4428		return -EFAULT;
4429	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4430			      current_user_stack_pointer(),
4431			      MINSIGSTKSZ);
4432	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4433		err = -EFAULT;
4434	return err;
4435}
4436
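/*
 * Example: a minimal userspace sketch of sigaltstack(2) paired with an
 * SA_ONSTACK handler, the combination the SS_ONSTACK/SS_DISABLE logic
 * above exists to serve.  write(2) is used because it is
 * async-signal-safe.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* Runs on the alternate stack, even after a stack overflow. */
	write(STDERR_FILENO, "SIGSEGV on altstack\n", 20);
	_exit(1);
}

int main(void)
{
	stack_t ss = { .ss_flags = 0 };
	struct sigaction sa = { 0 };

	ss.ss_size = SIGSTKSZ;
	ss.ss_sp = malloc(ss.ss_size);
	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0) {
		perror("sigaltstack");
		return 1;
	}

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;	/* deliver on the alternate stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)8 = 1;		/* fault to demonstrate */
	return 0;
}
#endif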
4437int restore_altstack(const stack_t __user *uss)
4438{
4439	stack_t new;
4440	if (copy_from_user(&new, uss, sizeof(stack_t)))
4441		return -EFAULT;
4442	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4443			     MINSIGSTKSZ);
4444	/* squash all but EFAULT for now */
4445	return 0;
4446}
4447
4448int __save_altstack(stack_t __user *uss, unsigned long sp)
4449{
4450	struct task_struct *t = current;
4451	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4452		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4453		__put_user(t->sas_ss_size, &uss->ss_size);
4454	return err;
4455}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;

		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;

		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}
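
/*
 * A compat (32-bit on 64-bit) caller never hands the kernel a native
 * stack_t: pointers are widened with compat_ptr() on the way in and
 * narrowed with ptr_to_compat() on the way out, so do_sigaltstack()
 * only ever sees the native layout.  Note that the minimum size check
 * uses COMPAT_MINSIGSTKSZ, the 32-bit ABI's floor, not the native one.
 */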

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err;

	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @uset: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}
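
/*
 * Only sizeof(old_sigset_t) bytes of the pending set are reported: on
 * 32-bit ABIs that word cannot represent signals above 32, so the
 * real-time signals are fully visible only through rt_sigpending().
 */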

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
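
/*
 * How the three modes transform the per-task mask -- a hedged userspace
 * sketch using the libc sigprocmask(3) wrapper, not kernel code:
 *
 *	sigset_t s;
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGINT);
 *	sigprocmask(SIG_BLOCK, &s, NULL);    // blocked |= s
 *	sigprocmask(SIG_UNBLOCK, &s, NULL);  // blocked &= ~s
 *	sigprocmask(SIG_SETMASK, &s, NULL);  // blocked = s
 *
 * SIGKILL and SIGSTOP are silently kept unblockable by
 * set_current_blocked().
 */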

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be altered
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
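
/*
 * The sigsetsize check pins the ABI: userspace wrappers such as glibc's
 * sigaction() pass _NSIG / 8 (8 bytes on Linux), and anything else is
 * rejected with -EINVAL rather than guessing at a sigset layout.
 */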
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
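
/*
 * SA_ONESHOT | SA_NOMASK gives the old unreliable System V semantics:
 * the handler is reset to SIG_DFL on delivery and the signal is not
 * blocked while its handler runs.  libc signal() wrappers typically
 * layer BSD semantics on top by calling sigaction() instead.
 */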
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}
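
/*
 * No signal can be lost between the signal_pending() check and the
 * schedule() call: __schedule() re-checks signal_pending_state() and
 * keeps a TASK_INTERRUPTIBLE task runnable if a signal is already
 * pending, so the loop simply comes around again.
 */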

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
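
/*
 * The point of sigsuspend() is that replacing the mask and sleeping are
 * atomic with respect to signal delivery, which a userspace
 * sigprocmask() + pause() pair is not.  A hedged sketch of the classic
 * race it closes, using the libc wrappers:
 *
 *	sigprocmask(SIG_UNBLOCK, &s, NULL);
 *	// <-- a signal delivered here is handled, then pause() sleeps
 *	//     forever waiting for a signal that has already arrived
 *	pause();
 *
 *	sigsuspend(&unblocked_mask);	// no window: one atomic step
 *
 * The original mask is parked in ->saved_sigmask and restored on the
 * way back to userspace via set_restore_sigmask().
 */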

/**
 *  sys_rt_sigsuspend - replace the signal mask with @unewset until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
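	/*
	 * The USB async I/O code historically raises SI_ASYNCIO with a
	 * user pointer stored in si_addr, while SI_ASYNCIO otherwise
	 * decodes as the rt layout (si_pid/si_uid).  These checks pin the
	 * aliasing: si_addr must sit exactly on top of the pid+uid pair,
	 * either as one pointer-sized int or as two ints packed back to
	 * back.
	 */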
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

#if defined(CONFIG_SYSCTL)
static struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
	{
		.procname	= "exception-trace",
		.data		= &show_unhandled_signals,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
#endif
};

static int __init init_signal_sysctls(void)
{
	register_sysctl_init("debug", signal_debug_table);
	return 0;
}
early_initcall(init_signal_sysctls);
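
/*
 * This registers /proc/sys/debug/exception-trace, which gates the
 * show_unhandled_signals diagnostics.  Roughly, from a shell:
 *
 *	# sysctl debug.exception-trace=1
 *
 * makes the arch fault handlers log unhandled user faults (fault
 * address, ip, sp) to the kernel log; writing 0 silences them.
 */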
#endif /* CONFIG_SYSCTL */

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}
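
/*
 * SLAB_ACCOUNT charges sigqueue allocations to the allocating task's
 * memory cgroup, since queued signals are user-triggerable and would
 * otherwise be an unaccounted kernel-memory vector; SLAB_PANIC turns a
 * failure to create the cache during early boot into an immediate panic.
 */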

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock on the run queue locks.\n"
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d has been sent to process %d.\n",
			   sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */