   1/*
   2 *  linux/kernel/signal.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *
   6 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   7 *
   8 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
   9 *		Changes to use preallocated sigqueue structures
  10 *		to allow signals to be sent reliably.
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/sched.h>
  17#include <linux/fs.h>
  18#include <linux/tty.h>
  19#include <linux/binfmts.h>
  20#include <linux/coredump.h>
  21#include <linux/security.h>
  22#include <linux/syscalls.h>
  23#include <linux/ptrace.h>
  24#include <linux/signal.h>
  25#include <linux/signalfd.h>
  26#include <linux/ratelimit.h>
  27#include <linux/tracehook.h>
  28#include <linux/capability.h>
  29#include <linux/freezer.h>
  30#include <linux/pid_namespace.h>
  31#include <linux/nsproxy.h>
  32#include <linux/user_namespace.h>
  33#include <linux/uprobes.h>
  34#include <linux/compat.h>
  35#include <linux/cn_proc.h>
  36#include <linux/compiler.h>
  37
  38#define CREATE_TRACE_POINTS
  39#include <trace/events/signal.h>
  40
  41#include <asm/param.h>
  42#include <linux/uaccess.h>
  43#include <asm/unistd.h>
  44#include <asm/siginfo.h>
  45#include <asm/cacheflush.h>
  46#include "audit.h"	/* audit_signal_info() */
  47
  48/*
  49 * SLAB caches for signal bits.
  50 */
  51
  52static struct kmem_cache *sigqueue_cachep;
  53
  54int print_fatal_signals __read_mostly;
  55
  56static void __user *sig_handler(struct task_struct *t, int sig)
  57{
  58	return t->sighand->action[sig - 1].sa.sa_handler;
  59}
  60
  61static int sig_handler_ignored(void __user *handler, int sig)
  62{
  63	/* Is it explicitly or implicitly ignored? */
  64	return handler == SIG_IGN ||
  65		(handler == SIG_DFL && sig_kernel_ignore(sig));
  66}
  67
  68static int sig_task_ignored(struct task_struct *t, int sig, bool force)
  69{
  70	void __user *handler;
  71
  72	handler = sig_handler(t, sig);
  73
  74	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  75			handler == SIG_DFL && !force)
  76		return 1;
  77
  78	return sig_handler_ignored(handler, sig);
  79}
  80
  81static int sig_ignored(struct task_struct *t, int sig, bool force)
  82{
  83	/*
  84	 * Blocked signals are never ignored, since the
  85	 * signal handler may change by the time it is
  86	 * unblocked.
  87	 */
  88	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
  89		return 0;
  90
  91	if (!sig_task_ignored(t, sig, force))
  92		return 0;
  93
  94	/*
  95	 * Tracers may want to know about even ignored signals.
  96	 */
  97	return !t->ptrace;
  98}
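/*
 * Editor's illustration (a sketch, not part of signal.c): the rule above
 * is visible from userspace. A blocked signal stays pending even while its
 * disposition is SIG_IGN, so it is still delivered if a handler is
 * installed before it is unblocked. Assumes a standard POSIX userland.
 */
#if 0
#include <stdio.h>
#include <signal.h>

static volatile sig_atomic_t got;

static void handler(int sig)
{
        got = 1;
}

int main(void)
{
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);

        signal(SIGUSR1, SIG_IGN);
        sigprocmask(SIG_BLOCK, &set, NULL);
        raise(SIGUSR1);                 /* blocked, so kept pending */

        signal(SIGUSR1, handler);       /* disposition changes while pending */
        sigprocmask(SIG_UNBLOCK, &set, NULL);
        printf("handler ran: %d\n", (int)got);  /* prints 1 */
        return 0;
}
#endif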
  99
 100/*
 101 * Re-calculate pending state from the set of locally pending
 102 * signals, globally pending signals, and blocked signals.
 103 */
 104static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 105{
 106	unsigned long ready;
 107	long i;
 108
 109	switch (_NSIG_WORDS) {
 110	default:
 111		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 112			ready |= signal->sig[i] &~ blocked->sig[i];
 113		break;
 114
 115	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 116		ready |= signal->sig[2] &~ blocked->sig[2];
 117		ready |= signal->sig[1] &~ blocked->sig[1];
 118		ready |= signal->sig[0] &~ blocked->sig[0];
 119		break;
 120
 121	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 122		ready |= signal->sig[0] &~ blocked->sig[0];
 123		break;
 124
 125	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 126	}
  127	return ready != 0;
 128}
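/*
 * Editor's illustration (a sketch, not part of signal.c): the switch above
 * is an unrolled word-wise "pending and not blocked" test. The same logic
 * over plain unsigned longs, with sketch_has_pending() and NWORDS being
 * names invented for this example:
 */
#if 0
#include <stdio.h>

#define NWORDS 2

static int sketch_has_pending(const unsigned long *sig,
                              const unsigned long *blocked)
{
        unsigned long ready = 0;
        int i;

        for (i = 0; i < NWORDS; i++)
                ready |= sig[i] & ~blocked[i];  /* pending minus blocked */
        return ready != 0;
}

int main(void)
{
        unsigned long sig[NWORDS]     = { 0x14UL, 0UL }; /* bits 2 and 4 */
        unsigned long blocked[NWORDS] = { 0x14UL, 0UL }; /* both blocked */

        printf("%d\n", sketch_has_pending(sig, blocked)); /* 0 */
        blocked[0] = 0x10UL;                              /* unblock bit 2 */
        printf("%d\n", sketch_has_pending(sig, blocked)); /* 1 */
        return 0;
}
#endif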
 129
 130#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 131
 132static int recalc_sigpending_tsk(struct task_struct *t)
 133{
 134	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
 135	    PENDING(&t->pending, &t->blocked) ||
 136	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 137		set_tsk_thread_flag(t, TIF_SIGPENDING);
 138		return 1;
 139	}
 140	/*
 141	 * We must never clear the flag in another thread, or in current
 142	 * when it's possible the current syscall is returning -ERESTART*.
  143	 * So we don't clear it here; only callers that know it is safe do so.
 144	 */
 145	return 0;
 146}
 147
 148/*
 149 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
  150 * This is superfluous when called on current; the wakeup is then a harmless no-op.
 151 */
 152void recalc_sigpending_and_wake(struct task_struct *t)
 153{
 154	if (recalc_sigpending_tsk(t))
 155		signal_wake_up(t, 0);
 156}
 157
 158void recalc_sigpending(void)
 159{
 160	if (!recalc_sigpending_tsk(current) && !freezing(current))
 161		clear_thread_flag(TIF_SIGPENDING);
 162
 163}
 164
 165/* Given the mask, find the first available signal that should be serviced. */
 166
 167#define SYNCHRONOUS_MASK \
 168	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 169	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 170
 171int next_signal(struct sigpending *pending, sigset_t *mask)
 172{
 173	unsigned long i, *s, *m, x;
 174	int sig = 0;
 175
 176	s = pending->signal.sig;
 177	m = mask->sig;
 178
 179	/*
 180	 * Handle the first word specially: it contains the
 181	 * synchronous signals that need to be dequeued first.
 182	 */
 183	x = *s &~ *m;
 184	if (x) {
 185		if (x & SYNCHRONOUS_MASK)
 186			x &= SYNCHRONOUS_MASK;
 187		sig = ffz(~x) + 1;
 188		return sig;
 189	}
 190
 191	switch (_NSIG_WORDS) {
 192	default:
 193		for (i = 1; i < _NSIG_WORDS; ++i) {
 194			x = *++s &~ *++m;
 195			if (!x)
 196				continue;
 197			sig = ffz(~x) + i*_NSIG_BPW + 1;
 198			break;
 199		}
 200		break;
 201
 202	case 2:
 203		x = s[1] &~ m[1];
 204		if (!x)
 205			break;
 206		sig = ffz(~x) + _NSIG_BPW + 1;
 207		break;
 208
 209	case 1:
 210		/* Nothing to do */
 211		break;
 212	}
 213
 214	return sig;
 215}
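/*
 * Editor's illustration (a sketch, not part of signal.c): the first-word
 * policy above means a pending synchronous (fault-style) signal is
 * dequeued ahead of any other low-numbered signal. A standalone sketch of
 * the word-0 step, using the GCC/Clang __builtin_ctzl() in place of
 * ffz(~x); signal numbers are the usual x86 ones and may differ per arch:
 */
#if 0
#include <stdio.h>
#include <signal.h>

#define BIT(sig)        (1UL << ((sig) - 1))
#define SYNC_MASK       (BIT(SIGSEGV) | BIT(SIGBUS) | BIT(SIGILL) | \
                         BIT(SIGTRAP) | BIT(SIGFPE) | BIT(SIGSYS))

static int first_word_next_signal(unsigned long pending, unsigned long mask)
{
        unsigned long x = pending & ~mask;

        if (!x)
                return 0;
        if (x & SYNC_MASK)              /* synchronous signals win */
                x &= SYNC_MASK;
        return __builtin_ctzl(x) + 1;   /* lowest set bit, 1-based */
}

int main(void)
{
        /* SIGHUP (1) and SIGSEGV (11) both pending: SIGSEGV is picked. */
        printf("%d\n", first_word_next_signal(BIT(SIGHUP) | BIT(SIGSEGV), 0));
        return 0;
}
#endif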
 216
 217static inline void print_dropped_signal(int sig)
 218{
 219	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 220
 221	if (!print_fatal_signals)
 222		return;
 223
 224	if (!__ratelimit(&ratelimit_state))
 225		return;
 226
 227	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 228				current->comm, current->pid, sig);
 229}
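/*
 * Editor's illustration (a sketch, not part of signal.c): the sigpending
 * accounting enforced here is what makes sigqueue(3) fail with EAGAIN once
 * RLIMIT_SIGPENDING worth of signals are queued for the user. The count
 * printed can vary if other signals are already queued for the same uid:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl = { .rlim_cur = 4, .rlim_max = 4 };
        union sigval v = { .sival_int = 0 };
        sigset_t set;
        int n = 0;

        setrlimit(RLIMIT_SIGPENDING, &rl);
        sigemptyset(&set);
        sigaddset(&set, SIGRTMIN);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* keep them pending */

        while (sigqueue(getpid(), SIGRTMIN, v) == 0)
                n++;
        printf("queued %d, then: %s\n", n, strerror(errno)); /* EAGAIN */
        return 0;
}
#endif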
 230
 231/**
 232 * task_set_jobctl_pending - set jobctl pending bits
 233 * @task: target task
 234 * @mask: pending bits to set
 235 *
  236 * Set @mask in @task->jobctl.  @mask must be a subset of
  237 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
  238 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
  239 * cleared.  If @task is already being killed or exiting, this function
  240 * becomes a no-op.
 241 *
 242 * CONTEXT:
 243 * Must be called with @task->sighand->siglock held.
 244 *
 245 * RETURNS:
 246 * %true if @mask is set, %false if made noop because @task was dying.
 247 */
 248bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 249{
 250	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 251			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 252	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 253
 254	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 255		return false;
 256
 257	if (mask & JOBCTL_STOP_SIGMASK)
 258		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 259
 260	task->jobctl |= mask;
 261	return true;
 262}
 263
 264/**
 265 * task_clear_jobctl_trapping - clear jobctl trapping bit
 266 * @task: target task
 267 *
 268 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 269 * Clear it and wake up the ptracer.  Note that we don't need any further
 270 * locking.  @task->siglock guarantees that @task->parent points to the
 271 * ptracer.
 272 *
 273 * CONTEXT:
 274 * Must be called with @task->sighand->siglock held.
 275 */
 276void task_clear_jobctl_trapping(struct task_struct *task)
 277{
 278	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 279		task->jobctl &= ~JOBCTL_TRAPPING;
 280		smp_mb();	/* advised by wake_up_bit() */
 281		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 282	}
 283}
 284
 285/**
 286 * task_clear_jobctl_pending - clear jobctl pending bits
 287 * @task: target task
 288 * @mask: pending bits to clear
 289 *
 290 * Clear @mask from @task->jobctl.  @mask must be subset of
 291 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 292 * STOP bits are cleared together.
 293 *
 294 * If clearing of @mask leaves no stop or trap pending, this function calls
 295 * task_clear_jobctl_trapping().
 296 *
 297 * CONTEXT:
 298 * Must be called with @task->sighand->siglock held.
 299 */
 300void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 301{
 302	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 303
 304	if (mask & JOBCTL_STOP_PENDING)
 305		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 306
 307	task->jobctl &= ~mask;
 308
 309	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 310		task_clear_jobctl_trapping(task);
 311}
 312
 313/**
 314 * task_participate_group_stop - participate in a group stop
 315 * @task: task participating in a group stop
 316 *
 317 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 318 * Group stop states are cleared and the group stop count is consumed if
 319 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 320 * stop, the appropriate %SIGNAL_* flags are set.
 321 *
 322 * CONTEXT:
 323 * Must be called with @task->sighand->siglock held.
 324 *
 325 * RETURNS:
 326 * %true if group stop completion should be notified to the parent, %false
 327 * otherwise.
 328 */
 329static bool task_participate_group_stop(struct task_struct *task)
 330{
 331	struct signal_struct *sig = task->signal;
 332	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 333
 334	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 335
 336	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 337
 338	if (!consume)
 339		return false;
 340
 341	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 342		sig->group_stop_count--;
 343
 344	/*
 345	 * Tell the caller to notify completion iff we are entering into a
 346	 * fresh group stop.  Read comment in do_signal_stop() for details.
 347	 */
 348	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 349		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 350		return true;
 351	}
 352	return false;
 353}
 354
 355/*
 356 * allocate a new signal queue record
 357 * - this may be called without locks if and only if t == current, otherwise an
 358 *   appropriate lock must be held to stop the target task from exiting
 359 */
 360static struct sigqueue *
 361__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 362{
 363	struct sigqueue *q = NULL;
 364	struct user_struct *user;
 365
 366	/*
 367	 * Protect access to @t credentials. This can go away when all
 368	 * callers hold rcu read lock.
 369	 */
 370	rcu_read_lock();
 371	user = get_uid(__task_cred(t)->user);
 372	atomic_inc(&user->sigpending);
 373	rcu_read_unlock();
 374
 375	if (override_rlimit ||
 376	    atomic_read(&user->sigpending) <=
 377			task_rlimit(t, RLIMIT_SIGPENDING)) {
 378		q = kmem_cache_alloc(sigqueue_cachep, flags);
 379	} else {
 380		print_dropped_signal(sig);
 381	}
 382
 383	if (unlikely(q == NULL)) {
 384		atomic_dec(&user->sigpending);
 385		free_uid(user);
 386	} else {
 387		INIT_LIST_HEAD(&q->list);
 388		q->flags = 0;
 389		q->user = user;
 390	}
 391
 392	return q;
 393}
 394
 395static void __sigqueue_free(struct sigqueue *q)
 396{
 397	if (q->flags & SIGQUEUE_PREALLOC)
 398		return;
 399	atomic_dec(&q->user->sigpending);
 400	free_uid(q->user);
 401	kmem_cache_free(sigqueue_cachep, q);
 402}
 403
 404void flush_sigqueue(struct sigpending *queue)
 405{
 406	struct sigqueue *q;
 407
 408	sigemptyset(&queue->signal);
 409	while (!list_empty(&queue->list)) {
  410		q = list_entry(queue->list.next, struct sigqueue, list);
 411		list_del_init(&q->list);
 412		__sigqueue_free(q);
 413	}
 414}
 415
 416/*
 417 * Flush all pending signals for this kthread.
 418 */
 419void flush_signals(struct task_struct *t)
 420{
 421	unsigned long flags;
 422
 423	spin_lock_irqsave(&t->sighand->siglock, flags);
 424	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 425	flush_sigqueue(&t->pending);
 426	flush_sigqueue(&t->signal->shared_pending);
 427	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 428}
 429
 430#ifdef CONFIG_POSIX_TIMERS
 431static void __flush_itimer_signals(struct sigpending *pending)
 432{
 433	sigset_t signal, retain;
 434	struct sigqueue *q, *n;
 435
 436	signal = pending->signal;
 437	sigemptyset(&retain);
 438
 439	list_for_each_entry_safe(q, n, &pending->list, list) {
 440		int sig = q->info.si_signo;
 441
 442		if (likely(q->info.si_code != SI_TIMER)) {
 443			sigaddset(&retain, sig);
 444		} else {
 445			sigdelset(&signal, sig);
 446			list_del_init(&q->list);
 447			__sigqueue_free(q);
 448		}
 449	}
 450
 451	sigorsets(&pending->signal, &signal, &retain);
 452}
 453
 454void flush_itimer_signals(void)
 455{
 456	struct task_struct *tsk = current;
 457	unsigned long flags;
 458
 459	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 460	__flush_itimer_signals(&tsk->pending);
 461	__flush_itimer_signals(&tsk->signal->shared_pending);
 462	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 463}
 464#endif
 465
 466void ignore_signals(struct task_struct *t)
 467{
 468	int i;
 469
 470	for (i = 0; i < _NSIG; ++i)
 471		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 472
 473	flush_signals(t);
 474}
 475
 476/*
 477 * Flush all handlers for a task.
 478 */
 479
 480void
 481flush_signal_handlers(struct task_struct *t, int force_default)
 482{
 483	int i;
 484	struct k_sigaction *ka = &t->sighand->action[0];
 485	for (i = _NSIG ; i != 0 ; i--) {
 486		if (force_default || ka->sa.sa_handler != SIG_IGN)
 487			ka->sa.sa_handler = SIG_DFL;
 488		ka->sa.sa_flags = 0;
 489#ifdef __ARCH_HAS_SA_RESTORER
 490		ka->sa.sa_restorer = NULL;
 491#endif
 492		sigemptyset(&ka->sa.sa_mask);
 493		ka++;
 494	}
 495}
 496
 497int unhandled_signal(struct task_struct *tsk, int sig)
 498{
 499	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 500	if (is_global_init(tsk))
 501		return 1;
 502	if (handler != SIG_IGN && handler != SIG_DFL)
 503		return 0;
 504	/* if ptraced, let the tracer determine */
 505	return !tsk->ptrace;
 506}
 507
 508static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 509{
 510	struct sigqueue *q, *first = NULL;
 511
 512	/*
 513	 * Collect the siginfo appropriate to this signal.  Check if
 514	 * there is another siginfo for the same signal.
  515	 */
 516	list_for_each_entry(q, &list->list, list) {
 517		if (q->info.si_signo == sig) {
 518			if (first)
 519				goto still_pending;
 520			first = q;
 521		}
 522	}
 523
 524	sigdelset(&list->signal, sig);
 525
 526	if (first) {
 527still_pending:
 528		list_del_init(&first->list);
 529		copy_siginfo(info, &first->info);
 530		__sigqueue_free(first);
 531	} else {
 532		/*
 533		 * Ok, it wasn't in the queue.  This must be
 534		 * a fast-pathed signal or we must have been
 535		 * out of queue space.  So zero out the info.
 536		 */
 537		info->si_signo = sig;
 538		info->si_errno = 0;
 539		info->si_code = SI_USER;
 540		info->si_pid = 0;
 541		info->si_uid = 0;
 542	}
 543}
 544
 545static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 546			siginfo_t *info)
 547{
 548	int sig = next_signal(pending, mask);
 549
 550	if (sig)
 551		collect_signal(sig, pending, info);
 552	return sig;
 553}
 554
 555/*
 556 * Dequeue a signal and return the element to the caller, which is
 557 * expected to free it.
 558 *
 559 * All callers have to hold the siglock.
 560 */
 561int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 562{
 563	int signr;
 564
 565	/* We only dequeue private signals from ourselves, we don't let
 566	 * signalfd steal them
 567	 */
 568	signr = __dequeue_signal(&tsk->pending, mask, info);
 569	if (!signr) {
 570		signr = __dequeue_signal(&tsk->signal->shared_pending,
 571					 mask, info);
 572#ifdef CONFIG_POSIX_TIMERS
 573		/*
 574		 * itimer signal ?
 575		 *
 576		 * itimers are process shared and we restart periodic
 577		 * itimers in the signal delivery path to prevent DoS
 578		 * attacks in the high resolution timer case. This is
 579		 * compliant with the old way of self-restarting
 580		 * itimers, as the SIGALRM is a legacy signal and only
  581		 * queued once. Changing the restart behaviour to
  582		 * restart the timer in the signal dequeue path also
  583		 * reduces the timer noise on heavily loaded !highres
  584		 * systems.
 585		 */
 586		if (unlikely(signr == SIGALRM)) {
 587			struct hrtimer *tmr = &tsk->signal->real_timer;
 588
 589			if (!hrtimer_is_queued(tmr) &&
 590			    tsk->signal->it_real_incr != 0) {
 591				hrtimer_forward(tmr, tmr->base->get_time(),
 592						tsk->signal->it_real_incr);
 593				hrtimer_restart(tmr);
 594			}
 595		}
 596#endif
 597	}
 598
 599	recalc_sigpending();
 600	if (!signr)
 601		return 0;
 602
 603	if (unlikely(sig_kernel_stop(signr))) {
 604		/*
 605		 * Set a marker that we have dequeued a stop signal.  Our
 606		 * caller might release the siglock and then the pending
 607		 * stop signal it is about to process is no longer in the
 608		 * pending bitmasks, but must still be cleared by a SIGCONT
 609		 * (and overruled by a SIGKILL).  So those cases clear this
 610		 * shared flag after we've set it.  Note that this flag may
 611		 * remain set after the signal we return is ignored or
 612		 * handled.  That doesn't matter because its only purpose
 613		 * is to alert stop-signal processing code when another
 614		 * processor has come along and cleared the flag.
 615		 */
 616		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 617	}
 618#ifdef CONFIG_POSIX_TIMERS
 619	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 620		/*
 621		 * Release the siglock to ensure proper locking order
 622		 * of timer locks outside of siglocks.  Note, we leave
 623		 * irqs disabled here, since the posix-timers code is
 624		 * about to disable them again anyway.
 625		 */
 626		spin_unlock(&tsk->sighand->siglock);
 627		do_schedule_next_timer(info);
 628		spin_lock(&tsk->sighand->siglock);
 629	}
 630#endif
 631	return signr;
 632}
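/*
 * Editor's illustration (a sketch, not part of signal.c): the itimer
 * rearming described above is why a periodic ITIMER_REAL keeps firing
 * even though SIGALRM, as a legacy signal, is never queued more than
 * once at a time:
 */
#if 0
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/time.h>

static volatile sig_atomic_t ticks;

static void on_alrm(int sig)
{
        ticks++;
}

int main(void)
{
        struct itimerval it = {
                .it_interval = { .tv_sec = 0, .tv_usec = 100000 },
                .it_value    = { .tv_sec = 0, .tv_usec = 100000 },
        };

        signal(SIGALRM, on_alrm);
        setitimer(ITIMER_REAL, &it, NULL);
        while (ticks < 5)
                pause();        /* one wakeup per dequeued SIGALRM */
        printf("got %d alarms\n", (int)ticks);
        return 0;
}
#endif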
 633
 634/*
  635 * Tell a process that it has a new active signal.
 636 *
 637 * NOTE! we rely on the previous spin_lock to
 638 * lock interrupts for us! We can only be called with
 639 * "siglock" held, and the local interrupt must
 640 * have been disabled when that got acquired!
 641 *
 642 * No need to set need_resched since signal event passing
 643 * goes through ->blocked
 644 */
 645void signal_wake_up_state(struct task_struct *t, unsigned int state)
 646{
 647	set_tsk_thread_flag(t, TIF_SIGPENDING);
 648	/*
 649	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 650	 * case. We don't check t->state here because there is a race with it
  651	 * executing on another processor and just now entering the stopped state.
 652	 * By using wake_up_state, we ensure the process will wake up and
 653	 * handle its death signal.
 654	 */
 655	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 656		kick_process(t);
 657}
 658
 659/*
 660 * Remove signals in mask from the pending set and queue.
 661 * Returns 1 if any signals were found.
 662 *
 663 * All callers must be holding the siglock.
 664 */
 665static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 666{
 667	struct sigqueue *q, *n;
 668	sigset_t m;
 669
 670	sigandsets(&m, mask, &s->signal);
 671	if (sigisemptyset(&m))
 672		return 0;
 673
 674	sigandnsets(&s->signal, &s->signal, mask);
 675	list_for_each_entry_safe(q, n, &s->list, list) {
 676		if (sigismember(mask, q->info.si_signo)) {
 677			list_del_init(&q->list);
 678			__sigqueue_free(q);
 679		}
 680	}
 681	return 1;
 682}
 683
 684static inline int is_si_special(const struct siginfo *info)
 685{
 686	return info <= SEND_SIG_FORCED;
 687}
 688
 689static inline bool si_fromuser(const struct siginfo *info)
 690{
 691	return info == SEND_SIG_NOINFO ||
 692		(!is_si_special(info) && SI_FROMUSER(info));
 693}
 694
 695/*
 696 * called with RCU read lock from check_kill_permission()
 697 */
 698static int kill_ok_by_cred(struct task_struct *t)
 699{
 700	const struct cred *cred = current_cred();
 701	const struct cred *tcred = __task_cred(t);
 702
 703	if (uid_eq(cred->euid, tcred->suid) ||
 704	    uid_eq(cred->euid, tcred->uid)  ||
 705	    uid_eq(cred->uid,  tcred->suid) ||
 706	    uid_eq(cred->uid,  tcred->uid))
 707		return 1;
 708
 709	if (ns_capable(tcred->user_ns, CAP_KILL))
 710		return 1;
 711
 712	return 0;
 713}
 714
 715/*
 716 * Bad permissions for sending the signal
 717 * - the caller must hold the RCU read lock
 718 */
 719static int check_kill_permission(int sig, struct siginfo *info,
 720				 struct task_struct *t)
 721{
 722	struct pid *sid;
 723	int error;
 724
 725	if (!valid_signal(sig))
 726		return -EINVAL;
 727
 728	if (!si_fromuser(info))
 729		return 0;
 730
 731	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 732	if (error)
 733		return error;
 734
 735	if (!same_thread_group(current, t) &&
 736	    !kill_ok_by_cred(t)) {
 737		switch (sig) {
 738		case SIGCONT:
 739			sid = task_session(t);
 740			/*
 741			 * We don't return the error if sid == NULL. The
 742			 * task was unhashed, the caller must notice this.
 743			 */
 744			if (!sid || sid == task_session(current))
 745				break;
 746		default:
 747			return -EPERM;
 748		}
 749	}
 750
 751	return security_task_kill(t, info, sig, 0);
 752}
 753
 754/**
 755 * ptrace_trap_notify - schedule trap to notify ptracer
 756 * @t: tracee wanting to notify tracer
 757 *
 758 * This function schedules sticky ptrace trap which is cleared on the next
 759 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 760 * ptracer.
 761 *
 762 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 763 * ptracer is listening for events, tracee is woken up so that it can
 764 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 765 * eventually taken without returning to userland after the existing traps
 766 * are finished by PTRACE_CONT.
 767 *
 768 * CONTEXT:
 769 * Must be called with @task->sighand->siglock held.
 770 */
 771static void ptrace_trap_notify(struct task_struct *t)
 772{
 773	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 774	assert_spin_locked(&t->sighand->siglock);
 775
 776	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 777	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 778}
 779
 780/*
 781 * Handle magic process-wide effects of stop/continue signals. Unlike
 782 * the signal actions, these happen immediately at signal-generation
 783 * time regardless of blocking, ignoring, or handling.  This does the
 784 * actual continuing for SIGCONT, but not the actual stopping for stop
 785 * signals. The process stop is done as a signal action for SIG_DFL.
 786 *
 787 * Returns true if the signal should be actually delivered, otherwise
 788 * it should be dropped.
 789 */
 790static bool prepare_signal(int sig, struct task_struct *p, bool force)
 791{
 792	struct signal_struct *signal = p->signal;
 793	struct task_struct *t;
 794	sigset_t flush;
 795
 796	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
 797		if (!(signal->flags & SIGNAL_GROUP_EXIT))
 798			return sig == SIGKILL;
 799		/*
 800		 * The process is in the middle of dying, nothing to do.
 801		 */
 802	} else if (sig_kernel_stop(sig)) {
 803		/*
 804		 * This is a stop signal.  Remove SIGCONT from all queues.
 805		 */
 806		siginitset(&flush, sigmask(SIGCONT));
 807		flush_sigqueue_mask(&flush, &signal->shared_pending);
 808		for_each_thread(p, t)
 809			flush_sigqueue_mask(&flush, &t->pending);
 810	} else if (sig == SIGCONT) {
 811		unsigned int why;
 812		/*
 813		 * Remove all stop signals from all queues, wake all threads.
 814		 */
 815		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 816		flush_sigqueue_mask(&flush, &signal->shared_pending);
 817		for_each_thread(p, t) {
 818			flush_sigqueue_mask(&flush, &t->pending);
 819			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 820			if (likely(!(t->ptrace & PT_SEIZED)))
 821				wake_up_state(t, __TASK_STOPPED);
 822			else
 823				ptrace_trap_notify(t);
 824		}
 825
 826		/*
 827		 * Notify the parent with CLD_CONTINUED if we were stopped.
 828		 *
 829		 * If we were in the middle of a group stop, we pretend it
 830		 * was already finished, and then continued. Since SIGCHLD
 831		 * doesn't queue we report only CLD_STOPPED, as if the next
 832		 * CLD_CONTINUED was dropped.
 833		 */
 834		why = 0;
 835		if (signal->flags & SIGNAL_STOP_STOPPED)
 836			why |= SIGNAL_CLD_CONTINUED;
 837		else if (signal->group_stop_count)
 838			why |= SIGNAL_CLD_STOPPED;
 839
 840		if (why) {
 841			/*
 842			 * The first thread which returns from do_signal_stop()
 843			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 844			 * notify its parent. See get_signal_to_deliver().
 845			 */
 846			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 847			signal->group_stop_count = 0;
 848			signal->group_exit_code = 0;
 849		}
 850	}
 851
 852	return !sig_ignored(p, sig, force);
 853}
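/*
 * Editor's illustration (a sketch, not part of signal.c): the stop and
 * continue side effects handled above are what a parent observes through
 * waitpid() with WUNTRACED and WCONTINUED:
 */
#if 0
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
        int status;
        pid_t pid = fork();

        if (pid == 0)
                for (;;)
                        pause();

        kill(pid, SIGSTOP);
        waitpid(pid, &status, WUNTRACED);
        printf("stopped: %d\n", WIFSTOPPED(status));     /* 1 */

        kill(pid, SIGCONT);     /* clears pending stops, wakes the child */
        waitpid(pid, &status, WCONTINUED);
        printf("continued: %d\n", WIFCONTINUED(status)); /* 1 */

        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);
        return 0;
}
#endif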
 854
 855/*
 856 * Test if P wants to take SIG.  After we've checked all threads with this,
 857 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 858 * blocking SIG were ruled out because they are not running and already
 859 * have pending signals.  Such threads will dequeue from the shared queue
 860 * as soon as they're available, so putting the signal on the shared queue
 861 * will be equivalent to sending it to one such thread.
 862 */
 863static inline int wants_signal(int sig, struct task_struct *p)
 864{
 865	if (sigismember(&p->blocked, sig))
 866		return 0;
 867	if (p->flags & PF_EXITING)
 868		return 0;
 869	if (sig == SIGKILL)
 870		return 1;
 871	if (task_is_stopped_or_traced(p))
 872		return 0;
 873	return task_curr(p) || !signal_pending(p);
 874}
 875
 876static void complete_signal(int sig, struct task_struct *p, int group)
 877{
 878	struct signal_struct *signal = p->signal;
 879	struct task_struct *t;
 880
 881	/*
 882	 * Now find a thread we can wake up to take the signal off the queue.
 883	 *
 884	 * If the main thread wants the signal, it gets first crack.
 885	 * Probably the least surprising to the average bear.
 886	 */
 887	if (wants_signal(sig, p))
 888		t = p;
 889	else if (!group || thread_group_empty(p))
 890		/*
 891		 * There is just one thread and it does not need to be woken.
 892		 * It will dequeue unblocked signals before it runs again.
 893		 */
 894		return;
 895	else {
 896		/*
 897		 * Otherwise try to find a suitable thread.
 898		 */
 899		t = signal->curr_target;
 900		while (!wants_signal(sig, t)) {
 901			t = next_thread(t);
 902			if (t == signal->curr_target)
 903				/*
 904				 * No thread needs to be woken.
 905				 * Any eligible threads will see
 906				 * the signal in the queue soon.
 907				 */
 908				return;
 909		}
 910		signal->curr_target = t;
 911	}
 912
 913	/*
 914	 * Found a killable thread.  If the signal will be fatal,
 915	 * then start taking the whole group down immediately.
 916	 */
 917	if (sig_fatal(p, sig) &&
 918	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
 919	    !sigismember(&t->real_blocked, sig) &&
 920	    (sig == SIGKILL || !t->ptrace)) {
 921		/*
 922		 * This signal will be fatal to the whole group.
 923		 */
 924		if (!sig_kernel_coredump(sig)) {
 925			/*
 926			 * Start a group exit and wake everybody up.
 927			 * This way we don't have other threads
 928			 * running and doing things after a slower
 929			 * thread has the fatal signal pending.
 930			 */
 931			signal->flags = SIGNAL_GROUP_EXIT;
 932			signal->group_exit_code = sig;
 933			signal->group_stop_count = 0;
 934			t = p;
 935			do {
 936				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 937				sigaddset(&t->pending.signal, SIGKILL);
 938				signal_wake_up(t, 1);
 939			} while_each_thread(p, t);
 940			return;
 941		}
 942	}
 943
 944	/*
 945	 * The signal is already in the shared-pending queue.
 946	 * Tell the chosen thread to wake up and dequeue it.
 947	 */
 948	signal_wake_up(t, sig == SIGKILL);
 949	return;
 950}
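/*
 * Editor's illustration (a sketch, not part of signal.c): from userspace,
 * the thread-selection logic above is why a process-directed signal ends
 * up with a thread that can accept it. In the common sigwait() idiom,
 * every thread blocks the signal and one dedicated thread dequeues it
 * from the shared queue:
 */
#if 0
#include <stdio.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>

static void *worker(void *arg)
{
        sigset_t set;
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigwait(&set, &sig);            /* dequeues the shared signal */
        printf("worker got signal %d\n", sig);
        return NULL;
}

int main(void)
{
        pthread_t t;
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &set, NULL); /* inherited by worker */

        pthread_create(&t, NULL, worker, NULL);
        sleep(1);                       /* crude: let worker reach sigwait */
        kill(getpid(), SIGUSR1);        /* process-directed */
        pthread_join(t, NULL);
        return 0;
}
#endif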
 951
 952static inline int legacy_queue(struct sigpending *signals, int sig)
 953{
 954	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 955}
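/*
 * Editor's illustration (a sketch, not part of signal.c): legacy_queue()
 * is why classic signals coalesce while rt signals accumulate. With both
 * blocked, three SIGUSR1 collapse into one delivery but three SIGRTMIN
 * are delivered three times:
 */
#if 0
#include <stdio.h>
#include <signal.h>

static volatile sig_atomic_t nusr1, nrt;

static void on_usr1(int sig) { nusr1++; }
static void on_rt(int sig)   { nrt++; }

int main(void)
{
        sigset_t set;
        int i;

        signal(SIGUSR1, on_usr1);
        signal(SIGRTMIN, on_rt);

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigaddset(&set, SIGRTMIN);
        sigprocmask(SIG_BLOCK, &set, NULL);

        for (i = 0; i < 3; i++) {
                raise(SIGUSR1);
                raise(SIGRTMIN);
        }
        sigprocmask(SIG_UNBLOCK, &set, NULL);   /* pending ones fire now */
        printf("SIGUSR1 %d times, SIGRTMIN %d times\n",
               (int)nusr1, (int)nrt);           /* typically 1 and 3 */
        return 0;
}
#endif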
 956
 957#ifdef CONFIG_USER_NS
 958static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
 959{
 960	if (current_user_ns() == task_cred_xxx(t, user_ns))
 961		return;
 962
 963	if (SI_FROMKERNEL(info))
 964		return;
 965
 966	rcu_read_lock();
 967	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
 968					make_kuid(current_user_ns(), info->si_uid));
 969	rcu_read_unlock();
 970}
 971#else
 972static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
 973{
 974	return;
 975}
 976#endif
 977
 978static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 979			int group, int from_ancestor_ns)
 980{
 981	struct sigpending *pending;
 982	struct sigqueue *q;
 983	int override_rlimit;
 984	int ret = 0, result;
 985
 986	assert_spin_locked(&t->sighand->siglock);
 987
 988	result = TRACE_SIGNAL_IGNORED;
 989	if (!prepare_signal(sig, t,
 990			from_ancestor_ns || (info == SEND_SIG_FORCED)))
 991		goto ret;
 992
 993	pending = group ? &t->signal->shared_pending : &t->pending;
 994	/*
 995	 * Short-circuit ignored signals and support queuing
 996	 * exactly one non-rt signal, so that we can get more
 997	 * detailed information about the cause of the signal.
 998	 */
 999	result = TRACE_SIGNAL_ALREADY_PENDING;
1000	if (legacy_queue(pending, sig))
1001		goto ret;
1002
1003	result = TRACE_SIGNAL_DELIVERED;
1004	/*
1005	 * fast-pathed signals for kernel-internal things like SIGSTOP
1006	 * or SIGKILL.
1007	 */
1008	if (info == SEND_SIG_FORCED)
1009		goto out_set;
1010
1011	/*
1012	 * Real-time signals must be queued if sent by sigqueue, or
1013	 * some other real-time mechanism.  It is implementation
1014	 * defined whether kill() does so.  We attempt to do so, on
1015	 * the principle of least surprise, but since kill is not
1016	 * allowed to fail with EAGAIN when low on memory we just
1017	 * make sure at least one signal gets delivered and don't
1018	 * pass on the info struct.
1019	 */
1020	if (sig < SIGRTMIN)
1021		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1022	else
1023		override_rlimit = 0;
1024
1025	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1026		override_rlimit);
1027	if (q) {
1028		list_add_tail(&q->list, &pending->list);
1029		switch ((unsigned long) info) {
1030		case (unsigned long) SEND_SIG_NOINFO:
1031			q->info.si_signo = sig;
1032			q->info.si_errno = 0;
1033			q->info.si_code = SI_USER;
1034			q->info.si_pid = task_tgid_nr_ns(current,
1035							task_active_pid_ns(t));
1036			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1037			break;
1038		case (unsigned long) SEND_SIG_PRIV:
1039			q->info.si_signo = sig;
1040			q->info.si_errno = 0;
1041			q->info.si_code = SI_KERNEL;
1042			q->info.si_pid = 0;
1043			q->info.si_uid = 0;
1044			break;
1045		default:
1046			copy_siginfo(&q->info, info);
1047			if (from_ancestor_ns)
1048				q->info.si_pid = 0;
1049			break;
1050		}
1051
1052		userns_fixup_signal_uid(&q->info, t);
1053
1054	} else if (!is_si_special(info)) {
1055		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1056			/*
1057			 * Queue overflow, abort.  We may abort if the
1058			 * signal was rt and sent by user using something
1059			 * other than kill().
1060			 */
1061			result = TRACE_SIGNAL_OVERFLOW_FAIL;
1062			ret = -EAGAIN;
1063			goto ret;
1064		} else {
1065			/*
1066			 * This is a silent loss of information.  We still
1067			 * send the signal, but the *info bits are lost.
1068			 */
1069			result = TRACE_SIGNAL_LOSE_INFO;
1070		}
1071	}
1072
1073out_set:
1074	signalfd_notify(t, sig);
1075	sigaddset(&pending->signal, sig);
1076	complete_signal(sig, t, group);
1077ret:
1078	trace_signal_generate(sig, info, t, group, result);
1079	return ret;
1080}
1081
1082static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1083			int group)
1084{
1085	int from_ancestor_ns = 0;
1086
1087#ifdef CONFIG_PID_NS
1088	from_ancestor_ns = si_fromuser(info) &&
1089			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1090#endif
1091
1092	return __send_signal(sig, info, t, group, from_ancestor_ns);
1093}
1094
1095static void print_fatal_signal(int signr)
1096{
1097	struct pt_regs *regs = signal_pt_regs();
1098	pr_info("potentially unexpected fatal signal %d.\n", signr);
1099
1100#if defined(__i386__) && !defined(__arch_um__)
1101	pr_info("code at %08lx: ", regs->ip);
1102	{
1103		int i;
1104		for (i = 0; i < 16; i++) {
1105			unsigned char insn;
1106
1107			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1108				break;
1109			pr_cont("%02x ", insn);
1110		}
1111	}
1112	pr_cont("\n");
1113#endif
1114	preempt_disable();
1115	show_regs(regs);
1116	preempt_enable();
1117}
1118
1119static int __init setup_print_fatal_signals(char *str)
1120{
 1121	get_option(&str, &print_fatal_signals);
1122
1123	return 1;
1124}
1125
1126__setup("print-fatal-signals=", setup_print_fatal_signals);
1127
1128int
1129__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1130{
1131	return send_signal(sig, info, p, 1);
1132}
1133
1134static int
1135specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1136{
1137	return send_signal(sig, info, t, 0);
1138}
1139
1140int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1141			bool group)
1142{
1143	unsigned long flags;
1144	int ret = -ESRCH;
1145
1146	if (lock_task_sighand(p, &flags)) {
1147		ret = send_signal(sig, info, p, group);
1148		unlock_task_sighand(p, &flags);
1149	}
1150
1151	return ret;
1152}
1153
1154/*
1155 * Force a signal that the process can't ignore: if necessary
1156 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1157 *
1158 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1159 * since we do not want to have a signal handler that was blocked
1160 * be invoked when user space had explicitly blocked it.
1161 *
1162 * We don't want to have recursive SIGSEGV's etc, for example,
1163 * that is why we also clear SIGNAL_UNKILLABLE.
1164 */
1165int
1166force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1167{
1168	unsigned long int flags;
1169	int ret, blocked, ignored;
1170	struct k_sigaction *action;
1171
1172	spin_lock_irqsave(&t->sighand->siglock, flags);
1173	action = &t->sighand->action[sig-1];
1174	ignored = action->sa.sa_handler == SIG_IGN;
1175	blocked = sigismember(&t->blocked, sig);
1176	if (blocked || ignored) {
1177		action->sa.sa_handler = SIG_DFL;
1178		if (blocked) {
1179			sigdelset(&t->blocked, sig);
1180			recalc_sigpending_and_wake(t);
1181		}
1182	}
1183	if (action->sa.sa_handler == SIG_DFL)
1184		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1185	ret = specific_send_sig_info(sig, info, t);
1186	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1187
1188	return ret;
1189}
1190
1191/*
1192 * Nuke all other threads in the group.
1193 */
1194int zap_other_threads(struct task_struct *p)
1195{
1196	struct task_struct *t = p;
1197	int count = 0;
1198
1199	p->signal->group_stop_count = 0;
1200
1201	while_each_thread(p, t) {
1202		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1203		count++;
1204
1205		/* Don't bother with already dead threads */
1206		if (t->exit_state)
1207			continue;
1208		sigaddset(&t->pending.signal, SIGKILL);
1209		signal_wake_up(t, 1);
1210	}
1211
1212	return count;
1213}
1214
1215struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1216					   unsigned long *flags)
1217{
1218	struct sighand_struct *sighand;
1219
1220	for (;;) {
1221		/*
1222		 * Disable interrupts early to avoid deadlocks.
1223		 * See rcu_read_unlock() comment header for details.
1224		 */
1225		local_irq_save(*flags);
1226		rcu_read_lock();
1227		sighand = rcu_dereference(tsk->sighand);
1228		if (unlikely(sighand == NULL)) {
1229			rcu_read_unlock();
1230			local_irq_restore(*flags);
1231			break;
1232		}
1233		/*
1234		 * This sighand can be already freed and even reused, but
1235		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
1236		 * initializes ->siglock: this slab can't go away, it has
1237		 * the same object type, ->siglock can't be reinitialized.
1238		 *
1239		 * We need to ensure that tsk->sighand is still the same
1240		 * after we take the lock, we can race with de_thread() or
1241		 * __exit_signal(). In the latter case the next iteration
1242		 * must see ->sighand == NULL.
1243		 */
1244		spin_lock(&sighand->siglock);
1245		if (likely(sighand == tsk->sighand)) {
1246			rcu_read_unlock();
1247			break;
1248		}
1249		spin_unlock(&sighand->siglock);
1250		rcu_read_unlock();
1251		local_irq_restore(*flags);
1252	}
1253
1254	return sighand;
1255}
1256
1257/*
1258 * send signal info to all the members of a group
1259 */
1260int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1261{
1262	int ret;
1263
1264	rcu_read_lock();
1265	ret = check_kill_permission(sig, info, p);
1266	rcu_read_unlock();
1267
1268	if (!ret && sig)
1269		ret = do_send_sig_info(sig, info, p, true);
1270
1271	return ret;
1272}
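/*
 * Editor's illustration (a sketch, not part of signal.c): the "sig" test
 * above is what makes the classic null-signal probe work — kill(pid, 0)
 * runs the existence and permission checks without sending anything:
 */
#if 0
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>

static const char *probe(pid_t pid)
{
        if (kill(pid, 0) == 0)
                return "exists and is signalable";
        return errno == EPERM ? "exists, but no permission" :
               errno == ESRCH ? "no such process" : "error";
}

int main(void)
{
        printf("pid 1: %s\n", probe(1)); /* EPERM unless privileged */
        return 0;
}
#endif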
1273
1274/*
1275 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1276 * control characters do (^C, ^Z etc)
1277 * - the caller must hold at least a readlock on tasklist_lock
1278 */
1279int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1280{
1281	struct task_struct *p = NULL;
1282	int retval, success;
1283
1284	success = 0;
1285	retval = -ESRCH;
1286	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1287		int err = group_send_sig_info(sig, info, p);
1288		success |= !err;
1289		retval = err;
1290	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1291	return success ? 0 : retval;
1292}
1293
1294int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1295{
1296	int error = -ESRCH;
1297	struct task_struct *p;
1298
1299	for (;;) {
1300		rcu_read_lock();
1301		p = pid_task(pid, PIDTYPE_PID);
1302		if (p)
1303			error = group_send_sig_info(sig, info, p);
1304		rcu_read_unlock();
1305		if (likely(!p || error != -ESRCH))
1306			return error;
1307
1308		/*
1309		 * The task was unhashed in between, try again.  If it
 1310		 * is dead, pid_task() will return NULL; if we race with
1311		 * de_thread() it will find the new leader.
1312		 */
1313	}
1314}
1315
1316int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1317{
1318	int error;
1319	rcu_read_lock();
1320	error = kill_pid_info(sig, info, find_vpid(pid));
1321	rcu_read_unlock();
1322	return error;
1323}
1324
1325static int kill_as_cred_perm(const struct cred *cred,
1326			     struct task_struct *target)
1327{
1328	const struct cred *pcred = __task_cred(target);
1329	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1330	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
1331		return 0;
1332	return 1;
1333}
1334
1335/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1336int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1337			 const struct cred *cred, u32 secid)
1338{
1339	int ret = -EINVAL;
1340	struct task_struct *p;
1341	unsigned long flags;
1342
1343	if (!valid_signal(sig))
1344		return ret;
1345
1346	rcu_read_lock();
1347	p = pid_task(pid, PIDTYPE_PID);
1348	if (!p) {
1349		ret = -ESRCH;
1350		goto out_unlock;
1351	}
1352	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1353		ret = -EPERM;
1354		goto out_unlock;
1355	}
1356	ret = security_task_kill(p, info, sig, secid);
1357	if (ret)
1358		goto out_unlock;
1359
1360	if (sig) {
1361		if (lock_task_sighand(p, &flags)) {
1362			ret = __send_signal(sig, info, p, 1, 0);
1363			unlock_task_sighand(p, &flags);
1364		} else
1365			ret = -ESRCH;
1366	}
1367out_unlock:
1368	rcu_read_unlock();
1369	return ret;
1370}
1371EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1372
1373/*
1374 * kill_something_info() interprets pid in interesting ways just like kill(2).
1375 *
1376 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1377 * is probably wrong.  Should make it like BSD or SYSV.
1378 */
1379
1380static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1381{
1382	int ret;
1383
1384	if (pid > 0) {
1385		rcu_read_lock();
1386		ret = kill_pid_info(sig, info, find_vpid(pid));
1387		rcu_read_unlock();
1388		return ret;
1389	}
1390
1391	read_lock(&tasklist_lock);
1392	if (pid != -1) {
1393		ret = __kill_pgrp_info(sig, info,
1394				pid ? find_vpid(-pid) : task_pgrp(current));
1395	} else {
1396		int retval = 0, count = 0;
1397		struct task_struct * p;
1398
1399		for_each_process(p) {
1400			if (task_pid_vnr(p) > 1 &&
1401					!same_thread_group(p, current)) {
1402				int err = group_send_sig_info(sig, info, p);
1403				++count;
1404				if (err != -EPERM)
1405					retval = err;
1406			}
1407		}
1408		ret = count ? retval : -ESRCH;
1409	}
1410	read_unlock(&tasklist_lock);
1411
1412	return ret;
1413}
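/*
 * Editor's illustration (a sketch, not part of signal.c): the four pid
 * encodings dispatched above, exercised harmlessly with the null signal:
 */
#if 0
#include <signal.h>
#include <unistd.h>

int main(void)
{
        pid_t pgrp = getpgrp();

        kill(getpid(), 0);      /* pid > 0: one process */
        kill(0, 0);             /* pid == 0: caller's process group */
        kill(-pgrp, 0);         /* pid < -1: process group -pid */
        kill(-1, 0);            /* pid == -1: everything we may signal */
        return 0;
}
#endif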
1414
1415/*
1416 * These are for backward compatibility with the rest of the kernel source.
1417 */
1418
1419int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1420{
1421	/*
1422	 * Make sure legacy kernel users don't send in bad values
1423	 * (normal paths check this in check_kill_permission).
1424	 */
1425	if (!valid_signal(sig))
1426		return -EINVAL;
1427
1428	return do_send_sig_info(sig, info, p, false);
1429}
1430
1431#define __si_special(priv) \
1432	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1433
1434int
1435send_sig(int sig, struct task_struct *p, int priv)
1436{
1437	return send_sig_info(sig, __si_special(priv), p);
1438}
1439
1440void
1441force_sig(int sig, struct task_struct *p)
1442{
1443	force_sig_info(sig, SEND_SIG_PRIV, p);
1444}
1445
1446/*
1447 * When things go south during signal handling, we
1448 * will force a SIGSEGV. And if the signal that caused
1449 * the problem was already a SIGSEGV, we'll want to
 1450 * make sure we don't even try to deliver the signal.
1451 */
1452int
1453force_sigsegv(int sig, struct task_struct *p)
1454{
1455	if (sig == SIGSEGV) {
1456		unsigned long flags;
1457		spin_lock_irqsave(&p->sighand->siglock, flags);
1458		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1459		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1460	}
1461	force_sig(SIGSEGV, p);
1462	return 0;
1463}
1464
1465int kill_pgrp(struct pid *pid, int sig, int priv)
1466{
1467	int ret;
1468
1469	read_lock(&tasklist_lock);
1470	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1471	read_unlock(&tasklist_lock);
1472
1473	return ret;
1474}
1475EXPORT_SYMBOL(kill_pgrp);
1476
1477int kill_pid(struct pid *pid, int sig, int priv)
1478{
1479	return kill_pid_info(sig, __si_special(priv), pid);
1480}
1481EXPORT_SYMBOL(kill_pid);
1482
1483/*
1484 * These functions support sending signals using preallocated sigqueue
1485 * structures.  This is needed "because realtime applications cannot
1486 * afford to lose notifications of asynchronous events, like timer
1487 * expirations or I/O completions".  In the case of POSIX Timers
1488 * we allocate the sigqueue structure from the timer_create.  If this
1489 * allocation fails we are able to report the failure to the application
1490 * with an EAGAIN error.
1491 */
1492struct sigqueue *sigqueue_alloc(void)
1493{
1494	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1495
1496	if (q)
1497		q->flags |= SIGQUEUE_PREALLOC;
1498
1499	return q;
1500}
1501
1502void sigqueue_free(struct sigqueue *q)
1503{
1504	unsigned long flags;
1505	spinlock_t *lock = &current->sighand->siglock;
1506
1507	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1508	/*
1509	 * We must hold ->siglock while testing q->list
1510	 * to serialize with collect_signal() or with
1511	 * __exit_signal()->flush_sigqueue().
1512	 */
1513	spin_lock_irqsave(lock, flags);
1514	q->flags &= ~SIGQUEUE_PREALLOC;
1515	/*
1516	 * If it is queued it will be freed when dequeued,
1517	 * like the "regular" sigqueue.
1518	 */
1519	if (!list_empty(&q->list))
1520		q = NULL;
1521	spin_unlock_irqrestore(lock, flags);
1522
1523	if (q)
1524		__sigqueue_free(q);
1525}
1526
1527int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1528{
1529	int sig = q->info.si_signo;
1530	struct sigpending *pending;
1531	unsigned long flags;
1532	int ret, result;
1533
1534	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1535
1536	ret = -1;
1537	if (!likely(lock_task_sighand(t, &flags)))
1538		goto ret;
1539
1540	ret = 1; /* the signal is ignored */
1541	result = TRACE_SIGNAL_IGNORED;
1542	if (!prepare_signal(sig, t, false))
1543		goto out;
1544
1545	ret = 0;
1546	if (unlikely(!list_empty(&q->list))) {
1547		/*
 1548		 * If an SI_TIMER entry is already queued, just increment
1549		 * the overrun count.
1550		 */
1551		BUG_ON(q->info.si_code != SI_TIMER);
1552		q->info.si_overrun++;
1553		result = TRACE_SIGNAL_ALREADY_PENDING;
1554		goto out;
1555	}
1556	q->info.si_overrun = 0;
1557
1558	signalfd_notify(t, sig);
1559	pending = group ? &t->signal->shared_pending : &t->pending;
1560	list_add_tail(&q->list, &pending->list);
1561	sigaddset(&pending->signal, sig);
1562	complete_signal(sig, t, group);
1563	result = TRACE_SIGNAL_DELIVERED;
1564out:
1565	trace_signal_generate(sig, &q->info, t, group, result);
1566	unlock_task_sighand(t, &flags);
1567ret:
1568	return ret;
1569}
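/*
 * Editor's illustration (a sketch, not part of signal.c): the
 * preallocated sigqueue and the si_overrun bump above are what userspace
 * sees as POSIX timer overruns. With the signal blocked, coalesced
 * expiries are reported by timer_getoverrun(2); may need -lrt on older
 * glibc, and the printed count depends on timing:
 */
#if 0
#include <stdio.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGRTMIN,
        };
        struct itimerspec its = {
                .it_value    = { .tv_sec = 0, .tv_nsec = 1000000 },
                .it_interval = { .tv_sec = 0, .tv_nsec = 1000000 },
        };
        timer_t tid;
        sigset_t set;
        siginfo_t si;

        sigemptyset(&set);
        sigaddset(&set, SIGRTMIN);
        sigprocmask(SIG_BLOCK, &set, NULL);

        timer_create(CLOCK_MONOTONIC, &sev, &tid); /* preallocates the entry */
        timer_settime(tid, 0, &its, NULL);

        sleep(1);               /* ~1000 expiries, one queued signal */
        sigwaitinfo(&set, &si);
        printf("overruns: %d\n", timer_getoverrun(tid));
        return 0;
}
#endif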
1570
1571/*
1572 * Let a parent know about the death of a child.
1573 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1574 *
1575 * Returns true if our parent ignored us and so we've switched to
1576 * self-reaping.
1577 */
1578bool do_notify_parent(struct task_struct *tsk, int sig)
1579{
1580	struct siginfo info;
1581	unsigned long flags;
1582	struct sighand_struct *psig;
1583	bool autoreap = false;
1584	cputime_t utime, stime;
1585
1586	BUG_ON(sig == -1);
1587
 1588	/* do_notify_parent_cldstop should have been called instead.  */
 1589	BUG_ON(task_is_stopped_or_traced(tsk));
1590
1591	BUG_ON(!tsk->ptrace &&
1592	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1593
1594	if (sig != SIGCHLD) {
1595		/*
1596		 * This is only possible if parent == real_parent.
1597		 * Check if it has changed security domain.
1598		 */
1599		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1600			sig = SIGCHLD;
1601	}
1602
1603	info.si_signo = sig;
1604	info.si_errno = 0;
1605	/*
1606	 * We are under tasklist_lock here so our parent is tied to
1607	 * us and cannot change.
1608	 *
1609	 * task_active_pid_ns will always return the same pid namespace
1610	 * until a task passes through release_task.
1611	 *
1612	 * write_lock() currently calls preempt_disable() which is the
 1613	 * same as rcu_read_lock(), but according to Oleg it is not
 1614	 * correct to rely on this.
1615	 */
1616	rcu_read_lock();
1617	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1618	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1619				       task_uid(tsk));
1620	rcu_read_unlock();
1621
1622	task_cputime(tsk, &utime, &stime);
1623	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1624	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1625
1626	info.si_status = tsk->exit_code & 0x7f;
1627	if (tsk->exit_code & 0x80)
1628		info.si_code = CLD_DUMPED;
1629	else if (tsk->exit_code & 0x7f)
1630		info.si_code = CLD_KILLED;
1631	else {
1632		info.si_code = CLD_EXITED;
1633		info.si_status = tsk->exit_code >> 8;
1634	}
1635
1636	psig = tsk->parent->sighand;
1637	spin_lock_irqsave(&psig->siglock, flags);
1638	if (!tsk->ptrace && sig == SIGCHLD &&
1639	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1640	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1641		/*
1642		 * We are exiting and our parent doesn't care.  POSIX.1
1643		 * defines special semantics for setting SIGCHLD to SIG_IGN
1644		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1645		 * automatically and not left for our parent's wait4 call.
1646		 * Rather than having the parent do it as a magic kind of
1647		 * signal handler, we just set this to tell do_exit that we
1648		 * can be cleaned up without becoming a zombie.  Note that
1649		 * we still call __wake_up_parent in this case, because a
1650		 * blocked sys_wait4 might now return -ECHILD.
1651		 *
1652		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1653		 * is implementation-defined: we do (if you don't want
1654		 * it, just use SIG_IGN instead).
1655		 */
1656		autoreap = true;
1657		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1658			sig = 0;
1659	}
1660	if (valid_signal(sig) && sig)
1661		__group_send_sig_info(sig, &info, tsk->parent);
1662	__wake_up_parent(tsk, tsk->parent);
1663	spin_unlock_irqrestore(&psig->siglock, flags);
1664
1665	return autoreap;
1666}
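/*
 * Editor's illustration (a sketch, not part of signal.c): the autoreap
 * path above is observable from userspace — with SIGCHLD set to SIG_IGN,
 * a dead child never becomes a zombie and wait() reports ECHILD:
 */
#if 0
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
        signal(SIGCHLD, SIG_IGN);       /* children are reaped automatically */
        if (fork() == 0)
                _exit(0);
        sleep(1);                       /* let the child exit */
        if (wait(NULL) < 0 && errno == ECHILD)
                printf("nothing to reap: no zombie was left\n");
        return 0;
}
#endif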
1667
1668/**
1669 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1670 * @tsk: task reporting the state change
1671 * @for_ptracer: the notification is for ptracer
1672 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1673 *
1674 * Notify @tsk's parent that the stopped/continued state has changed.  If
1675 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1676 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1677 *
1678 * CONTEXT:
1679 * Must be called with tasklist_lock at least read locked.
1680 */
1681static void do_notify_parent_cldstop(struct task_struct *tsk,
1682				     bool for_ptracer, int why)
1683{
1684	struct siginfo info;
1685	unsigned long flags;
1686	struct task_struct *parent;
1687	struct sighand_struct *sighand;
1688	cputime_t utime, stime;
1689
1690	if (for_ptracer) {
1691		parent = tsk->parent;
1692	} else {
1693		tsk = tsk->group_leader;
1694		parent = tsk->real_parent;
1695	}
1696
1697	info.si_signo = SIGCHLD;
1698	info.si_errno = 0;
1699	/*
1700	 * see comment in do_notify_parent() about the following 4 lines
1701	 */
1702	rcu_read_lock();
1703	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1704	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1705	rcu_read_unlock();
1706
1707	task_cputime(tsk, &utime, &stime);
1708	info.si_utime = cputime_to_clock_t(utime);
1709	info.si_stime = cputime_to_clock_t(stime);
1710
 1711	info.si_code = why;
 1712	switch (why) {
 1713	case CLD_CONTINUED:
 1714		info.si_status = SIGCONT;
 1715		break;
 1716	case CLD_STOPPED:
 1717		info.si_status = tsk->signal->group_exit_code & 0x7f;
 1718		break;
 1719	case CLD_TRAPPED:
 1720		info.si_status = tsk->exit_code & 0x7f;
 1721		break;
 1722	default:
 1723		BUG();
 1724	}
1725
1726	sighand = parent->sighand;
1727	spin_lock_irqsave(&sighand->siglock, flags);
1728	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1729	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1730		__group_send_sig_info(SIGCHLD, &info, parent);
1731	/*
1732	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1733	 */
1734	__wake_up_parent(tsk, parent);
1735	spin_unlock_irqrestore(&sighand->siglock, flags);
1736}
1737
1738static inline int may_ptrace_stop(void)
1739{
1740	if (!likely(current->ptrace))
1741		return 0;
1742	/*
1743	 * Are we in the middle of do_coredump?
1744	 * If so and our tracer is also part of the coredump stopping
1745	 * is a deadlock situation, and pointless because our tracer
1746	 * is dead so don't allow us to stop.
1747	 * If SIGKILL was already sent before the caller unlocked
1748	 * ->siglock we must see ->core_state != NULL. Otherwise it
1749	 * is safe to enter schedule().
1750	 *
1751	 * This is almost outdated, a task with the pending SIGKILL can't
1752	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1753	 * after SIGKILL was already dequeued.
1754	 */
1755	if (unlikely(current->mm->core_state) &&
1756	    unlikely(current->mm == current->parent->mm))
1757		return 0;
1758
1759	return 1;
1760}
1761
1762/*
1763 * Return non-zero if there is a SIGKILL that should be waking us up.
1764 * Called with the siglock held.
1765 */
1766static int sigkill_pending(struct task_struct *tsk)
1767{
 1768	return sigismember(&tsk->pending.signal, SIGKILL) ||
1769		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1770}
1771
1772/*
1773 * This must be called with current->sighand->siglock held.
1774 *
1775 * This should be the path for all ptrace stops.
1776 * We always set current->last_siginfo while stopped here.
1777 * That makes it a way to test a stopped process for
1778 * being ptrace-stopped vs being job-control-stopped.
1779 *
1780 * If we actually decide not to stop at all because the tracer
1781 * is gone, we keep current->exit_code unless clear_code.
1782 */
1783static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1784	__releases(&current->sighand->siglock)
1785	__acquires(&current->sighand->siglock)
1786{
1787	bool gstop_done = false;
1788
1789	if (arch_ptrace_stop_needed(exit_code, info)) {
1790		/*
1791		 * The arch code has something special to do before a
1792		 * ptrace stop.  This is allowed to block, e.g. for faults
1793		 * on user stack pages.  We can't keep the siglock while
1794		 * calling arch_ptrace_stop, so we must release it now.
1795		 * To preserve proper semantics, we must do this before
1796		 * any signal bookkeeping like checking group_stop_count.
1797		 * Meanwhile, a SIGKILL could come in before we retake the
1798		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1799		 * So after regaining the lock, we must check for SIGKILL.
1800		 */
1801		spin_unlock_irq(&current->sighand->siglock);
1802		arch_ptrace_stop(exit_code, info);
1803		spin_lock_irq(&current->sighand->siglock);
1804		if (sigkill_pending(current))
1805			return;
1806	}
1807
1808	/*
1809	 * We're committing to trapping.  TRACED should be visible before
1810	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1811	 * Also, transition to TRACED and updates to ->jobctl should be
1812	 * atomic with respect to siglock and should be done after the arch
1813	 * hook as siglock is released and regrabbed across it.
1814	 */
1815	set_current_state(TASK_TRACED);
1816
1817	current->last_siginfo = info;
1818	current->exit_code = exit_code;
1819
1820	/*
1821	 * If @why is CLD_STOPPED, we're trapping to participate in a group
 1822	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1823	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1824	 * could be clear now.  We act as if SIGCONT is received after
1825	 * TASK_TRACED is entered - ignore it.
1826	 */
1827	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1828		gstop_done = task_participate_group_stop(current);
1829
1830	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1831	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1832	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1833		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1834
1835	/* entering a trap, clear TRAPPING */
1836	task_clear_jobctl_trapping(current);
1837
1838	spin_unlock_irq(&current->sighand->siglock);
1839	read_lock(&tasklist_lock);
1840	if (may_ptrace_stop()) {
1841		/*
1842		 * Notify parents of the stop.
1843		 *
1844		 * While ptraced, there are two parents - the ptracer and
1845		 * the real_parent of the group_leader.  The ptracer should
1846		 * know about every stop while the real parent is only
1847		 * interested in the completion of group stop.  The states
1848		 * for the two don't interact with each other.  Notify
1849		 * separately unless they're gonna be duplicates.
1850		 */
1851		do_notify_parent_cldstop(current, true, why);
1852		if (gstop_done && ptrace_reparented(current))
1853			do_notify_parent_cldstop(current, false, why);
1854
1855		/*
1856		 * Don't want to allow preemption here, because
1857		 * sys_ptrace() needs this task to be inactive.
1858		 *
1859		 * XXX: implement read_unlock_no_resched().
1860		 */
1861		preempt_disable();
1862		read_unlock(&tasklist_lock);
1863		preempt_enable_no_resched();
1864		freezable_schedule();
1865	} else {
1866		/*
1867		 * By the time we got the lock, our tracer went away.
1868		 * Don't drop the lock yet, another tracer may come.
1869		 *
1870		 * If @gstop_done, the ptracer went away between group stop
1871		 * completion and here.  During detach, it would have set
1872		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1873		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1874		 * the real parent of the group stop completion is enough.
1875		 */
1876		if (gstop_done)
1877			do_notify_parent_cldstop(current, false, why);
1878
1879		/* tasklist protects us from ptrace_freeze_traced() */
1880		__set_current_state(TASK_RUNNING);
1881		if (clear_code)
1882			current->exit_code = 0;
1883		read_unlock(&tasklist_lock);
1884	}
1885
1886	/*
1887	 * We are back.  Now reacquire the siglock before touching
1888	 * last_siginfo, so that we are sure to have synchronized with
1889	 * any signal-sending on another CPU that wants to examine it.
1890	 */
1891	spin_lock_irq(&current->sighand->siglock);
1892	current->last_siginfo = NULL;
1893
1894	/* LISTENING can be set only during STOP traps, clear it */
1895	current->jobctl &= ~JOBCTL_LISTENING;
1896
1897	/*
1898	 * Queued signals ignored us while we were stopped for tracing.
1899	 * So check for any that we should take before resuming user mode.
1900	 * This sets TIF_SIGPENDING, but never clears it.
1901	 */
1902	recalc_sigpending_tsk(current);
1903}
1904
1905static void ptrace_do_notify(int signr, int exit_code, int why)
1906{
1907	siginfo_t info;
1908
1909	memset(&info, 0, sizeof info);
1910	info.si_signo = signr;
1911	info.si_code = exit_code;
1912	info.si_pid = task_pid_vnr(current);
1913	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1914
1915	/* Let the debugger run.  */
1916	ptrace_stop(exit_code, why, 1, &info);
1917}
1918
1919void ptrace_notify(int exit_code)
1920{
1921	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1922	if (unlikely(current->task_works))
1923		task_work_run();
1924
1925	spin_lock_irq(&current->sighand->siglock);
1926	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1927	spin_unlock_irq(&current->sighand->siglock);
1928}
1929
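/*
 * Editor's note (illustrative sketch, not part of signal.c): the
 * BUG_ON() in ptrace_notify() documents the exit_code encoding --
 * SIGTRAP in the low bits, a PTRACE_EVENT_* number in bits 8-15.
 * Callers usually go through the ptrace_event() helper in
 * <linux/ptrace.h>, which builds the value like this:
 */
static inline void example_report_exec(void)	/* hypothetical caller */
{
	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
}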
1930/**
1931 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1932 * @signr: signr causing group stop if initiating
1933 *
1934 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1935 * and participate in it.  If already set, participate in the existing
1936 * group stop.  If participated in a group stop (and thus slept), %true is
1937 * returned with siglock released.
1938 *
1939 * If ptraced, this function doesn't handle stop itself.  Instead,
1940 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1941 * untouched.  The caller must ensure that INTERRUPT trap handling takes
1942 * place afterwards.
1943 *
1944 * CONTEXT:
1945 * Must be called with @current->sighand->siglock held, which is released
1946 * on %true return.
1947 *
1948 * RETURNS:
1949 * %false if group stop is already cancelled or ptrace trap is scheduled.
1950 * %true if participated in group stop.
1951 */
1952static bool do_signal_stop(int signr)
1953	__releases(&current->sighand->siglock)
1954{
1955	struct signal_struct *sig = current->signal;
1956
1957	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1958		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1959		struct task_struct *t;
1960
1961		/* signr will be recorded in task->jobctl for retries */
1962		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1963
1964		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1965		    unlikely(signal_group_exit(sig)))
1966			return false;
1967		/*
1968		 * There is no group stop already in progress.  We must
1969		 * initiate one now.
1970		 *
1971		 * While ptraced, a task may be resumed while group stop is
1972		 * still in effect and then receive a stop signal and
1973		 * initiate another group stop.  This deviates from the
1974		 * usual behavior as two consecutive stop signals can't
1975		 * cause two group stops when !ptraced.  That is why we
1976		 * also check !task_is_stopped(t) below.
1977		 *
1978		 * The condition can be distinguished by testing whether
1979		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
1980		 * group_exit_code in such case.
1981		 *
1982		 * This is not necessary for SIGNAL_STOP_CONTINUED because
1983		 * an intervening stop signal is required to cause two
1984		 * continued events regardless of ptrace.
1985		 */
1986		if (!(sig->flags & SIGNAL_STOP_STOPPED))
1987			sig->group_exit_code = signr;
1988
1989		sig->group_stop_count = 0;
1990
1991		if (task_set_jobctl_pending(current, signr | gstop))
1992			sig->group_stop_count++;
1993
1994		t = current;
1995		while_each_thread(current, t) {
1996			/*
1997			 * Setting state to TASK_STOPPED for a group
1998			 * stop is always done with the siglock held,
1999			 * so this check has no races.
2000			 */
2001			if (!task_is_stopped(t) &&
2002			    task_set_jobctl_pending(t, signr | gstop)) {
2003				sig->group_stop_count++;
2004				if (likely(!(t->ptrace & PT_SEIZED)))
2005					signal_wake_up(t, 0);
2006				else
2007					ptrace_trap_notify(t);
2008			}
2009		}
2010	}
2011
2012	if (likely(!current->ptrace)) {
2013		int notify = 0;
2014
2015		/*
2016		 * If there are no other threads in the group, or if there
2017		 * is a group stop in progress and we are the last to stop,
2018		 * report to the parent.
2019		 */
2020		if (task_participate_group_stop(current))
2021			notify = CLD_STOPPED;
2022
2023		__set_current_state(TASK_STOPPED);
2024		spin_unlock_irq(&current->sighand->siglock);
2025
2026		/*
2027		 * Notify the parent of the group stop completion.  Because
2028		 * we're not holding either the siglock or tasklist_lock
2029		 * here, the ptracer may attach in between; however, this is for
2030		 * group stop and should always be delivered to the real
2031		 * parent of the group leader.  The new ptracer will get
2032		 * its notification when this task transitions into
2033		 * TASK_TRACED.
2034		 */
2035		if (notify) {
2036			read_lock(&tasklist_lock);
2037			do_notify_parent_cldstop(current, false, notify);
2038			read_unlock(&tasklist_lock);
2039		}
2040
2041		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2042		freezable_schedule();
2043		return true;
2044	} else {
2045		/*
2046		 * While ptraced, group stop is handled by STOP trap.
2047		 * Schedule it and let the caller deal with it.
2048		 */
2049		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2050		return false;
2051	}
2052}
2053
2054/**
2055 * do_jobctl_trap - take care of ptrace jobctl traps
2056 *
2057 * When PT_SEIZED, it's used for both group stop and explicit
2058 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2059 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2060 * the stop signal; otherwise, %SIGTRAP.
2061 *
2062 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2063 * number as exit_code and no siginfo.
2064 *
2065 * CONTEXT:
2066 * Must be called with @current->sighand->siglock held, which may be
2067 * released and re-acquired before returning with intervening sleep.
2068 */
2069static void do_jobctl_trap(void)
2070{
2071	struct signal_struct *signal = current->signal;
2072	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2073
2074	if (current->ptrace & PT_SEIZED) {
2075		if (!signal->group_stop_count &&
2076		    !(signal->flags & SIGNAL_STOP_STOPPED))
2077			signr = SIGTRAP;
2078		WARN_ON_ONCE(!signr);
2079		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2080				 CLD_STOPPED);
2081	} else {
2082		WARN_ON_ONCE(!signr);
2083		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2084		current->exit_code = 0;
2085	}
2086}
2087
2088static int ptrace_signal(int signr, siginfo_t *info)
2089{
2090	ptrace_signal_deliver();
2091	/*
2092	 * We do not check sig_kernel_stop(signr) but set this marker
2093	 * unconditionally because we do not know whether debugger will
2094	 * change signr. This flag has no meaning unless we are going
2095	 * to stop after return from ptrace_stop(). In this case it will
2096	 * be checked in do_signal_stop(), we should only stop if it was
2097	 * not cleared by SIGCONT while we were sleeping. See also the
2098	 * comment in dequeue_signal().
2099	 */
2100	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2101	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2102
2103	/* We're back.  Did the debugger cancel the sig?  */
2104	signr = current->exit_code;
2105	if (signr == 0)
2106		return signr;
2107
2108	current->exit_code = 0;
2109
2110	/*
2111	 * Update the siginfo structure if the signal has
2112	 * changed.  If the debugger wanted something
2113	 * specific in the siginfo structure then it should
2114	 * have updated *info via PTRACE_SETSIGINFO.
2115	 */
2116	if (signr != info->si_signo) {
2117		info->si_signo = signr;
2118		info->si_errno = 0;
2119		info->si_code = SI_USER;
2120		rcu_read_lock();
2121		info->si_pid = task_pid_vnr(current->parent);
2122		info->si_uid = from_kuid_munged(current_user_ns(),
2123						task_uid(current->parent));
2124		rcu_read_unlock();
2125	}
2126
2127	/* If the (new) signal is now blocked, requeue it.  */
2128	if (sigismember(&current->blocked, signr)) {
2129		specific_send_sig_info(signr, info, current);
2130		signr = 0;
2131	}
2132
2133	return signr;
2134}
2135
2136int get_signal(struct ksignal *ksig)
2137{
2138	struct sighand_struct *sighand = current->sighand;
2139	struct signal_struct *signal = current->signal;
2140	int signr;
2141
2142	if (unlikely(current->task_works))
2143		task_work_run();
2144
2145	if (unlikely(uprobe_deny_signal()))
2146		return 0;
2147
2148	/*
2149	 * Do this once, we can't return to user-mode if freezing() == T.
2150	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2151	 * thus do not need another check after return.
2152	 */
2153	try_to_freeze();
2154
2155relock:
2156	spin_lock_irq(&sighand->siglock);
2157	/*
2158	 * Every stopped thread goes here after wakeup. Check to see if
2159	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2160	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2161	 */
2162	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2163		int why;
2164
2165		if (signal->flags & SIGNAL_CLD_CONTINUED)
2166			why = CLD_CONTINUED;
2167		else
2168			why = CLD_STOPPED;
2169
2170		signal->flags &= ~SIGNAL_CLD_MASK;
2171
2172		spin_unlock_irq(&sighand->siglock);
2173
2174		/*
2175		 * Notify the parent that we're continuing.  This event is
2176		 * always per-process and doesn't make a whole lot of sense
2177		 * for ptracers, who shouldn't consume the state via
2178		 * wait(2) either, but, for backward compatibility, notify
2179		 * the ptracer of the group leader too unless it's gonna be
2180		 * a duplicate.
2181		 */
2182		read_lock(&tasklist_lock);
2183		do_notify_parent_cldstop(current, false, why);
2184
2185		if (ptrace_reparented(current->group_leader))
2186			do_notify_parent_cldstop(current->group_leader,
2187						true, why);
2188		read_unlock(&tasklist_lock);
2189
2190		goto relock;
2191	}
2192
2193	for (;;) {
2194		struct k_sigaction *ka;
2195
2196		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2197		    do_signal_stop(0))
2198			goto relock;
2199
2200		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2201			do_jobctl_trap();
2202			spin_unlock_irq(&sighand->siglock);
2203			goto relock;
2204		}
2205
2206		signr = dequeue_signal(current, &current->blocked, &ksig->info);
2207
2208		if (!signr)
2209			break; /* will return 0 */
2210
2211		if (unlikely(current->ptrace) && signr != SIGKILL) {
2212			signr = ptrace_signal(signr, &ksig->info);
2213			if (!signr)
2214				continue;
2215		}
2216
2217		ka = &sighand->action[signr-1];
2218
2219		/* Trace actually delivered signals. */
2220		trace_signal_deliver(signr, &ksig->info, ka);
2221
2222		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2223			continue;
2224		if (ka->sa.sa_handler != SIG_DFL) {
2225			/* Run the handler.  */
2226			ksig->ka = *ka;
2227
2228			if (ka->sa.sa_flags & SA_ONESHOT)
2229				ka->sa.sa_handler = SIG_DFL;
2230
2231			break; /* will return non-zero "signr" value */
2232		}
2233
2234		/*
2235		 * Now we are doing the default action for this signal.
2236		 */
2237		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2238			continue;
2239
2240		/*
2241		 * Global init gets no signals it doesn't want.
2242		 * Container-init gets no signals it doesn't want from same
2243		 * container.
2244		 *
2245		 * Note that if global/container-init sees a sig_kernel_only()
2246		 * signal here, the signal must have been generated internally
2247		 * or must have come from an ancestor namespace. In either
2248		 * case, the signal cannot be dropped.
2249		 */
2250		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2251				!sig_kernel_only(signr))
2252			continue;
2253
2254		if (sig_kernel_stop(signr)) {
2255			/*
2256			 * The default action is to stop all threads in
2257			 * the thread group.  The job control signals
2258			 * do nothing in an orphaned pgrp, but SIGSTOP
2259			 * always works.  Note that siglock needs to be
2260			 * dropped during the call to is_orphaned_pgrp()
2261			 * because of lock ordering with tasklist_lock.
2262			 * This allows an intervening SIGCONT to be posted.
2263			 * We need to check for that and bail out if necessary.
2264			 */
2265			if (signr != SIGSTOP) {
2266				spin_unlock_irq(&sighand->siglock);
2267
2268				/* signals can be posted during this window */
2269
2270				if (is_current_pgrp_orphaned())
2271					goto relock;
2272
2273				spin_lock_irq(&sighand->siglock);
2274			}
2275
2276			if (likely(do_signal_stop(ksig->info.si_signo))) {
2277				/* It released the siglock.  */
2278				goto relock;
2279			}
2280
2281			/*
2282			 * We didn't actually stop, due to a race
2283			 * with SIGCONT or something like that.
2284			 */
2285			continue;
2286		}
2287
2288		spin_unlock_irq(&sighand->siglock);
2289
2290		/*
2291		 * Anything else is fatal, maybe with a core dump.
2292		 */
2293		current->flags |= PF_SIGNALED;
2294
2295		if (sig_kernel_coredump(signr)) {
2296			if (print_fatal_signals)
2297				print_fatal_signal(ksig->info.si_signo);
2298			proc_coredump_connector(current);
2299			/*
2300			 * If it was able to dump core, this kills all
2301			 * other threads in the group and synchronizes with
2302			 * their demise.  If we lost the race with another
2303			 * thread getting here, it set group_exit_code
2304			 * first and our do_group_exit call below will use
2305			 * that value and ignore the one we pass it.
2306			 */
2307			do_coredump(&ksig->info);
2308		}
2309
2310		/*
2311		 * Death signals, no core dump.
2312		 */
2313		do_group_exit(ksig->info.si_signo);
2314		/* NOTREACHED */
2315	}
2316	spin_unlock_irq(&sighand->siglock);
2317
2318	ksig->sig = signr;
2319	return ksig->sig > 0;
2320}
2321
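/*
 * Editor's note (illustrative sketch, not part of signal.c):
 * get_signal() is consumed by the per-architecture return-to-user
 * path, roughly as below (modeled on x86's do_signal();
 * example_handle_signal() stands in for the arch-specific signal
 * frame setup and is hypothetical here).
 */
static void example_handle_signal(struct ksignal *ksig, struct pt_regs *regs);

static void example_do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* A handler must run: build the user-space signal frame. */
		example_handle_signal(&ksig, regs);
		return;
	}

	/* No handler ran: put back the sigmask saved by sigsuspend(). */
	restore_saved_sigmask();
}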
2322/**
2323 * signal_delivered - called after a signal was successfully delivered
2324 * @ksig:		kernel signal struct
2325 * @stepping:		nonzero if debugger single-step or block-step in use
2326 *
2327 * This function should be called when a signal has successfully been
2328 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2329 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2330 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2331 */
2332static void signal_delivered(struct ksignal *ksig, int stepping)
2333{
2334	sigset_t blocked;
2335
2336	/* A signal was successfully delivered, and the
2337	   saved sigmask was stored on the signal frame,
2338	   and will be restored by sigreturn.  So we can
2339	   simply clear the restore sigmask flag.  */
2340	clear_restore_sigmask();
2341
2342	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2343	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2344		sigaddset(&blocked, ksig->sig);
2345	set_current_blocked(&blocked);
2346	tracehook_signal_handler(stepping);
2347}
2348
2349void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2350{
2351	if (failed)
2352		force_sigsegv(ksig->sig, current);
2353	else
2354		signal_delivered(ksig, stepping);
2355}
2356
2357/*
2358 * It could be that complete_signal() picked us to notify about the
2359 * group-wide signal. Other threads should be notified now to take
2360 * the shared signals in @which since we will not.
2361 */
2362static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2363{
2364	sigset_t retarget;
2365	struct task_struct *t;
2366
2367	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2368	if (sigisemptyset(&retarget))
2369		return;
2370
2371	t = tsk;
2372	while_each_thread(tsk, t) {
2373		if (t->flags & PF_EXITING)
2374			continue;
2375
2376		if (!has_pending_signals(&retarget, &t->blocked))
2377			continue;
2378		/* Remove the signals this thread can handle. */
2379		sigandsets(&retarget, &retarget, &t->blocked);
2380
2381		if (!signal_pending(t))
2382			signal_wake_up(t, 0);
2383
2384		if (sigisemptyset(&retarget))
2385			break;
2386	}
2387}
2388
2389void exit_signals(struct task_struct *tsk)
2390{
2391	int group_stop = 0;
2392	sigset_t unblocked;
2393
2394	/*
2395	 * @tsk is about to have PF_EXITING set - lock out users which
2396	 * expect stable threadgroup.
2397	 */
2398	threadgroup_change_begin(tsk);
2399
2400	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2401		tsk->flags |= PF_EXITING;
2402		threadgroup_change_end(tsk);
2403		return;
2404	}
2405
2406	spin_lock_irq(&tsk->sighand->siglock);
2407	/*
2408	 * From now this task is not visible for group-wide signals,
2409	 * see wants_signal(), do_signal_stop().
2410	 */
2411	tsk->flags |= PF_EXITING;
2412
2413	threadgroup_change_end(tsk);
2414
2415	if (!signal_pending(tsk))
2416		goto out;
2417
2418	unblocked = tsk->blocked;
2419	signotset(&unblocked);
2420	retarget_shared_pending(tsk, &unblocked);
2421
2422	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2423	    task_participate_group_stop(tsk))
2424		group_stop = CLD_STOPPED;
2425out:
2426	spin_unlock_irq(&tsk->sighand->siglock);
2427
2428	/*
2429	 * If group stop has completed, deliver the notification.  This
2430	 * should always go to the real parent of the group leader.
2431	 */
2432	if (unlikely(group_stop)) {
2433		read_lock(&tasklist_lock);
2434		do_notify_parent_cldstop(tsk, false, group_stop);
2435		read_unlock(&tasklist_lock);
2436	}
2437}
2438
2439EXPORT_SYMBOL(recalc_sigpending);
2440EXPORT_SYMBOL_GPL(dequeue_signal);
2441EXPORT_SYMBOL(flush_signals);
2442EXPORT_SYMBOL(force_sig);
2443EXPORT_SYMBOL(send_sig);
2444EXPORT_SYMBOL(send_sig_info);
2445EXPORT_SYMBOL(sigprocmask);
2446
2447/*
2448 * System call entry points.
2449 */
2450
2451/**
2452 *  sys_restart_syscall - restart a system call
2453 */
2454SYSCALL_DEFINE0(restart_syscall)
2455{
2456	struct restart_block *restart = &current->restart_block;
2457	return restart->fn(restart);
2458}
2459
2460long do_no_restart_syscall(struct restart_block *param)
2461{
2462	return -EINTR;
2463}
2464
2465static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2466{
2467	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2468		sigset_t newblocked;
2469		/* A set of now blocked but previously unblocked signals. */
2470		sigandnsets(&newblocked, newset, &current->blocked);
2471		retarget_shared_pending(tsk, &newblocked);
2472	}
2473	tsk->blocked = *newset;
2474	recalc_sigpending();
2475}
2476
2477/**
2478 * set_current_blocked - change current->blocked mask
2479 * @newset: new mask
2480 *
2481 * It is wrong to change ->blocked directly, this helper should be used
2482 * to ensure the process can't miss a shared signal we are going to block.
2483 */
2484void set_current_blocked(sigset_t *newset)
2485{
2486	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2487	__set_current_blocked(newset);
2488}
2489
2490void __set_current_blocked(const sigset_t *newset)
2491{
2492	struct task_struct *tsk = current;
2493
2494	/*
2495	 * In case the signal mask hasn't changed, there is nothing we need
2496	 * to do. The current->blocked shouldn't be modified by another task.
2497	 */
2498	if (sigequalsets(&tsk->blocked, newset))
2499		return;
2500
2501	spin_lock_irq(&tsk->sighand->siglock);
2502	__set_task_blocked(tsk, newset);
2503	spin_unlock_irq(&tsk->sighand->siglock);
2504}
2505
2506/*
2507 * This is also useful for kernel threads that want to temporarily
2508 * (or permanently) block certain signals.
2509 *
2510 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2511 * interface happily blocks "unblockable" signals like SIGKILL
2512 * and friends.
2513 */
2514int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2515{
2516	struct task_struct *tsk = current;
2517	sigset_t newset;
2518
2519	/* Lockless, only current can change ->blocked, never from irq */
2520	if (oldset)
2521		*oldset = tsk->blocked;
2522
2523	switch (how) {
2524	case SIG_BLOCK:
2525		sigorsets(&newset, &tsk->blocked, set);
2526		break;
2527	case SIG_UNBLOCK:
2528		sigandnsets(&newset, &tsk->blocked, set);
2529		break;
2530	case SIG_SETMASK:
2531		newset = *set;
2532		break;
2533	default:
2534		return -EINVAL;
2535	}
2536
2537	__set_current_blocked(&newset);
2538	return 0;
2539}
2540
2541/**
2542 *  sys_rt_sigprocmask - change the list of currently blocked signals
2543 *  @how: whether to add, remove, or set signals
2544 *  @nset: new signal mask to apply according to @how (if non-null)
2545 *  @oset: previous value of signal mask if non-null
2546 *  @sigsetsize: size of sigset_t type
2547 */
2548SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2549		sigset_t __user *, oset, size_t, sigsetsize)
2550{
2551	sigset_t old_set, new_set;
2552	int error;
2553
2554	/* XXX: Don't preclude handling different sized sigset_t's.  */
2555	if (sigsetsize != sizeof(sigset_t))
2556		return -EINVAL;
2557
2558	old_set = current->blocked;
2559
2560	if (nset) {
2561		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2562			return -EFAULT;
2563		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2564
2565		error = sigprocmask(how, &new_set, NULL);
2566		if (error)
2567			return error;
2568	}
2569
2570	if (oset) {
2571		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2572			return -EFAULT;
2573	}
2574
2575	return 0;
2576}
2577
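/*
 * Editor's note (userspace sketch, not kernel code): glibc's
 * sigprocmask() wrapper lands in sys_rt_sigprocmask() above.  The
 * usual save/restore pattern around a critical section:
 */
#include <signal.h>

static void example_block_sigint(void)
{
	sigset_t set, oldset;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigprocmask(SIG_BLOCK, &set, &oldset);	/* SIG_BLOCK: old | new */
	/* ... critical section: a SIGINT stays pending ... */
	sigprocmask(SIG_SETMASK, &oldset, NULL);	/* restore saved mask */
}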
2578#ifdef CONFIG_COMPAT
2579COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2580		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2581{
2582#ifdef __BIG_ENDIAN
2583	sigset_t old_set = current->blocked;
2584
2585	/* XXX: Don't preclude handling different sized sigset_t's.  */
2586	if (sigsetsize != sizeof(sigset_t))
2587		return -EINVAL;
2588
2589	if (nset) {
2590		compat_sigset_t new32;
2591		sigset_t new_set;
2592		int error;
2593		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2594			return -EFAULT;
2595
2596		sigset_from_compat(&new_set, &new32);
2597		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2598
2599		error = sigprocmask(how, &new_set, NULL);
2600		if (error)
2601			return error;
2602	}
2603	if (oset) {
2604		compat_sigset_t old32;
2605		sigset_to_compat(&old32, &old_set);
2606		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2607			return -EFAULT;
2608	}
2609	return 0;
2610#else
2611	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2612				  (sigset_t __user *)oset, sigsetsize);
2613#endif
2614}
2615#endif
2616
2617static int do_sigpending(void *set, unsigned long sigsetsize)
2618{
2619	if (sigsetsize > sizeof(sigset_t))
2620		return -EINVAL;
2621
2622	spin_lock_irq(&current->sighand->siglock);
2623	sigorsets(set, &current->pending.signal,
2624		  &current->signal->shared_pending.signal);
2625	spin_unlock_irq(&current->sighand->siglock);
2626
2627	/* Outside the lock because only this thread touches it.  */
2628	sigandsets(set, &current->blocked, set);
2629	return 0;
2630}
2631
2632/**
2633 *  sys_rt_sigpending - examine a pending signal that has been raised
2634 *			while blocked
2635 *  @uset: stores pending signals
2636 *  @sigsetsize: size of sigset_t type or smaller
2637 */
2638SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2639{
2640	sigset_t set;
2641	int err = do_sigpending(&set, sigsetsize);
2642	if (!err && copy_to_user(uset, &set, sigsetsize))
2643		err = -EFAULT;
2644	return err;
2645}
2646
2647#ifdef CONFIG_COMPAT
2648COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2649		compat_size_t, sigsetsize)
2650{
2651#ifdef __BIG_ENDIAN
2652	sigset_t set;
2653	int err = do_sigpending(&set, sigsetsize);
2654	if (!err) {
2655		compat_sigset_t set32;
2656		sigset_to_compat(&set32, &set);
2657		/* we can get here only if sigsetsize <= sizeof(set) */
2658		if (copy_to_user(uset, &set32, sigsetsize))
2659			err = -EFAULT;
2660	}
2661	return err;
2662#else
2663	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2664#endif
2665}
2666#endif
2667
2668#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2669
2670int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2671{
2672	int err;
2673
2674	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2675		return -EFAULT;
2676	if (from->si_code < 0)
2677		return __copy_to_user(to, from, sizeof(siginfo_t))
2678			? -EFAULT : 0;
2679	/*
2680	 * If you change siginfo_t structure, please be sure
2681	 * this code is fixed accordingly.
2682	 * Please remember to update the signalfd_copyinfo() function
2683	 * inside fs/signalfd.c too, in case siginfo_t changes.
2684	 * It should never copy any pad contained in the structure
2685	 * to avoid security leaks, but must copy the generic
2686	 * 3 ints plus the relevant union member.
2687	 */
2688	err = __put_user(from->si_signo, &to->si_signo);
2689	err |= __put_user(from->si_errno, &to->si_errno);
2690	err |= __put_user((short)from->si_code, &to->si_code);
2691	switch (from->si_code & __SI_MASK) {
2692	case __SI_KILL:
2693		err |= __put_user(from->si_pid, &to->si_pid);
2694		err |= __put_user(from->si_uid, &to->si_uid);
2695		break;
2696	case __SI_TIMER:
2697		err |= __put_user(from->si_tid, &to->si_tid);
2698		err |= __put_user(from->si_overrun, &to->si_overrun);
2699		err |= __put_user(from->si_ptr, &to->si_ptr);
2700		break;
2701	case __SI_POLL:
2702		err |= __put_user(from->si_band, &to->si_band);
2703		err |= __put_user(from->si_fd, &to->si_fd);
2704		break;
2705	case __SI_FAULT:
2706		err |= __put_user(from->si_addr, &to->si_addr);
2707#ifdef __ARCH_SI_TRAPNO
2708		err |= __put_user(from->si_trapno, &to->si_trapno);
2709#endif
2710#ifdef BUS_MCEERR_AO
2711		/*
2712		 * Other callers might not initialize the si_lsb field,
2713		 * so check explicitly for the right codes here.
2714		 */
2715		if (from->si_signo == SIGBUS &&
2716		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2717			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2718#endif
2719#ifdef SEGV_BNDERR
2720		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2721			err |= __put_user(from->si_lower, &to->si_lower);
2722			err |= __put_user(from->si_upper, &to->si_upper);
2723		}
2724#endif
2725#ifdef SEGV_PKUERR
2726		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2727			err |= __put_user(from->si_pkey, &to->si_pkey);
2728#endif
2729		break;
2730	case __SI_CHLD:
2731		err |= __put_user(from->si_pid, &to->si_pid);
2732		err |= __put_user(from->si_uid, &to->si_uid);
2733		err |= __put_user(from->si_status, &to->si_status);
2734		err |= __put_user(from->si_utime, &to->si_utime);
2735		err |= __put_user(from->si_stime, &to->si_stime);
2736		break;
2737	case __SI_RT: /* This is not generated by the kernel as of now. */
2738	case __SI_MESGQ: /* But this is */
2739		err |= __put_user(from->si_pid, &to->si_pid);
2740		err |= __put_user(from->si_uid, &to->si_uid);
2741		err |= __put_user(from->si_ptr, &to->si_ptr);
2742		break;
2743#ifdef __ARCH_SIGSYS
2744	case __SI_SYS:
2745		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2746		err |= __put_user(from->si_syscall, &to->si_syscall);
2747		err |= __put_user(from->si_arch, &to->si_arch);
2748		break;
2749#endif
2750	default: /* this is just in case for now ... */
2751		err |= __put_user(from->si_pid, &to->si_pid);
2752		err |= __put_user(from->si_uid, &to->si_uid);
2753		break;
2754	}
2755	return err;
2756}
2757
2758#endif
2759
2760/**
2761 *  do_sigtimedwait - wait for queued signals specified in @which
2762 *  @which: queued signals to wait for
2763 *  @info: if non-null, the signal's siginfo is returned here
2764 *  @ts: upper bound on process time suspension
2765 */
2766int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2767		    const struct timespec *ts)
2768{
2769	ktime_t *to = NULL, timeout = KTIME_MAX;
2770	struct task_struct *tsk = current;
2771	sigset_t mask = *which;
2772	int sig, ret = 0;
2773
2774	if (ts) {
2775		if (!timespec_valid(ts))
2776			return -EINVAL;
2777		timeout = timespec_to_ktime(*ts);
2778		to = &timeout;
2779	}
2780
2781	/*
2782	 * Invert the set of allowed signals to get those we want to block.
2783	 */
2784	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2785	signotset(&mask);
2786
2787	spin_lock_irq(&tsk->sighand->siglock);
2788	sig = dequeue_signal(tsk, &mask, info);
2789	if (!sig && timeout) {
2790		/*
2791		 * None ready, temporarily unblock those we're interested in
2792		 * while we are sleeping, so that we'll be awakened when
2793		 * they arrive. Unblocking is always fine, we can avoid
2794		 * set_current_blocked().
2795		 */
2796		tsk->real_blocked = tsk->blocked;
2797		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2798		recalc_sigpending();
2799		spin_unlock_irq(&tsk->sighand->siglock);
2800
2801		__set_current_state(TASK_INTERRUPTIBLE);
2802		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2803							 HRTIMER_MODE_REL);
2804		spin_lock_irq(&tsk->sighand->siglock);
2805		__set_task_blocked(tsk, &tsk->real_blocked);
2806		sigemptyset(&tsk->real_blocked);
2807		sig = dequeue_signal(tsk, &mask, info);
2808	}
2809	spin_unlock_irq(&tsk->sighand->siglock);
2810
2811	if (sig)
2812		return sig;
2813	return ret ? -EINTR : -EAGAIN;
2814}
2815
2816/**
2817 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2818 *			in @uthese
2819 *  @uthese: queued signals to wait for
2820 *  @uinfo: if non-null, the signal's siginfo is returned here
2821 *  @uts: upper bound on process time suspension
2822 *  @sigsetsize: size of sigset_t type
2823 */
2824SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2825		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2826		size_t, sigsetsize)
2827{
2828	sigset_t these;
2829	struct timespec ts;
2830	siginfo_t info;
2831	int ret;
2832
2833	/* XXX: Don't preclude handling different sized sigset_t's.  */
2834	if (sigsetsize != sizeof(sigset_t))
2835		return -EINVAL;
2836
2837	if (copy_from_user(&these, uthese, sizeof(these)))
2838		return -EFAULT;
2839
2840	if (uts) {
2841		if (copy_from_user(&ts, uts, sizeof(ts)))
2842			return -EFAULT;
2843	}
2844
2845	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2846
2847	if (ret > 0 && uinfo) {
2848		if (copy_siginfo_to_user(uinfo, &info))
2849			ret = -EFAULT;
2850	}
2851
2852	return ret;
2853}
2854
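/*
 * Editor's note (userspace sketch, not kernel code): sigtimedwait(2)
 * reaches do_sigtimedwait() above.  The signal must already be
 * blocked, or it may be delivered asynchronously instead of being
 * dequeued here.
 */
#include <signal.h>
#include <stdio.h>

static void example_wait_sigusr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 5 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
	else
		perror("sigtimedwait");	/* EAGAIN on timeout, EINTR on interrupt */
}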
2855/**
2856 *  sys_kill - send a signal to a process
2857 *  @pid: the PID of the process
2858 *  @sig: signal to be sent
2859 */
2860SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2861{
2862	struct siginfo info;
2863
2864	info.si_signo = sig;
2865	info.si_errno = 0;
2866	info.si_code = SI_USER;
2867	info.si_pid = task_tgid_vnr(current);
2868	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2869
2870	return kill_something_info(sig, &info, pid);
2871}
2872
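/*
 * Editor's note (userspace sketch, not kernel code): signal 0 runs
 * only the permission and existence checks -- see the "null signal"
 * comment in do_send_specific() below.  A common liveness probe:
 */
#include <errno.h>
#include <signal.h>

static int example_pid_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists and we may signal it */
	return errno == EPERM;		/* exists but not ours; ESRCH: gone */
}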
2873static int
2874do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2875{
2876	struct task_struct *p;
2877	int error = -ESRCH;
2878
2879	rcu_read_lock();
2880	p = find_task_by_vpid(pid);
2881	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2882		error = check_kill_permission(sig, info, p);
2883		/*
2884		 * The null signal is a permissions and process existence
2885		 * probe.  No signal is actually delivered.
2886		 */
2887		if (!error && sig) {
2888			error = do_send_sig_info(sig, info, p, false);
2889			/*
2890			 * If lock_task_sighand() failed we pretend the task
2891			 * dies after receiving the signal. The window is tiny,
2892			 * and the signal is private anyway.
2893			 */
2894			if (unlikely(error == -ESRCH))
2895				error = 0;
2896		}
2897	}
2898	rcu_read_unlock();
2899
2900	return error;
2901}
2902
2903static int do_tkill(pid_t tgid, pid_t pid, int sig)
2904{
2905	struct siginfo info = {};
2906
2907	info.si_signo = sig;
2908	info.si_errno = 0;
2909	info.si_code = SI_TKILL;
2910	info.si_pid = task_tgid_vnr(current);
2911	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2912
2913	return do_send_specific(tgid, pid, sig, &info);
2914}
2915
2916/**
2917 *  sys_tgkill - send signal to one specific thread
2918 *  @tgid: the thread group ID of the thread
2919 *  @pid: the PID of the thread
2920 *  @sig: signal to be sent
2921 *
2922 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2923 *  exists but no longer belongs to the target process. This method
2924 *  solves the problem of threads exiting and PIDs getting reused.
2925 */
2926SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2927{
2928	/* This is only valid for single tasks */
2929	if (pid <= 0 || tgid <= 0)
2930		return -EINVAL;
2931
2932	return do_tkill(tgid, pid, sig);
2933}
2934
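/*
 * Editor's note (userspace sketch, not kernel code): glibc
 * historically exposed no tgkill() wrapper, so callers (including
 * pthread_kill()) go through syscall(2).  The @tgid check above makes
 * a recycled tid harmless:
 */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_tgkill(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}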
2935/**
2936 *  sys_tkill - send signal to one specific task
2937 *  @pid: the PID of the task
2938 *  @sig: signal to be sent
2939 *
2940 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2941 */
2942SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2943{
2944	/* This is only valid for single tasks */
2945	if (pid <= 0)
2946		return -EINVAL;
2947
2948	return do_tkill(0, pid, sig);
2949}
2950
2951static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2952{
2953	/* Not even root can pretend to send signals from the kernel.
2954	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2955	 */
2956	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2957	    (task_pid_vnr(current) != pid))
2958		return -EPERM;
2959
2960	info->si_signo = sig;
2961
2962	/* POSIX.1b doesn't mention process groups.  */
2963	return kill_proc_info(sig, info, pid);
2964}
2965
2966/**
2967 *  sys_rt_sigqueueinfo - send a signal plus accompanying info to a process
2968 *  @pid: the PID of the process
2969 *  @sig: signal to be sent
2970 *  @uinfo: signal info to be sent
2971 */
2972SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2973		siginfo_t __user *, uinfo)
2974{
2975	siginfo_t info;
2976	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2977		return -EFAULT;
2978	return do_rt_sigqueueinfo(pid, sig, &info);
2979}
2980
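/*
 * Editor's note (userspace sketch, not kernel code): sigqueue(3)
 * arrives here with si_code = SI_QUEUE and one word of payload in
 * si_value; the si_code >= 0 test above is what keeps unprivileged
 * senders from forging kernel-generated codes.
 */
#include <signal.h>

static int example_sigqueue(pid_t pid)
{
	union sigval value = { .sival_int = 42 };	/* payload word */

	return sigqueue(pid, SIGUSR1, value);
}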
2981#ifdef CONFIG_COMPAT
2982COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
2983			compat_pid_t, pid,
2984			int, sig,
2985			struct compat_siginfo __user *, uinfo)
2986{
2987	siginfo_t info = {};
2988	int ret = copy_siginfo_from_user32(&info, uinfo);
2989	if (unlikely(ret))
2990		return ret;
2991	return do_rt_sigqueueinfo(pid, sig, &info);
2992}
2993#endif
2994
2995static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2996{
2997	/* This is only valid for single tasks */
2998	if (pid <= 0 || tgid <= 0)
2999		return -EINVAL;
3000
3001	/* Not even root can pretend to send signals from the kernel.
3002	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3003	 */
3004	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3005	    (task_pid_vnr(current) != pid))
3006		return -EPERM;
3007
3008	info->si_signo = sig;
3009
3010	return do_send_specific(tgid, pid, sig, info);
3011}
3012
3013SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3014		siginfo_t __user *, uinfo)
3015{
3016	siginfo_t info;
3017
3018	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3019		return -EFAULT;
3020
3021	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3022}
3023
3024#ifdef CONFIG_COMPAT
3025COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3026			compat_pid_t, tgid,
3027			compat_pid_t, pid,
3028			int, sig,
3029			struct compat_siginfo __user *, uinfo)
3030{
3031	siginfo_t info = {};
3032
3033	if (copy_siginfo_from_user32(&info, uinfo))
3034		return -EFAULT;
3035	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3036}
3037#endif
3038
3039/*
3040 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3041 */
3042void kernel_sigaction(int sig, __sighandler_t action)
3043{
3044	spin_lock_irq(&current->sighand->siglock);
3045	current->sighand->action[sig - 1].sa.sa_handler = action;
3046	if (action == SIG_IGN) {
3047		sigset_t mask;
3048
3049		sigemptyset(&mask);
3050		sigaddset(&mask, sig);
3051
3052		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3053		flush_sigqueue_mask(&mask, &current->pending);
3054		recalc_sigpending();
3055	}
3056	spin_unlock_irq(&current->sighand->siglock);
3057}
3058EXPORT_SYMBOL(kernel_sigaction);
3059
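/*
 * Editor's note (illustrative sketch, not part of signal.c): kthreads
 * normally reach kernel_sigaction() through the allow_signal() /
 * disallow_signal() helpers in <linux/signal.h> (the latter installs
 * SIG_IGN).  A typical pattern, assuming a hypothetical thread
 * function and <linux/kthread.h>:
 */
static int example_kthread_fn(void *data)
{
	allow_signal(SIGKILL);			/* opt in to SIGKILL */

	while (!kthread_should_stop()) {
		if (signal_pending(current))
			break;			/* we were killed */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}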
3060void __weak sigaction_compat_abi(struct k_sigaction *act,
3061		struct k_sigaction *oact)
3062{
3063}
3064
3065int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3066{
3067	struct task_struct *p = current, *t;
3068	struct k_sigaction *k;
3069	sigset_t mask;
3070
3071	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3072		return -EINVAL;
3073
3074	k = &p->sighand->action[sig-1];
3075
3076	spin_lock_irq(&p->sighand->siglock);
3077	if (oact)
3078		*oact = *k;
3079
3080	sigaction_compat_abi(act, oact);
3081
3082	if (act) {
3083		sigdelsetmask(&act->sa.sa_mask,
3084			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3085		*k = *act;
3086		/*
3087		 * POSIX 3.3.1.3:
3088		 *  "Setting a signal action to SIG_IGN for a signal that is
3089		 *   pending shall cause the pending signal to be discarded,
3090		 *   whether or not it is blocked."
3091		 *
3092		 *  "Setting a signal action to SIG_DFL for a signal that is
3093		 *   pending and whose default action is to ignore the signal
3094		 *   (for example, SIGCHLD), shall cause the pending signal to
3095		 *   be discarded, whether or not it is blocked"
3096		 */
3097		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3098			sigemptyset(&mask);
3099			sigaddset(&mask, sig);
3100			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3101			for_each_thread(p, t)
3102				flush_sigqueue_mask(&mask, &t->pending);
3103		}
3104	}
3105
3106	spin_unlock_irq(&p->sighand->siglock);
3107	return 0;
3108}
3109
3110static int
3111do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3112{
3113	stack_t oss;
3114	int error;
3115
3116	oss.ss_sp = (void __user *) current->sas_ss_sp;
3117	oss.ss_size = current->sas_ss_size;
3118	oss.ss_flags = sas_ss_flags(sp) |
3119		(current->sas_ss_flags & SS_FLAG_BITS);
3120
3121	if (uss) {
3122		void __user *ss_sp;
3123		size_t ss_size;
3124		unsigned ss_flags;
3125		int ss_mode;
3126
3127		error = -EFAULT;
3128		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3129			goto out;
3130		error = __get_user(ss_sp, &uss->ss_sp) |
3131			__get_user(ss_flags, &uss->ss_flags) |
3132			__get_user(ss_size, &uss->ss_size);
3133		if (error)
3134			goto out;
3135
3136		error = -EPERM;
3137		if (on_sig_stack(sp))
3138			goto out;
3139
3140		ss_mode = ss_flags & ~SS_FLAG_BITS;
3141		error = -EINVAL;
3142		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3143				ss_mode != 0)
3144			goto out;
3145
3146		if (ss_mode == SS_DISABLE) {
3147			ss_size = 0;
3148			ss_sp = NULL;
3149		} else {
3150			error = -ENOMEM;
3151			if (ss_size < MINSIGSTKSZ)
3152				goto out;
3153		}
3154
3155		current->sas_ss_sp = (unsigned long) ss_sp;
3156		current->sas_ss_size = ss_size;
3157		current->sas_ss_flags = ss_flags;
3158	}
3159
3160	error = 0;
3161	if (uoss) {
3162		error = -EFAULT;
3163		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3164			goto out;
3165		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3166			__put_user(oss.ss_size, &uoss->ss_size) |
3167			__put_user(oss.ss_flags, &uoss->ss_flags);
3168	}
3169
3170out:
3171	return error;
3172}
3173SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3174{
3175	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3176}
3177
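/*
 * Editor's note (userspace sketch, not kernel code): pairing
 * sigaltstack(2) with an SA_ONSTACK handler lets a stack-overflow
 * SIGSEGV still run its handler; SS_AUTODISARM (see __save_altstack()
 * below) disarms the alternate stack while the handler executes.
 */
#include <signal.h>
#include <stdlib.h>

static void example_install_altstack(void (*handler)(int))
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
	};
	struct sigaction sa = {
		.sa_handler = handler,
		.sa_flags = SA_ONSTACK,
	};

	sigaltstack(&ss, NULL);
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
}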
3178int restore_altstack(const stack_t __user *uss)
3179{
3180	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3181	/* squash all but EFAULT for now */
3182	return err == -EFAULT ? err : 0;
3183}
3184
3185int __save_altstack(stack_t __user *uss, unsigned long sp)
3186{
3187	struct task_struct *t = current;
3188	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3189		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3190		__put_user(t->sas_ss_size, &uss->ss_size);
3191	if (err)
3192		return err;
3193	if (t->sas_ss_flags & SS_AUTODISARM)
3194		sas_ss_reset(t);
3195	return 0;
3196}
3197
3198#ifdef CONFIG_COMPAT
3199COMPAT_SYSCALL_DEFINE2(sigaltstack,
3200			const compat_stack_t __user *, uss_ptr,
3201			compat_stack_t __user *, uoss_ptr)
3202{
3203	stack_t uss, uoss;
3204	int ret;
3205	mm_segment_t seg;
3206
3207	if (uss_ptr) {
3208		compat_stack_t uss32;
3209
3210		memset(&uss, 0, sizeof(stack_t));
3211		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3212			return -EFAULT;
3213		uss.ss_sp = compat_ptr(uss32.ss_sp);
3214		uss.ss_flags = uss32.ss_flags;
3215		uss.ss_size = uss32.ss_size;
3216	}
3217	seg = get_fs();
3218	set_fs(KERNEL_DS);
3219	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3220			     (stack_t __force __user *) &uoss,
3221			     compat_user_stack_pointer());
3222	set_fs(seg);
3223	if (ret >= 0 && uoss_ptr)  {
3224		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3225		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3226		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3227		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3228			ret = -EFAULT;
3229	}
3230	return ret;
3231}
3232
3233int compat_restore_altstack(const compat_stack_t __user *uss)
3234{
3235	int err = compat_sys_sigaltstack(uss, NULL);
3236	/* squash all but -EFAULT for now */
3237	return err == -EFAULT ? err : 0;
3238}
3239
3240int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3241{
3242	int err;
3243	struct task_struct *t = current;
3244	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3245			 &uss->ss_sp) |
3246		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3247		__put_user(t->sas_ss_size, &uss->ss_size);
3248	if (err)
3249		return err;
3250	if (t->sas_ss_flags & SS_AUTODISARM)
3251		sas_ss_reset(t);
3252	return 0;
3253}
3254#endif
3255
3256#ifdef __ARCH_WANT_SYS_SIGPENDING
3257
3258/**
3259 *  sys_sigpending - examine pending signals
3260 *  @set: where mask of pending signal is returned
3261 */
3262SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3263{
3264	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); 
3265}
3266
3267#endif
3268
3269#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3270/**
3271 *  sys_sigprocmask - examine and change blocked signals
3272 *  @how: whether to add, remove, or set signals
3273 *  @nset: signals to add or remove (if non-null)
3274 *  @oset: previous value of signal mask if non-null
3275 *
3276 * Some platforms have their own version with special arguments;
3277 * others support only sys_rt_sigprocmask.
3278 */
3279
3280SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3281		old_sigset_t __user *, oset)
3282{
3283	old_sigset_t old_set, new_set;
3284	sigset_t new_blocked;
3285
3286	old_set = current->blocked.sig[0];
3287
3288	if (nset) {
3289		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3290			return -EFAULT;
3291
3292		new_blocked = current->blocked;
3293
3294		switch (how) {
3295		case SIG_BLOCK:
3296			sigaddsetmask(&new_blocked, new_set);
3297			break;
3298		case SIG_UNBLOCK:
3299			sigdelsetmask(&new_blocked, new_set);
3300			break;
3301		case SIG_SETMASK:
3302			new_blocked.sig[0] = new_set;
3303			break;
3304		default:
3305			return -EINVAL;
3306		}
3307
3308		set_current_blocked(&new_blocked);
3309	}
3310
3311	if (oset) {
3312		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3313			return -EFAULT;
3314	}
3315
3316	return 0;
3317}
3318#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3319
3320#ifndef CONFIG_ODD_RT_SIGACTION
3321/**
3322 *  sys_rt_sigaction - alter an action taken by a process
3323 *  @sig: signal to be sent
3324 *  @act: new sigaction
3325 *  @oact: used to save the previous sigaction
3326 *  @sigsetsize: size of sigset_t type
3327 */
3328SYSCALL_DEFINE4(rt_sigaction, int, sig,
3329		const struct sigaction __user *, act,
3330		struct sigaction __user *, oact,
3331		size_t, sigsetsize)
3332{
3333	struct k_sigaction new_sa, old_sa;
3334	int ret = -EINVAL;
3335
3336	/* XXX: Don't preclude handling different sized sigset_t's.  */
3337	if (sigsetsize != sizeof(sigset_t))
3338		goto out;
3339
3340	if (act) {
3341		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3342			return -EFAULT;
3343	}
3344
3345	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3346
3347	if (!ret && oact) {
3348		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3349			return -EFAULT;
3350	}
3351out:
3352	return ret;
3353}
3354#ifdef CONFIG_COMPAT
3355COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3356		const struct compat_sigaction __user *, act,
3357		struct compat_sigaction __user *, oact,
3358		compat_size_t, sigsetsize)
3359{
3360	struct k_sigaction new_ka, old_ka;
3361	compat_sigset_t mask;
3362#ifdef __ARCH_HAS_SA_RESTORER
3363	compat_uptr_t restorer;
3364#endif
3365	int ret;
3366
3367	/* XXX: Don't preclude handling different sized sigset_t's.  */
3368	if (sigsetsize != sizeof(compat_sigset_t))
3369		return -EINVAL;
3370
3371	if (act) {
3372		compat_uptr_t handler;
3373		ret = get_user(handler, &act->sa_handler);
3374		new_ka.sa.sa_handler = compat_ptr(handler);
3375#ifdef __ARCH_HAS_SA_RESTORER
3376		ret |= get_user(restorer, &act->sa_restorer);
3377		new_ka.sa.sa_restorer = compat_ptr(restorer);
3378#endif
3379		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3380		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3381		if (ret)
3382			return -EFAULT;
3383		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3384	}
3385
3386	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3387	if (!ret && oact) {
3388		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3389		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
3390			       &oact->sa_handler);
3391		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3392		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3393#ifdef __ARCH_HAS_SA_RESTORER
3394		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3395				&oact->sa_restorer);
3396#endif
3397	}
3398	return ret;
3399}
3400#endif
3401#endif /* !CONFIG_ODD_RT_SIGACTION */
3402
3403#ifdef CONFIG_OLD_SIGACTION
3404SYSCALL_DEFINE3(sigaction, int, sig,
3405		const struct old_sigaction __user *, act,
3406	        struct old_sigaction __user *, oact)
3407{
3408	struct k_sigaction new_ka, old_ka;
3409	int ret;
3410
3411	if (act) {
3412		old_sigset_t mask;
3413		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3414		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3415		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3416		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3417		    __get_user(mask, &act->sa_mask))
3418			return -EFAULT;
3419#ifdef __ARCH_HAS_KA_RESTORER
3420		new_ka.ka_restorer = NULL;
3421#endif
3422		siginitset(&new_ka.sa.sa_mask, mask);
3423	}
3424
3425	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3426
3427	if (!ret && oact) {
3428		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3429		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3430		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3431		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3432		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3433			return -EFAULT;
3434	}
3435
3436	return ret;
3437}
3438#endif
3439#ifdef CONFIG_COMPAT_OLD_SIGACTION
3440COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3441		const struct compat_old_sigaction __user *, act,
3442	        struct compat_old_sigaction __user *, oact)
3443{
3444	struct k_sigaction new_ka, old_ka;
3445	int ret;
3446	compat_old_sigset_t mask;
3447	compat_uptr_t handler, restorer;
3448
3449	if (act) {
3450		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3451		    __get_user(handler, &act->sa_handler) ||
3452		    __get_user(restorer, &act->sa_restorer) ||
3453		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3454		    __get_user(mask, &act->sa_mask))
3455			return -EFAULT;
3456
3457#ifdef __ARCH_HAS_KA_RESTORER
3458		new_ka.ka_restorer = NULL;
3459#endif
3460		new_ka.sa.sa_handler = compat_ptr(handler);
3461		new_ka.sa.sa_restorer = compat_ptr(restorer);
3462		siginitset(&new_ka.sa.sa_mask, mask);
3463	}
3464
3465	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3466
3467	if (!ret && oact) {
3468		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3469		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3470			       &oact->sa_handler) ||
3471		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3472			       &oact->sa_restorer) ||
3473		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3474		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3475			return -EFAULT;
3476	}
3477	return ret;
3478}
3479#endif
3480
3481#ifdef CONFIG_SGETMASK_SYSCALL
3482
3483/*
3484 * For backwards compatibility.  Functionality superseded by sigprocmask.
3485 */
3486SYSCALL_DEFINE0(sgetmask)
3487{
3488	/* SMP safe */
3489	return current->blocked.sig[0];
3490}
3491
3492SYSCALL_DEFINE1(ssetmask, int, newmask)
3493{
3494	int old = current->blocked.sig[0];
3495	sigset_t newset;
3496
3497	siginitset(&newset, newmask);
3498	set_current_blocked(&newset);
3499
3500	return old;
3501}
3502#endif /* CONFIG_SGETMASK_SYSCALL */
3503
3504#ifdef __ARCH_WANT_SYS_SIGNAL
3505/*
3506 * For backwards compatibility.  Functionality superseded by sigaction.
3507 */
3508SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3509{
3510	struct k_sigaction new_sa, old_sa;
3511	int ret;
3512
3513	new_sa.sa.sa_handler = handler;
3514	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3515	sigemptyset(&new_sa.sa.sa_mask);
3516
3517	ret = do_sigaction(sig, &new_sa, &old_sa);
3518
3519	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3520}
3521#endif /* __ARCH_WANT_SYS_SIGNAL */
3522
3523#ifdef __ARCH_WANT_SYS_PAUSE
3524
3525SYSCALL_DEFINE0(pause)
3526{
3527	while (!signal_pending(current)) {
3528		__set_current_state(TASK_INTERRUPTIBLE);
3529		schedule();
3530	}
3531	return -ERESTARTNOHAND;
3532}
3533
3534#endif
3535
3536static int sigsuspend(sigset_t *set)
3537{
3538	current->saved_sigmask = current->blocked;
3539	set_current_blocked(set);
3540
3541	while (!signal_pending(current)) {
3542		__set_current_state(TASK_INTERRUPTIBLE);
3543		schedule();
3544	}
3545	set_restore_sigmask();
3546	return -ERESTARTNOHAND;
3547}
3548
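/*
 * Editor's note (userspace sketch, not kernel code): the saved_sigmask
 * handling above is what makes the classic race-free wait work -- the
 * mask swap and the sleep are atomic, so a signal landing in between
 * is not lost:
 */
#include <signal.h>

static volatile sig_atomic_t got_usr1;	/* set by a SIGUSR1 handler */

static void example_wait_for_usr1(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */

	while (!got_usr1)
		sigsuspend(&old);	/* atomically unblock and sleep */

	sigprocmask(SIG_SETMASK, &old, NULL);
}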
3549/**
3550 *  sys_rt_sigsuspend - replace the signal mask with the @unewset
3551 *	value until a signal is received
3552 *  @unewset: new signal mask value
3553 *  @sigsetsize: size of sigset_t type
3554 */
3555SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3556{
3557	sigset_t newset;
3558
3559	/* XXX: Don't preclude handling different sized sigset_t's.  */
3560	if (sigsetsize != sizeof(sigset_t))
3561		return -EINVAL;
3562
3563	if (copy_from_user(&newset, unewset, sizeof(newset)))
3564		return -EFAULT;
3565	return sigsuspend(&newset);
3566}
3567 
3568#ifdef CONFIG_COMPAT
3569COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3570{
3571#ifdef __BIG_ENDIAN
3572	sigset_t newset;
3573	compat_sigset_t newset32;
3574
3575	/* XXX: Don't preclude handling different sized sigset_t's.  */
3576	if (sigsetsize != sizeof(sigset_t))
3577		return -EINVAL;
3578
3579	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3580		return -EFAULT;
3581	sigset_from_compat(&newset, &newset32);
3582	return sigsuspend(&newset);
3583#else
3584	/* on little-endian bitmaps don't care about granularity */
3585	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3586#endif
3587}
3588#endif
3589
3590#ifdef CONFIG_OLD_SIGSUSPEND
3591SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3592{
3593	sigset_t blocked;
3594	siginitset(&blocked, mask);
3595	return sigsuspend(&blocked);
3596}
3597#endif
3598#ifdef CONFIG_OLD_SIGSUSPEND3
3599SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3600{
3601	sigset_t blocked;
3602	siginitset(&blocked, mask);
3603	return sigsuspend(&blocked);
3604}
3605#endif
3606
3607__weak const char *arch_vma_name(struct vm_area_struct *vma)
3608{
3609	return NULL;
3610}
3611
3612void __init signals_init(void)
3613{
3614	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3615	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3616		!= offsetof(struct siginfo, _sifields._pad));
3617
3618	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3619}
3620
3621#ifdef CONFIG_KGDB_KDB
3622#include <linux/kdb.h>
3623/*
3624 * kdb_send_sig_info - Allows kdb to send signals without exposing
3625 * signal internals.  This function checks if the required locks are
3626 * available before calling the main signal code, to avoid kdb
3627 * deadlocks.
3628 */
3629void
3630kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3631{
3632	static struct task_struct *kdb_prev_t;
3633	int sig, new_t;
3634	if (!spin_trylock(&t->sighand->siglock)) {
3635		kdb_printf("Can't do kill command now.\n"
3636			   "The sigmask lock is held somewhere else in the "
3637			   "kernel, try again later\n");
3638		return;
3639	}
3640	spin_unlock(&t->sighand->siglock);
3641	new_t = kdb_prev_t != t;
3642	kdb_prev_t = t;
3643	if (t->state != TASK_RUNNING && new_t) {
3644		kdb_printf("Process is not RUNNING, sending a signal from "
3645			   "kdb risks deadlock\n"
3646			   "on the run queue locks. "
3647			   "The signal has _not_ been sent.\n"
3648			   "Reissue the kill command if you want to risk "
3649			   "the deadlock.\n");
3650		return;
3651	}
3652	sig = info->si_signo;
3653	if (send_sig_info(sig, info, t))
3654		kdb_printf("Failed to deliver signal %d to process %d.\n",
3655			   sig, t->pid);
3656	else
3657		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3658}
3659#endif	/* CONFIG_KGDB_KDB */
v4.17
   1/*
   2 *  linux/kernel/signal.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *
   6 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   7 *
   8 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
   9 *		Changes to use preallocated sigqueue structures
  10 *		to allow signals to be sent reliably.
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/sched/mm.h>
  17#include <linux/sched/user.h>
  18#include <linux/sched/debug.h>
  19#include <linux/sched/task.h>
  20#include <linux/sched/task_stack.h>
  21#include <linux/sched/cputime.h>
  22#include <linux/fs.h>
  23#include <linux/tty.h>
  24#include <linux/binfmts.h>
  25#include <linux/coredump.h>
  26#include <linux/security.h>
  27#include <linux/syscalls.h>
  28#include <linux/ptrace.h>
  29#include <linux/signal.h>
  30#include <linux/signalfd.h>
  31#include <linux/ratelimit.h>
  32#include <linux/tracehook.h>
  33#include <linux/capability.h>
  34#include <linux/freezer.h>
  35#include <linux/pid_namespace.h>
  36#include <linux/nsproxy.h>
  37#include <linux/user_namespace.h>
  38#include <linux/uprobes.h>
  39#include <linux/compat.h>
  40#include <linux/cn_proc.h>
  41#include <linux/compiler.h>
  42#include <linux/posix-timers.h>
  43#include <linux/livepatch.h>
  44
  45#define CREATE_TRACE_POINTS
  46#include <trace/events/signal.h>
  47
  48#include <asm/param.h>
  49#include <linux/uaccess.h>
  50#include <asm/unistd.h>
  51#include <asm/siginfo.h>
  52#include <asm/cacheflush.h>
  53#include "audit.h"	/* audit_signal_info() */
  54
  55/*
  56 * SLAB caches for signal bits.
  57 */
  58
  59static struct kmem_cache *sigqueue_cachep;
  60
  61int print_fatal_signals __read_mostly;
  62
  63static void __user *sig_handler(struct task_struct *t, int sig)
  64{
  65	return t->sighand->action[sig - 1].sa.sa_handler;
  66}
  67
  68static int sig_handler_ignored(void __user *handler, int sig)
  69{
  70	/* Is it explicitly or implicitly ignored? */
  71	return handler == SIG_IGN ||
  72		(handler == SIG_DFL && sig_kernel_ignore(sig));
  73}
  74
  75static int sig_task_ignored(struct task_struct *t, int sig, bool force)
  76{
  77	void __user *handler;
  78
  79	handler = sig_handler(t, sig);
  80
  81	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  82	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
  83		return 1;
  84
  85	return sig_handler_ignored(handler, sig);
  86}
  87
  88static int sig_ignored(struct task_struct *t, int sig, bool force)
  89{
  90	/*
  91	 * Blocked signals are never ignored, since the
  92	 * signal handler may change by the time it is
  93	 * unblocked.
  94	 */
  95	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
  96		return 0;
  97
  98	/*
  99	 * Tracers may want to know about even ignored signals, unless it
 100	 * is SIGKILL, which can't be reported anyway but can be ignored
 101	 * by a SIGNAL_UNKILLABLE task.
 102	 */
 103	if (t->ptrace && sig != SIGKILL)
 104		return 0;
 105
 106	return sig_task_ignored(t, sig, force);
 107}
 108
 109/*
 110 * Re-calculate pending state from the set of locally pending
 111 * signals, globally pending signals, and blocked signals.
 112 */
 113static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 114{
 115	unsigned long ready;
 116	long i;
 117
 118	switch (_NSIG_WORDS) {
 119	default:
 120		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 121			ready |= signal->sig[i] &~ blocked->sig[i];
 122		break;
 123
 124	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 125		ready |= signal->sig[2] &~ blocked->sig[2];
 126		ready |= signal->sig[1] &~ blocked->sig[1];
 127		ready |= signal->sig[0] &~ blocked->sig[0];
 128		break;
 129
 130	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 131		ready |= signal->sig[0] &~ blocked->sig[0];
 132		break;
 133
 134	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 135	}
 136	return ready !=	0;
 137}
 138
 139#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 140
 141static int recalc_sigpending_tsk(struct task_struct *t)
 142{
 143	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
 144	    PENDING(&t->pending, &t->blocked) ||
 145	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 146		set_tsk_thread_flag(t, TIF_SIGPENDING);
 147		return 1;
 148	}
 149	/*
 150	 * We must never clear the flag in another thread, or in current
 151	 * when it's possible the current syscall is returning -ERESTART*.
 152	 * So we don't clear it here; only callers that know it is safe do so.
 153	 */
 154	return 0;
 155}
 156
 157/*
 158 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 159 * This is superfluous when called on current, the wakeup is a harmless no-op.
 160 */
 161void recalc_sigpending_and_wake(struct task_struct *t)
 162{
 163	if (recalc_sigpending_tsk(t))
 164		signal_wake_up(t, 0);
 165}
 166
 167void recalc_sigpending(void)
 168{
 169	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
 170	    !klp_patch_pending(current))
 171		clear_thread_flag(TIF_SIGPENDING);
 172
 173}
 174
 175/* Given the mask, find the first available signal that should be serviced. */
 176
 177#define SYNCHRONOUS_MASK \
 178	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 179	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 180
 181int next_signal(struct sigpending *pending, sigset_t *mask)
 182{
 183	unsigned long i, *s, *m, x;
 184	int sig = 0;
 185
 186	s = pending->signal.sig;
 187	m = mask->sig;
 188
 189	/*
 190	 * Handle the first word specially: it contains the
 191	 * synchronous signals that need to be dequeued first.
 192	 */
 193	x = *s &~ *m;
 194	if (x) {
 195		if (x & SYNCHRONOUS_MASK)
 196			x &= SYNCHRONOUS_MASK;
 197		sig = ffz(~x) + 1;
 198		return sig;
 199	}
 200
 201	switch (_NSIG_WORDS) {
 202	default:
 203		for (i = 1; i < _NSIG_WORDS; ++i) {
 204			x = *++s &~ *++m;
 205			if (!x)
 206				continue;
 207			sig = ffz(~x) + i*_NSIG_BPW + 1;
 208			break;
 209		}
 210		break;
 211
 212	case 2:
 213		x = s[1] &~ m[1];
 214		if (!x)
 215			break;
 216		sig = ffz(~x) + _NSIG_BPW + 1;
 217		break;
 218
 219	case 1:
 220		/* Nothing to do */
 221		break;
 222	}
 223
 224	return sig;
 225}
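
To make the dequeue priority concrete, here is a hedged host-side model of
the first-word handling above.  model_next_signal() and BIT() are invented
names, the example assumes every signal involved fits in the first word, and
__builtin_ctzl() (GCC/Clang) stands in for the kernel's ffz(~x):

#include <stdio.h>
#include <signal.h>

#define BIT(sig)	(1UL << ((sig) - 1))	/* the kernel's sigmask() */

/* mirror of SYNCHRONOUS_MASK above */
#define SYNC_MASK	(BIT(SIGSEGV) | BIT(SIGBUS) | BIT(SIGILL) | \
			 BIT(SIGTRAP) | BIT(SIGFPE) | BIT(SIGSYS))

static int model_next_signal(unsigned long pending, unsigned long blocked)
{
	unsigned long x = pending & ~blocked;

	if (!x)
		return 0;
	if (x & SYNC_MASK)		/* synchronous signals jump the queue */
		x &= SYNC_MASK;
	return __builtin_ctzl(x) + 1;	/* lowest set bit, 1-based */
}

int main(void)
{
	/* SIGUSR1 has the lower number, but SIGSEGV is synchronous and wins */
	unsigned long pending = BIT(SIGUSR1) | BIT(SIGSEGV);

	printf("next: %d (SIGUSR1=%d, SIGSEGV=%d)\n",
	       model_next_signal(pending, 0), SIGUSR1, SIGSEGV);
	return 0;
}
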
 226
 227static inline void print_dropped_signal(int sig)
 228{
 229	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 230
 231	if (!print_fatal_signals)
 232		return;
 233
 234	if (!__ratelimit(&ratelimit_state))
 235		return;
 236
 237	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 238				current->comm, current->pid, sig);
 239}
 240
 241/**
 242 * task_set_jobctl_pending - set jobctl pending bits
 243 * @task: target task
 244 * @mask: pending bits to set
 245 *
 246 * Set @mask on @task->jobctl.  @mask must be a subset of
 247 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 248 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 249 * cleared.  If @task is already being killed or exiting, this function
 250 * becomes a no-op.
 251 *
 252 * CONTEXT:
 253 * Must be called with @task->sighand->siglock held.
 254 *
 255 * RETURNS:
 256 * %true if @mask is set, %false if it became a no-op because @task was dying.
 257 */
 258bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 259{
 260	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 261			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 262	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 263
 264	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 265		return false;
 266
 267	if (mask & JOBCTL_STOP_SIGMASK)
 268		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 269
 270	task->jobctl |= mask;
 271	return true;
 272}
 273
 274/**
 275 * task_clear_jobctl_trapping - clear jobctl trapping bit
 276 * @task: target task
 277 *
 278 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 279 * Clear it and wake up the ptracer.  Note that we don't need any further
 280 * locking.  @task->siglock guarantees that @task->parent points to the
 281 * ptracer.
 282 *
 283 * CONTEXT:
 284 * Must be called with @task->sighand->siglock held.
 285 */
 286void task_clear_jobctl_trapping(struct task_struct *task)
 287{
 288	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 289		task->jobctl &= ~JOBCTL_TRAPPING;
 290		smp_mb();	/* advised by wake_up_bit() */
 291		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 292	}
 293}
 294
 295/**
 296 * task_clear_jobctl_pending - clear jobctl pending bits
 297 * @task: target task
 298 * @mask: pending bits to clear
 299 *
 300 * Clear @mask from @task->jobctl.  @mask must be subset of
 301 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 302 * STOP bits are cleared together.
 303 *
 304 * If clearing of @mask leaves no stop or trap pending, this function calls
 305 * task_clear_jobctl_trapping().
 306 *
 307 * CONTEXT:
 308 * Must be called with @task->sighand->siglock held.
 309 */
 310void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 311{
 312	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 313
 314	if (mask & JOBCTL_STOP_PENDING)
 315		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 316
 317	task->jobctl &= ~mask;
 318
 319	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 320		task_clear_jobctl_trapping(task);
 321}
 322
 323/**
 324 * task_participate_group_stop - participate in a group stop
 325 * @task: task participating in a group stop
 326 *
 327 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 328 * Group stop states are cleared and the group stop count is consumed if
 329 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 330 * stop, the appropriate %SIGNAL_* flags are set.
 331 *
 332 * CONTEXT:
 333 * Must be called with @task->sighand->siglock held.
 334 *
 335 * RETURNS:
 336 * %true if group stop completion should be notified to the parent, %false
 337 * otherwise.
 338 */
 339static bool task_participate_group_stop(struct task_struct *task)
 340{
 341	struct signal_struct *sig = task->signal;
 342	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 343
 344	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 345
 346	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 347
 348	if (!consume)
 349		return false;
 350
 351	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 352		sig->group_stop_count--;
 353
 354	/*
 355	 * Tell the caller to notify completion iff we are entering into a
 356	 * fresh group stop.  Read comment in do_signal_stop() for details.
 357	 */
 358	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 359		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 360		return true;
 361	}
 362	return false;
 363}
 364
 365/*
 366 * allocate a new signal queue record
 367 * - this may be called without locks if and only if t == current, otherwise an
 368 *   appropriate lock must be held to stop the target task from exiting
 369 */
 370static struct sigqueue *
 371__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 372{
 373	struct sigqueue *q = NULL;
 374	struct user_struct *user;
 375
 376	/*
 377	 * Protect access to @t credentials. This can go away when all
 378	 * callers hold rcu read lock.
 379	 */
 380	rcu_read_lock();
 381	user = get_uid(__task_cred(t)->user);
 382	atomic_inc(&user->sigpending);
 383	rcu_read_unlock();
 384
 385	if (override_rlimit ||
 386	    atomic_read(&user->sigpending) <=
 387			task_rlimit(t, RLIMIT_SIGPENDING)) {
 388		q = kmem_cache_alloc(sigqueue_cachep, flags);
 389	} else {
 390		print_dropped_signal(sig);
 391	}
 392
 393	if (unlikely(q == NULL)) {
 394		atomic_dec(&user->sigpending);
 395		free_uid(user);
 396	} else {
 397		INIT_LIST_HEAD(&q->list);
 398		q->flags = 0;
 399		q->user = user;
 400	}
 401
 402	return q;
 403}
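
This per-user accounting is what makes sigqueue(3) fail with EAGAIN once the
RLIMIT_SIGPENDING budget is spent.  A small userspace probe; the limit of 4
is arbitrary, and the exact failure point shifts with whatever the user
already has pending:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4, .rlim_max = 4 };
	sigset_t block;
	int i;

	if (setrlimit(RLIMIT_SIGPENDING, &rl))	/* shrink the budget */
		perror("setrlimit");

	/* keep SIGRTMIN blocked so every queued instance stays pending */
	sigemptyset(&block);
	sigaddset(&block, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &block, NULL);

	for (i = 0; i < 8; i++) {
		union sigval v = { .sival_int = i };

		if (sigqueue(getpid(), SIGRTMIN, v)) {
			printf("sigqueue #%d: %s\n", i, strerror(errno));
			break;		/* typically EAGAIN near the limit */
		}
	}
	return 0;
}
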
 404
 405static void __sigqueue_free(struct sigqueue *q)
 406{
 407	if (q->flags & SIGQUEUE_PREALLOC)
 408		return;
 409	atomic_dec(&q->user->sigpending);
 410	free_uid(q->user);
 411	kmem_cache_free(sigqueue_cachep, q);
 412}
 413
 414void flush_sigqueue(struct sigpending *queue)
 415{
 416	struct sigqueue *q;
 417
 418	sigemptyset(&queue->signal);
 419	while (!list_empty(&queue->list)) {
 420		q = list_entry(queue->list.next, struct sigqueue, list);
 421		list_del_init(&q->list);
 422		__sigqueue_free(q);
 423	}
 424}
 425
 426/*
 427 * Flush all pending signals for this kthread.
 428 */
 429void flush_signals(struct task_struct *t)
 430{
 431	unsigned long flags;
 432
 433	spin_lock_irqsave(&t->sighand->siglock, flags);
 434	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 435	flush_sigqueue(&t->pending);
 436	flush_sigqueue(&t->signal->shared_pending);
 437	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 438}
 439
 440#ifdef CONFIG_POSIX_TIMERS
 441static void __flush_itimer_signals(struct sigpending *pending)
 442{
 443	sigset_t signal, retain;
 444	struct sigqueue *q, *n;
 445
 446	signal = pending->signal;
 447	sigemptyset(&retain);
 448
 449	list_for_each_entry_safe(q, n, &pending->list, list) {
 450		int sig = q->info.si_signo;
 451
 452		if (likely(q->info.si_code != SI_TIMER)) {
 453			sigaddset(&retain, sig);
 454		} else {
 455			sigdelset(&signal, sig);
 456			list_del_init(&q->list);
 457			__sigqueue_free(q);
 458		}
 459	}
 460
 461	sigorsets(&pending->signal, &signal, &retain);
 462}
 463
 464void flush_itimer_signals(void)
 465{
 466	struct task_struct *tsk = current;
 467	unsigned long flags;
 468
 469	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 470	__flush_itimer_signals(&tsk->pending);
 471	__flush_itimer_signals(&tsk->signal->shared_pending);
 472	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 473}
 474#endif
 475
 476void ignore_signals(struct task_struct *t)
 477{
 478	int i;
 479
 480	for (i = 0; i < _NSIG; ++i)
 481		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 482
 483	flush_signals(t);
 484}
 485
 486/*
 487 * Flush all handlers for a task.
 488 */
 489
 490void
 491flush_signal_handlers(struct task_struct *t, int force_default)
 492{
 493	int i;
 494	struct k_sigaction *ka = &t->sighand->action[0];
 495	for (i = _NSIG ; i != 0 ; i--) {
 496		if (force_default || ka->sa.sa_handler != SIG_IGN)
 497			ka->sa.sa_handler = SIG_DFL;
 498		ka->sa.sa_flags = 0;
 499#ifdef __ARCH_HAS_SA_RESTORER
 500		ka->sa.sa_restorer = NULL;
 501#endif
 502		sigemptyset(&ka->sa.sa_mask);
 503		ka++;
 504	}
 505}
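
flush_signal_handlers() runs on the execve() path with force_default false,
which yields the classic POSIX rule: installed handlers fall back to SIG_DFL
across exec while SIG_IGN survives.  A quick userspace check, assuming a
non-interactive /bin/sh (which keeps inherited ignores):

#include <signal.h>
#include <unistd.h>

int main(void)
{
	signal(SIGINT, SIG_IGN);	/* ignored dispositions survive exec */

	/* the shell survives signalling itself because SIGINT stays ignored */
	execl("/bin/sh", "sh", "-c",
	      "kill -INT $$ && echo SIGINT still ignored after exec",
	      (char *)NULL);
	return 1;			/* reached only if execl() failed */
}
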
 506
 507int unhandled_signal(struct task_struct *tsk, int sig)
 508{
 509	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 510	if (is_global_init(tsk))
 511		return 1;
 512	if (handler != SIG_IGN && handler != SIG_DFL)
 513		return 0;
 514	/* if ptraced, let the tracer determine */
 515	return !tsk->ptrace;
 516}
 517
 518static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
 519			   bool *resched_timer)
 520{
 521	struct sigqueue *q, *first = NULL;
 522
 523	/*
 524	 * Collect the siginfo appropriate to this signal.  Check if
 525	 * there is another siginfo for the same signal.
 526	 */
 527	list_for_each_entry(q, &list->list, list) {
 528		if (q->info.si_signo == sig) {
 529			if (first)
 530				goto still_pending;
 531			first = q;
 532		}
 533	}
 534
 535	sigdelset(&list->signal, sig);
 536
 537	if (first) {
 538still_pending:
 539		list_del_init(&first->list);
 540		copy_siginfo(info, &first->info);
 541
 542		*resched_timer =
 543			(first->flags & SIGQUEUE_PREALLOC) &&
 544			(info->si_code == SI_TIMER) &&
 545			(info->si_sys_private);
 546
 547		__sigqueue_free(first);
 548	} else {
 549		/*
 550		 * Ok, it wasn't in the queue.  This must be
 551		 * a fast-pathed signal or we must have been
 552		 * out of queue space.  So zero out the info.
 553		 */
 554		clear_siginfo(info);
 555		info->si_signo = sig;
 556		info->si_errno = 0;
 557		info->si_code = SI_USER;
 558		info->si_pid = 0;
 559		info->si_uid = 0;
 560	}
 561}
 562
 563static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 564			siginfo_t *info, bool *resched_timer)
 565{
 566	int sig = next_signal(pending, mask);
 567
 568	if (sig)
 569		collect_signal(sig, pending, info, resched_timer);
 570	return sig;
 571}
 572
 573/*
 574 * Dequeue a signal and return the element to the caller, which is
 575 * expected to free it.
 576 *
 577 * All callers have to hold the siglock.
 578 */
 579int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 580{
 581	bool resched_timer = false;
 582	int signr;
 583
 584	/* We only dequeue private signals from ourselves, we don't let
 585	 * signalfd steal them
 586	 */
 587	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 588	if (!signr) {
 589		signr = __dequeue_signal(&tsk->signal->shared_pending,
 590					 mask, info, &resched_timer);
 591#ifdef CONFIG_POSIX_TIMERS
 592		/*
 593		 * itimer signal ?
 594		 *
 595		 * itimers are process shared and we restart periodic
 596		 * itimers in the signal delivery path to prevent DoS
 597		 * attacks in the high resolution timer case. This is
 598		 * compliant with the old way of self-restarting
 599		 * itimers, as the SIGALRM is a legacy signal and only
 600		 * queued once. Changing the restart behaviour to
 601		 * restart the timer in the signal dequeue path is
 602		 * reducing the timer noise on heavily loaded !highres
 603		 * systems too.
 604		 */
 605		if (unlikely(signr == SIGALRM)) {
 606			struct hrtimer *tmr = &tsk->signal->real_timer;
 607
 608			if (!hrtimer_is_queued(tmr) &&
 609			    tsk->signal->it_real_incr != 0) {
 610				hrtimer_forward(tmr, tmr->base->get_time(),
 611						tsk->signal->it_real_incr);
 612				hrtimer_restart(tmr);
 613			}
 614		}
 615#endif
 616	}
 617
 618	recalc_sigpending();
 619	if (!signr)
 620		return 0;
 621
 622	if (unlikely(sig_kernel_stop(signr))) {
 623		/*
 624		 * Set a marker that we have dequeued a stop signal.  Our
 625		 * caller might release the siglock and then the pending
 626		 * stop signal it is about to process is no longer in the
 627		 * pending bitmasks, but must still be cleared by a SIGCONT
 628		 * (and overruled by a SIGKILL).  So those cases clear this
 629		 * shared flag after we've set it.  Note that this flag may
 630		 * remain set after the signal we return is ignored or
 631		 * handled.  That doesn't matter because its only purpose
 632		 * is to alert stop-signal processing code when another
 633		 * processor has come along and cleared the flag.
 634		 */
 635		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 636	}
 637#ifdef CONFIG_POSIX_TIMERS
 638	if (resched_timer) {
 639		/*
 640		 * Release the siglock to ensure proper locking order
 641		 * of timer locks outside of siglocks.  Note, we leave
 642		 * irqs disabled here, since the posix-timers code is
 643		 * about to disable them again anyway.
 644		 */
 645		spin_unlock(&tsk->sighand->siglock);
 646		posixtimer_rearm(info);
 647		spin_lock(&tsk->sighand->siglock);
 648
 649		/* Don't expose the si_sys_private value to userspace */
 650		info->si_sys_private = 0;
 651	}
 652#endif
 653	return signr;
 654}
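
A minimal sketch of the calling convention, assuming kernel context;
drain_one_signal() is an invented helper, but the shape (take ->siglock,
dequeue against the blocked set, drop the lock) is the one get_signal()
follows:

static int drain_one_signal(struct task_struct *tsk, siginfo_t *info)
{
	int signr;

	spin_lock_irq(&tsk->sighand->siglock);
	signr = dequeue_signal(tsk, &tsk->blocked, info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return signr;	/* 0 when nothing deliverable was pending */
}
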
 655
 656/*
 657 * Tell a process that it has a new active signal.
 658 *
 659 * NOTE! we rely on the previous spin_lock to
 660 * lock interrupts for us! We can only be called with
 661 * "siglock" held, and local interrupts must
 662 * have been disabled when that got acquired!
 663 *
 664 * No need to set need_resched since signal event passing
 665 * goes through ->blocked
 666 */
 667void signal_wake_up_state(struct task_struct *t, unsigned int state)
 668{
 669	set_tsk_thread_flag(t, TIF_SIGPENDING);
 670	/*
 671	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 672	 * case. We don't check t->state here because there is a race with it
 673 * executing on another processor and just now entering stopped state.
 674	 * By using wake_up_state, we ensure the process will wake up and
 675	 * handle its death signal.
 676	 */
 677	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 678		kick_process(t);
 679}
 680
 681/*
 682 * Remove signals in mask from the pending set and queue.
 683 * Returns 1 if any signals were found.
 684 *
 685 * All callers must be holding the siglock.
 686 */
 687static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 688{
 689	struct sigqueue *q, *n;
 690	sigset_t m;
 691
 692	sigandsets(&m, mask, &s->signal);
 693	if (sigisemptyset(&m))
 694		return 0;
 695
 696	sigandnsets(&s->signal, &s->signal, mask);
 697	list_for_each_entry_safe(q, n, &s->list, list) {
 698		if (sigismember(mask, q->info.si_signo)) {
 699			list_del_init(&q->list);
 700			__sigqueue_free(q);
 701		}
 702	}
 703	return 1;
 704}
 705
 706static inline int is_si_special(const struct siginfo *info)
 707{
 708	return info <= SEND_SIG_FORCED;
 709}
 710
 711static inline bool si_fromuser(const struct siginfo *info)
 712{
 713	return info == SEND_SIG_NOINFO ||
 714		(!is_si_special(info) && SI_FROMUSER(info));
 715}
 716
 717/*
 718 * called with RCU read lock from check_kill_permission()
 719 */
 720static int kill_ok_by_cred(struct task_struct *t)
 721{
 722	const struct cred *cred = current_cred();
 723	const struct cred *tcred = __task_cred(t);
 724
 725	if (uid_eq(cred->euid, tcred->suid) ||
 726	    uid_eq(cred->euid, tcred->uid)  ||
 727	    uid_eq(cred->uid,  tcred->suid) ||
 728	    uid_eq(cred->uid,  tcred->uid))
 729		return 1;
 730
 731	if (ns_capable(tcred->user_ns, CAP_KILL))
 732		return 1;
 733
 734	return 0;
 735}
 736
 737/*
 738 * Bad permissions for sending the signal
 739 * - the caller must hold the RCU read lock
 740 */
 741static int check_kill_permission(int sig, struct siginfo *info,
 742				 struct task_struct *t)
 743{
 744	struct pid *sid;
 745	int error;
 746
 747	if (!valid_signal(sig))
 748		return -EINVAL;
 749
 750	if (!si_fromuser(info))
 751		return 0;
 752
 753	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 754	if (error)
 755		return error;
 756
 757	if (!same_thread_group(current, t) &&
 758	    !kill_ok_by_cred(t)) {
 759		switch (sig) {
 760		case SIGCONT:
 761			sid = task_session(t);
 762			/*
 763			 * We don't return the error if sid == NULL. The
 764			 * task was unhashed, the caller must notice this.
 765			 */
 766			if (!sid || sid == task_session(current))
 767				break;
 768		default:
 769			return -EPERM;
 770		}
 771	}
 772
 773	return security_task_kill(t, info, sig, NULL);
 774}
 775
 776/**
 777 * ptrace_trap_notify - schedule trap to notify ptracer
 778 * @t: tracee wanting to notify tracer
 779 *
 780 * This function schedules sticky ptrace trap which is cleared on the next
 781 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 782 * ptracer.
 783 *
 784 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 785 * ptracer is listening for events, tracee is woken up so that it can
 786 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 787 * eventually taken without returning to userland after the existing traps
 788 * are finished by PTRACE_CONT.
 789 *
 790 * CONTEXT:
 791 * Must be called with @task->sighand->siglock held.
 792 */
 793static void ptrace_trap_notify(struct task_struct *t)
 794{
 795	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 796	assert_spin_locked(&t->sighand->siglock);
 797
 798	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 799	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 800}
 801
 802/*
 803 * Handle magic process-wide effects of stop/continue signals. Unlike
 804 * the signal actions, these happen immediately at signal-generation
 805 * time regardless of blocking, ignoring, or handling.  This does the
 806 * actual continuing for SIGCONT, but not the actual stopping for stop
 807 * signals. The process stop is done as a signal action for SIG_DFL.
 808 *
 809 * Returns true if the signal should be actually delivered, otherwise
 810 * it should be dropped.
 811 */
 812static bool prepare_signal(int sig, struct task_struct *p, bool force)
 813{
 814	struct signal_struct *signal = p->signal;
 815	struct task_struct *t;
 816	sigset_t flush;
 817
 818	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
 819		if (!(signal->flags & SIGNAL_GROUP_EXIT))
 820			return sig == SIGKILL;
 821		/*
 822		 * The process is in the middle of dying, nothing to do.
 823		 */
 824	} else if (sig_kernel_stop(sig)) {
 825		/*
 826		 * This is a stop signal.  Remove SIGCONT from all queues.
 827		 */
 828		siginitset(&flush, sigmask(SIGCONT));
 829		flush_sigqueue_mask(&flush, &signal->shared_pending);
 830		for_each_thread(p, t)
 831			flush_sigqueue_mask(&flush, &t->pending);
 832	} else if (sig == SIGCONT) {
 833		unsigned int why;
 834		/*
 835		 * Remove all stop signals from all queues, wake all threads.
 836		 */
 837		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 838		flush_sigqueue_mask(&flush, &signal->shared_pending);
 839		for_each_thread(p, t) {
 840			flush_sigqueue_mask(&flush, &t->pending);
 841			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 842			if (likely(!(t->ptrace & PT_SEIZED)))
 843				wake_up_state(t, __TASK_STOPPED);
 844			else
 845				ptrace_trap_notify(t);
 846		}
 847
 848		/*
 849		 * Notify the parent with CLD_CONTINUED if we were stopped.
 850		 *
 851		 * If we were in the middle of a group stop, we pretend it
 852		 * was already finished, and then continued. Since SIGCHLD
 853		 * doesn't queue we report only CLD_STOPPED, as if the next
 854		 * CLD_CONTINUED was dropped.
 855		 */
 856		why = 0;
 857		if (signal->flags & SIGNAL_STOP_STOPPED)
 858			why |= SIGNAL_CLD_CONTINUED;
 859		else if (signal->group_stop_count)
 860			why |= SIGNAL_CLD_STOPPED;
 861
 862		if (why) {
 863			/*
 864			 * The first thread which returns from do_signal_stop()
 865			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 866			 * notify its parent. See get_signal_to_deliver().
 867			 */
 868			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 869			signal->group_stop_count = 0;
 870			signal->group_exit_code = 0;
 871		}
 872	}
 873
 874	return !sig_ignored(p, sig, force);
 875}
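
The stop/continue bookkeeping above is visible from userspace through
waitpid().  A small demo, error handling omitted:

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0)
		for (;;)
			pause();

	kill(child, SIGSTOP);		/* stop signal: SIGCONT is flushed */
	waitpid(child, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("stopped by signal %d\n", WSTOPSIG(status));

	kill(child, SIGCONT);		/* wakes every stopped thread */
	waitpid(child, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("parent saw the continue\n");

	kill(child, SIGKILL);
	waitpid(child, &status, 0);
	return 0;
}
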
 876
 877/*
 878 * Test if P wants to take SIG.  After we've checked all threads with this,
 879 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 880 * blocking SIG were ruled out because they are not running and already
 881 * have pending signals.  Such threads will dequeue from the shared queue
 882 * as soon as they're available, so putting the signal on the shared queue
 883 * will be equivalent to sending it to one such thread.
 884 */
 885static inline int wants_signal(int sig, struct task_struct *p)
 886{
 887	if (sigismember(&p->blocked, sig))
 888		return 0;
 889	if (p->flags & PF_EXITING)
 890		return 0;
 891	if (sig == SIGKILL)
 892		return 1;
 893	if (task_is_stopped_or_traced(p))
 894		return 0;
 895	return task_curr(p) || !signal_pending(p);
 896}
 897
 898static void complete_signal(int sig, struct task_struct *p, int group)
 899{
 900	struct signal_struct *signal = p->signal;
 901	struct task_struct *t;
 902
 903	/*
 904	 * Now find a thread we can wake up to take the signal off the queue.
 905	 *
 906	 * If the main thread wants the signal, it gets first crack.
 907	 * Probably the least surprising to the average bear.
 908	 */
 909	if (wants_signal(sig, p))
 910		t = p;
 911	else if (!group || thread_group_empty(p))
 912		/*
 913		 * There is just one thread and it does not need to be woken.
 914		 * It will dequeue unblocked signals before it runs again.
 915		 */
 916		return;
 917	else {
 918		/*
 919		 * Otherwise try to find a suitable thread.
 920		 */
 921		t = signal->curr_target;
 922		while (!wants_signal(sig, t)) {
 923			t = next_thread(t);
 924			if (t == signal->curr_target)
 925				/*
 926				 * No thread needs to be woken.
 927				 * Any eligible threads will see
 928				 * the signal in the queue soon.
 929				 */
 930				return;
 931		}
 932		signal->curr_target = t;
 933	}
 934
 935	/*
 936	 * Found a killable thread.  If the signal will be fatal,
 937	 * then start taking the whole group down immediately.
 938	 */
 939	if (sig_fatal(p, sig) &&
 940	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
 941	    !sigismember(&t->real_blocked, sig) &&
 942	    (sig == SIGKILL || !p->ptrace)) {
 943		/*
 944		 * This signal will be fatal to the whole group.
 945		 */
 946		if (!sig_kernel_coredump(sig)) {
 947			/*
 948			 * Start a group exit and wake everybody up.
 949			 * This way we don't have other threads
 950			 * running and doing things after a slower
 951			 * thread has the fatal signal pending.
 952			 */
 953			signal->flags = SIGNAL_GROUP_EXIT;
 954			signal->group_exit_code = sig;
 955			signal->group_stop_count = 0;
 956			t = p;
 957			do {
 958				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 959				sigaddset(&t->pending.signal, SIGKILL);
 960				signal_wake_up(t, 1);
 961			} while_each_thread(p, t);
 962			return;
 963		}
 964	}
 965
 966	/*
 967	 * The signal is already in the shared-pending queue.
 968	 * Tell the chosen thread to wake up and dequeue it.
 969	 */
 970	signal_wake_up(t, sig == SIGKILL);
 971	return;
 972}
 973
 974static inline int legacy_queue(struct sigpending *signals, int sig)
 975{
 976	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 977}
 978
 979#ifdef CONFIG_USER_NS
 980static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
 981{
 982	if (current_user_ns() == task_cred_xxx(t, user_ns))
 983		return;
 984
 985	if (SI_FROMKERNEL(info))
 986		return;
 987
 988	rcu_read_lock();
 989	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
 990					make_kuid(current_user_ns(), info->si_uid));
 991	rcu_read_unlock();
 992}
 993#else
 994static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
 995{
 996	return;
 997}
 998#endif
 999
1000static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1001			int group, int from_ancestor_ns)
1002{
1003	struct sigpending *pending;
1004	struct sigqueue *q;
1005	int override_rlimit;
1006	int ret = 0, result;
1007
1008	assert_spin_locked(&t->sighand->siglock);
1009
1010	result = TRACE_SIGNAL_IGNORED;
1011	if (!prepare_signal(sig, t,
1012			from_ancestor_ns || (info == SEND_SIG_FORCED)))
1013		goto ret;
1014
1015	pending = group ? &t->signal->shared_pending : &t->pending;
1016	/*
1017	 * Short-circuit ignored signals and support queuing
1018	 * exactly one non-rt signal, so that we can get more
1019	 * detailed information about the cause of the signal.
1020	 */
1021	result = TRACE_SIGNAL_ALREADY_PENDING;
1022	if (legacy_queue(pending, sig))
1023		goto ret;
1024
1025	result = TRACE_SIGNAL_DELIVERED;
1026	/*
1027	 * fast-pathed signals for kernel-internal things like SIGSTOP
1028	 * or SIGKILL.
1029	 */
1030	if (info == SEND_SIG_FORCED)
1031		goto out_set;
1032
1033	/*
1034	 * Real-time signals must be queued if sent by sigqueue, or
1035	 * some other real-time mechanism.  It is implementation
1036	 * defined whether kill() does so.  We attempt to do so, on
1037	 * the principle of least surprise, but since kill is not
1038	 * allowed to fail with EAGAIN when low on memory we just
1039	 * make sure at least one signal gets delivered and don't
1040	 * pass on the info struct.
1041	 */
1042	if (sig < SIGRTMIN)
1043		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1044	else
1045		override_rlimit = 0;
1046
1047	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1048	if (q) {
1049		list_add_tail(&q->list, &pending->list);
1050		switch ((unsigned long) info) {
1051		case (unsigned long) SEND_SIG_NOINFO:
1052			clear_siginfo(&q->info);
1053			q->info.si_signo = sig;
1054			q->info.si_errno = 0;
1055			q->info.si_code = SI_USER;
1056			q->info.si_pid = task_tgid_nr_ns(current,
1057							task_active_pid_ns(t));
1058			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1059			break;
1060		case (unsigned long) SEND_SIG_PRIV:
1061			clear_siginfo(&q->info);
1062			q->info.si_signo = sig;
1063			q->info.si_errno = 0;
1064			q->info.si_code = SI_KERNEL;
1065			q->info.si_pid = 0;
1066			q->info.si_uid = 0;
1067			break;
1068		default:
1069			copy_siginfo(&q->info, info);
1070			if (from_ancestor_ns)
1071				q->info.si_pid = 0;
1072			break;
1073		}
1074
1075		userns_fixup_signal_uid(&q->info, t);
1076
1077	} else if (!is_si_special(info)) {
1078		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1079			/*
1080			 * Queue overflow, abort.  We only abort if the
1081			 * signal was rt and sent by a user using something
1082			 * other than kill().
1083			 */
1084			result = TRACE_SIGNAL_OVERFLOW_FAIL;
1085			ret = -EAGAIN;
1086			goto ret;
1087		} else {
1088			/*
1089			 * This is a silent loss of information.  We still
1090			 * send the signal, but the *info bits are lost.
1091			 */
1092			result = TRACE_SIGNAL_LOSE_INFO;
1093		}
1094	}
1095
1096out_set:
1097	signalfd_notify(t, sig);
1098	sigaddset(&pending->signal, sig);
1099	complete_signal(sig, t, group);
1100ret:
1101	trace_signal_generate(sig, info, t, group, result);
1102	return ret;
1103}
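
The legacy_queue() short-circuit above is why classic signals coalesce while
real-time signals accumulate.  A userspace demonstration:

#include <stdio.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t classic_hits, rt_hits;

static void on_classic(int sig) { (void)sig; classic_hits++; }
static void on_rt(int sig)      { (void)sig; rt_hits++; }

int main(void)
{
	sigset_t block;
	int i;

	signal(SIGUSR1, on_classic);
	signal(SIGRTMIN, on_rt);

	/* keep both blocked while three instances of each pile up */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigaddset(&block, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &block, NULL);

	for (i = 0; i < 3; i++) {
		kill(getpid(), SIGUSR1);		/* coalesces to one bit */
		sigqueue(getpid(), SIGRTMIN,
			 (union sigval){ .sival_int = i });	/* queues */
	}

	sigprocmask(SIG_UNBLOCK, &block, NULL);
	printf("SIGUSR1 delivered %d time(s), SIGRTMIN %d time(s)\n",
	       (int)classic_hits, (int)rt_hits);	/* typically 1 and 3 */
	return 0;
}
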
1104
1105static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1106			int group)
1107{
1108	int from_ancestor_ns = 0;
1109
1110#ifdef CONFIG_PID_NS
1111	from_ancestor_ns = si_fromuser(info) &&
1112			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1113#endif
1114
1115	return __send_signal(sig, info, t, group, from_ancestor_ns);
1116}
1117
1118static void print_fatal_signal(int signr)
1119{
1120	struct pt_regs *regs = signal_pt_regs();
1121	pr_info("potentially unexpected fatal signal %d.\n", signr);
1122
1123#if defined(__i386__) && !defined(__arch_um__)
1124	pr_info("code at %08lx: ", regs->ip);
1125	{
1126		int i;
1127		for (i = 0; i < 16; i++) {
1128			unsigned char insn;
1129
1130			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1131				break;
1132			pr_cont("%02x ", insn);
1133		}
1134	}
1135	pr_cont("\n");
1136#endif
1137	preempt_disable();
1138	show_regs(regs);
1139	preempt_enable();
1140}
1141
1142static int __init setup_print_fatal_signals(char *str)
1143{
1144	get_option (&str, &print_fatal_signals);
1145
1146	return 1;
1147}
1148
1149__setup("print-fatal-signals=", setup_print_fatal_signals);
1150
1151int
1152__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1153{
1154	return send_signal(sig, info, p, 1);
1155}
1156
1157static int
1158specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1159{
1160	return send_signal(sig, info, t, 0);
1161}
1162
1163int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1164			bool group)
1165{
1166	unsigned long flags;
1167	int ret = -ESRCH;
1168
1169	if (lock_task_sighand(p, &flags)) {
1170		ret = send_signal(sig, info, p, group);
1171		unlock_task_sighand(p, &flags);
1172	}
1173
1174	return ret;
1175}
1176
1177/*
1178 * Force a signal that the process can't ignore: if necessary
1179 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1180 *
1181 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1182 * since we do not want to have a signal handler that was blocked
1183 * be invoked when user space had explicitly blocked it.
1184 *
1185 * We don't want recursive SIGSEGVs and the like, for example;
1186 * that is why we also clear SIGNAL_UNKILLABLE.
1187 */
1188int
1189force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1190{
1191	unsigned long int flags;
1192	int ret, blocked, ignored;
1193	struct k_sigaction *action;
1194
1195	spin_lock_irqsave(&t->sighand->siglock, flags);
1196	action = &t->sighand->action[sig-1];
1197	ignored = action->sa.sa_handler == SIG_IGN;
1198	blocked = sigismember(&t->blocked, sig);
1199	if (blocked || ignored) {
1200		action->sa.sa_handler = SIG_DFL;
1201		if (blocked) {
1202			sigdelset(&t->blocked, sig);
1203			recalc_sigpending_and_wake(t);
1204		}
1205	}
1206	/*
1207	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1208	 * debugging to leave init killable.
1209	 */
1210	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1211		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1212	ret = specific_send_sig_info(sig, info, t);
1213	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1214
1215	return ret;
1216}
1217
1218/*
1219 * Nuke all other threads in the group.
1220 */
1221int zap_other_threads(struct task_struct *p)
1222{
1223	struct task_struct *t = p;
1224	int count = 0;
1225
1226	p->signal->group_stop_count = 0;
1227
1228	while_each_thread(p, t) {
1229		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1230		count++;
1231
1232		/* Don't bother with already dead threads */
1233		if (t->exit_state)
1234			continue;
1235		sigaddset(&t->pending.signal, SIGKILL);
1236		signal_wake_up(t, 1);
1237	}
1238
1239	return count;
1240}
1241
1242struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1243					   unsigned long *flags)
1244{
1245	struct sighand_struct *sighand;
1246
1247	for (;;) {
1248		/*
1249		 * Disable interrupts early to avoid deadlocks.
1250		 * See rcu_read_unlock() comment header for details.
1251		 */
1252		local_irq_save(*flags);
1253		rcu_read_lock();
1254		sighand = rcu_dereference(tsk->sighand);
1255		if (unlikely(sighand == NULL)) {
1256			rcu_read_unlock();
1257			local_irq_restore(*flags);
1258			break;
1259		}
1260		/*
1261		 * This sighand can be already freed and even reused, but
1262		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1263		 * initializes ->siglock: this slab can't go away, it has
1264		 * the same object type, ->siglock can't be reinitialized.
1265		 *
1266		 * We need to ensure that tsk->sighand is still the same
1267		 * after we take the lock, we can race with de_thread() or
1268		 * __exit_signal(). In the latter case the next iteration
1269		 * must see ->sighand == NULL.
1270		 */
1271		spin_lock(&sighand->siglock);
1272		if (likely(sighand == tsk->sighand)) {
1273			rcu_read_unlock();
1274			break;
1275		}
1276		spin_unlock(&sighand->siglock);
1277		rcu_read_unlock();
1278		local_irq_restore(*flags);
1279	}
1280
1281	return sighand;
1282}
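
The retry loop above backs the lock_task_sighand()/unlock_task_sighand()
pair used throughout this file.  A sketch of the consumer side, assuming
kernel context; with_sighand() is an invented name:

static int with_sighand(struct task_struct *p)
{
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return -ESRCH;	/* the task already lost its sighand */

	/* ->sighand is pinned and ->siglock held: signal state is stable */

	unlock_task_sighand(p, &flags);
	return 0;
}
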
1283
1284/*
1285 * send signal info to all the members of a group
1286 */
1287int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1288{
1289	int ret;
1290
1291	rcu_read_lock();
1292	ret = check_kill_permission(sig, info, p);
1293	rcu_read_unlock();
1294
1295	if (!ret && sig)
1296		ret = do_send_sig_info(sig, info, p, true);
1297
1298	return ret;
1299}
1300
1301/*
1302 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1303 * control characters do (^C, ^Z etc)
1304 * - the caller must hold at least a readlock on tasklist_lock
1305 */
1306int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1307{
1308	struct task_struct *p = NULL;
1309	int retval, success;
1310
1311	success = 0;
1312	retval = -ESRCH;
1313	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1314		int err = group_send_sig_info(sig, info, p);
1315		success |= !err;
1316		retval = err;
1317	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1318	return success ? 0 : retval;
1319}
1320
1321int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1322{
1323	int error = -ESRCH;
1324	struct task_struct *p;
1325
1326	for (;;) {
1327		rcu_read_lock();
1328		p = pid_task(pid, PIDTYPE_PID);
1329		if (p)
1330			error = group_send_sig_info(sig, info, p);
1331		rcu_read_unlock();
1332		if (likely(!p || error != -ESRCH))
1333			return error;
1334
1335		/*
1336		 * The task was unhashed in between, try again.  If it
1337		 * is dead, pid_task() will return NULL, if we race with
1338		 * de_thread() it will find the new leader.
1339		 */
1340	}
1341}
1342
1343static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1344{
1345	int error;
1346	rcu_read_lock();
1347	error = kill_pid_info(sig, info, find_vpid(pid));
1348	rcu_read_unlock();
1349	return error;
1350}
1351
1352static int kill_as_cred_perm(const struct cred *cred,
1353			     struct task_struct *target)
1354{
1355	const struct cred *pcred = __task_cred(target);
1356	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1357	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
1358		return 0;
1359	return 1;
1360}
1361
1362/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1363int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1364			 const struct cred *cred)
1365{
1366	int ret = -EINVAL;
1367	struct task_struct *p;
1368	unsigned long flags;
1369
1370	if (!valid_signal(sig))
1371		return ret;
1372
1373	rcu_read_lock();
1374	p = pid_task(pid, PIDTYPE_PID);
1375	if (!p) {
1376		ret = -ESRCH;
1377		goto out_unlock;
1378	}
1379	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1380		ret = -EPERM;
1381		goto out_unlock;
1382	}
1383	ret = security_task_kill(p, info, sig, cred);
1384	if (ret)
1385		goto out_unlock;
1386
1387	if (sig) {
1388		if (lock_task_sighand(p, &flags)) {
1389			ret = __send_signal(sig, info, p, 1, 0);
1390			unlock_task_sighand(p, &flags);
1391		} else
1392			ret = -ESRCH;
1393	}
1394out_unlock:
1395	rcu_read_unlock();
1396	return ret;
1397}
1398EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1399
1400/*
1401 * kill_something_info() interprets pid in interesting ways just like kill(2).
1402 *
1403 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1404 * is probably wrong.  Should make it like BSD or SYSV.
1405 */
1406
1407static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1408{
1409	int ret;
1410
1411	if (pid > 0) {
1412		rcu_read_lock();
1413		ret = kill_pid_info(sig, info, find_vpid(pid));
1414		rcu_read_unlock();
1415		return ret;
1416	}
1417
1418	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1419	if (pid == INT_MIN)
1420		return -ESRCH;
1421
1422	read_lock(&tasklist_lock);
1423	if (pid != -1) {
1424		ret = __kill_pgrp_info(sig, info,
1425				pid ? find_vpid(-pid) : task_pgrp(current));
1426	} else {
1427		int retval = 0, count = 0;
1428		struct task_struct * p;
1429
1430		for_each_process(p) {
1431			if (task_pid_vnr(p) > 1 &&
1432					!same_thread_group(p, current)) {
1433				int err = group_send_sig_info(sig, info, p);
1434				++count;
1435				if (err != -EPERM)
1436					retval = err;
1437			}
1438		}
1439		ret = count ? retval : -ESRCH;
1440	}
1441	read_unlock(&tasklist_lock);
1442
1443	return ret;
1444}
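
The same dispatch, seen from the kill(2) side.  Purely illustrative: the
pids are made up and the function is not meant to be called as-is:

#include <signal.h>

static void kill_pid_encodings(int sig)
{
	kill(1234, sig);	/* pid >  0: exactly process 1234 */
	kill(0, sig);		/* pid == 0: the caller's own process group */
	kill(-5678, sig);	/* pid < -1: every member of group 5678 */
	kill(-1, sig);		/* pid == -1: everything we may signal,
				 * except init and ourselves (see the
				 * loop above) */
}
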
1445
1446/*
1447 * These are for backward compatibility with the rest of the kernel source.
1448 */
1449
1450int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1451{
1452	/*
1453	 * Make sure legacy kernel users don't send in bad values
1454	 * (normal paths check this in check_kill_permission).
1455	 */
1456	if (!valid_signal(sig))
1457		return -EINVAL;
1458
1459	return do_send_sig_info(sig, info, p, false);
1460}
1461
1462#define __si_special(priv) \
1463	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1464
1465int
1466send_sig(int sig, struct task_struct *p, int priv)
1467{
1468	return send_sig_info(sig, __si_special(priv), p);
1469}
1470
1471void
1472force_sig(int sig, struct task_struct *p)
1473{
1474	force_sig_info(sig, SEND_SIG_PRIV, p);
1475}
1476
1477/*
1478 * When things go south during signal handling, we
1479 * will force a SIGSEGV. And if the signal that caused
1480 * the problem was already a SIGSEGV, we'll want to
1481 * make sure we don't even try to deliver the signal.
1482 */
1483int
1484force_sigsegv(int sig, struct task_struct *p)
1485{
1486	if (sig == SIGSEGV) {
1487		unsigned long flags;
1488		spin_lock_irqsave(&p->sighand->siglock, flags);
1489		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1490		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1491	}
1492	force_sig(SIGSEGV, p);
1493	return 0;
1494}
1495
1496int force_sig_fault(int sig, int code, void __user *addr
1497	___ARCH_SI_TRAPNO(int trapno)
1498	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1499	, struct task_struct *t)
1500{
1501	struct siginfo info;
1502
1503	clear_siginfo(&info);
1504	info.si_signo = sig;
1505	info.si_errno = 0;
1506	info.si_code  = code;
1507	info.si_addr  = addr;
1508#ifdef __ARCH_SI_TRAPNO
1509	info.si_trapno = trapno;
1510#endif
1511#ifdef __ia64__
1512	info.si_imm = imm;
1513	info.si_flags = flags;
1514	info.si_isr = isr;
1515#endif
1516	return force_sig_info(info.si_signo, &info, t);
1517}
1518
1519int send_sig_fault(int sig, int code, void __user *addr
1520	___ARCH_SI_TRAPNO(int trapno)
1521	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1522	, struct task_struct *t)
1523{
1524	struct siginfo info;
1525
1526	clear_siginfo(&info);
1527	info.si_signo = sig;
1528	info.si_errno = 0;
1529	info.si_code  = code;
1530	info.si_addr  = addr;
1531#ifdef __ARCH_SI_TRAPNO
1532	info.si_trapno = trapno;
1533#endif
1534#ifdef __ia64__
1535	info.si_imm = imm;
1536	info.si_flags = flags;
1537	info.si_isr = isr;
1538#endif
1539	return send_sig_info(info.si_signo, &info, t);
1540}
1541
1542#if defined(BUS_MCEERR_AO) && defined(BUS_MCEERR_AR)
1543int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1544{
1545	struct siginfo info;
1546
1547	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1548	clear_siginfo(&info);
1549	info.si_signo = SIGBUS;
1550	info.si_errno = 0;
1551	info.si_code = code;
1552	info.si_addr = addr;
1553	info.si_addr_lsb = lsb;
1554	return force_sig_info(info.si_signo, &info, t);
1555}
1556
1557int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1558{
1559	struct siginfo info;
1560
1561	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1562	clear_siginfo(&info);
1563	info.si_signo = SIGBUS;
1564	info.si_errno = 0;
1565	info.si_code = code;
1566	info.si_addr = addr;
1567	info.si_addr_lsb = lsb;
1568	return send_sig_info(info.si_signo, &info, t);
1569}
1570EXPORT_SYMBOL(send_sig_mceerr);
1571#endif
1572
1573#ifdef SEGV_BNDERR
1574int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1575{
1576	struct siginfo info;
1577
1578	clear_siginfo(&info);
1579	info.si_signo = SIGSEGV;
1580	info.si_errno = 0;
1581	info.si_code  = SEGV_BNDERR;
1582	info.si_addr  = addr;
1583	info.si_lower = lower;
1584	info.si_upper = upper;
1585	return force_sig_info(info.si_signo, &info, current);
1586}
1587#endif
1588
1589#ifdef SEGV_PKUERR
1590int force_sig_pkuerr(void __user *addr, u32 pkey)
1591{
1592	struct siginfo info;
1593
1594	clear_siginfo(&info);
1595	info.si_signo = SIGSEGV;
1596	info.si_errno = 0;
1597	info.si_code  = SEGV_PKUERR;
1598	info.si_addr  = addr;
1599	info.si_pkey  = pkey;
1600	return force_sig_info(info.si_signo, &info, current);
1601}
1602#endif
1603
1604/* For the crazy architectures that include trap information in
1605 * the errno field, instead of an actual errno value.
1606 */
1607int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1608{
1609	struct siginfo info;
1610
1611	clear_siginfo(&info);
1612	info.si_signo = SIGTRAP;
1613	info.si_errno = errno;
1614	info.si_code  = TRAP_HWBKPT;
1615	info.si_addr  = addr;
1616	return force_sig_info(info.si_signo, &info, current);
1617}
1618
1619int kill_pgrp(struct pid *pid, int sig, int priv)
1620{
1621	int ret;
1622
1623	read_lock(&tasklist_lock);
1624	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1625	read_unlock(&tasklist_lock);
1626
1627	return ret;
1628}
1629EXPORT_SYMBOL(kill_pgrp);
1630
1631int kill_pid(struct pid *pid, int sig, int priv)
1632{
1633	return kill_pid_info(sig, __si_special(priv), pid);
1634}
1635EXPORT_SYMBOL(kill_pid);
1636
1637/*
1638 * These functions support sending signals using preallocated sigqueue
1639 * structures.  This is needed "because realtime applications cannot
1640 * afford to lose notifications of asynchronous events, like timer
1641 * expirations or I/O completions".  In the case of POSIX Timers
1642 * we allocate the sigqueue structure from the timer_create.  If this
1643 * allocation fails we are able to report the failure to the application
1644 * with an EAGAIN error.
1645 */
1646struct sigqueue *sigqueue_alloc(void)
1647{
1648	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1649
1650	if (q)
1651		q->flags |= SIGQUEUE_PREALLOC;
1652
1653	return q;
1654}
1655
1656void sigqueue_free(struct sigqueue *q)
1657{
1658	unsigned long flags;
1659	spinlock_t *lock = &current->sighand->siglock;
1660
1661	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1662	/*
1663	 * We must hold ->siglock while testing q->list
1664	 * to serialize with collect_signal() or with
1665	 * __exit_signal()->flush_sigqueue().
1666	 */
1667	spin_lock_irqsave(lock, flags);
1668	q->flags &= ~SIGQUEUE_PREALLOC;
1669	/*
1670	 * If it is queued it will be freed when dequeued,
1671	 * like the "regular" sigqueue.
1672	 */
1673	if (!list_empty(&q->list))
1674		q = NULL;
1675	spin_unlock_irqrestore(lock, flags);
1676
1677	if (q)
1678		__sigqueue_free(q);
1679}
1680
1681int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1682{
1683	int sig = q->info.si_signo;
1684	struct sigpending *pending;
1685	unsigned long flags;
1686	int ret, result;
1687
1688	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1689
1690	ret = -1;
1691	if (!likely(lock_task_sighand(t, &flags)))
1692		goto ret;
1693
1694	ret = 1; /* the signal is ignored */
1695	result = TRACE_SIGNAL_IGNORED;
1696	if (!prepare_signal(sig, t, false))
1697		goto out;
1698
1699	ret = 0;
1700	if (unlikely(!list_empty(&q->list))) {
1701		/*
1702		 * If an SI_TIMER entry is already queued, just increment
1703		 * the overrun count.
1704		 */
1705		BUG_ON(q->info.si_code != SI_TIMER);
1706		q->info.si_overrun++;
1707		result = TRACE_SIGNAL_ALREADY_PENDING;
1708		goto out;
1709	}
1710	q->info.si_overrun = 0;
1711
1712	signalfd_notify(t, sig);
1713	pending = group ? &t->signal->shared_pending : &t->pending;
1714	list_add_tail(&q->list, &pending->list);
1715	sigaddset(&pending->signal, sig);
1716	complete_signal(sig, t, group);
1717	result = TRACE_SIGNAL_DELIVERED;
1718out:
1719	trace_signal_generate(sig, &q->info, t, group, result);
1720	unlock_task_sighand(t, &flags);
1721ret:
1722	return ret;
1723}
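
A sketch of the lifecycle described in the comment before sigqueue_alloc(),
with invented names standing in for the real posix-timers plumbing:

static struct sigqueue *timer_sigq;	/* hypothetical per-timer state */

static int example_timer_create(void)
{
	timer_sigq = sigqueue_alloc();		/* reserve memory up front */
	return timer_sigq ? 0 : -EAGAIN;	/* failure is reportable now */
}

static void example_timer_fire(struct task_struct *target)
{
	timer_sigq->info.si_signo = SIGALRM;
	timer_sigq->info.si_code = SI_TIMER;
	/* if the entry is still queued, this only bumps si_overrun */
	send_sigqueue(timer_sigq, target, 0);
}

static void example_timer_delete(void)
{
	sigqueue_free(timer_sigq);		/* safe even if still queued */
}
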
1724
1725/*
1726 * Let a parent know about the death of a child.
1727 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1728 *
1729 * Returns true if our parent ignored us and so we've switched to
1730 * self-reaping.
1731 */
1732bool do_notify_parent(struct task_struct *tsk, int sig)
1733{
1734	struct siginfo info;
1735	unsigned long flags;
1736	struct sighand_struct *psig;
1737	bool autoreap = false;
1738	u64 utime, stime;
1739
1740	BUG_ON(sig == -1);
1741
1742	/* do_notify_parent_cldstop should have been called instead.  */
1743	BUG_ON(task_is_stopped_or_traced(tsk));
1744
1745	BUG_ON(!tsk->ptrace &&
1746	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1747
1748	if (sig != SIGCHLD) {
1749		/*
1750		 * This is only possible if parent == real_parent.
1751		 * Check if it has changed security domain.
1752		 */
1753		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1754			sig = SIGCHLD;
1755	}
1756
1757	clear_siginfo(&info);
1758	info.si_signo = sig;
1759	info.si_errno = 0;
1760	/*
1761	 * We are under tasklist_lock here so our parent is tied to
1762	 * us and cannot change.
1763	 *
1764	 * task_active_pid_ns will always return the same pid namespace
1765	 * until a task passes through release_task.
1766	 *
1767	 * write_lock() currently calls preempt_disable() which is the
1768	 * same as rcu_read_lock(), but according to Oleg it is not
1769	 * correct to rely on this.
1770	 */
1771	rcu_read_lock();
1772	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1773	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1774				       task_uid(tsk));
1775	rcu_read_unlock();
1776
1777	task_cputime(tsk, &utime, &stime);
1778	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1779	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1780
1781	info.si_status = tsk->exit_code & 0x7f;
1782	if (tsk->exit_code & 0x80)
1783		info.si_code = CLD_DUMPED;
1784	else if (tsk->exit_code & 0x7f)
1785		info.si_code = CLD_KILLED;
1786	else {
1787		info.si_code = CLD_EXITED;
1788		info.si_status = tsk->exit_code >> 8;
1789	}
1790
1791	psig = tsk->parent->sighand;
1792	spin_lock_irqsave(&psig->siglock, flags);
1793	if (!tsk->ptrace && sig == SIGCHLD &&
1794	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1795	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1796		/*
1797		 * We are exiting and our parent doesn't care.  POSIX.1
1798		 * defines special semantics for setting SIGCHLD to SIG_IGN
1799		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1800		 * automatically and not left for our parent's wait4 call.
1801		 * Rather than having the parent do it as a magic kind of
1802		 * signal handler, we just set this to tell do_exit that we
1803		 * can be cleaned up without becoming a zombie.  Note that
1804		 * we still call __wake_up_parent in this case, because a
1805		 * blocked sys_wait4 might now return -ECHILD.
1806		 *
1807		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1808		 * is implementation-defined: we do (if you don't want
1809		 * it, just use SIG_IGN instead).
1810		 */
1811		autoreap = true;
1812		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1813			sig = 0;
1814	}
1815	if (valid_signal(sig) && sig)
1816		__group_send_sig_info(sig, &info, tsk->parent);
1817	__wake_up_parent(tsk, tsk->parent);
1818	spin_unlock_irqrestore(&psig->siglock, flags);
1819
1820	return autoreap;
1821}
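
The SIG_IGN/SA_NOCLDWAIT branch above is the kernel side of a well-known
userspace idiom: with SIGCHLD ignored, a dead child never lingers as a
zombie and wait() reports ECHILD.  For example:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* ask the kernel to autoreap */

	if (fork() == 0)
		_exit(0);		/* child exits immediately */

	sleep(1);			/* crude: let the child die first */
	if (wait(NULL) < 0)
		printf("wait: %s (child was reaped automatically)\n",
		       strerror(errno));	/* expect ECHILD */
	return 0;
}
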
1822
1823/**
1824 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1825 * @tsk: task reporting the state change
1826 * @for_ptracer: the notification is for ptracer
1827 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1828 *
1829 * Notify @tsk's parent that the stopped/continued state has changed.  If
1830 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1831 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1832 *
1833 * CONTEXT:
1834 * Must be called with tasklist_lock at least read locked.
1835 */
1836static void do_notify_parent_cldstop(struct task_struct *tsk,
1837				     bool for_ptracer, int why)
1838{
1839	struct siginfo info;
1840	unsigned long flags;
1841	struct task_struct *parent;
1842	struct sighand_struct *sighand;
1843	u64 utime, stime;
1844
1845	if (for_ptracer) {
1846		parent = tsk->parent;
1847	} else {
1848		tsk = tsk->group_leader;
1849		parent = tsk->real_parent;
1850	}
1851
1852	clear_siginfo(&info);
1853	info.si_signo = SIGCHLD;
1854	info.si_errno = 0;
1855	/*
1856	 * see comment in do_notify_parent() about the following 4 lines
1857	 */
1858	rcu_read_lock();
1859	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1860	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1861	rcu_read_unlock();
1862
1863	task_cputime(tsk, &utime, &stime);
1864	info.si_utime = nsec_to_clock_t(utime);
1865	info.si_stime = nsec_to_clock_t(stime);
1866
1867	info.si_code = why;
1868	switch (why) {
1869	case CLD_CONTINUED:
1870		info.si_status = SIGCONT;
1871		break;
1872	case CLD_STOPPED:
1873		info.si_status = tsk->signal->group_exit_code & 0x7f;
1874		break;
1875	case CLD_TRAPPED:
1876		info.si_status = tsk->exit_code & 0x7f;
1877		break;
1878	default:
1879		BUG();
1880	}
1881
1882	sighand = parent->sighand;
1883	spin_lock_irqsave(&sighand->siglock, flags);
1884	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1885	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1886		__group_send_sig_info(SIGCHLD, &info, parent);
1887	/*
1888	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1889	 */
1890	__wake_up_parent(tsk, parent);
1891	spin_unlock_irqrestore(&sighand->siglock, flags);
1892}
1893
1894static inline int may_ptrace_stop(void)
1895{
1896	if (!likely(current->ptrace))
1897		return 0;
1898	/*
1899	 * Are we in the middle of do_coredump?
1900	 * If so, and our tracer is also part of the coredump, stopping
1901	 * is a deadlock situation and pointless because our tracer
1902	 * is dead, so don't allow us to stop.
1903	 * If SIGKILL was already sent before the caller unlocked
1904	 * ->siglock we must see ->core_state != NULL. Otherwise it
1905	 * is safe to enter schedule().
1906	 *
1907	 * This is almost outdated, a task with the pending SIGKILL can't
1908	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1909	 * after SIGKILL was already dequeued.
1910	 */
1911	if (unlikely(current->mm->core_state) &&
1912	    unlikely(current->mm == current->parent->mm))
1913		return 0;
1914
1915	return 1;
1916}
1917
1918/*
1919 * Return non-zero if there is a SIGKILL that should be waking us up.
1920 * Called with the siglock held.
1921 */
1922static int sigkill_pending(struct task_struct *tsk)
1923{
1924	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1925		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1926}
1927
1928/*
1929 * This must be called with current->sighand->siglock held.
1930 *
1931 * This should be the path for all ptrace stops.
1932 * We always set current->last_siginfo while stopped here.
1933 * That makes it a way to test a stopped process for
1934 * being ptrace-stopped vs being job-control-stopped.
1935 *
1936 * If we actually decide not to stop at all because the tracer
1937 * is gone, we keep current->exit_code unless clear_code.
1938 */
1939static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1940	__releases(&current->sighand->siglock)
1941	__acquires(&current->sighand->siglock)
1942{
1943	bool gstop_done = false;
1944
1945	if (arch_ptrace_stop_needed(exit_code, info)) {
1946		/*
1947		 * The arch code has something special to do before a
1948		 * ptrace stop.  This is allowed to block, e.g. for faults
1949		 * on user stack pages.  We can't keep the siglock while
1950		 * calling arch_ptrace_stop, so we must release it now.
1951		 * To preserve proper semantics, we must do this before
1952		 * any signal bookkeeping like checking group_stop_count.
1953		 * Meanwhile, a SIGKILL could come in before we retake the
1954		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1955		 * So after regaining the lock, we must check for SIGKILL.
1956		 */
1957		spin_unlock_irq(&current->sighand->siglock);
1958		arch_ptrace_stop(exit_code, info);
1959		spin_lock_irq(&current->sighand->siglock);
1960		if (sigkill_pending(current))
1961			return;
1962	}
1963
1964	set_special_state(TASK_TRACED);
1965
1966	/*
1967	 * We're committing to trapping.  TRACED should be visible before
1968	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1969	 * Also, transition to TRACED and updates to ->jobctl should be
1970	 * atomic with respect to siglock and should be done after the arch
1971	 * hook as siglock is released and regrabbed across it.
1972	 *
1973	 *     TRACER				    TRACEE
1974	 *
1975	 *     ptrace_attach()
1976	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
1977	 *     do_wait()
1978	 *       set_current_state()                smp_wmb();
1979	 *       ptrace_do_wait()
1980	 *         wait_task_stopped()
1981	 *           task_stopped_code()
1982	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
1983	 */
1984	smp_wmb();
1985
1986	current->last_siginfo = info;
1987	current->exit_code = exit_code;
1988
1989	/*
1990	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1991	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1992	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1993	 * could be clear now.  We act as if SIGCONT is received after
1994	 * TASK_TRACED is entered - ignore it.
1995	 */
1996	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1997		gstop_done = task_participate_group_stop(current);
1998
1999	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2000	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2001	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2002		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2003
2004	/* entering a trap, clear TRAPPING */
2005	task_clear_jobctl_trapping(current);
2006
2007	spin_unlock_irq(&current->sighand->siglock);
2008	read_lock(&tasklist_lock);
2009	if (may_ptrace_stop()) {
2010		/*
2011		 * Notify parents of the stop.
2012		 *
2013		 * While ptraced, there are two parents - the ptracer and
2014		 * the real_parent of the group_leader.  The ptracer should
2015		 * know about every stop while the real parent is only
2016		 * interested in the completion of group stop.  The states
2017		 * for the two don't interact with each other.  Notify
2018		 * separately unless they're gonna be duplicates.
2019		 */
2020		do_notify_parent_cldstop(current, true, why);
2021		if (gstop_done && ptrace_reparented(current))
2022			do_notify_parent_cldstop(current, false, why);
2023
2024		/*
2025		 * Don't want to allow preemption here, because
2026		 * sys_ptrace() needs this task to be inactive.
2027		 *
2028		 * XXX: implement read_unlock_no_resched().
2029		 */
2030		preempt_disable();
2031		read_unlock(&tasklist_lock);
2032		preempt_enable_no_resched();
2033		freezable_schedule();
2034	} else {
2035		/*
2036		 * By the time we got the lock, our tracer went away.
2037		 * Don't drop the lock yet, another tracer may come.
2038		 *
2039		 * If @gstop_done, the ptracer went away between group stop
2040		 * completion and here.  During detach, it would have set
2041		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2042		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2043		 * the real parent of the group stop completion is enough.
2044		 */
2045		if (gstop_done)
2046			do_notify_parent_cldstop(current, false, why);
2047
2048		/* tasklist protects us from ptrace_freeze_traced() */
2049		__set_current_state(TASK_RUNNING);
2050		if (clear_code)
2051			current->exit_code = 0;
2052		read_unlock(&tasklist_lock);
2053	}
2054
2055	/*
2056	 * We are back.  Now reacquire the siglock before touching
2057	 * last_siginfo, so that we are sure to have synchronized with
2058	 * any signal-sending on another CPU that wants to examine it.
2059	 */
2060	spin_lock_irq(&current->sighand->siglock);
2061	current->last_siginfo = NULL;
2062
2063	/* LISTENING can be set only during STOP traps, clear it */
2064	current->jobctl &= ~JOBCTL_LISTENING;
2065
2066	/*
2067	 * Queued signals ignored us while we were stopped for tracing.
2068	 * So check for any that we should take before resuming user mode.
2069	 * This sets TIF_SIGPENDING, but never clears it.
2070	 */
2071	recalc_sigpending_tsk(current);
2072}
2073
2074static void ptrace_do_notify(int signr, int exit_code, int why)
2075{
2076	siginfo_t info;
2077
2078	clear_siginfo(&info);
2079	info.si_signo = signr;
2080	info.si_code = exit_code;
2081	info.si_pid = task_pid_vnr(current);
2082	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2083
2084	/* Let the debugger run.  */
2085	ptrace_stop(exit_code, why, 1, &info);
2086}
2087
2088void ptrace_notify(int exit_code)
2089{
2090	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2091	if (unlikely(current->task_works))
2092		task_work_run();
2093
2094	spin_lock_irq(&current->sighand->siglock);
2095	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2096	spin_unlock_irq(&current->sighand->siglock);
2097}
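
/*
 * Illustrative note (an assumption about callers, not code from this
 * file): helpers such as ptrace_event() in <linux/ptrace.h> construct
 * the exit_code validated above as
 *
 *	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
 *
 * i.e. SIGTRAP in the low seven bits and the event number in bits
 * 8-15, which is exactly what the BUG_ON() in ptrace_notify() checks.
 */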
2098
2099/**
2100 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2101 * @signr: signr causing group stop if initiating
2102 *
2103 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2104 * and participate in it.  If already set, participate in the existing
2105 * group stop.  If participated in a group stop (and thus slept), %true is
2106 * returned with siglock released.
2107 *
2108 * If ptraced, this function doesn't handle stop itself.  Instead,
2109 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2110 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2111 * place afterwards.
2112 *
2113 * CONTEXT:
2114 * Must be called with @current->sighand->siglock held, which is released
2115 * on %true return.
2116 *
2117 * RETURNS:
2118 * %false if group stop is already cancelled or ptrace trap is scheduled.
2119 * %true if participated in group stop.
2120 */
2121static bool do_signal_stop(int signr)
2122	__releases(&current->sighand->siglock)
2123{
2124	struct signal_struct *sig = current->signal;
2125
2126	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2127		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2128		struct task_struct *t;
2129
2130		/* signr will be recorded in task->jobctl for retries */
2131		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2132
2133		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2134		    unlikely(signal_group_exit(sig)))
2135			return false;
2136		/*
2137		 * There is no group stop already in progress.  We must
2138		 * initiate one now.
2139		 *
2140		 * While ptraced, a task may be resumed while group stop is
2141		 * still in effect and then receive a stop signal and
2142		 * initiate another group stop.  This deviates from the
2143		 * usual behavior as two consecutive stop signals can't
2144		 * cause two group stops when !ptraced.  That is why we
2145		 * also check !task_is_stopped(t) below.
2146		 *
2147		 * The condition can be distinguished by testing whether
2148		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2149		 * group_exit_code in such case.
2150		 *
2151		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2152		 * an intervening stop signal is required to cause two
2153		 * continued events regardless of ptrace.
2154		 */
2155		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2156			sig->group_exit_code = signr;
2157
2158		sig->group_stop_count = 0;
2159
2160		if (task_set_jobctl_pending(current, signr | gstop))
2161			sig->group_stop_count++;
2162
2163		t = current;
2164		while_each_thread(current, t) {
2165			/*
2166			 * Setting state to TASK_STOPPED for a group
2167			 * stop is always done with the siglock held,
2168			 * so this check has no races.
2169			 */
2170			if (!task_is_stopped(t) &&
2171			    task_set_jobctl_pending(t, signr | gstop)) {
2172				sig->group_stop_count++;
2173				if (likely(!(t->ptrace & PT_SEIZED)))
2174					signal_wake_up(t, 0);
2175				else
2176					ptrace_trap_notify(t);
2177			}
2178		}
2179	}
2180
2181	if (likely(!current->ptrace)) {
2182		int notify = 0;
2183
2184		/*
2185		 * If there are no other threads in the group, or if there
2186		 * is a group stop in progress and we are the last to stop,
2187		 * report to the parent.
2188		 */
2189		if (task_participate_group_stop(current))
2190			notify = CLD_STOPPED;
2191
2192		set_special_state(TASK_STOPPED);
2193		spin_unlock_irq(&current->sighand->siglock);
2194
2195		/*
2196		 * Notify the parent of the group stop completion.  Because
2197		 * we're not holding either the siglock or tasklist_lock
2198		 * here, a ptracer may attach in between; however, this is for
2199		 * group stop and should always be delivered to the real
2200		 * parent of the group leader.  The new ptracer will get
2201		 * its notification when this task transitions into
2202		 * TASK_TRACED.
2203		 */
2204		if (notify) {
2205			read_lock(&tasklist_lock);
2206			do_notify_parent_cldstop(current, false, notify);
2207			read_unlock(&tasklist_lock);
2208		}
2209
2210		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2211		freezable_schedule();
2212		return true;
2213	} else {
2214		/*
2215		 * While ptraced, group stop is handled by STOP trap.
2216		 * Schedule it and let the caller deal with it.
2217		 */
2218		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2219		return false;
2220	}
2221}
2222
2223/**
2224 * do_jobctl_trap - take care of ptrace jobctl traps
2225 *
2226 * When PT_SEIZED, it's used for both group stop and explicit
2227 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2228 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2229 * the stop signal; otherwise, %SIGTRAP.
2230 *
2231 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2232 * number as exit_code and no siginfo.
2233 *
2234 * CONTEXT:
2235 * Must be called with @current->sighand->siglock held, which may be
2236 * released and re-acquired before returning with intervening sleep.
2237 */
2238static void do_jobctl_trap(void)
2239{
2240	struct signal_struct *signal = current->signal;
2241	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2242
2243	if (current->ptrace & PT_SEIZED) {
2244		if (!signal->group_stop_count &&
2245		    !(signal->flags & SIGNAL_STOP_STOPPED))
2246			signr = SIGTRAP;
2247		WARN_ON_ONCE(!signr);
2248		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2249				 CLD_STOPPED);
2250	} else {
2251		WARN_ON_ONCE(!signr);
2252		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2253		current->exit_code = 0;
2254	}
2255}
2256
2257static int ptrace_signal(int signr, siginfo_t *info)
2258{
2259	/*
2260	 * We do not check sig_kernel_stop(signr) but set this marker
2261	 * unconditionally because we do not know whether debugger will
2262	 * change signr. This flag has no meaning unless we are going
2263	 * to stop after return from ptrace_stop(). In this case it will
2264	 * be checked in do_signal_stop(), we should only stop if it was
2265	 * not cleared by SIGCONT while we were sleeping. See also the
2266	 * comment in dequeue_signal().
2267	 */
2268	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2269	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2270
2271	/* We're back.  Did the debugger cancel the sig?  */
2272	signr = current->exit_code;
2273	if (signr == 0)
2274		return signr;
2275
2276	current->exit_code = 0;
2277
2278	/*
2279	 * Update the siginfo structure if the signal has
2280	 * changed.  If the debugger wanted something
2281	 * specific in the siginfo structure then it should
2282	 * have updated *info via PTRACE_SETSIGINFO.
2283	 */
2284	if (signr != info->si_signo) {
2285		clear_siginfo(info);
2286		info->si_signo = signr;
2287		info->si_errno = 0;
2288		info->si_code = SI_USER;
2289		rcu_read_lock();
2290		info->si_pid = task_pid_vnr(current->parent);
2291		info->si_uid = from_kuid_munged(current_user_ns(),
2292						task_uid(current->parent));
2293		rcu_read_unlock();
2294	}
2295
2296	/* If the (new) signal is now blocked, requeue it.  */
2297	if (sigismember(&current->blocked, signr)) {
2298		specific_send_sig_info(signr, info, current);
2299		signr = 0;
2300	}
2301
2302	return signr;
2303}
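
/*
 * Example (illustrative, userspace; hypothetical tracer code): after
 * the tracee stops in ptrace_stop() above, a debugger can cancel or
 * replace the intercepted signal when it resumes the tracee:
 *
 *	// deliver SIGTERM instead of the original signal
 *	ptrace(PTRACE_SETSIGINFO, pid, 0, &si);	// optionally fix up siginfo
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);
 *
 * Passing 0 as the last argument cancels the signal entirely, which
 * is the signr == 0 case handled above.
 */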
2304
2305int get_signal(struct ksignal *ksig)
2306{
2307	struct sighand_struct *sighand = current->sighand;
2308	struct signal_struct *signal = current->signal;
2309	int signr;
2310
2311	if (unlikely(current->task_works))
2312		task_work_run();
2313
2314	if (unlikely(uprobe_deny_signal()))
2315		return 0;
2316
2317	/*
2318	 * Do this once, we can't return to user-mode if freezing() == T.
2319	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2320	 * thus do not need another check after return.
2321	 */
2322	try_to_freeze();
2323
2324relock:
2325	spin_lock_irq(&sighand->siglock);
2326	/*
2327	 * Every stopped thread goes here after wakeup. Check to see if
2328	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2329	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2330	 */
2331	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2332		int why;
2333
2334		if (signal->flags & SIGNAL_CLD_CONTINUED)
2335			why = CLD_CONTINUED;
2336		else
2337			why = CLD_STOPPED;
2338
2339		signal->flags &= ~SIGNAL_CLD_MASK;
2340
2341		spin_unlock_irq(&sighand->siglock);
2342
2343		/*
2344		 * Notify the parent that we're continuing.  This event is
2345		 * always per-process and doesn't make a whole lot of sense
2346		 * for ptracers, who shouldn't consume the state via
2347		 * wait(2) either, but, for backward compatibility, notify
2348		 * the ptracer of the group leader too unless it's gonna be
2349		 * a duplicate.
2350		 */
2351		read_lock(&tasklist_lock);
2352		do_notify_parent_cldstop(current, false, why);
2353
2354		if (ptrace_reparented(current->group_leader))
2355			do_notify_parent_cldstop(current->group_leader,
2356						true, why);
2357		read_unlock(&tasklist_lock);
2358
2359		goto relock;
2360	}
2361
2362	for (;;) {
2363		struct k_sigaction *ka;
2364
2365		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2366		    do_signal_stop(0))
2367			goto relock;
2368
2369		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2370			do_jobctl_trap();
2371			spin_unlock_irq(&sighand->siglock);
2372			goto relock;
2373		}
2374
2375		signr = dequeue_signal(current, &current->blocked, &ksig->info);
2376
2377		if (!signr)
2378			break; /* will return 0 */
2379
2380		if (unlikely(current->ptrace) && signr != SIGKILL) {
2381			signr = ptrace_signal(signr, &ksig->info);
2382			if (!signr)
2383				continue;
2384		}
2385
2386		ka = &sighand->action[signr-1];
2387
2388		/* Trace actually delivered signals. */
2389		trace_signal_deliver(signr, &ksig->info, ka);
2390
2391		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2392			continue;
2393		if (ka->sa.sa_handler != SIG_DFL) {
2394			/* Run the handler.  */
2395			ksig->ka = *ka;
2396
2397			if (ka->sa.sa_flags & SA_ONESHOT)
2398				ka->sa.sa_handler = SIG_DFL;
2399
2400			break; /* will return non-zero "signr" value */
2401		}
2402
2403		/*
2404		 * Now we are doing the default action for this signal.
2405		 */
2406		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2407			continue;
2408
2409		/*
2410		 * Global init gets no signals it doesn't want.
2411		 * Container-init gets no signals it doesn't want from same
2412		 * container.
2413		 *
2414		 * Note that if global/container-init sees a sig_kernel_only()
2415		 * signal here, the signal must have been generated internally
2416		 * or must have come from an ancestor namespace. In either
2417		 * case, the signal cannot be dropped.
2418		 */
2419		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2420				!sig_kernel_only(signr))
2421			continue;
2422
2423		if (sig_kernel_stop(signr)) {
2424			/*
2425			 * The default action is to stop all threads in
2426			 * the thread group.  The job control signals
2427			 * do nothing in an orphaned pgrp, but SIGSTOP
2428			 * always works.  Note that siglock needs to be
2429			 * dropped during the call to is_orphaned_pgrp()
2430			 * because of lock ordering with tasklist_lock.
2431			 * This allows an intervening SIGCONT to be posted.
2432			 * We need to check for that and bail out if necessary.
2433			 */
2434			if (signr != SIGSTOP) {
2435				spin_unlock_irq(&sighand->siglock);
2436
2437				/* signals can be posted during this window */
2438
2439				if (is_current_pgrp_orphaned())
2440					goto relock;
2441
2442				spin_lock_irq(&sighand->siglock);
2443			}
2444
2445			if (likely(do_signal_stop(ksig->info.si_signo))) {
2446				/* It released the siglock.  */
2447				goto relock;
2448			}
2449
2450			/*
2451			 * We didn't actually stop, due to a race
2452			 * with SIGCONT or something like that.
2453			 */
2454			continue;
2455		}
2456
2457		spin_unlock_irq(&sighand->siglock);
2458
2459		/*
2460		 * Anything else is fatal, maybe with a core dump.
2461		 */
2462		current->flags |= PF_SIGNALED;
2463
2464		if (sig_kernel_coredump(signr)) {
2465			if (print_fatal_signals)
2466				print_fatal_signal(ksig->info.si_signo);
2467			proc_coredump_connector(current);
2468			/*
2469			 * If it was able to dump core, this kills all
2470			 * other threads in the group and synchronizes with
2471			 * their demise.  If we lost the race with another
2472			 * thread getting here, it set group_exit_code
2473			 * first and our do_group_exit call below will use
2474			 * that value and ignore the one we pass it.
2475			 */
2476			do_coredump(&ksig->info);
2477		}
2478
2479		/*
2480		 * Death signals, no core dump.
2481		 */
2482		do_group_exit(ksig->info.si_signo);
2483		/* NOTREACHED */
2484	}
2485	spin_unlock_irq(&sighand->siglock);
2486
2487	ksig->sig = signr;
2488	return ksig->sig > 0;
2489}
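
/*
 * Note (illustrative sketch of the arch contract, not code from this
 * file): architecture signal-delivery paths typically drive
 * get_signal() on the way back to user mode, roughly:
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig))
 *		handle_signal(&ksig, regs);	// arch-specific frame setup
 */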
2490
2491/**
2492 * signal_delivered - update state after a signal has been delivered
2493 * @ksig:		kernel signal struct
2494 * @stepping:		nonzero if debugger single-step or block-step in use
2495 *
2496 * This function should be called when a signal has successfully been
2497 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2498 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2499 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2500 */
2501static void signal_delivered(struct ksignal *ksig, int stepping)
2502{
2503	sigset_t blocked;
2504
2505	/* A signal was successfully delivered, and the
2506	   saved sigmask was stored on the signal frame,
2507	   and will be restored by sigreturn.  So we can
2508	   simply clear the restore sigmask flag.  */
2509	clear_restore_sigmask();
2510
2511	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2512	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2513		sigaddset(&blocked, ksig->sig);
2514	set_current_blocked(&blocked);
2515	tracehook_signal_handler(stepping);
2516}
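
/*
 * Example (illustrative): for a SIGUSR1 handler installed with
 * sa_mask = { SIGUSR2 } and without SA_NODEFER, the mask computed
 * above is
 *
 *	blocked' = blocked | { SIGUSR2 } | { SIGUSR1 }
 *
 * i.e. the handler's sa_mask plus the delivered signal itself.
 */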
2517
2518void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2519{
2520	if (failed)
2521		force_sigsegv(ksig->sig, current);
2522	else
2523		signal_delivered(ksig, stepping);
2524}
2525
2526/*
2527 * It could be that complete_signal() picked us to notify about the
2528 * group-wide signal. Other threads should be notified now to take
2529 * the shared signals in @which since we will not.
2530 */
2531static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2532{
2533	sigset_t retarget;
2534	struct task_struct *t;
2535
2536	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2537	if (sigisemptyset(&retarget))
2538		return;
2539
2540	t = tsk;
2541	while_each_thread(tsk, t) {
2542		if (t->flags & PF_EXITING)
2543			continue;
2544
2545		if (!has_pending_signals(&retarget, &t->blocked))
2546			continue;
2547		/* Remove the signals this thread can handle. */
2548		sigandsets(&retarget, &retarget, &t->blocked);
2549
2550		if (!signal_pending(t))
2551			signal_wake_up(t, 0);
2552
2553		if (sigisemptyset(&retarget))
2554			break;
2555	}
2556}
2557
2558void exit_signals(struct task_struct *tsk)
2559{
2560	int group_stop = 0;
2561	sigset_t unblocked;
2562
2563	/*
2564	 * @tsk is about to have PF_EXITING set - lock out users which
2565	 * expect stable threadgroup.
2566	 */
2567	cgroup_threadgroup_change_begin(tsk);
2568
2569	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2570		tsk->flags |= PF_EXITING;
2571		cgroup_threadgroup_change_end(tsk);
2572		return;
2573	}
2574
2575	spin_lock_irq(&tsk->sighand->siglock);
2576	/*
2577	 * From now this task is not visible for group-wide signals,
2578	 * see wants_signal(), do_signal_stop().
2579	 */
2580	tsk->flags |= PF_EXITING;
2581
2582	cgroup_threadgroup_change_end(tsk);
2583
2584	if (!signal_pending(tsk))
2585		goto out;
2586
2587	unblocked = tsk->blocked;
2588	signotset(&unblocked);
2589	retarget_shared_pending(tsk, &unblocked);
2590
2591	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2592	    task_participate_group_stop(tsk))
2593		group_stop = CLD_STOPPED;
2594out:
2595	spin_unlock_irq(&tsk->sighand->siglock);
2596
2597	/*
2598	 * If group stop has completed, deliver the notification.  This
2599	 * should always go to the real parent of the group leader.
2600	 */
2601	if (unlikely(group_stop)) {
2602		read_lock(&tasklist_lock);
2603		do_notify_parent_cldstop(tsk, false, group_stop);
2604		read_unlock(&tasklist_lock);
2605	}
2606}
2607
2608EXPORT_SYMBOL(recalc_sigpending);
2609EXPORT_SYMBOL_GPL(dequeue_signal);
2610EXPORT_SYMBOL(flush_signals);
2611EXPORT_SYMBOL(force_sig);
2612EXPORT_SYMBOL(send_sig);
2613EXPORT_SYMBOL(send_sig_info);
2614EXPORT_SYMBOL(sigprocmask);
2615
2616/*
2617 * System call entry points.
2618 */
2619
2620/**
2621 *  sys_restart_syscall - restart a system call
2622 */
2623SYSCALL_DEFINE0(restart_syscall)
2624{
2625	struct restart_block *restart = &current->restart_block;
2626	return restart->fn(restart);
2627}
2628
2629long do_no_restart_syscall(struct restart_block *param)
2630{
2631	return -EINTR;
2632}
2633
2634static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2635{
2636	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2637		sigset_t newblocked;
2638		/* A set of now blocked but previously unblocked signals. */
2639		sigandnsets(&newblocked, newset, &current->blocked);
2640		retarget_shared_pending(tsk, &newblocked);
2641	}
2642	tsk->blocked = *newset;
2643	recalc_sigpending();
2644}
2645
2646/**
2647 * set_current_blocked - change current->blocked mask
2648 * @newset: new mask
2649 *
2650 * It is wrong to change ->blocked directly, this helper should be used
2651 * to ensure the process can't miss a shared signal we are going to block.
2652 */
2653void set_current_blocked(sigset_t *newset)
2654{
2655	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2656	__set_current_blocked(newset);
2657}
2658
2659void __set_current_blocked(const sigset_t *newset)
2660{
2661	struct task_struct *tsk = current;
2662
2663	/*
2664	 * In case the signal mask hasn't changed, there is nothing we need
2665	 * to do. The current->blocked shouldn't be modified by other task.
2666	 */
2667	if (sigequalsets(&tsk->blocked, newset))
2668		return;
2669
2670	spin_lock_irq(&tsk->sighand->siglock);
2671	__set_task_blocked(tsk, newset);
2672	spin_unlock_irq(&tsk->sighand->siglock);
2673}
2674
2675/*
2676 * This is also useful for kernel threads that want to temporarily
2677 * (or permanently) block certain signals.
2678 *
2679 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2680 * interface happily blocks "unblockable" signals like SIGKILL
2681 * and friends.
2682 */
2683int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2684{
2685	struct task_struct *tsk = current;
2686	sigset_t newset;
2687
2688	/* Lockless, only current can change ->blocked, never from irq */
2689	if (oldset)
2690		*oldset = tsk->blocked;
2691
2692	switch (how) {
2693	case SIG_BLOCK:
2694		sigorsets(&newset, &tsk->blocked, set);
2695		break;
2696	case SIG_UNBLOCK:
2697		sigandnsets(&newset, &tsk->blocked, set);
2698		break;
2699	case SIG_SETMASK:
2700		newset = *set;
2701		break;
2702	default:
2703		return -EINVAL;
2704	}
2705
2706	__set_current_blocked(&newset);
2707	return 0;
2708}
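
/*
 * Example (illustrative, in-kernel): a kernel thread that wants to
 * receive SIGTERM could unblock it with
 *
 *	sigset_t set;
 *
 *	siginitset(&set, sigmask(SIGTERM));
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);
 */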
2709
2710/**
2711 *  sys_rt_sigprocmask - change the list of currently blocked signals
2712 *  @how: whether to add, remove, or set signals
2713 *  @nset: signals to add, remove, or set (if non-null)
2714 *  @oset: previous value of signal mask if non-null
2715 *  @sigsetsize: size of sigset_t type
2716 */
2717SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2718		sigset_t __user *, oset, size_t, sigsetsize)
2719{
2720	sigset_t old_set, new_set;
2721	int error;
2722
2723	/* XXX: Don't preclude handling different sized sigset_t's.  */
2724	if (sigsetsize != sizeof(sigset_t))
2725		return -EINVAL;
2726
2727	old_set = current->blocked;
2728
2729	if (nset) {
2730		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2731			return -EFAULT;
2732		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2733
2734		error = sigprocmask(how, &new_set, NULL);
2735		if (error)
2736			return error;
2737	}
2738
2739	if (oset) {
2740		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2741			return -EFAULT;
2742	}
2743
2744	return 0;
2745}
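
/*
 * Example (illustrative, userspace): glibc's sigprocmask(2) wrapper
 * is implemented on top of this syscall; blocking SIGINT looks like
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 */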
2746
2747#ifdef CONFIG_COMPAT
2748COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2749		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2750{
2751	sigset_t old_set = current->blocked;
2752
2753	/* XXX: Don't preclude handling different sized sigset_t's.  */
2754	if (sigsetsize != sizeof(sigset_t))
2755		return -EINVAL;
2756
2757	if (nset) {
2758		sigset_t new_set;
2759		int error;
2760		if (get_compat_sigset(&new_set, nset))
2761			return -EFAULT;
2762		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2763
2764		error = sigprocmask(how, &new_set, NULL);
2765		if (error)
2766			return error;
2767	}
2768	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2769}
2770#endif
2771
2772static int do_sigpending(sigset_t *set)
2773{
2774	spin_lock_irq(&current->sighand->siglock);
2775	sigorsets(set, &current->pending.signal,
2776		  &current->signal->shared_pending.signal);
2777	spin_unlock_irq(&current->sighand->siglock);
2778
2779	/* Outside the lock because only this thread touches it.  */
2780	sigandsets(set, &current->blocked, set);
2781	return 0;
2782}
2783
2784/**
2785 *  sys_rt_sigpending - examine a pending signal that has been raised
2786 *			while blocked
2787 *  @uset: stores pending signals
2788 *  @sigsetsize: size of sigset_t type or smaller
2789 */
2790SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2791{
2792	sigset_t set;
2793	int err;
2794
2795	if (sigsetsize > sizeof(*uset))
2796		return -EINVAL;
2797
2798	err = do_sigpending(&set);
2799	if (!err && copy_to_user(uset, &set, sigsetsize))
2800		err = -EFAULT;
2801	return err;
2802}
2803
2804#ifdef CONFIG_COMPAT
2805COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2806		compat_size_t, sigsetsize)
2807{
2808	sigset_t set;
2809	int err;
2810
2811	if (sigsetsize > sizeof(*uset))
2812		return -EINVAL;
2813
2814	err = do_sigpending(&set);
2815	if (!err)
2816		err = put_compat_sigset(uset, &set, sigsetsize);
2817	return err;
2818}
2819#endif
2820
2821enum siginfo_layout siginfo_layout(int sig, int si_code)
2822{
2823	enum siginfo_layout layout = SIL_KILL;
2824	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2825		static const struct {
2826			unsigned char limit, layout;
2827		} filter[] = {
2828			[SIGILL]  = { NSIGILL,  SIL_FAULT },
2829			[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
2830			[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2831			[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
2832			[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2833#if defined(SIGEMT) && defined(NSIGEMT)
2834			[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
2835#endif
2836			[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2837			[SIGPOLL] = { NSIGPOLL, SIL_POLL },
2838			[SIGSYS]  = { NSIGSYS,  SIL_SYS },
2839		};
2840		if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
2841			layout = filter[sig].layout;
2842		else if (si_code <= NSIGPOLL)
2843			layout = SIL_POLL;
2844	} else {
2845		if (si_code == SI_TIMER)
2846			layout = SIL_TIMER;
2847		else if (si_code == SI_SIGIO)
2848			layout = SIL_POLL;
2849		else if (si_code < 0)
2850			layout = SIL_RT;
2851		/* Tests to support buggy kernel ABIs */
2852#ifdef TRAP_FIXME
2853		if ((sig == SIGTRAP) && (si_code == TRAP_FIXME))
2854			layout = SIL_FAULT;
2855#endif
2856#ifdef FPE_FIXME
2857		if ((sig == SIGFPE) && (si_code == FPE_FIXME))
2858			layout = SIL_FAULT;
2859#endif
2860	}
2861	return layout;
2862}
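
/*
 * Examples (illustrative) of the classification above:
 *
 *	siginfo_layout(SIGSEGV, SEGV_MAPERR)	== SIL_FAULT
 *	siginfo_layout(SIGCHLD, CLD_EXITED)	== SIL_CHLD
 *	siginfo_layout(SIGUSR1, SI_QUEUE)	== SIL_RT	(SI_QUEUE < 0)
 *	siginfo_layout(SIGUSR1, SI_USER)	== SIL_KILL
 */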
2863
2864int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2865{
2866	int err;
2867
2868	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2869		return -EFAULT;
2870	if (from->si_code < 0)
2871		return __copy_to_user(to, from, sizeof(siginfo_t))
2872			? -EFAULT : 0;
2873	/*
2874	 * If you change siginfo_t structure, please be sure
2875	 * this code is fixed accordingly.
2876	 * Please remember to update the signalfd_copyinfo() function
2877	 * inside fs/signalfd.c too, in case siginfo_t changes.
2878	 * It should never copy any pad contained in the structure
2879	 * to avoid security leaks, but must copy the generic
2880	 * 3 ints plus the relevant union member.
2881	 */
2882	err = __put_user(from->si_signo, &to->si_signo);
2883	err |= __put_user(from->si_errno, &to->si_errno);
2884	err |= __put_user(from->si_code, &to->si_code);
2885	switch (siginfo_layout(from->si_signo, from->si_code)) {
2886	case SIL_KILL:
2887		err |= __put_user(from->si_pid, &to->si_pid);
2888		err |= __put_user(from->si_uid, &to->si_uid);
2889		break;
2890	case SIL_TIMER:
2891		/* Unreached: SI_TIMER si_code is negative, copied whole above */
2892		break;
2893	case SIL_POLL:
2894		err |= __put_user(from->si_band, &to->si_band);
2895		err |= __put_user(from->si_fd, &to->si_fd);
2896		break;
2897	case SIL_FAULT:
2898		err |= __put_user(from->si_addr, &to->si_addr);
2899#ifdef __ARCH_SI_TRAPNO
2900		err |= __put_user(from->si_trapno, &to->si_trapno);
2901#endif
2902#ifdef __ia64__
2903		err |= __put_user(from->si_imm, &to->si_imm);
2904		err |= __put_user(from->si_flags, &to->si_flags);
2905		err |= __put_user(from->si_isr, &to->si_isr);
2906#endif
2907		/*
2908		 * Other callers might not initialize the si_lsb field,
2909		 * so check explicitly for the right codes here.
2910		 */
2911#ifdef BUS_MCEERR_AR
2912		if (from->si_signo == SIGBUS && from->si_code == BUS_MCEERR_AR)
2913			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2914#endif
2915#ifdef BUS_MCEERR_AO
2916		if (from->si_signo == SIGBUS && from->si_code == BUS_MCEERR_AO)
2917			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2918#endif
2919#ifdef SEGV_BNDERR
2920		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2921			err |= __put_user(from->si_lower, &to->si_lower);
2922			err |= __put_user(from->si_upper, &to->si_upper);
2923		}
2924#endif
2925#ifdef SEGV_PKUERR
2926		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2927			err |= __put_user(from->si_pkey, &to->si_pkey);
2928#endif
2929		break;
2930	case SIL_CHLD:
2931		err |= __put_user(from->si_pid, &to->si_pid);
2932		err |= __put_user(from->si_uid, &to->si_uid);
2933		err |= __put_user(from->si_status, &to->si_status);
2934		err |= __put_user(from->si_utime, &to->si_utime);
2935		err |= __put_user(from->si_stime, &to->si_stime);
2936		break;
2937	case SIL_RT:
2938		err |= __put_user(from->si_pid, &to->si_pid);
2939		err |= __put_user(from->si_uid, &to->si_uid);
2940		err |= __put_user(from->si_ptr, &to->si_ptr);
2941		break;
2942	case SIL_SYS:
2943		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2944		err |= __put_user(from->si_syscall, &to->si_syscall);
2945		err |= __put_user(from->si_arch, &to->si_arch);
2946		break;
2947	}
2948	return err;
2949}
2950
2951#ifdef CONFIG_COMPAT
2952int copy_siginfo_to_user32(struct compat_siginfo __user *to,
2953			   const struct siginfo *from)
2954#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
2955{
2956	return __copy_siginfo_to_user32(to, from, in_x32_syscall());
2957}
2958int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
2959			     const struct siginfo *from, bool x32_ABI)
2960#endif
2961{
2962	struct compat_siginfo new;
2963	memset(&new, 0, sizeof(new));
2964
2965	new.si_signo = from->si_signo;
2966	new.si_errno = from->si_errno;
2967	new.si_code  = from->si_code;
2968	switch(siginfo_layout(from->si_signo, from->si_code)) {
2969	case SIL_KILL:
2970		new.si_pid = from->si_pid;
2971		new.si_uid = from->si_uid;
2972		break;
2973	case SIL_TIMER:
2974		new.si_tid     = from->si_tid;
2975		new.si_overrun = from->si_overrun;
2976		new.si_int     = from->si_int;
2977		break;
2978	case SIL_POLL:
2979		new.si_band = from->si_band;
2980		new.si_fd   = from->si_fd;
2981		break;
2982	case SIL_FAULT:
2983		new.si_addr = ptr_to_compat(from->si_addr);
2984#ifdef __ARCH_SI_TRAPNO
2985		new.si_trapno = from->si_trapno;
2986#endif
2987#ifdef BUS_MCEERR_AR
2988		if ((from->si_signo == SIGBUS) && (from->si_code == BUS_MCEERR_AR))
2989			new.si_addr_lsb = from->si_addr_lsb;
2990#endif
2991#ifdef BUS_MCEERR_AO
2992		if ((from->si_signo == SIGBUS) && (from->si_code == BUS_MCEERR_AO))
2993			new.si_addr_lsb = from->si_addr_lsb;
2994#endif
2995#ifdef SEGV_BNDERR
2996		if ((from->si_signo == SIGSEGV) &&
2997		    (from->si_code == SEGV_BNDERR)) {
2998			new.si_lower = ptr_to_compat(from->si_lower);
2999			new.si_upper = ptr_to_compat(from->si_upper);
3000		}
3001#endif
3002#ifdef SEGV_PKUERR
3003		if ((from->si_signo == SIGSEGV) &&
3004		    (from->si_code == SEGV_PKUERR))
3005			new.si_pkey = from->si_pkey;
3006#endif
3007
3008		break;
3009	case SIL_CHLD:
3010		new.si_pid    = from->si_pid;
3011		new.si_uid    = from->si_uid;
3012		new.si_status = from->si_status;
3013#ifdef CONFIG_X86_X32_ABI
3014		if (x32_ABI) {
3015			new._sifields._sigchld_x32._utime = from->si_utime;
3016			new._sifields._sigchld_x32._stime = from->si_stime;
3017		} else
3018#endif
3019		{
3020			new.si_utime = from->si_utime;
3021			new.si_stime = from->si_stime;
3022		}
3023		break;
3024	case SIL_RT:
3025		new.si_pid = from->si_pid;
3026		new.si_uid = from->si_uid;
3027		new.si_int = from->si_int;
3028		break;
3029	case SIL_SYS:
3030		new.si_call_addr = ptr_to_compat(from->si_call_addr);
3031		new.si_syscall   = from->si_syscall;
3032		new.si_arch      = from->si_arch;
3033		break;
3034	}
3035
3036	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3037		return -EFAULT;
3038
3039	return 0;
3040}
3041
3042int copy_siginfo_from_user32(struct siginfo *to,
3043			     const struct compat_siginfo __user *ufrom)
3044{
3045	struct compat_siginfo from;
3046
3047	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3048		return -EFAULT;
3049
3050	clear_siginfo(to);
3051	to->si_signo = from.si_signo;
3052	to->si_errno = from.si_errno;
3053	to->si_code  = from.si_code;
3054	switch(siginfo_layout(from.si_signo, from.si_code)) {
3055	case SIL_KILL:
3056		to->si_pid = from.si_pid;
3057		to->si_uid = from.si_uid;
3058		break;
3059	case SIL_TIMER:
3060		to->si_tid     = from.si_tid;
3061		to->si_overrun = from.si_overrun;
3062		to->si_int     = from.si_int;
3063		break;
3064	case SIL_POLL:
3065		to->si_band = from.si_band;
3066		to->si_fd   = from.si_fd;
3067		break;
3068	case SIL_FAULT:
3069		to->si_addr = compat_ptr(from.si_addr);
3070#ifdef __ARCH_SI_TRAPNO
3071		to->si_trapno = from.si_trapno;
3072#endif
3073#ifdef BUS_MCEERR_AR
3074		if ((from.si_signo == SIGBUS) && (from.si_code == BUS_MCEERR_AR))
3075			to->si_addr_lsb = from.si_addr_lsb;
3076#endif
3077#ifdef BUS_MCEERR_AO
3078		if ((from.si_signo == SIGBUS) && (from.si_code == BUS_MCEERR_AO))
3079			to->si_addr_lsb = from.si_addr_lsb;
3080#endif
3081#ifdef SEGV_BNDERR
3082		if ((from.si_signo == SIGSEGV) && (from.si_code == SEGV_BNDERR)) {
3083			to->si_lower = compat_ptr(from.si_lower);
3084			to->si_upper = compat_ptr(from.si_upper);
3085		}
3086#endif
3087#ifdef SEGV_PKUERR
3088		if ((from.si_signo == SIGSEGV) && (from.si_code == SEGV_PKUERR))
3089			to->si_pkey = from.si_pkey;
3090#endif
3091		break;
3092	case SIL_CHLD:
3093		to->si_pid    = from.si_pid;
3094		to->si_uid    = from.si_uid;
3095		to->si_status = from.si_status;
3096#ifdef CONFIG_X86_X32_ABI
3097		if (in_x32_syscall()) {
3098			to->si_utime = from._sifields._sigchld_x32._utime;
3099			to->si_stime = from._sifields._sigchld_x32._stime;
3100		} else
3101#endif
3102		{
3103			to->si_utime = from.si_utime;
3104			to->si_stime = from.si_stime;
3105		}
3106		break;
3107	case SIL_RT:
3108		to->si_pid = from.si_pid;
3109		to->si_uid = from.si_uid;
3110		to->si_int = from.si_int;
3111		break;
3112	case SIL_SYS:
3113		to->si_call_addr = compat_ptr(from.si_call_addr);
3114		to->si_syscall   = from.si_syscall;
3115		to->si_arch      = from.si_arch;
3116		break;
3117	}
3118	return 0;
3119}
3120#endif /* CONFIG_COMPAT */
3121
3122/**
3123 *  do_sigtimedwait - wait for queued signals specified in @which
3124 *  @which: queued signals to wait for
3125 *  @info: if non-null, the signal's siginfo is returned here
3126 *  @ts: upper bound on process time suspension
3127 */
3128static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3129		    const struct timespec *ts)
3130{
3131	ktime_t *to = NULL, timeout = KTIME_MAX;
3132	struct task_struct *tsk = current;
3133	sigset_t mask = *which;
3134	int sig, ret = 0;
3135
3136	if (ts) {
3137		if (!timespec_valid(ts))
3138			return -EINVAL;
3139		timeout = timespec_to_ktime(*ts);
3140		to = &timeout;
3141	}
3142
3143	/*
3144	 * Invert the set of allowed signals to get those we want to block.
3145	 */
3146	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3147	signotset(&mask);
3148
3149	spin_lock_irq(&tsk->sighand->siglock);
3150	sig = dequeue_signal(tsk, &mask, info);
3151	if (!sig && timeout) {
3152		/*
3153		 * None ready, temporarily unblock those we're interested
3154		 * in while we are sleeping, so that we'll be awakened when
3155		 * they arrive. Unblocking is always fine, we can avoid
3156		 * set_current_blocked().
3157		 */
3158		tsk->real_blocked = tsk->blocked;
3159		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3160		recalc_sigpending();
3161		spin_unlock_irq(&tsk->sighand->siglock);
3162
3163		__set_current_state(TASK_INTERRUPTIBLE);
3164		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3165							 HRTIMER_MODE_REL);
3166		spin_lock_irq(&tsk->sighand->siglock);
3167		__set_task_blocked(tsk, &tsk->real_blocked);
3168		sigemptyset(&tsk->real_blocked);
3169		sig = dequeue_signal(tsk, &mask, info);
3170	}
3171	spin_unlock_irq(&tsk->sighand->siglock);
3172
3173	if (sig)
3174		return sig;
3175	return ret ? -EINTR : -EAGAIN;
3176}
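
/*
 * Example (illustrative, userspace): the canonical caller blocks the
 * signal first, then waits for it synchronously:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		;	// timed out, the -EAGAIN case above
 */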
3177
3178/**
3179 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3180 *			in @uthese
3181 *  @uthese: queued signals to wait for
3182 *  @uinfo: if non-null, the signal's siginfo is returned here
3183 *  @uts: upper bound on process time suspension
3184 *  @sigsetsize: size of sigset_t type
3185 */
3186SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3187		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3188		size_t, sigsetsize)
3189{
3190	sigset_t these;
3191	struct timespec ts;
3192	siginfo_t info;
3193	int ret;
3194
3195	/* XXX: Don't preclude handling different sized sigset_t's.  */
3196	if (sigsetsize != sizeof(sigset_t))
3197		return -EINVAL;
3198
3199	if (copy_from_user(&these, uthese, sizeof(these)))
3200		return -EFAULT;
3201
3202	if (uts) {
3203		if (copy_from_user(&ts, uts, sizeof(ts)))
3204			return -EFAULT;
3205	}
3206
3207	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3208
3209	if (ret > 0 && uinfo) {
3210		if (copy_siginfo_to_user(uinfo, &info))
3211			ret = -EFAULT;
3212	}
3213
3214	return ret;
3215}
3216
3217#ifdef CONFIG_COMPAT
3218COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3219		struct compat_siginfo __user *, uinfo,
3220		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3221{
3222	sigset_t s;
3223	struct timespec t;
3224	siginfo_t info;
3225	long ret;
3226
3227	if (sigsetsize != sizeof(sigset_t))
3228		return -EINVAL;
3229
3230	if (get_compat_sigset(&s, uthese))
3231		return -EFAULT;
3232
3233	if (uts) {
3234		if (compat_get_timespec(&t, uts))
3235			return -EFAULT;
3236	}
3237
3238	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3239
3240	if (ret > 0 && uinfo) {
3241		if (copy_siginfo_to_user32(uinfo, &info))
3242			ret = -EFAULT;
3243	}
3244
3245	return ret;
3246}
3247#endif
3248
3249/**
3250 *  sys_kill - send a signal to a process
3251 *  @pid: the PID of the process
3252 *  @sig: signal to be sent
3253 */
3254SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3255{
3256	struct siginfo info;
3257
3258	clear_siginfo(&info);
3259	info.si_signo = sig;
3260	info.si_errno = 0;
3261	info.si_code = SI_USER;
3262	info.si_pid = task_tgid_vnr(current);
3263	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3264
3265	return kill_something_info(sig, &info, pid);
3266}
3267
3268static int
3269do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3270{
3271	struct task_struct *p;
3272	int error = -ESRCH;
3273
3274	rcu_read_lock();
3275	p = find_task_by_vpid(pid);
3276	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3277		error = check_kill_permission(sig, info, p);
3278		/*
3279		 * The null signal is a permissions and process existence
3280		 * probe.  No signal is actually delivered.
3281		 */
3282		if (!error && sig) {
3283			error = do_send_sig_info(sig, info, p, false);
3284			/*
3285			 * If lock_task_sighand() failed we pretend the task
3286			 * dies after receiving the signal. The window is tiny,
3287			 * and the signal is private anyway.
3288			 */
3289			if (unlikely(error == -ESRCH))
3290				error = 0;
3291		}
3292	}
3293	rcu_read_unlock();
3294
3295	return error;
3296}
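
/*
 * Example (illustrative, userspace): the null-signal probe described
 * above is the usual way to test whether a task exists, here via
 * tkill()/tgkill() and likewise for kill(2):
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		;	// pid exists (EPERM: exists, but not signalable by us)
 */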
3297
3298static int do_tkill(pid_t tgid, pid_t pid, int sig)
3299{
3300	struct siginfo info;
3301
3302	clear_siginfo(&info);
3303	info.si_signo = sig;
3304	info.si_errno = 0;
3305	info.si_code = SI_TKILL;
3306	info.si_pid = task_tgid_vnr(current);
3307	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3308
3309	return do_send_specific(tgid, pid, sig, &info);
3310}
3311
3312/**
3313 *  sys_tgkill - send signal to one specific thread
3314 *  @tgid: the thread group ID of the thread
3315 *  @pid: the PID of the thread
3316 *  @sig: signal to be sent
3317 *
3318 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3319 *  exists but no longer belongs to the target process. This
3320 *  method solves the problem of threads exiting and PIDs getting reused.
3321 */
3322SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3323{
3324	/* This is only valid for single tasks */
3325	if (pid <= 0 || tgid <= 0)
3326		return -EINVAL;
3327
3328	return do_tkill(tgid, pid, sig);
3329}
3330
3331/**
3332 *  sys_tkill - send signal to one specific task
3333 *  @pid: the PID of the task
3334 *  @sig: signal to be sent
3335 *
3336 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3337 */
3338SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3339{
3340	/* This is only valid for single tasks */
3341	if (pid <= 0)
3342		return -EINVAL;
3343
3344	return do_tkill(0, pid, sig);
3345}
3346
3347static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3348{
3349	/* Not even root can pretend to send signals from the kernel.
3350	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3351	 */
3352	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3353	    (task_pid_vnr(current) != pid))
3354		return -EPERM;
3355
3356	info->si_signo = sig;
3357
3358	/* POSIX.1b doesn't mention process groups.  */
3359	return kill_proc_info(sig, info, pid);
3360}
3361
3362/**
3363 *  sys_rt_sigqueueinfo - send signal information along with a signal
3364 *  @pid: the PID of the target process
3365 *  @sig: signal to be sent
3366 *  @uinfo: signal info to be sent
3367 */
3368SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3369		siginfo_t __user *, uinfo)
3370{
3371	siginfo_t info;
3372	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3373		return -EFAULT;
3374	return do_rt_sigqueueinfo(pid, sig, &info);
3375}
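
/*
 * Example (illustrative, userspace): sigqueue(3) reaches this syscall
 * with si_code = SI_QUEUE, which is negative and therefore passes the
 * spoofing check in do_rt_sigqueueinfo():
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);
 */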
3376
3377#ifdef CONFIG_COMPAT
3378COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3379			compat_pid_t, pid,
3380			int, sig,
3381			struct compat_siginfo __user *, uinfo)
3382{
3383	siginfo_t info;
3384	int ret = copy_siginfo_from_user32(&info, uinfo);
3385	if (unlikely(ret))
3386		return ret;
3387	return do_rt_sigqueueinfo(pid, sig, &info);
3388}
3389#endif
3390
3391static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3392{
3393	/* This is only valid for single tasks */
3394	if (pid <= 0 || tgid <= 0)
3395		return -EINVAL;
3396
3397	/* Not even root can pretend to send signals from the kernel.
3398	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3399	 */
3400	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3401	    (task_pid_vnr(current) != pid))
3402		return -EPERM;
3403
3404	info->si_signo = sig;
3405
3406	return do_send_specific(tgid, pid, sig, info);
3407}
3408
3409SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3410		siginfo_t __user *, uinfo)
3411{
3412	siginfo_t info;
3413
3414	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3415		return -EFAULT;
3416
3417	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3418}
3419
3420#ifdef CONFIG_COMPAT
3421COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3422			compat_pid_t, tgid,
3423			compat_pid_t, pid,
3424			int, sig,
3425			struct compat_siginfo __user *, uinfo)
3426{
3427	siginfo_t info;
3428
3429	if (copy_siginfo_from_user32(&info, uinfo))
3430		return -EFAULT;
3431	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3432}
3433#endif
3434
3435/*
3436 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3437 */
3438void kernel_sigaction(int sig, __sighandler_t action)
3439{
3440	spin_lock_irq(&current->sighand->siglock);
3441	current->sighand->action[sig - 1].sa.sa_handler = action;
3442	if (action == SIG_IGN) {
3443		sigset_t mask;
3444
3445		sigemptyset(&mask);
3446		sigaddset(&mask, sig);
3447
3448		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3449		flush_sigqueue_mask(&mask, &current->pending);
3450		recalc_sigpending();
3451	}
3452	spin_unlock_irq(&current->sighand->siglock);
3453}
3454EXPORT_SYMBOL(kernel_sigaction);
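
/*
 * Note (illustrative, an assumption about the surrounding tree): the
 * kthread helpers allow_signal() and disallow_signal() in
 * <linux/signal.h> are thin wrappers around kernel_sigaction(), e.g.
 *
 *	allow_signal(SIGKILL);		// installs a marker handler
 *	disallow_signal(SIGTERM);	// kernel_sigaction(sig, SIG_IGN)
 */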
3455
3456void __weak sigaction_compat_abi(struct k_sigaction *act,
3457		struct k_sigaction *oact)
3458{
3459}
3460
3461int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3462{
3463	struct task_struct *p = current, *t;
3464	struct k_sigaction *k;
3465	sigset_t mask;
3466
3467	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3468		return -EINVAL;
3469
3470	k = &p->sighand->action[sig-1];
3471
3472	spin_lock_irq(&p->sighand->siglock);
3473	if (oact)
3474		*oact = *k;
3475
3476	sigaction_compat_abi(act, oact);
3477
3478	if (act) {
3479		sigdelsetmask(&act->sa.sa_mask,
3480			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3481		*k = *act;
3482		/*
3483		 * POSIX 3.3.1.3:
3484		 *  "Setting a signal action to SIG_IGN for a signal that is
3485		 *   pending shall cause the pending signal to be discarded,
3486		 *   whether or not it is blocked."
3487		 *
3488		 *  "Setting a signal action to SIG_DFL for a signal that is
3489		 *   pending and whose default action is to ignore the signal
3490		 *   (for example, SIGCHLD), shall cause the pending signal to
3491		 *   be discarded, whether or not it is blocked"
3492		 */
3493		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3494			sigemptyset(&mask);
3495			sigaddset(&mask, sig);
3496			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3497			for_each_thread(p, t)
3498				flush_sigqueue_mask(&mask, &t->pending);
3499		}
3500	}
3501
3502	spin_unlock_irq(&p->sighand->siglock);
3503	return 0;
3504}
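
/*
 * Example (illustrative, userspace) of the POSIX rule quoted above:
 * if SIGUSR1 is blocked and already pending, installing SIG_IGN
 * discards the pending instance:
 *
 *	signal(SIGUSR1, SIG_IGN);	// pending SIGUSR1 is flushed
 */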
3505
3506static int
3507do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
3508{
3509	struct task_struct *t = current;
3510
3511	if (oss) {
3512		memset(oss, 0, sizeof(stack_t));
3513		oss->ss_sp = (void __user *) t->sas_ss_sp;
3514		oss->ss_size = t->sas_ss_size;
3515		oss->ss_flags = sas_ss_flags(sp) |
3516			(current->sas_ss_flags & SS_FLAG_BITS);
3517	}
3518
3519	if (ss) {
3520		void __user *ss_sp = ss->ss_sp;
3521		size_t ss_size = ss->ss_size;
3522		unsigned ss_flags = ss->ss_flags;
3523		int ss_mode;
3524
3525		if (unlikely(on_sig_stack(sp)))
3526			return -EPERM;
3527
3528		ss_mode = ss_flags & ~SS_FLAG_BITS;
3529		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3530				ss_mode != 0))
3531			return -EINVAL;
3532
3533		if (ss_mode == SS_DISABLE) {
3534			ss_size = 0;
3535			ss_sp = NULL;
3536		} else {
3537			if (unlikely(ss_size < MINSIGSTKSZ))
3538				return -ENOMEM;
3539		}
3540
3541		t->sas_ss_sp = (unsigned long) ss_sp;
3542		t->sas_ss_size = ss_size;
3543		t->sas_ss_flags = ss_flags;
3544	}
3545	return 0;
3546}
3547
3548SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3549{
3550	stack_t new, old;
3551	int err;
3552	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3553		return -EFAULT;
3554	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3555			      current_user_stack_pointer());
3556	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3557		err = -EFAULT;
3558	return err;
3559}
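
/*
 * Example (illustrative, userspace): installing an alternate signal
 * stack so that a later handler registered with SA_ONSTACK runs on it:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 */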
3560
3561int restore_altstack(const stack_t __user *uss)
3562{
3563	stack_t new;
3564	if (copy_from_user(&new, uss, sizeof(stack_t)))
3565		return -EFAULT;
3566	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
3567	/* squash all but EFAULT for now */
3568	return 0;
3569}
3570
3571int __save_altstack(stack_t __user *uss, unsigned long sp)
3572{
3573	struct task_struct *t = current;
3574	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3575		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3576		__put_user(t->sas_ss_size, &uss->ss_size);
3577	if (err)
3578		return err;
3579	if (t->sas_ss_flags & SS_AUTODISARM)
3580		sas_ss_reset(t);
3581	return 0;
3582}
3583
3584#ifdef CONFIG_COMPAT
3585static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3586				 compat_stack_t __user *uoss_ptr)
3587{
3588	stack_t uss, uoss;
3589	int ret;
3590
3591	if (uss_ptr) {
3592		compat_stack_t uss32;
3593		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3594			return -EFAULT;
3595		uss.ss_sp = compat_ptr(uss32.ss_sp);
3596		uss.ss_flags = uss32.ss_flags;
3597		uss.ss_size = uss32.ss_size;
3598	}
3599	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3600			     compat_user_stack_pointer());
3601	if (ret >= 0 && uoss_ptr) {
3602		compat_stack_t old;
3603		memset(&old, 0, sizeof(old));
3604		old.ss_sp = ptr_to_compat(uoss.ss_sp);
3605		old.ss_flags = uoss.ss_flags;
3606		old.ss_size = uoss.ss_size;
3607		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3608			ret = -EFAULT;
3609	}
3610	return ret;
3611}
3612
3613COMPAT_SYSCALL_DEFINE2(sigaltstack,
3614			const compat_stack_t __user *, uss_ptr,
3615			compat_stack_t __user *, uoss_ptr)
3616{
3617	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
3618}
3619
3620int compat_restore_altstack(const compat_stack_t __user *uss)
3621{
3622	int err = do_compat_sigaltstack(uss, NULL);
3623	/* squash all but -EFAULT for now */
3624	return err == -EFAULT ? err : 0;
3625}
3626
3627int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3628{
3629	int err;
3630	struct task_struct *t = current;
3631	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3632			 &uss->ss_sp) |
3633		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3634		__put_user(t->sas_ss_size, &uss->ss_size);
3635	if (err)
3636		return err;
3637	if (t->sas_ss_flags & SS_AUTODISARM)
3638		sas_ss_reset(t);
3639	return 0;
3640}
3641#endif
3642
3643#ifdef __ARCH_WANT_SYS_SIGPENDING
3644
3645/**
3646 *  sys_sigpending - examine pending signals
3647 *  @uset: where mask of pending signals is returned
3648 */
3649SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
3650{
3651	sigset_t set;
3652	int err;
3653
3654	if (sizeof(old_sigset_t) > sizeof(*uset))
3655		return -EINVAL;
3656
3657	err = do_sigpending(&set);
3658	if (!err && copy_to_user(uset, &set, sizeof(old_sigset_t)))
3659		err = -EFAULT;
3660	return err;
3661}
3662
3663#ifdef CONFIG_COMPAT
3664COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3665{
3666	sigset_t set;
3667	int err = do_sigpending(&set);
3668	if (!err)
3669		err = put_user(set.sig[0], set32);
3670	return err;
3671}
3672#endif
3673
3674#endif
3675
3676#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3677/**
3678 *  sys_sigprocmask - examine and change blocked signals
3679 *  @how: whether to add, remove, or set signals
3680 *  @nset: signals to add or remove (if non-null)
3681 *  @oset: previous value of signal mask if non-null
3682 *
3683 * Some platforms have their own version with special arguments;
3684 * others support only sys_rt_sigprocmask.
3685 */
3686
3687SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3688		old_sigset_t __user *, oset)
3689{
3690	old_sigset_t old_set, new_set;
3691	sigset_t new_blocked;
3692
3693	old_set = current->blocked.sig[0];
3694
3695	if (nset) {
3696		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3697			return -EFAULT;
3698
3699		new_blocked = current->blocked;
3700
3701		switch (how) {
3702		case SIG_BLOCK:
3703			sigaddsetmask(&new_blocked, new_set);
3704			break;
3705		case SIG_UNBLOCK:
3706			sigdelsetmask(&new_blocked, new_set);
3707			break;
3708		case SIG_SETMASK:
3709			new_blocked.sig[0] = new_set;
3710			break;
3711		default:
3712			return -EINVAL;
3713		}
3714
3715		set_current_blocked(&new_blocked);
3716	}
3717
3718	if (oset) {
3719		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3720			return -EFAULT;
3721	}
3722
3723	return 0;
3724}
3725#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3726
3727#ifndef CONFIG_ODD_RT_SIGACTION
3728/**
3729 *  sys_rt_sigaction - alter an action taken by a process
3730 *  @sig: signal to be sent
3731 *  @act: new sigaction
3732 *  @oact: used to save the previous sigaction
3733 *  @sigsetsize: size of sigset_t type
3734 */
3735SYSCALL_DEFINE4(rt_sigaction, int, sig,
3736		const struct sigaction __user *, act,
3737		struct sigaction __user *, oact,
3738		size_t, sigsetsize)
3739{
3740	struct k_sigaction new_sa, old_sa;
3741	int ret = -EINVAL;
3742
3743	/* XXX: Don't preclude handling different sized sigset_t's.  */
3744	if (sigsetsize != sizeof(sigset_t))
3745		goto out;
3746
3747	if (act) {
3748		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3749			return -EFAULT;
3750	}
3751
3752	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3753
3754	if (!ret && oact) {
3755		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3756			return -EFAULT;
3757	}
3758out:
3759	return ret;
3760}
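
/*
 * Example (illustrative, userspace): glibc's sigaction(2) wrapper ends
 * up here on most architectures (supplying its own sa_restorer where
 * the ABI needs one):
 *
 *	struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_RESTART };
 *
 *	sa.sa_sigaction = segv_handler;	// hypothetical handler
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */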
3761#ifdef CONFIG_COMPAT
3762COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3763		const struct compat_sigaction __user *, act,
3764		struct compat_sigaction __user *, oact,
3765		compat_size_t, sigsetsize)
3766{
3767	struct k_sigaction new_ka, old_ka;
3768#ifdef __ARCH_HAS_SA_RESTORER
3769	compat_uptr_t restorer;
3770#endif
3771	int ret;
3772
3773	/* XXX: Don't preclude handling different sized sigset_t's.  */
3774	if (sigsetsize != sizeof(compat_sigset_t))
3775		return -EINVAL;
3776
3777	if (act) {
3778		compat_uptr_t handler;
3779		ret = get_user(handler, &act->sa_handler);
3780		new_ka.sa.sa_handler = compat_ptr(handler);
3781#ifdef __ARCH_HAS_SA_RESTORER
3782		ret |= get_user(restorer, &act->sa_restorer);
3783		new_ka.sa.sa_restorer = compat_ptr(restorer);
3784#endif
3785		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3786		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3787		if (ret)
3788			return -EFAULT;
3789	}
3790
3791	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3792	if (!ret && oact) {
3793		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3794			       &oact->sa_handler);
3795		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
3796					 sizeof(oact->sa_mask));
3797		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3798#ifdef __ARCH_HAS_SA_RESTORER
3799		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3800				&oact->sa_restorer);
3801#endif
3802	}
3803	return ret;
3804}
3805#endif
3806#endif /* !CONFIG_ODD_RT_SIGACTION */
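
/*
 * The compat path above accumulates get_user()/put_user() results with
 * |= and folds any failure into one -EFAULT.  A minimal sketch of the
 * same idiom, with hypothetical fields a and b:
 *
 *	int ret;
 *
 *	ret = get_user(dst->a, &src->a);
 *	ret |= get_user(dst->b, &src->b);
 *	if (ret)
 *		return -EFAULT;
 *
 * This works because get_user() returns only 0 or -EFAULT, so OR-ing
 * the results preserves "some access failed" without branching after
 * every copy.
 */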

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */
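
/*
 * Usage sketch (userspace, illustrative only): these ancient calls have
 * no libc wrapper on current systems, so a caller would use syscall(2)
 * directly -- and only on architectures that define __NR_sgetmask and
 * __NR_ssetmask in the first place:
 *
 *	long old = syscall(__NR_sgetmask);
 *
 *	syscall(__NR_ssetmask, old | (1UL << (SIGINT - 1)));
 *
 * Only the first word of the blocked set is reachable this way; signals
 * above 32 need sigprocmask()/rt_sigprocmask().
 */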

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
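
/*
 * SA_ONESHOT | SA_NOMASK above gives signal(2) its historical System V
 * semantics: the handler is reset to SIG_DFL on delivery and the signal
 * is not blocked while the handler runs.  Usage sketch (userspace,
 * illustrative only; on_int() is a hypothetical handler):
 *
 *	void (*prev)(int) = signal(SIGINT, on_int);
 *
 *	if (prev == SIG_ERR)
 *		perror("signal");
 *
 * The reset-to-default behaviour is why portable code prefers
 * sigaction() over signal().
 */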

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

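/*
 * Common helper for the sigsuspend() flavours below.  A bare pause(),
 * as above, is racy from userspace: a signal that arrives between
 * testing a condition and entering pause() is consumed without waking
 * anyone.  Sketch of the lost-wakeup window (illustrative only; "flag"
 * would be set by a hypothetical handler):
 *
 *	while (!flag)		<-- a signal landing here is lost
 *		pause();
 *
 * sigsuspend() closes the window by swapping the blocked mask and
 * going to sleep as a single step: the caller's mask is parked in
 * ->saved_sigmask and set_restore_sigmask() arranges for the signal
 * delivery path to restore it once the handler frame is set up.
 */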
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
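
/*
 * Usage sketch (userspace, illustrative only): the race-free wait
 * pattern this syscall exists for.  SIGUSR1 is blocked first, the
 * condition is checked, then sigsuspend() atomically unblocks it and
 * sleeps.  "done" would be set by a hypothetical SIGUSR1 handler.
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!done)
 *		sigsuspend(&old);	(returns -1 with errno == EINTR)
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */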

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
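
/*
 * The BUILD_BUG_ON() checks above turn siginfo layout mistakes into
 * build failures instead of silent ABI breakage.  A minimal sketch of
 * the same technique, for a hypothetical struct foo whose member
 * "first" must stay at offset 0:
 *
 *	BUILD_BUG_ON(offsetof(struct foo, first) != 0);
 *
 * The condition must be a compile-time constant; if it is true, the
 * compile aborts at this line.
 */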

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel; try again later\n");
		return;
	}
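	/*
	 * Refuse the first attempt against a task that is not running;
	 * reissuing the command against the same task (tracked in
	 * kdb_prev_t) is treated as the user accepting the deadlock
	 * risk spelled out below.
	 */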
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock on the run queue locks.\n"
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, false);
	spin_unlock(&t->sighand->siglock);
	if (ret)
4053		kdb_printf("Fail to deliver Signal %d to process %d.\n",
4054			   sig, t->pid);
4055	else
4056		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */