/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
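/*
 * A sigset_t is an array of _NSIG_WORDS longs (_NSIG divided by the
 * bits per long), so on a 64-bit architecture with _NSIG == 64 only
 * the "case 1" arm below is ever used.
 */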
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know it is safe to
	 * clear the flag do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
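/*
 * These signals are generated synchronously by a faulting instruction
 * (or a rejected syscall, for SIGSYS), so their siginfo describes a
 * specific trap context.  next_signal() below dequeues them ahead of any
 * asynchronous signals pending in the same word.
 */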

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

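	/*
	 * The per-user pending-signal count was charged optimistically
	 * above; if the RLIMIT_SIGPENDING check or the allocation below
	 * fails, the charge is rolled back again.
	 */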
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
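		/*
		 * When we get here via the goto, a second instance of
		 * this signal is still queued, so sigdelset() above was
		 * deliberately skipped and the pending bit stays set.
		 */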
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves; we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

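/*
 * SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED are small sentinel
 * values (0, 1 and 2) cast to siginfo pointers, which is why the single
 * <= comparison below identifies all three special cases.
 */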
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue, we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

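/*
 * Legacy (non-realtime) signals do not queue: if one is already pending
 * in @signals, another instance of the same signal is dropped.
 */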
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

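/*
 * If the sender's and receiver's user namespaces differ, si_uid must be
 * translated into the receiver's namespace so that userspace sees a
 * meaningful uid value.
 */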
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGVs and the like; that is
 * why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

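/*
 * pid > 0   signal the single process pid
 * pid == 0  signal every process in the caller's process group
 * pid == -1 signal every process the caller is permitted to signal,
 *           except the caller's own thread group and pid 1
 * pid < -1  signal every process in the process group -pid
 */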
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

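	/*
	 * Decode tsk->exit_code using the wait(2) status conventions:
	 * bit 0x80 flags a core dump, a non-zero low 7 bits is the
	 * terminating signal, and otherwise the high byte holds the
	 * exit status passed to exit().
	 */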
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * would deadlock and is pointless anyway because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
1783static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1784	__releases(&current->sighand->siglock)
1785	__acquires(&current->sighand->siglock)
1786{
1787	bool gstop_done = false;
1788
1789	if (arch_ptrace_stop_needed(exit_code, info)) {
1790		/*
1791		 * The arch code has something special to do before a
1792		 * ptrace stop.  This is allowed to block, e.g. for faults
1793		 * on user stack pages.  We can't keep the siglock while
1794		 * calling arch_ptrace_stop, so we must release it now.
1795		 * To preserve proper semantics, we must do this before
1796		 * any signal bookkeeping like checking group_stop_count.
1797		 * Meanwhile, a SIGKILL could come in before we retake the
1798		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1799		 * So after regaining the lock, we must check for SIGKILL.
1800		 */
1801		spin_unlock_irq(&current->sighand->siglock);
1802		arch_ptrace_stop(exit_code, info);
1803		spin_lock_irq(&current->sighand->siglock);
1804		if (sigkill_pending(current))
1805			return;
1806	}
1807
 
 
1808	/*
1809	 * We're committing to trapping.  TRACED should be visible before
1810	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1811	 * Also, transition to TRACED and updates to ->jobctl should be
1812	 * atomic with respect to siglock and should be done after the arch
1813	 * hook as siglock is released and regrabbed across it.
 
 
 
 
 
 
 
 
 
 
 
1814	 */
1815	set_current_state(TASK_TRACED);
1816
1817	current->last_siginfo = info;
1818	current->exit_code = exit_code;
1819
1820	/*
1821	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1822	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
1823	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1824	 * could be clear now.  We act as if SIGCONT is received after
1825	 * TASK_TRACED is entered - ignore it.
1826	 */
1827	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1828		gstop_done = task_participate_group_stop(current);
1829
1830	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1831	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1832	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1833		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1834
1835	/* entering a trap, clear TRAPPING */
1836	task_clear_jobctl_trapping(current);
1837
1838	spin_unlock_irq(&current->sighand->siglock);
1839	read_lock(&tasklist_lock);
1840	if (may_ptrace_stop()) {
1841		/*
1842		 * Notify parents of the stop.
1843		 *
1844		 * While ptraced, there are two parents - the ptracer and
1845		 * the real_parent of the group_leader.  The ptracer should
1846		 * know about every stop while the real parent is only
1847		 * interested in the completion of group stop.  The states
1848		 * for the two don't interact with each other.  Notify
1849		 * separately unless they're gonna be duplicates.
1850		 */
1851		do_notify_parent_cldstop(current, true, why);
1852		if (gstop_done && ptrace_reparented(current))
1853			do_notify_parent_cldstop(current, false, why);
1854
1855		/*
1856		 * Don't want to allow preemption here, because
1857		 * sys_ptrace() needs this task to be inactive.
1858		 *
1859		 * XXX: implement read_unlock_no_resched().
1860		 */
1861		preempt_disable();
1862		read_unlock(&tasklist_lock);
1863		preempt_enable_no_resched();
1864		freezable_schedule();
1865	} else {
1866		/*
1867		 * By the time we got the lock, our tracer went away.
1868		 * Don't drop the lock yet, another tracer may come.
1869		 *
1870		 * If @gstop_done, the ptracer went away between group stop
1871		 * completion and here.  During detach, it would have set
1872		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1873		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1874		 * the real parent of the group stop completion is enough.
1875		 */
1876		if (gstop_done)
1877			do_notify_parent_cldstop(current, false, why);
1878
1879		/* tasklist protects us from ptrace_freeze_traced() */
1880		__set_current_state(TASK_RUNNING);
1881		if (clear_code)
1882			current->exit_code = 0;
1883		read_unlock(&tasklist_lock);
1884	}
1885
1886	/*
1887	 * We are back.  Now reacquire the siglock before touching
1888	 * last_siginfo, so that we are sure to have synchronized with
1889	 * any signal-sending on another CPU that wants to examine it.
1890	 */
1891	spin_lock_irq(&current->sighand->siglock);
1892	current->last_siginfo = NULL;
1893
1894	/* LISTENING can be set only during STOP traps, clear it */
1895	current->jobctl &= ~JOBCTL_LISTENING;
1896
1897	/*
1898	 * Queued signals ignored us while we were stopped for tracing.
1899	 * So check for any that we should take before resuming user mode.
1900	 * This sets TIF_SIGPENDING, but never clears it.
1901	 */
1902	recalc_sigpending_tsk(current);
1903}
1904
1905static void ptrace_do_notify(int signr, int exit_code, int why)
1906{
1907	siginfo_t info;
1908
1909	memset(&info, 0, sizeof info);
1910	info.si_signo = signr;
1911	info.si_code = exit_code;
1912	info.si_pid = task_pid_vnr(current);
1913	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1914
1915	/* Let the debugger run.  */
1916	ptrace_stop(exit_code, why, 1, &info);
1917}
1918
1919void ptrace_notify(int exit_code)
1920{
1921	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1922	if (unlikely(current->task_works))
1923		task_work_run();
1924
1925	spin_lock_irq(&current->sighand->siglock);
1926	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1927	spin_unlock_irq(&current->sighand->siglock);
1928}
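
/*
 * Example (editor's sketch): callers report ptrace events by packing the
 * event number into bits 8-15 of @exit_code while keeping SIGTRAP in the
 * low seven bits, which is exactly what the BUG_ON() above enforces, as
 * ptrace_event() in <linux/ptrace.h> does:
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 */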
1929
1930/**
1931 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1932 * @signr: signal number causing the group stop, if initiating one
1933 *
1934 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1935 * and participate in it.  If already set, participate in the existing
1936 * group stop.  If participated in a group stop (and thus slept), %true is
1937 * returned with siglock released.
1938 *
1939 * If ptraced, this function doesn't handle stop itself.  Instead,
1940 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1941 * untouched.  The caller must ensure that INTERRUPT trap handling takes
1942 * place afterwards.
1943 *
1944 * CONTEXT:
1945 * Must be called with @current->sighand->siglock held, which is released
1946 * on %true return.
1947 *
1948 * RETURNS:
1949 * %false if group stop is already cancelled or ptrace trap is scheduled.
1950 * %true if participated in group stop.
1951 */
1952static bool do_signal_stop(int signr)
1953	__releases(&current->sighand->siglock)
1954{
1955	struct signal_struct *sig = current->signal;
1956
1957	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1958		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1959		struct task_struct *t;
1960
1961		/* signr will be recorded in task->jobctl for retries */
1962		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1963
1964		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1965		    unlikely(signal_group_exit(sig)))
1966			return false;
1967		/*
1968		 * There is no group stop already in progress.  We must
1969		 * initiate one now.
1970		 *
1971		 * While ptraced, a task may be resumed while group stop is
1972		 * still in effect and then receive a stop signal and
1973		 * initiate another group stop.  This deviates from the
1974		 * usual behavior as two consecutive stop signals can't
1975		 * cause two group stops when !ptraced.  That is why we
1976		 * also check !task_is_stopped(t) below.
1977		 *
1978		 * The condition can be distinguished by testing whether
1979		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
1980		 * group_exit_code in such a case.
1981		 *
1982		 * This is not necessary for SIGNAL_STOP_CONTINUED because
1983		 * an intervening stop signal is required to cause two
1984		 * continued events regardless of ptrace.
1985		 */
1986		if (!(sig->flags & SIGNAL_STOP_STOPPED))
1987			sig->group_exit_code = signr;
1988
1989		sig->group_stop_count = 0;
1990
1991		if (task_set_jobctl_pending(current, signr | gstop))
1992			sig->group_stop_count++;
1993
1994		t = current;
1995		while_each_thread(current, t) {
1996			/*
1997			 * Setting state to TASK_STOPPED for a group
1998			 * stop is always done with the siglock held,
1999			 * so this check has no races.
2000			 */
2001			if (!task_is_stopped(t) &&
2002			    task_set_jobctl_pending(t, signr | gstop)) {
2003				sig->group_stop_count++;
2004				if (likely(!(t->ptrace & PT_SEIZED)))
2005					signal_wake_up(t, 0);
2006				else
2007					ptrace_trap_notify(t);
2008			}
2009		}
2010	}
2011
2012	if (likely(!current->ptrace)) {
2013		int notify = 0;
2014
2015		/*
2016		 * If there are no other threads in the group, or if there
2017		 * is a group stop in progress and we are the last to stop,
2018		 * report to the parent.
2019		 */
2020		if (task_participate_group_stop(current))
2021			notify = CLD_STOPPED;
2022
2023		__set_current_state(TASK_STOPPED);
2024		spin_unlock_irq(&current->sighand->siglock);
2025
2026		/*
2027		 * Notify the parent of the group stop completion.  Because
2028		 * we're not holding either the siglock or tasklist_lock
2029		 * here, a ptracer may attach in between; however, this is for
2030		 * group stop and should always be delivered to the real
2031		 * parent of the group leader.  The new ptracer will get
2032		 * its notification when this task transitions into
2033		 * TASK_TRACED.
2034		 */
2035		if (notify) {
2036			read_lock(&tasklist_lock);
2037			do_notify_parent_cldstop(current, false, notify);
2038			read_unlock(&tasklist_lock);
2039		}
2040
2041		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2042		freezable_schedule();
2043		return true;
2044	} else {
2045		/*
2046		 * While ptraced, group stop is handled by STOP trap.
2047		 * Schedule it and let the caller deal with it.
2048		 */
2049		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2050		return false;
2051	}
2052}
2053
2054/**
2055 * do_jobctl_trap - take care of ptrace jobctl traps
2056 *
2057 * When PT_SEIZED, it's used for both group stop and explicit
2058 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2059 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2060 * the stop signal; otherwise, %SIGTRAP.
2061 *
2062 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2063 * number as exit_code and no siginfo.
2064 *
2065 * CONTEXT:
2066 * Must be called with @current->sighand->siglock held, which may be
2067 * released and re-acquired before returning with intervening sleep.
2068 */
2069static void do_jobctl_trap(void)
2070{
2071	struct signal_struct *signal = current->signal;
2072	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2073
2074	if (current->ptrace & PT_SEIZED) {
2075		if (!signal->group_stop_count &&
2076		    !(signal->flags & SIGNAL_STOP_STOPPED))
2077			signr = SIGTRAP;
2078		WARN_ON_ONCE(!signr);
2079		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2080				 CLD_STOPPED);
2081	} else {
2082		WARN_ON_ONCE(!signr);
2083		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2084		current->exit_code = 0;
2085	}
2086}
2087
2088static int ptrace_signal(int signr, siginfo_t *info)
2089{
2090	ptrace_signal_deliver();
2091	/*
2092	 * We do not check sig_kernel_stop(signr) but set this marker
2093	 * unconditionally because we do not know whether debugger will
2094	 * change signr. This flag has no meaning unless we are going
2095	 * to stop after return from ptrace_stop(). In this case it will
2096	 * be checked in do_signal_stop(); we should only stop if it was
2097	 * not cleared by SIGCONT while we were sleeping. See also the
2098	 * comment in dequeue_signal().
2099	 */
2100	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2101	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2102
2103	/* We're back.  Did the debugger cancel the sig?  */
2104	signr = current->exit_code;
2105	if (signr == 0)
2106		return signr;
2107
2108	current->exit_code = 0;
2109
2110	/*
2111	 * Update the siginfo structure if the signal has
2112	 * changed.  If the debugger wanted something
2113	 * specific in the siginfo structure then it should
2114	 * have updated *info via PTRACE_SETSIGINFO.
2115	 */
2116	if (signr != info->si_signo) {
2117		info->si_signo = signr;
2118		info->si_errno = 0;
2119		info->si_code = SI_USER;
2120		rcu_read_lock();
2121		info->si_pid = task_pid_vnr(current->parent);
2122		info->si_uid = from_kuid_munged(current_user_ns(),
2123						task_uid(current->parent));
2124		rcu_read_unlock();
2125	}
2126
2127	/* If the (new) signal is now blocked, requeue it.  */
2128	if (sigismember(&current->blocked, signr)) {
2129		specific_send_sig_info(signr, info, current);
2130		signr = 0;
2131	}
2132
2133	return signr;
2134}
2135
2136int get_signal(struct ksignal *ksig)
2137{
2138	struct sighand_struct *sighand = current->sighand;
2139	struct signal_struct *signal = current->signal;
2140	int signr;
2141
2142	if (unlikely(current->task_works))
2143		task_work_run();
2144
2145	if (unlikely(uprobe_deny_signal()))
2146		return 0;
2147
2148	/*
2149	 * Do this once; we can't return to user-mode if freezing() == T.
2150	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2151	 * thus do not need another check after return.
2152	 */
2153	try_to_freeze();
2154
2155relock:
2156	spin_lock_irq(&sighand->siglock);
2157	/*
2158	 * Every stopped thread goes here after wakeup. Check to see if
2159	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2160	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2161	 */
2162	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2163		int why;
2164
2165		if (signal->flags & SIGNAL_CLD_CONTINUED)
2166			why = CLD_CONTINUED;
2167		else
2168			why = CLD_STOPPED;
2169
2170		signal->flags &= ~SIGNAL_CLD_MASK;
2171
2172		spin_unlock_irq(&sighand->siglock);
2173
2174		/*
2175		 * Notify the parent that we're continuing.  This event is
2176		 * always per-process and doesn't make a whole lot of sense
2177		 * for ptracers, who shouldn't consume the state via
2178		 * wait(2) either, but, for backward compatibility, notify
2179		 * the ptracer of the group leader too unless it's gonna be
2180		 * a duplicate.
2181		 */
2182		read_lock(&tasklist_lock);
2183		do_notify_parent_cldstop(current, false, why);
2184
2185		if (ptrace_reparented(current->group_leader))
2186			do_notify_parent_cldstop(current->group_leader,
2187						true, why);
2188		read_unlock(&tasklist_lock);
2189
2190		goto relock;
2191	}
2192
2193	for (;;) {
2194		struct k_sigaction *ka;
2195
2196		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2197		    do_signal_stop(0))
2198			goto relock;
2199
2200		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2201			do_jobctl_trap();
2202			spin_unlock_irq(&sighand->siglock);
2203			goto relock;
2204		}
2205
2206		signr = dequeue_signal(current, &current->blocked, &ksig->info);
2207
2208		if (!signr)
2209			break; /* will return 0 */
2210
2211		if (unlikely(current->ptrace) && signr != SIGKILL) {
2212			signr = ptrace_signal(signr, &ksig->info);
2213			if (!signr)
2214				continue;
2215		}
2216
2217		ka = &sighand->action[signr-1];
2218
2219		/* Trace actually delivered signals. */
2220		trace_signal_deliver(signr, &ksig->info, ka);
2221
2222		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2223			continue;
2224		if (ka->sa.sa_handler != SIG_DFL) {
2225			/* Run the handler.  */
2226			ksig->ka = *ka;
2227
2228			if (ka->sa.sa_flags & SA_ONESHOT)
2229				ka->sa.sa_handler = SIG_DFL;
2230
2231			break; /* will return non-zero "signr" value */
2232		}
2233
2234		/*
2235		 * Now we are doing the default action for this signal.
2236		 */
2237		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2238			continue;
2239
2240		/*
2241		 * Global init gets no signals it doesn't want.
2242		 * Container-init gets no signals it doesn't want from same
2243		 * container.
2244		 *
2245		 * Note that if global/container-init sees a sig_kernel_only()
2246		 * signal here, the signal must have been generated internally
2247		 * or must have come from an ancestor namespace. In either
2248		 * case, the signal cannot be dropped.
2249		 */
2250		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2251				!sig_kernel_only(signr))
2252			continue;
2253
2254		if (sig_kernel_stop(signr)) {
2255			/*
2256			 * The default action is to stop all threads in
2257			 * the thread group.  The job control signals
2258			 * do nothing in an orphaned pgrp, but SIGSTOP
2259			 * always works.  Note that siglock needs to be
2260			 * dropped during the call to is_orphaned_pgrp()
2261			 * because of lock ordering with tasklist_lock.
2262			 * This allows an intervening SIGCONT to be posted.
2263			 * We need to check for that and bail out if necessary.
2264			 */
2265			if (signr != SIGSTOP) {
2266				spin_unlock_irq(&sighand->siglock);
2267
2268				/* signals can be posted during this window */
2269
2270				if (is_current_pgrp_orphaned())
2271					goto relock;
2272
2273				spin_lock_irq(&sighand->siglock);
2274			}
2275
2276			if (likely(do_signal_stop(ksig->info.si_signo))) {
2277				/* It released the siglock.  */
2278				goto relock;
2279			}
2280
2281			/*
2282			 * We didn't actually stop, due to a race
2283			 * with SIGCONT or something like that.
2284			 */
2285			continue;
2286		}
2287
2288		spin_unlock_irq(&sighand->siglock);
2289
2290		/*
2291		 * Anything else is fatal, maybe with a core dump.
2292		 */
2293		current->flags |= PF_SIGNALED;
2294
2295		if (sig_kernel_coredump(signr)) {
2296			if (print_fatal_signals)
2297				print_fatal_signal(ksig->info.si_signo);
2298			proc_coredump_connector(current);
2299			/*
2300			 * If it was able to dump core, this kills all
2301			 * other threads in the group and synchronizes with
2302			 * their demise.  If we lost the race with another
2303			 * thread getting here, it set group_exit_code
2304			 * first and our do_group_exit call below will use
2305			 * that value and ignore the one we pass it.
2306			 */
2307			do_coredump(&ksig->info);
2308		}
2309
2310		/*
2311		 * Death signals, no core dump.
2312		 */
2313		do_group_exit(ksig->info.si_signo);
2314		/* NOTREACHED */
2315	}
2316	spin_unlock_irq(&sighand->siglock);
2317
2318	ksig->sig = signr;
2319	return ksig->sig > 0;
2320}
2321
2322/**
2323 * signal_delivered - mark a signal as successfully delivered
2324 * @ksig:		kernel signal struct
2325 * @stepping:		nonzero if debugger single-step or block-step in use
2326 *
2327 * This function should be called when a signal has successfully been
2328 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2329 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2330 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2331 */
2332static void signal_delivered(struct ksignal *ksig, int stepping)
2333{
2334	sigset_t blocked;
2335
2336	/* A signal was successfully delivered, and the
2337	   saved sigmask was stored on the signal frame,
2338	   and will be restored by sigreturn.  So we can
2339	   simply clear the restore sigmask flag.  */
2340	clear_restore_sigmask();
2341
2342	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2343	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2344		sigaddset(&blocked, ksig->sig);
2345	set_current_blocked(&blocked);
2346	tracehook_signal_handler(stepping);
2347}
2348
2349void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2350{
2351	if (failed)
2352		force_sigsegv(ksig->sig, current);
2353	else
2354		signal_delivered(ksig, stepping);
2355}
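
/*
 * Example (editor's sketch, not any particular arch): the per-arch signal
 * dispatch code is the intended caller of get_signal() and
 * signal_setup_done().  Here setup_rt_frame() stands in for a hypothetical
 * arch helper that builds the user-mode handler frame:
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			int failed = setup_rt_frame(&ksig, regs);
 *			signal_setup_done(failed, &ksig, 0);
 *			return;
 *		}
 *		restore_saved_sigmask();
 *	}
 */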
2356
2357/*
2358 * It could be that complete_signal() picked us to notify about the
2359 * group-wide signal. Other threads should be notified now to take
2360 * the shared signals in @which since we will not.
2361 */
2362static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2363{
2364	sigset_t retarget;
2365	struct task_struct *t;
2366
2367	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2368	if (sigisemptyset(&retarget))
2369		return;
2370
2371	t = tsk;
2372	while_each_thread(tsk, t) {
2373		if (t->flags & PF_EXITING)
2374			continue;
2375
2376		if (!has_pending_signals(&retarget, &t->blocked))
2377			continue;
2378		/* Remove the signals this thread can handle. */
2379		sigandsets(&retarget, &retarget, &t->blocked);
2380
2381		if (!signal_pending(t))
2382			signal_wake_up(t, 0);
2383
2384		if (sigisemptyset(&retarget))
2385			break;
2386	}
2387}
2388
2389void exit_signals(struct task_struct *tsk)
2390{
2391	int group_stop = 0;
2392	sigset_t unblocked;
2393
2394	/*
2395	 * @tsk is about to have PF_EXITING set - lock out users which
2396	 * expect stable threadgroup.
2397	 */
2398	threadgroup_change_begin(tsk);
2399
2400	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2401		tsk->flags |= PF_EXITING;
2402		threadgroup_change_end(tsk);
2403		return;
2404	}
2405
2406	spin_lock_irq(&tsk->sighand->siglock);
2407	/*
2408	 * From now this task is not visible for group-wide signals,
2409	 * see wants_signal(), do_signal_stop().
2410	 */
2411	tsk->flags |= PF_EXITING;
2412
2413	threadgroup_change_end(tsk);
2414
2415	if (!signal_pending(tsk))
2416		goto out;
2417
2418	unblocked = tsk->blocked;
2419	signotset(&unblocked);
2420	retarget_shared_pending(tsk, &unblocked);
2421
2422	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2423	    task_participate_group_stop(tsk))
2424		group_stop = CLD_STOPPED;
2425out:
2426	spin_unlock_irq(&tsk->sighand->siglock);
2427
2428	/*
2429	 * If group stop has completed, deliver the notification.  This
2430	 * should always go to the real parent of the group leader.
2431	 */
2432	if (unlikely(group_stop)) {
2433		read_lock(&tasklist_lock);
2434		do_notify_parent_cldstop(tsk, false, group_stop);
2435		read_unlock(&tasklist_lock);
2436	}
2437}
2438
2439EXPORT_SYMBOL(recalc_sigpending);
2440EXPORT_SYMBOL_GPL(dequeue_signal);
2441EXPORT_SYMBOL(flush_signals);
2442EXPORT_SYMBOL(force_sig);
2443EXPORT_SYMBOL(send_sig);
2444EXPORT_SYMBOL(send_sig_info);
2445EXPORT_SYMBOL(sigprocmask);
2446
2447/*
2448 * System call entry points.
2449 */
2450
2451/**
2452 *  sys_restart_syscall - restart a system call
2453 */
2454SYSCALL_DEFINE0(restart_syscall)
2455{
2456	struct restart_block *restart = &current->restart_block;
2457	return restart->fn(restart);
2458}
2459
2460long do_no_restart_syscall(struct restart_block *param)
2461{
2462	return -EINTR;
2463}
2464
2465static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2466{
2467	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2468		sigset_t newblocked;
2469		/* A set of now blocked but previously unblocked signals. */
2470		sigandnsets(&newblocked, newset, &current->blocked);
2471		retarget_shared_pending(tsk, &newblocked);
2472	}
2473	tsk->blocked = *newset;
2474	recalc_sigpending();
2475}
2476
2477/**
2478 * set_current_blocked - change current->blocked mask
2479 * @newset: new mask
2480 *
2481 * It is wrong to change ->blocked directly, this helper should be used
2482 * to ensure the process can't miss a shared signal we are going to block.
2483 */
2484void set_current_blocked(sigset_t *newset)
2485{
2486	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2487	__set_current_blocked(newset);
2488}
2489
2490void __set_current_blocked(const sigset_t *newset)
2491{
2492	struct task_struct *tsk = current;
2493
2494	/*
2495	 * In case the signal mask hasn't changed, there is nothing we need
2496	 * to do. The current->blocked shouldn't be modified by other task.
2497	 */
2498	if (sigequalsets(&tsk->blocked, newset))
2499		return;
2500
2501	spin_lock_irq(&tsk->sighand->siglock);
2502	__set_task_blocked(tsk, newset);
2503	spin_unlock_irq(&tsk->sighand->siglock);
2504}
2505
2506/*
2507 * This is also useful for kernel threads that want to temporarily
2508 * (or permanently) block certain signals.
2509 *
2510 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2511 * interface happily blocks "unblockable" signals like SIGKILL
2512 * and friends.
2513 */
2514int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2515{
2516	struct task_struct *tsk = current;
2517	sigset_t newset;
2518
2519	/* Lockless, only current can change ->blocked, never from irq */
2520	if (oldset)
2521		*oldset = tsk->blocked;
2522
2523	switch (how) {
2524	case SIG_BLOCK:
2525		sigorsets(&newset, &tsk->blocked, set);
2526		break;
2527	case SIG_UNBLOCK:
2528		sigandnsets(&newset, &tsk->blocked, set);
2529		break;
2530	case SIG_SETMASK:
2531		newset = *set;
2532		break;
2533	default:
2534		return -EINVAL;
2535	}
2536
2537	__set_current_blocked(&newset);
2538	return 0;
2539}
2540
2541/**
2542 *  sys_rt_sigprocmask - change the list of currently blocked signals
2543 *  @how: whether to add, remove, or set signals
2544 *  @nset: signals to add, remove, or set (if non-null)
2545 *  @oset: previous value of signal mask if non-null
2546 *  @sigsetsize: size of sigset_t type
2547 */
2548SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2549		sigset_t __user *, oset, size_t, sigsetsize)
2550{
2551	sigset_t old_set, new_set;
2552	int error;
2553
2554	/* XXX: Don't preclude handling different sized sigset_t's.  */
2555	if (sigsetsize != sizeof(sigset_t))
2556		return -EINVAL;
2557
2558	old_set = current->blocked;
2559
2560	if (nset) {
2561		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2562			return -EFAULT;
2563		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2564
2565		error = sigprocmask(how, &new_set, NULL);
2566		if (error)
2567			return error;
2568	}
2569
2570	if (oset) {
2571		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2572			return -EFAULT;
2573	}
2574
2575	return 0;
2576}
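
/*
 * Example (editor's sketch): the glibc sigprocmask(3) wrapper ends up in
 * this syscall.  A typical userspace use blocks a signal around a
 * critical section and then restores the previous mask:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	do_critical_work();			// hypothetical helper
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */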
2577
2578#ifdef CONFIG_COMPAT
2579COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2580		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2581{
2582#ifdef __BIG_ENDIAN
2583	sigset_t old_set = current->blocked;
2584
2585	/* XXX: Don't preclude handling different sized sigset_t's.  */
2586	if (sigsetsize != sizeof(sigset_t))
2587		return -EINVAL;
2588
2589	if (nset) {
2590		compat_sigset_t new32;
2591		sigset_t new_set;
2592		int error;
2593		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2594			return -EFAULT;
2595
2596		sigset_from_compat(&new_set, &new32);
2597		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2598
2599		error = sigprocmask(how, &new_set, NULL);
2600		if (error)
2601			return error;
2602	}
2603	if (oset) {
2604		compat_sigset_t old32;
2605		sigset_to_compat(&old32, &old_set);
2606		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2607			return -EFAULT;
2608	}
2609	return 0;
2610#else
2611	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2612				  (sigset_t __user *)oset, sigsetsize);
2613#endif
2614}
2615#endif
2616
2617static int do_sigpending(void *set, unsigned long sigsetsize)
2618{
2619	if (sigsetsize > sizeof(sigset_t))
2620		return -EINVAL;
2621
2622	spin_lock_irq(&current->sighand->siglock);
2623	sigorsets(set, &current->pending.signal,
2624		  &current->signal->shared_pending.signal);
2625	spin_unlock_irq(&current->sighand->siglock);
2626
2627	/* Outside the lock because only this thread touches it.  */
2628	sigandsets(set, &current->blocked, set);
2629	return 0;
2630}
2631
2632/**
2633 *  sys_rt_sigpending - examine a pending signal that has been raised
2634 *			while blocked
2635 *  @uset: stores pending signals
2636 *  @sigsetsize: size of sigset_t type, or smaller
2637 */
2638SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2639{
2640	sigset_t set;
2641	int err = do_sigpending(&set, sigsetsize);
2642	if (!err && copy_to_user(uset, &set, sigsetsize))
2643		err = -EFAULT;
2644	return err;
2645}
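
/*
 * Example (editor's sketch): sigpending(2) is the userspace view of this
 * call; it reports signals that were raised while blocked and are still
 * waiting for delivery:
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGTERM))
 *		handle_deferred_term();		// hypothetical helper
 */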
2646
2647#ifdef CONFIG_COMPAT
2648COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2649		compat_size_t, sigsetsize)
2650{
2651#ifdef __BIG_ENDIAN
2652	sigset_t set;
2653	int err = do_sigpending(&set, sigsetsize);
2654	if (!err) {
2655		compat_sigset_t set32;
2656		sigset_to_compat(&set32, &set);
2657		/* we can get here only if sigsetsize <= sizeof(set) */
2658		if (copy_to_user(uset, &set32, sigsetsize))
2659			err = -EFAULT;
2660	}
2661	return err;
2662#else
2663	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2664#endif
2665}
2666#endif
2667
2668#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2669
2670int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2671{
2672	int err;
2673
2674	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2675		return -EFAULT;
2676	if (from->si_code < 0)
2677		return __copy_to_user(to, from, sizeof(siginfo_t))
2678			? -EFAULT : 0;
2679	/*
2680	 * If you change siginfo_t structure, please be sure
2681	 * this code is fixed accordingly.
2682	 * Please remember to update the signalfd_copyinfo() function
2683	 * inside fs/signalfd.c too, in case siginfo_t changes.
2684	 * It should never copy any pad contained in the structure
2685	 * to avoid security leaks, but must copy the generic
2686	 * 3 ints plus the relevant union member.
2687	 */
2688	err = __put_user(from->si_signo, &to->si_signo);
2689	err |= __put_user(from->si_errno, &to->si_errno);
2690	err |= __put_user((short)from->si_code, &to->si_code);
2691	switch (from->si_code & __SI_MASK) {
2692	case __SI_KILL:
2693		err |= __put_user(from->si_pid, &to->si_pid);
2694		err |= __put_user(from->si_uid, &to->si_uid);
2695		break;
2696	case __SI_TIMER:
2697		 err |= __put_user(from->si_tid, &to->si_tid);
2698		 err |= __put_user(from->si_overrun, &to->si_overrun);
2699		 err |= __put_user(from->si_ptr, &to->si_ptr);
2700		break;
2701	case __SI_POLL:
2702		err |= __put_user(from->si_band, &to->si_band);
2703		err |= __put_user(from->si_fd, &to->si_fd);
2704		break;
2705	case __SI_FAULT:
2706		err |= __put_user(from->si_addr, &to->si_addr);
2707#ifdef __ARCH_SI_TRAPNO
2708		err |= __put_user(from->si_trapno, &to->si_trapno);
2709#endif
2710#ifdef BUS_MCEERR_AO
2711		/*
2712		 * Other callers might not initialize the si_lsb field,
2713		 * so check explicitly for the right codes here.
2714		 */
2715		if (from->si_signo == SIGBUS &&
2716		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2717			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2718#endif
2719#ifdef SEGV_BNDERR
2720		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2721			err |= __put_user(from->si_lower, &to->si_lower);
2722			err |= __put_user(from->si_upper, &to->si_upper);
2723		}
2724#endif
2725#ifdef SEGV_PKUERR
2726		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2727			err |= __put_user(from->si_pkey, &to->si_pkey);
2728#endif
2729		break;
2730	case __SI_CHLD:
2731		err |= __put_user(from->si_pid, &to->si_pid);
2732		err |= __put_user(from->si_uid, &to->si_uid);
2733		err |= __put_user(from->si_status, &to->si_status);
2734		err |= __put_user(from->si_utime, &to->si_utime);
2735		err |= __put_user(from->si_stime, &to->si_stime);
2736		break;
2737	case __SI_RT: /* This is not generated by the kernel as of now. */
2738	case __SI_MESGQ: /* But this is */
2739		err |= __put_user(from->si_pid, &to->si_pid);
2740		err |= __put_user(from->si_uid, &to->si_uid);
2741		err |= __put_user(from->si_ptr, &to->si_ptr);
2742		break;
2743#ifdef __ARCH_SIGSYS
2744	case __SI_SYS:
2745		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2746		err |= __put_user(from->si_syscall, &to->si_syscall);
2747		err |= __put_user(from->si_arch, &to->si_arch);
2748		break;
2749#endif
2750	default: /* this is just in case for now ... */
2751		err |= __put_user(from->si_pid, &to->si_pid);
2752		err |= __put_user(from->si_uid, &to->si_uid);
2753		break;
2754	}
2755	return err;
2756}
2757
2758#endif
2759
2760/**
2761 *  do_sigtimedwait - wait for queued signals specified in @which
2762 *  @which: queued signals to wait for
2763 *  @info: if non-null, the signal's siginfo is returned here
2764 *  @ts: upper bound on process time suspension
2765 */
2766int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2767		    const struct timespec *ts)
2768{
2769	ktime_t *to = NULL, timeout = KTIME_MAX;
2770	struct task_struct *tsk = current;
2771	sigset_t mask = *which;
2772	int sig, ret = 0;
2773
2774	if (ts) {
2775		if (!timespec_valid(ts))
2776			return -EINVAL;
2777		timeout = timespec_to_ktime(*ts);
2778		to = &timeout;
2779	}
2780
2781	/*
2782	 * Invert the set of allowed signals to get those we want to block.
2783	 */
2784	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2785	signotset(&mask);
2786
2787	spin_lock_irq(&tsk->sighand->siglock);
2788	sig = dequeue_signal(tsk, &mask, info);
2789	if (!sig && timeout) {
2790		/*
2791		 * None ready, temporarily unblock those we're interested in
2792		 * while we are sleeping so that we'll be awakened when
2793		 * they arrive. Unblocking is always fine, we can avoid
2794		 * set_current_blocked().
2795		 */
2796		tsk->real_blocked = tsk->blocked;
2797		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2798		recalc_sigpending();
2799		spin_unlock_irq(&tsk->sighand->siglock);
2800
2801		__set_current_state(TASK_INTERRUPTIBLE);
2802		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2803							 HRTIMER_MODE_REL);
2804		spin_lock_irq(&tsk->sighand->siglock);
2805		__set_task_blocked(tsk, &tsk->real_blocked);
2806		sigemptyset(&tsk->real_blocked);
2807		sig = dequeue_signal(tsk, &mask, info);
2808	}
2809	spin_unlock_irq(&tsk->sighand->siglock);
2810
2811	if (sig)
2812		return sig;
2813	return ret ? -EINTR : -EAGAIN;
2814}
2815
2816/**
2817 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2818 *			in @uthese
2819 *  @uthese: queued signals to wait for
2820 *  @uinfo: if non-null, the signal's siginfo is returned here
2821 *  @uts: upper bound on process time suspension
2822 *  @sigsetsize: size of sigset_t type
2823 */
2824SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2825		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2826		size_t, sigsetsize)
2827{
2828	sigset_t these;
2829	struct timespec ts;
2830	siginfo_t info;
2831	int ret;
2832
2833	/* XXX: Don't preclude handling different sized sigset_t's.  */
2834	if (sigsetsize != sizeof(sigset_t))
2835		return -EINVAL;
2836
2837	if (copy_from_user(&these, uthese, sizeof(these)))
2838		return -EFAULT;
2839
2840	if (uts) {
2841		if (copy_from_user(&ts, uts, sizeof(ts)))
2842			return -EFAULT;
2843	}
2844
2845	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2846
2847	if (ret > 0 && uinfo) {
2848		if (copy_siginfo_to_user(uinfo, &info))
2849			ret = -EFAULT;
2850	}
2851
2852	return ret;
2853}
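
/*
 * Example (editor's sketch): userspace reaches this via sigtimedwait(2).
 * The waited-for signals should be blocked first so they stay queued
 * instead of being delivered to a handler:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 */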
2854
2855/**
2856 *  sys_kill - send a signal to a process
2857 *  @pid: the PID of the process
2858 *  @sig: signal to be sent
2859 */
2860SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2861{
2862	struct siginfo info;
2863
2864	info.si_signo = sig;
2865	info.si_errno = 0;
2866	info.si_code = SI_USER;
2867	info.si_pid = task_tgid_vnr(current);
2868	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2869
2870	return kill_something_info(sig, &info, pid);
2871}
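
/*
 * Example (editor's sketch): signal 0 performs only the permission and
 * existence checks (see do_send_specific() below), making it the classic
 * "does this pid exist?" probe from userspace:
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		;	// pid exists, though we may lack permission to signal it
 */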
2872
2873static int
2874do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2875{
2876	struct task_struct *p;
2877	int error = -ESRCH;
2878
2879	rcu_read_lock();
2880	p = find_task_by_vpid(pid);
2881	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2882		error = check_kill_permission(sig, info, p);
2883		/*
2884		 * The null signal is a permissions and process existence
2885		 * probe.  No signal is actually delivered.
2886		 */
2887		if (!error && sig) {
2888			error = do_send_sig_info(sig, info, p, false);
2889			/*
2890			 * If lock_task_sighand() failed we pretend the task
2891			 * dies after receiving the signal. The window is tiny,
2892			 * and the signal is private anyway.
2893			 */
2894			if (unlikely(error == -ESRCH))
2895				error = 0;
2896		}
2897	}
2898	rcu_read_unlock();
2899
2900	return error;
2901}
2902
2903static int do_tkill(pid_t tgid, pid_t pid, int sig)
2904{
2905	struct siginfo info = {};
2906
2907	info.si_signo = sig;
2908	info.si_errno = 0;
2909	info.si_code = SI_TKILL;
2910	info.si_pid = task_tgid_vnr(current);
2911	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2912
2913	return do_send_specific(tgid, pid, sig, &info);
2914}
2915
2916/**
2917 *  sys_tgkill - send signal to one specific thread
2918 *  @tgid: the thread group ID of the thread
2919 *  @pid: the PID of the thread
2920 *  @sig: signal to be sent
2921 *
2922 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2923 *  exists but no longer belongs to the target process. This
2924 *  method solves the problem of threads exiting and PIDs getting reused.
2925 */
2926SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2927{
2928	/* This is only valid for single tasks */
2929	if (pid <= 0 || tgid <= 0)
2930		return -EINVAL;
2931
2932	return do_tkill(tgid, pid, sig);
2933}
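
/*
 * Example (editor's sketch): tgkill is usually reached through a raw
 * syscall(2), or indirectly through pthread_kill(3), to signal one
 * specific thread of a process by its kernel tid:
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */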
2934
2935/**
2936 *  sys_tkill - send signal to one specific task
2937 *  @pid: the PID of the task
2938 *  @sig: signal to be sent
2939 *
2940 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2941 */
2942SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2943{
2944	/* This is only valid for single tasks */
2945	if (pid <= 0)
2946		return -EINVAL;
2947
2948	return do_tkill(0, pid, sig);
2949}
2950
2951static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2952{
2953	/* Not even root can pretend to send signals from the kernel.
2954	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2955	 */
2956	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2957	    (task_pid_vnr(current) != pid))
2958		return -EPERM;
2959
2960	info->si_signo = sig;
2961
2962	/* POSIX.1b doesn't mention process groups.  */
2963	return kill_proc_info(sig, info, pid);
2964}
2965
2966/**
2967 *  sys_rt_sigqueueinfo - queue a signal and accompanying siginfo to a process
2968 *  @pid: the PID of the process
2969 *  @sig: signal to be sent
2970 *  @uinfo: signal info to be sent
2971 */
2972SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2973		siginfo_t __user *, uinfo)
2974{
2975	siginfo_t info;
2976	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2977		return -EFAULT;
2978	return do_rt_sigqueueinfo(pid, sig, &info);
2979}
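
/*
 * Example (editor's sketch): sigqueue(3) is the portable userspace route
 * into this syscall; the C library fills in si_code = SI_QUEUE and passes
 * the caller's payload in si_value:
 *
 *	union sigval value = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, value);
 */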
2980
2981#ifdef CONFIG_COMPAT
2982COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
2983			compat_pid_t, pid,
2984			int, sig,
2985			struct compat_siginfo __user *, uinfo)
2986{
2987	siginfo_t info = {};
2988	int ret = copy_siginfo_from_user32(&info, uinfo);
2989	if (unlikely(ret))
2990		return ret;
2991	return do_rt_sigqueueinfo(pid, sig, &info);
2992}
2993#endif
2994
2995static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2996{
2997	/* This is only valid for single tasks */
2998	if (pid <= 0 || tgid <= 0)
2999		return -EINVAL;
3000
3001	/* Not even root can pretend to send signals from the kernel.
3002	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3003	 */
3004	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3005	    (task_pid_vnr(current) != pid))
3006		return -EPERM;
3007
3008	info->si_signo = sig;
3009
3010	return do_send_specific(tgid, pid, sig, info);
3011}
3012
3013SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3014		siginfo_t __user *, uinfo)
3015{
3016	siginfo_t info;
3017
3018	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3019		return -EFAULT;
3020
3021	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3022}
3023
3024#ifdef CONFIG_COMPAT
3025COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3026			compat_pid_t, tgid,
3027			compat_pid_t, pid,
3028			int, sig,
3029			struct compat_siginfo __user *, uinfo)
3030{
3031	siginfo_t info = {};
3032
3033	if (copy_siginfo_from_user32(&info, uinfo))
3034		return -EFAULT;
3035	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3036}
3037#endif
3038
3039/*
3040 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3041 */
3042void kernel_sigaction(int sig, __sighandler_t action)
3043{
3044	spin_lock_irq(&current->sighand->siglock);
3045	current->sighand->action[sig - 1].sa.sa_handler = action;
3046	if (action == SIG_IGN) {
3047		sigset_t mask;
3048
3049		sigemptyset(&mask);
3050		sigaddset(&mask, sig);
3051
3052		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3053		flush_sigqueue_mask(&mask, &current->pending);
3054		recalc_sigpending();
3055	}
3056	spin_unlock_irq(&current->sighand->siglock);
3057}
3058EXPORT_SYMBOL(kernel_sigaction);
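
/*
 * Example (editor's sketch): kthreads normally use the allow_signal() and
 * disallow_signal() wrappers from <linux/signal.h> rather than calling
 * kernel_sigaction() directly:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		schedule_timeout_interruptible(HZ);
 *		if (signal_pending(current))
 *			break;	// a SIGTERM asked us to wind down
 *	}
 */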
3059
3060void __weak sigaction_compat_abi(struct k_sigaction *act,
3061		struct k_sigaction *oact)
3062{
3063}
3064
3065int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3066{
3067	struct task_struct *p = current, *t;
3068	struct k_sigaction *k;
3069	sigset_t mask;
3070
3071	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3072		return -EINVAL;
3073
3074	k = &p->sighand->action[sig-1];
3075
3076	spin_lock_irq(&p->sighand->siglock);
3077	if (oact)
3078		*oact = *k;
3079
3080	sigaction_compat_abi(act, oact);
3081
3082	if (act) {
3083		sigdelsetmask(&act->sa.sa_mask,
3084			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3085		*k = *act;
3086		/*
3087		 * POSIX 3.3.1.3:
3088		 *  "Setting a signal action to SIG_IGN for a signal that is
3089		 *   pending shall cause the pending signal to be discarded,
3090		 *   whether or not it is blocked."
3091		 *
3092		 *  "Setting a signal action to SIG_DFL for a signal that is
3093		 *   pending and whose default action is to ignore the signal
3094		 *   (for example, SIGCHLD), shall cause the pending signal to
3095		 *   be discarded, whether or not it is blocked"
3096		 */
3097		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3098			sigemptyset(&mask);
3099			sigaddset(&mask, sig);
3100			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3101			for_each_thread(p, t)
3102				flush_sigqueue_mask(&mask, &t->pending);
3103		}
3104	}
3105
3106	spin_unlock_irq(&p->sighand->siglock);
3107	return 0;
3108}
3109
3110static int
3111do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3112{
3113	stack_t oss;
3114	int error;
3115
3116	oss.ss_sp = (void __user *) current->sas_ss_sp;
3117	oss.ss_size = current->sas_ss_size;
3118	oss.ss_flags = sas_ss_flags(sp) |
3119		(current->sas_ss_flags & SS_FLAG_BITS);
3120
3121	if (uss) {
3122		void __user *ss_sp;
3123		size_t ss_size;
3124		unsigned ss_flags;
3125		int ss_mode;
3126
3127		error = -EFAULT;
3128		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3129			goto out;
3130		error = __get_user(ss_sp, &uss->ss_sp) |
3131			__get_user(ss_flags, &uss->ss_flags) |
3132			__get_user(ss_size, &uss->ss_size);
3133		if (error)
3134			goto out;
3135
3136		error = -EPERM;
3137		if (on_sig_stack(sp))
3138			goto out;
3139
3140		ss_mode = ss_flags & ~SS_FLAG_BITS;
3141		error = -EINVAL;
3142		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3143				ss_mode != 0)
3144			goto out;
3145
3146		if (ss_mode == SS_DISABLE) {
3147			ss_size = 0;
3148			ss_sp = NULL;
3149		} else {
3150			error = -ENOMEM;
3151			if (ss_size < MINSIGSTKSZ)
3152				goto out;
3153		}
3154
3155		current->sas_ss_sp = (unsigned long) ss_sp;
3156		current->sas_ss_size = ss_size;
3157		current->sas_ss_flags = ss_flags;
3158	}
3159
3160	error = 0;
3161	if (uoss) {
3162		error = -EFAULT;
3163		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3164			goto out;
3165		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3166			__put_user(oss.ss_size, &uoss->ss_size) |
3167			__put_user(oss.ss_flags, &uoss->ss_flags);
3168	}
3169
3170out:
3171	return error;
3172}
3173SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3174{
3175	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3176}
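
/*
 * Example (editor's sketch): a process that wants its SIGSEGV handler to
 * run even after overflowing its own stack installs an alternate stack
 * here, then registers the handler with SA_ONSTACK in sigaction(2):
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 */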
3177
3178int restore_altstack(const stack_t __user *uss)
3179{
3180	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3181	/* squash all but EFAULT for now */
3182	return err == -EFAULT ? err : 0;
3183}
3184
3185int __save_altstack(stack_t __user *uss, unsigned long sp)
3186{
3187	struct task_struct *t = current;
3188	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3189		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3190		__put_user(t->sas_ss_size, &uss->ss_size);
3191	if (err)
3192		return err;
3193	if (t->sas_ss_flags & SS_AUTODISARM)
3194		sas_ss_reset(t);
3195	return 0;
3196}
3197
3198#ifdef CONFIG_COMPAT
3199COMPAT_SYSCALL_DEFINE2(sigaltstack,
3200			const compat_stack_t __user *, uss_ptr,
3201			compat_stack_t __user *, uoss_ptr)
3202{
3203	stack_t uss, uoss;
3204	int ret;
3205	mm_segment_t seg;
3206
3207	if (uss_ptr) {
3208		compat_stack_t uss32;
3209
3210		memset(&uss, 0, sizeof(stack_t));
3211		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3212			return -EFAULT;
3213		uss.ss_sp = compat_ptr(uss32.ss_sp);
3214		uss.ss_flags = uss32.ss_flags;
3215		uss.ss_size = uss32.ss_size;
3216	}
3217	seg = get_fs();
3218	set_fs(KERNEL_DS);
3219	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3220			     (stack_t __force __user *) &uoss,
3221			     compat_user_stack_pointer());
3222	set_fs(seg);
3223	if (ret >= 0 && uoss_ptr)  {
3224		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3225		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3226		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3227		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3228			ret = -EFAULT;
3229	}
3230	return ret;
3231}
3232
3233int compat_restore_altstack(const compat_stack_t __user *uss)
3234{
3235	int err = compat_sys_sigaltstack(uss, NULL);
3236	/* squash all but -EFAULT for now */
3237	return err == -EFAULT ? err : 0;
3238}
3239
3240int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3241{
3242	int err;
3243	struct task_struct *t = current;
3244	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3245			 &uss->ss_sp) |
3246		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3247		__put_user(t->sas_ss_size, &uss->ss_size);
3248	if (err)
3249		return err;
3250	if (t->sas_ss_flags & SS_AUTODISARM)
3251		sas_ss_reset(t);
3252	return 0;
3253}
3254#endif
3255
3256#ifdef __ARCH_WANT_SYS_SIGPENDING
3257
3258/**
3259 *  sys_sigpending - examine pending signals
3260 *  @set: where mask of pending signal is returned
3261 */
3262SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3263{
3264	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); 
3265}
3266
3267#endif
3268
3269#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3270/**
3271 *  sys_sigprocmask - examine and change blocked signals
3272 *  @how: whether to add, remove, or set signals
3273 *  @nset: signals to add or remove (if non-null)
3274 *  @oset: previous value of signal mask if non-null
3275 *
3276 * Some platforms have their own version with special arguments;
3277 * others support only sys_rt_sigprocmask.
3278 */
3279
3280SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3281		old_sigset_t __user *, oset)
3282{
3283	old_sigset_t old_set, new_set;
3284	sigset_t new_blocked;
3285
3286	old_set = current->blocked.sig[0];
3287
3288	if (nset) {
3289		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3290			return -EFAULT;
3291
3292		new_blocked = current->blocked;
3293
3294		switch (how) {
3295		case SIG_BLOCK:
3296			sigaddsetmask(&new_blocked, new_set);
3297			break;
3298		case SIG_UNBLOCK:
3299			sigdelsetmask(&new_blocked, new_set);
3300			break;
3301		case SIG_SETMASK:
3302			new_blocked.sig[0] = new_set;
3303			break;
3304		default:
3305			return -EINVAL;
3306		}
3307
3308		set_current_blocked(&new_blocked);
3309	}
3310
3311	if (oset) {
3312		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3313			return -EFAULT;
3314	}
3315
3316	return 0;
3317}
3318#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3319
3320#ifndef CONFIG_ODD_RT_SIGACTION
3321/**
3322 *  sys_rt_sigaction - alter an action taken by a process
3323 *  @sig: signal whose action is to be changed
3324 *  @act: new sigaction
3325 *  @oact: used to save the previous sigaction
3326 *  @sigsetsize: size of sigset_t type
3327 */
3328SYSCALL_DEFINE4(rt_sigaction, int, sig,
3329		const struct sigaction __user *, act,
3330		struct sigaction __user *, oact,
3331		size_t, sigsetsize)
3332{
3333	struct k_sigaction new_sa, old_sa;
3334	int ret = -EINVAL;
3335
3336	/* XXX: Don't preclude handling different sized sigset_t's.  */
3337	if (sigsetsize != sizeof(sigset_t))
3338		goto out;
3339
3340	if (act) {
3341		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3342			return -EFAULT;
3343	}
3344
3345	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3346
3347	if (!ret && oact) {
3348		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3349			return -EFAULT;
3350	}
3351out:
3352	return ret;
3353}
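
/*
 * Example (editor's sketch): the familiar sigaction(2) call lands here;
 * sa_mask is the extra set blocked while the handler runs, as applied by
 * signal_delivered() above.  on_sigint is a hypothetical handler:
 *
 *	struct sigaction sa = {
 *		.sa_handler = on_sigint,
 *		.sa_flags = SA_RESTART,
 *	};
 *
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGINT, &sa, NULL);
 */
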
3354#ifdef CONFIG_COMPAT
3355COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3356		const struct compat_sigaction __user *, act,
3357		struct compat_sigaction __user *, oact,
3358		compat_size_t, sigsetsize)
3359{
3360	struct k_sigaction new_ka, old_ka;
3361	compat_sigset_t mask;
3362#ifdef __ARCH_HAS_SA_RESTORER
3363	compat_uptr_t restorer;
3364#endif
3365	int ret;
3366
3367	/* XXX: Don't preclude handling different sized sigset_t's.  */
3368	if (sigsetsize != sizeof(compat_sigset_t))
3369		return -EINVAL;
3370
3371	if (act) {
3372		compat_uptr_t handler;
3373		ret = get_user(handler, &act->sa_handler);
3374		new_ka.sa.sa_handler = compat_ptr(handler);
3375#ifdef __ARCH_HAS_SA_RESTORER
3376		ret |= get_user(restorer, &act->sa_restorer);
3377		new_ka.sa.sa_restorer = compat_ptr(restorer);
3378#endif
3379		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3380		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3381		if (ret)
3382			return -EFAULT;
3383		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3384	}
3385
3386	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3387	if (!ret && oact) {
3388		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3389		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
3390			       &oact->sa_handler);
3391		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3392		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3393#ifdef __ARCH_HAS_SA_RESTORER
3394		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3395				&oact->sa_restorer);
3396#endif
3397	}
3398	return ret;
3399}
3400#endif
3401#endif /* !CONFIG_ODD_RT_SIGACTION */
3402
3403#ifdef CONFIG_OLD_SIGACTION
3404SYSCALL_DEFINE3(sigaction, int, sig,
3405		const struct old_sigaction __user *, act,
3406	        struct old_sigaction __user *, oact)
3407{
3408	struct k_sigaction new_ka, old_ka;
3409	int ret;
3410
3411	if (act) {
3412		old_sigset_t mask;
3413		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3414		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3415		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3416		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3417		    __get_user(mask, &act->sa_mask))
3418			return -EFAULT;
3419#ifdef __ARCH_HAS_KA_RESTORER
3420		new_ka.ka_restorer = NULL;
3421#endif
3422		siginitset(&new_ka.sa.sa_mask, mask);
3423	}
3424
3425	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3426
3427	if (!ret && oact) {
3428		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3429		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3430		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3431		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3432		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3433			return -EFAULT;
3434	}
3435
3436	return ret;
3437}
3438#endif
3439#ifdef CONFIG_COMPAT_OLD_SIGACTION
3440COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3441		const struct compat_old_sigaction __user *, act,
3442	        struct compat_old_sigaction __user *, oact)
3443{
3444	struct k_sigaction new_ka, old_ka;
3445	int ret;
3446	compat_old_sigset_t mask;
3447	compat_uptr_t handler, restorer;
3448
3449	if (act) {
3450		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3451		    __get_user(handler, &act->sa_handler) ||
3452		    __get_user(restorer, &act->sa_restorer) ||
3453		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3454		    __get_user(mask, &act->sa_mask))
3455			return -EFAULT;
3456
3457#ifdef __ARCH_HAS_KA_RESTORER
3458		new_ka.ka_restorer = NULL;
3459#endif
3460		new_ka.sa.sa_handler = compat_ptr(handler);
3461		new_ka.sa.sa_restorer = compat_ptr(restorer);
3462		siginitset(&new_ka.sa.sa_mask, mask);
3463	}
3464
3465	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3466
3467	if (!ret && oact) {
3468		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3469		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3470			       &oact->sa_handler) ||
3471		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3472			       &oact->sa_restorer) ||
3473		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3474		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3475			return -EFAULT;
3476	}
3477	return ret;
3478}
3479#endif
3480
3481#ifdef CONFIG_SGETMASK_SYSCALL
3482
3483/*
3484 * For backwards compatibility.  Functionality superseded by sigprocmask.
3485 */
3486SYSCALL_DEFINE0(sgetmask)
3487{
3488	/* SMP safe */
3489	return current->blocked.sig[0];
3490}
3491
3492SYSCALL_DEFINE1(ssetmask, int, newmask)
3493{
3494	int old = current->blocked.sig[0];
3495	sigset_t newset;
3496
3497	siginitset(&newset, newmask);
3498	set_current_blocked(&newset);
3499
3500	return old;
3501}
3502#endif /* CONFIG_SGETMASK_SYSCALL */
3503
3504#ifdef __ARCH_WANT_SYS_SIGNAL
3505/*
3506 * For backwards compatibility.  Functionality superseded by sigaction.
3507 */
3508SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3509{
3510	struct k_sigaction new_sa, old_sa;
3511	int ret;
3512
3513	new_sa.sa.sa_handler = handler;
3514	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3515	sigemptyset(&new_sa.sa.sa_mask);
3516
3517	ret = do_sigaction(sig, &new_sa, &old_sa);
3518
3519	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3520}
3521#endif /* __ARCH_WANT_SYS_SIGNAL */
3522
3523#ifdef __ARCH_WANT_SYS_PAUSE
3524
3525SYSCALL_DEFINE0(pause)
3526{
3527	while (!signal_pending(current)) {
3528		__set_current_state(TASK_INTERRUPTIBLE);
3529		schedule();
3530	}
3531	return -ERESTARTNOHAND;
3532}
3533
3534#endif
3535
3536static int sigsuspend(sigset_t *set)
3537{
3538	current->saved_sigmask = current->blocked;
3539	set_current_blocked(set);
3540
3541	while (!signal_pending(current)) {
3542		__set_current_state(TASK_INTERRUPTIBLE);
3543		schedule();
3544	}
3545	set_restore_sigmask();
3546	return -ERESTARTNOHAND;
3547}
3548
3549/**
3550 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
3551 *	until a signal is received
3552 *  @unewset: new signal mask value
3553 *  @sigsetsize: size of sigset_t type
3554 */
3555SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3556{
3557	sigset_t newset;
3558
3559	/* XXX: Don't preclude handling different sized sigset_t's.  */
3560	if (sigsetsize != sizeof(sigset_t))
3561		return -EINVAL;
3562
3563	if (copy_from_user(&newset, unewset, sizeof(newset)))
3564		return -EFAULT;
3565	return sigsuspend(&newset);
3566}
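
/*
 * Example (editor's sketch): sigsuspend(2) exists to close the race
 * between testing a flag and going to sleep.  Block the signal, test,
 * then atomically restore the old mask and wait in one step:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)		// flag set by a hypothetical handler
 *		sigsuspend(&old);	// returns -1 with errno == EINTR
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */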
3567 
3568#ifdef CONFIG_COMPAT
3569COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3570{
3571#ifdef __BIG_ENDIAN
3572	sigset_t newset;
3573	compat_sigset_t newset32;
3574
3575	/* XXX: Don't preclude handling different sized sigset_t's.  */
3576	if (sigsetsize != sizeof(sigset_t))
3577		return -EINVAL;
3578
3579	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3580		return -EFAULT;
3581	sigset_from_compat(&newset, &newset32);
3582	return sigsuspend(&newset);
3583#else
3584	/* on little-endian bitmaps don't care about granularity */
3585	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3586#endif
3587}
3588#endif
3589
3590#ifdef CONFIG_OLD_SIGSUSPEND
3591SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3592{
3593	sigset_t blocked;
3594	siginitset(&blocked, mask);
3595	return sigsuspend(&blocked);
3596}
3597#endif
3598#ifdef CONFIG_OLD_SIGSUSPEND3
3599SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3600{
3601	sigset_t blocked;
3602	siginitset(&blocked, mask);
3603	return sigsuspend(&blocked);
3604}
3605#endif
3606
3607__weak const char *arch_vma_name(struct vm_area_struct *vma)
3608{
3609	return NULL;
3610}
3611
3612void __init signals_init(void)
3613{
3614	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3615	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3616		!= offsetof(struct siginfo, _sifields._pad));
3617
3618	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3619}
3620
3621#ifdef CONFIG_KGDB_KDB
3622#include <linux/kdb.h>
3623/*
3624 * kdb_send_sig_info - Allows kdb to send signals without exposing
3625 * signal internals.  This function checks if the required locks are
3626 * available before calling the main signal code, to avoid kdb
3627 * deadlocks.
3628 */
3629void
3630kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3631{
3632	static struct task_struct *kdb_prev_t;
3633	int sig, new_t;
3634	if (!spin_trylock(&t->sighand->siglock)) {
3635		kdb_printf("Can't do kill command now.\n"
3636			   "The sigmask lock is held somewhere else in "
3637			   "kernel, try again later\n");
3638		return;
3639	}
3640	spin_unlock(&t->sighand->siglock);
3641	new_t = kdb_prev_t != t;
3642	kdb_prev_t = t;
3643	if (t->state != TASK_RUNNING && new_t) {
3644		kdb_printf("Process is not RUNNING, sending a signal from "
3645			   "kdb risks deadlock\n"
3646			   "on the run queue locks. "
3647			   "The signal has _not_ been sent.\n"
3648			   "Reissue the kill command if you want to risk "
3649			   "the deadlock.\n");
3650		return;
3651	}
3652	sig = info->si_signo;
3653	if (send_sig_info(sig, info, t))
3654		kdb_printf("Failed to deliver signal %d to process %d.\n",
3655			   sig, t->pid);
3656	else
3657		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
3658}
3659#endif	/* CONFIG_KGDB_KDB */
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/kernel/signal.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   8 *
   9 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
  10 *		Changes to use preallocated sigqueue structures
  11 *		to allow signals to be sent reliably.
  12 */
  13
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/init.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/user.h>
  19#include <linux/sched/debug.h>
  20#include <linux/sched/task.h>
  21#include <linux/sched/task_stack.h>
  22#include <linux/sched/cputime.h>
  23#include <linux/file.h>
  24#include <linux/fs.h>
  25#include <linux/proc_fs.h>
  26#include <linux/tty.h>
  27#include <linux/binfmts.h>
  28#include <linux/coredump.h>
  29#include <linux/security.h>
  30#include <linux/syscalls.h>
  31#include <linux/ptrace.h>
  32#include <linux/signal.h>
  33#include <linux/signalfd.h>
  34#include <linux/ratelimit.h>
  35#include <linux/tracehook.h>
  36#include <linux/capability.h>
  37#include <linux/freezer.h>
  38#include <linux/pid_namespace.h>
  39#include <linux/nsproxy.h>
  40#include <linux/user_namespace.h>
  41#include <linux/uprobes.h>
  42#include <linux/compat.h>
  43#include <linux/cn_proc.h>
  44#include <linux/compiler.h>
  45#include <linux/posix-timers.h>
  46#include <linux/livepatch.h>
  47#include <linux/cgroup.h>
  48#include <linux/audit.h>
  49
  50#define CREATE_TRACE_POINTS
  51#include <trace/events/signal.h>
  52
  53#include <asm/param.h>
  54#include <linux/uaccess.h>
  55#include <asm/unistd.h>
  56#include <asm/siginfo.h>
  57#include <asm/cacheflush.h>
  58
  59/*
  60 * SLAB caches for signal bits.
  61 */
  62
  63static struct kmem_cache *sigqueue_cachep;
  64
  65int print_fatal_signals __read_mostly;
  66
  67static void __user *sig_handler(struct task_struct *t, int sig)
  68{
  69	return t->sighand->action[sig - 1].sa.sa_handler;
  70}
  71
  72static inline bool sig_handler_ignored(void __user *handler, int sig)
  73{
  74	/* Is it explicitly or implicitly ignored? */
  75	return handler == SIG_IGN ||
  76	       (handler == SIG_DFL && sig_kernel_ignore(sig));
  77}
  78
  79static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
  80{
  81	void __user *handler;
  82
  83	handler = sig_handler(t, sig);
  84
  85	/* SIGKILL and SIGSTOP may not be sent to the global init */
  86	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
  87		return true;
  88
  89	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  90	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
  91		return true;
  92
  93	/* Only allow kernel generated signals to this kthread */
  94	if (unlikely((t->flags & PF_KTHREAD) &&
  95		     (handler == SIG_KTHREAD_KERNEL) && !force))
  96		return true;
  97
  98	return sig_handler_ignored(handler, sig);
  99}
 100
 101static bool sig_ignored(struct task_struct *t, int sig, bool force)
 102{
 103	/*
 104	 * Blocked signals are never ignored, since the
 105	 * signal handler may change by the time it is
 106	 * unblocked.
 107	 */
 108	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 109		return false;
 110
 111	/*
 112	 * Tracers may want to know about even an ignored signal, unless
 113	 * it is SIGKILL, which can't be reported anyway but can be ignored
 114	 * by a SIGNAL_UNKILLABLE task.
 115	 */
 116	if (t->ptrace && sig != SIGKILL)
 117		return false;
 118
 119	return sig_task_ignored(t, sig, force);
 120}
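/*
 * Illustrative aside (not part of signal.c): the "blocked signals are
 * never ignored" rule above is visible from userspace on Linux -- a
 * signal generated while blocked stays pending even if its disposition
 * is SIG_IGN at the time, because the handler may change before it is
 * unblocked:
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t hits;

static void handler(int sig) { hits++; }

int main(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	signal(SIGUSR1, SIG_IGN);
	raise(SIGUSR1);			/* blocked, so it stays pending */

	signal(SIGUSR1, handler);	/* disposition changes before unblock */
	sigprocmask(SIG_UNBLOCK, &set, NULL);

	printf("handler ran %d time(s)\n", (int)hits);	/* 1 on Linux */
	return 0;
}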
 121
 122/*
 123 * Re-calculate pending state from the set of locally pending
 124 * signals, globally pending signals, and blocked signals.
 125 */
 126static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 127{
 128	unsigned long ready;
 129	long i;
 130
 131	switch (_NSIG_WORDS) {
 132	default:
 133		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 134			ready |= signal->sig[i] &~ blocked->sig[i];
 135		break;
 136
 137	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 138		ready |= signal->sig[2] &~ blocked->sig[2];
 139		ready |= signal->sig[1] &~ blocked->sig[1];
 140		ready |= signal->sig[0] &~ blocked->sig[0];
 141		break;
 142
 143	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 144		ready |= signal->sig[0] &~ blocked->sig[0];
 145		break;
 146
 147	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 148	}
 149	return ready != 0;
 150}
 151
 152#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 153
 154static bool recalc_sigpending_tsk(struct task_struct *t)
 155{
 156	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
 157	    PENDING(&t->pending, &t->blocked) ||
 158	    PENDING(&t->signal->shared_pending, &t->blocked) ||
 159	    cgroup_task_frozen(t)) {
 160		set_tsk_thread_flag(t, TIF_SIGPENDING);
 161		return true;
 162	}
 163
 164	/*
 165	 * We must never clear the flag in another thread, or in current
 166	 * when it's possible the current syscall is returning -ERESTART*.
 167	 * So we don't clear it here, and only callers who know they should do.
 168	 */
 169	return false;
 170}
 171
 172/*
 173 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 174 * This is superfluous when called on current, the wakeup is a harmless no-op.
 175 */
 176void recalc_sigpending_and_wake(struct task_struct *t)
 177{
 178	if (recalc_sigpending_tsk(t))
 179		signal_wake_up(t, 0);
 180}
 181
 182void recalc_sigpending(void)
 183{
 184	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
 185	    !klp_patch_pending(current))
 186		clear_thread_flag(TIF_SIGPENDING);
 188}
 189EXPORT_SYMBOL(recalc_sigpending);
 190
 191void calculate_sigpending(void)
 192{
 193	/* Have any signals or users of TIF_SIGPENDING been delayed
 194	 * until after fork?
 195	 */
 196	spin_lock_irq(&current->sighand->siglock);
 197	set_tsk_thread_flag(current, TIF_SIGPENDING);
 198	recalc_sigpending();
 199	spin_unlock_irq(&current->sighand->siglock);
 200}
 201
 202/* Given the mask, find the first available signal that should be serviced. */
 203
 204#define SYNCHRONOUS_MASK \
 205	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 206	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 207
 208int next_signal(struct sigpending *pending, sigset_t *mask)
 209{
 210	unsigned long i, *s, *m, x;
 211	int sig = 0;
 212
 213	s = pending->signal.sig;
 214	m = mask->sig;
 215
 216	/*
 217	 * Handle the first word specially: it contains the
 218	 * synchronous signals that need to be dequeued first.
 219	 */
 220	x = *s &~ *m;
 221	if (x) {
 222		if (x & SYNCHRONOUS_MASK)
 223			x &= SYNCHRONOUS_MASK;
 224		sig = ffz(~x) + 1;
 225		return sig;
 226	}
 227
 228	switch (_NSIG_WORDS) {
 229	default:
 230		for (i = 1; i < _NSIG_WORDS; ++i) {
 231			x = *++s &~ *++m;
 232			if (!x)
 233				continue;
 234			sig = ffz(~x) + i*_NSIG_BPW + 1;
 235			break;
 236		}
 237		break;
 238
 239	case 2:
 240		x = s[1] &~ m[1];
 241		if (!x)
 242			break;
 243		sig = ffz(~x) + _NSIG_BPW + 1;
 244		break;
 245
 246	case 1:
 247		/* Nothing to do */
 248		break;
 249	}
 250
 251	return sig;
 252}
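/*
 * Illustrative aside (not part of signal.c): the ffz(~x) + 1 idiom in
 * next_signal() converts the lowest set bit of a word into a 1-based
 * signal number.  ffz() returns the index of the first zero bit, so
 * ffz(~x) is the index of the first one bit of x.  A standalone sketch,
 * using the GCC/Clang builtin __builtin_ctzl as a stand-in for ffz:
 */
#include <assert.h>
#include <stdio.h>

static unsigned long first_set_bit(unsigned long x)
{
	return __builtin_ctzl(x);	/* == ffz(~x) for x != 0 */
}

int main(void)
{
	unsigned long pending = (1UL << 10) | (1UL << 3);  /* signals 11 and 4 */
	unsigned long blocked = 1UL << 3;		   /* signal 4 blocked */
	unsigned long x = pending & ~blocked;

	assert(x != 0);
	printf("next signal = %lu\n", first_set_bit(x) + 1);	/* prints 11 */
	return 0;
}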
 253
 254static inline void print_dropped_signal(int sig)
 255{
 256	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 257
 258	if (!print_fatal_signals)
 259		return;
 260
 261	if (!__ratelimit(&ratelimit_state))
 262		return;
 263
 264	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 265				current->comm, current->pid, sig);
 266}
 267
 268/**
 269 * task_set_jobctl_pending - set jobctl pending bits
 270 * @task: target task
 271 * @mask: pending bits to set
 272 *
 273 * Set @mask in @task->jobctl.  @mask must be a subset of
 274 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 275 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 276 * cleared.  If @task is already being killed or exiting, this function
 277 * becomes a no-op.
 278 *
 279 * CONTEXT:
 280 * Must be called with @task->sighand->siglock held.
 281 *
 282 * RETURNS:
 283 * %true if @mask was set, %false if it became a no-op because @task was dying.
 284 */
 285bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 286{
 287	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 288			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 289	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 290
 291	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 292		return false;
 293
 294	if (mask & JOBCTL_STOP_SIGMASK)
 295		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 296
 297	task->jobctl |= mask;
 298	return true;
 299}
 300
 301/**
 302 * task_clear_jobctl_trapping - clear jobctl trapping bit
 303 * @task: target task
 304 *
 305 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 306 * Clear it and wake up the ptracer.  Note that we don't need any further
 307 * locking.  @task->siglock guarantees that @task->parent points to the
 308 * ptracer.
 309 *
 310 * CONTEXT:
 311 * Must be called with @task->sighand->siglock held.
 312 */
 313void task_clear_jobctl_trapping(struct task_struct *task)
 314{
 315	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 316		task->jobctl &= ~JOBCTL_TRAPPING;
 317		smp_mb();	/* advised by wake_up_bit() */
 318		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 319	}
 320}
 321
 322/**
 323 * task_clear_jobctl_pending - clear jobctl pending bits
 324 * @task: target task
 325 * @mask: pending bits to clear
 326 *
 327 * Clear @mask from @task->jobctl.  @mask must be a subset of
 328 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 329 * other STOP bits are cleared along with it.
 330 *
 331 * If clearing of @mask leaves no stop or trap pending, this function calls
 332 * task_clear_jobctl_trapping().
 333 *
 334 * CONTEXT:
 335 * Must be called with @task->sighand->siglock held.
 336 */
 337void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 338{
 339	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 340
 341	if (mask & JOBCTL_STOP_PENDING)
 342		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 343
 344	task->jobctl &= ~mask;
 345
 346	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 347		task_clear_jobctl_trapping(task);
 348}
 349
 350/**
 351 * task_participate_group_stop - participate in a group stop
 352 * @task: task participating in a group stop
 353 *
 354 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 355 * Group stop states are cleared and the group stop count is consumed if
 356 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 357 * stop, the appropriate %SIGNAL_* flags are set.
 358 *
 359 * CONTEXT:
 360 * Must be called with @task->sighand->siglock held.
 361 *
 362 * RETURNS:
 363 * %true if group stop completion should be notified to the parent, %false
 364 * otherwise.
 365 */
 366static bool task_participate_group_stop(struct task_struct *task)
 367{
 368	struct signal_struct *sig = task->signal;
 369	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 370
 371	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 372
 373	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 374
 375	if (!consume)
 376		return false;
 377
 378	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 379		sig->group_stop_count--;
 380
 381	/*
 382	 * Tell the caller to notify completion iff we are entering a
 383	 * fresh group stop.  Read comment in do_signal_stop() for details.
 384	 */
 385	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 386		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 387		return true;
 388	}
 389	return false;
 390}
 391
 392void task_join_group_stop(struct task_struct *task)
 393{
 394	/* Have the new thread join an on-going signal group stop */
 395	unsigned long jobctl = current->jobctl;
 396	if (jobctl & JOBCTL_STOP_PENDING) {
 397		struct signal_struct *sig = current->signal;
 398		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
 399		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
 400		if (task_set_jobctl_pending(task, signr | gstop))
 401			sig->group_stop_count++;
 403	}
 404}
 405
 406/*
 407 * allocate a new signal queue record
 408 * - this may be called without locks if and only if t == current, otherwise an
 409 *   appropriate lock must be held to stop the target task from exiting
 410 */
 411static struct sigqueue *
 412__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 413{
 414	struct sigqueue *q = NULL;
 415	struct user_struct *user;
 416	int sigpending;
 417
 418	/*
 419	 * Protect access to @t credentials. This can go away when all
 420	 * callers hold rcu read lock.
 421	 *
 422	 * NOTE! A pending signal will hold on to the user refcount,
 423	 * and we get/put the refcount only when the sigpending count
 424	 * changes from/to zero.
 425	 */
 426	rcu_read_lock();
 427	user = __task_cred(t)->user;
 428	sigpending = atomic_inc_return(&user->sigpending);
 429	if (sigpending == 1)
 430		get_uid(user);
 431	rcu_read_unlock();
 432
 433	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
 434		q = kmem_cache_alloc(sigqueue_cachep, flags);
 435	} else {
 436		print_dropped_signal(sig);
 437	}
 438
 439	if (unlikely(q == NULL)) {
 440		if (atomic_dec_and_test(&user->sigpending))
 441			free_uid(user);
 442	} else {
 443		INIT_LIST_HEAD(&q->list);
 444		q->flags = 0;
 445		q->user = user;
 446	}
 447
 448	return q;
 449}
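/*
 * Illustrative aside (not part of signal.c): the per-user "sigpending"
 * accounting above is what makes sigqueue(3) fail with EAGAIN once
 * RLIMIT_SIGPENDING worth of signals sit undelivered.  A sketch -- the
 * exact iteration at which it fails depends on the user's other pending
 * signals, since the quota is per user, not per process:
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4, .rlim_max = 4 };
	union sigval v = { .sival_int = 0 };
	sigset_t set;
	int i;

	/* Block SIGRTMIN so every queued instance stays pending. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	setrlimit(RLIMIT_SIGPENDING, &rl);

	for (i = 0; i < 8; i++) {
		if (sigqueue(getpid(), SIGRTMIN, v)) {
			printf("sigqueue #%d failed: %s\n", i, strerror(errno));
			break;	/* EAGAIN: per-user pending quota exhausted */
		}
	}
	return 0;
}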
 450
 451static void __sigqueue_free(struct sigqueue *q)
 452{
 453	if (q->flags & SIGQUEUE_PREALLOC)
 454		return;
 455	if (atomic_dec_and_test(&q->user->sigpending))
 456		free_uid(q->user);
 457	kmem_cache_free(sigqueue_cachep, q);
 458}
 459
 460void flush_sigqueue(struct sigpending *queue)
 461{
 462	struct sigqueue *q;
 463
 464	sigemptyset(&queue->signal);
 465	while (!list_empty(&queue->list)) {
 466		q = list_entry(queue->list.next, struct sigqueue, list);
 467		list_del_init(&q->list);
 468		__sigqueue_free(q);
 469	}
 470}
 471
 472/*
 473 * Flush all pending signals for this kthread.
 474 */
 475void flush_signals(struct task_struct *t)
 476{
 477	unsigned long flags;
 478
 479	spin_lock_irqsave(&t->sighand->siglock, flags);
 480	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 481	flush_sigqueue(&t->pending);
 482	flush_sigqueue(&t->signal->shared_pending);
 483	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 484}
 485EXPORT_SYMBOL(flush_signals);
 486
 487#ifdef CONFIG_POSIX_TIMERS
 488static void __flush_itimer_signals(struct sigpending *pending)
 489{
 490	sigset_t signal, retain;
 491	struct sigqueue *q, *n;
 492
 493	signal = pending->signal;
 494	sigemptyset(&retain);
 495
 496	list_for_each_entry_safe(q, n, &pending->list, list) {
 497		int sig = q->info.si_signo;
 498
 499		if (likely(q->info.si_code != SI_TIMER)) {
 500			sigaddset(&retain, sig);
 501		} else {
 502			sigdelset(&signal, sig);
 503			list_del_init(&q->list);
 504			__sigqueue_free(q);
 505		}
 506	}
 507
 508	sigorsets(&pending->signal, &signal, &retain);
 509}
 510
 511void flush_itimer_signals(void)
 512{
 513	struct task_struct *tsk = current;
 514	unsigned long flags;
 515
 516	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 517	__flush_itimer_signals(&tsk->pending);
 518	__flush_itimer_signals(&tsk->signal->shared_pending);
 519	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 520}
 521#endif
 522
 523void ignore_signals(struct task_struct *t)
 524{
 525	int i;
 526
 527	for (i = 0; i < _NSIG; ++i)
 528		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 529
 530	flush_signals(t);
 531}
 532
 533/*
 534 * Flush all handlers for a task.
 535 */
 536
 537void
 538flush_signal_handlers(struct task_struct *t, int force_default)
 539{
 540	int i;
 541	struct k_sigaction *ka = &t->sighand->action[0];
 542	for (i = _NSIG ; i != 0 ; i--) {
 543		if (force_default || ka->sa.sa_handler != SIG_IGN)
 544			ka->sa.sa_handler = SIG_DFL;
 545		ka->sa.sa_flags = 0;
 546#ifdef __ARCH_HAS_SA_RESTORER
 547		ka->sa.sa_restorer = NULL;
 548#endif
 549		sigemptyset(&ka->sa.sa_mask);
 550		ka++;
 551	}
 552}
 553
 554bool unhandled_signal(struct task_struct *tsk, int sig)
 555{
 556	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 557	if (is_global_init(tsk))
 558		return true;
 559
 560	if (handler != SIG_IGN && handler != SIG_DFL)
 561		return false;
 562
 563	/* if ptraced, let the tracer determine */
 564	return !tsk->ptrace;
 565}
 566
 567static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
 568			   bool *resched_timer)
 569{
 570	struct sigqueue *q, *first = NULL;
 571
 572	/*
 573	 * Collect the siginfo appropriate to this signal.  Check if
 574	 * there is another siginfo for the same signal.
 575	 */
 576	list_for_each_entry(q, &list->list, list) {
 577		if (q->info.si_signo == sig) {
 578			if (first)
 579				goto still_pending;
 580			first = q;
 581		}
 582	}
 583
 584	sigdelset(&list->signal, sig);
 585
 586	if (first) {
 587still_pending:
 588		list_del_init(&first->list);
 589		copy_siginfo(info, &first->info);
 590
 591		*resched_timer =
 592			(first->flags & SIGQUEUE_PREALLOC) &&
 593			(info->si_code == SI_TIMER) &&
 594			(info->si_sys_private);
 595
 596		__sigqueue_free(first);
 597	} else {
 598		/*
 599		 * Ok, it wasn't in the queue.  This must be
 600		 * a fast-pathed signal or we must have been
 601		 * out of queue space.  So zero out the info.
 602		 */
 603		clear_siginfo(info);
 604		info->si_signo = sig;
 605		info->si_errno = 0;
 606		info->si_code = SI_USER;
 607		info->si_pid = 0;
 608		info->si_uid = 0;
 609	}
 610}
 611
 612static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 613			kernel_siginfo_t *info, bool *resched_timer)
 614{
 615	int sig = next_signal(pending, mask);
 616
 617	if (sig)
 618		collect_signal(sig, pending, info, resched_timer);
 619	return sig;
 620}
 621
 622/*
 623 * Dequeue a signal and return the element to the caller, which is
 624 * expected to free it.
 625 *
 626 * All callers have to hold the siglock.
 627 */
 628int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
 629{
 630	bool resched_timer = false;
 631	int signr;
 632
 633	/* We only dequeue private signals from ourselves; we don't let
 634	 * signalfd steal them.
 635	 */
 636	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 637	if (!signr) {
 638		signr = __dequeue_signal(&tsk->signal->shared_pending,
 639					 mask, info, &resched_timer);
 640#ifdef CONFIG_POSIX_TIMERS
 641		/*
 642		 * itimer signal ?
 643		 *
 644		 * itimers are process shared and we restart periodic
 645		 * itimers in the signal delivery path to prevent DoS
 646		 * attacks in the high resolution timer case. This is
 647		 * compliant with the old way of self-restarting
 648		 * itimers, as the SIGALRM is a legacy signal and only
 649		 * queued once. Changing the restart behaviour to
 650		 * restart the timer in the signal dequeue path is
 651	 * reducing the timer noise on heavily loaded !highres
 652		 * systems too.
 653		 */
 654		if (unlikely(signr == SIGALRM)) {
 655			struct hrtimer *tmr = &tsk->signal->real_timer;
 656
 657			if (!hrtimer_is_queued(tmr) &&
 658			    tsk->signal->it_real_incr != 0) {
 659				hrtimer_forward(tmr, tmr->base->get_time(),
 660						tsk->signal->it_real_incr);
 661				hrtimer_restart(tmr);
 662			}
 663		}
 664#endif
 665	}
 666
 667	recalc_sigpending();
 668	if (!signr)
 669		return 0;
 670
 671	if (unlikely(sig_kernel_stop(signr))) {
 672		/*
 673		 * Set a marker that we have dequeued a stop signal.  Our
 674		 * caller might release the siglock and then the pending
 675		 * stop signal it is about to process is no longer in the
 676		 * pending bitmasks, but must still be cleared by a SIGCONT
 677		 * (and overruled by a SIGKILL).  So those cases clear this
 678		 * shared flag after we've set it.  Note that this flag may
 679		 * remain set after the signal we return is ignored or
 680		 * handled.  That doesn't matter because its only purpose
 681		 * is to alert stop-signal processing code when another
 682		 * processor has come along and cleared the flag.
 683		 */
 684		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 685	}
 686#ifdef CONFIG_POSIX_TIMERS
 687	if (resched_timer) {
 688		/*
 689		 * Release the siglock to ensure proper locking order
 690		 * of timer locks outside of siglocks.  Note, we leave
 691		 * irqs disabled here, since the posix-timers code is
 692		 * about to disable them again anyway.
 693		 */
 694		spin_unlock(&tsk->sighand->siglock);
 695		posixtimer_rearm(info);
 696		spin_lock(&tsk->sighand->siglock);
 697
 698		/* Don't expose the si_sys_private value to userspace */
 699		info->si_sys_private = 0;
 700	}
 701#endif
 702	return signr;
 703}
 704EXPORT_SYMBOL_GPL(dequeue_signal);
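/*
 * Illustrative aside (not part of signal.c): sigwaitinfo(2) is the
 * userspace window into this dequeue path -- block a signal so that it
 * queues instead of being delivered, then pull it off the pending queue
 * together with its siginfo:
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	union sigval v = { .sival_int = 42 };
	siginfo_t info;
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* queue, don't deliver */

	sigqueue(getpid(), SIGRTMIN, v);

	if (sigwaitinfo(&set, &info) == SIGRTMIN)
		printf("si_code=%d sival_int=%d from pid %d\n",
		       info.si_code, info.si_value.sival_int,
		       (int)info.si_pid);
	return 0;
}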
 705
 706static int dequeue_synchronous_signal(kernel_siginfo_t *info)
 707{
 708	struct task_struct *tsk = current;
 709	struct sigpending *pending = &tsk->pending;
 710	struct sigqueue *q, *sync = NULL;
 711
 712	/*
 713	 * Might a synchronous signal be in the queue?
 714	 */
 715	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
 716		return 0;
 717
 718	/*
 719	 * Return the first synchronous signal in the queue.
 720	 */
 721	list_for_each_entry(q, &pending->list, list) {
 722		/* Synchronous signals have a positive si_code */
 723		if ((q->info.si_code > SI_USER) &&
 724		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
 725			sync = q;
 726			goto next;
 727		}
 728	}
 729	return 0;
 730next:
 731	/*
 732	 * Check if there is another siginfo for the same signal.
 733	 */
 734	list_for_each_entry_continue(q, &pending->list, list) {
 735		if (q->info.si_signo == sync->info.si_signo)
 736			goto still_pending;
 737	}
 738
 739	sigdelset(&pending->signal, sync->info.si_signo);
 740	recalc_sigpending();
 741still_pending:
 742	list_del_init(&sync->list);
 743	copy_siginfo(info, &sync->info);
 744	__sigqueue_free(sync);
 745	return info->si_signo;
 746}
 747
 748/*
 749 * Tell a process that it has a new active signal.
 750 *
 751 * NOTE! we rely on the previous spin_lock to
 752 * lock interrupts for us! We can only be called with
 753 * "siglock" held, and the local interrupt must
 754 * have been disabled when that got acquired!
 755 *
 756 * No need to set need_resched since signal event passing
 757 * goes through ->blocked
 758 */
 759void signal_wake_up_state(struct task_struct *t, unsigned int state)
 760{
 761	set_tsk_thread_flag(t, TIF_SIGPENDING);
 762	/*
 763	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 764	 * case. We don't check t->state here because there is a race with it
 765	 * executing on another processor and just now entering stopped state.
 766	 * By using wake_up_state, we ensure the process will wake up and
 767	 * handle its death signal.
 768	 */
 769	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 770		kick_process(t);
 771}
 772
 773/*
 774 * Remove signals in mask from the pending set and queue.
 776 *
 777 * All callers must be holding the siglock.
 778 */
 779static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 780{
 781	struct sigqueue *q, *n;
 782	sigset_t m;
 783
 784	sigandsets(&m, mask, &s->signal);
 785	if (sigisemptyset(&m))
 786		return;
 787
 788	sigandnsets(&s->signal, &s->signal, mask);
 789	list_for_each_entry_safe(q, n, &s->list, list) {
 790		if (sigismember(mask, q->info.si_signo)) {
 791			list_del_init(&q->list);
 792			__sigqueue_free(q);
 793		}
 794	}
 795}
 796
 797static inline int is_si_special(const struct kernel_siginfo *info)
 798{
 799	return info <= SEND_SIG_PRIV;
 800}
 801
 802static inline bool si_fromuser(const struct kernel_siginfo *info)
 803{
 804	return info == SEND_SIG_NOINFO ||
 805		(!is_si_special(info) && SI_FROMUSER(info));
 806}
 807
 808/*
 809 * called with RCU read lock from check_kill_permission()
 810 */
 811static bool kill_ok_by_cred(struct task_struct *t)
 812{
 813	const struct cred *cred = current_cred();
 814	const struct cred *tcred = __task_cred(t);
 815
 816	return uid_eq(cred->euid, tcred->suid) ||
 817	       uid_eq(cred->euid, tcred->uid) ||
 818	       uid_eq(cred->uid, tcred->suid) ||
 819	       uid_eq(cred->uid, tcred->uid) ||
 820	       ns_capable(tcred->user_ns, CAP_KILL);
 821}
 822
 823/*
 824 * Bad permissions for sending the signal
 825 * - the caller must hold the RCU read lock
 826 */
 827static int check_kill_permission(int sig, struct kernel_siginfo *info,
 828				 struct task_struct *t)
 829{
 830	struct pid *sid;
 831	int error;
 832
 833	if (!valid_signal(sig))
 834		return -EINVAL;
 835
 836	if (!si_fromuser(info))
 837		return 0;
 838
 839	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 840	if (error)
 841		return error;
 842
 843	if (!same_thread_group(current, t) &&
 844	    !kill_ok_by_cred(t)) {
 845		switch (sig) {
 846		case SIGCONT:
 847			sid = task_session(t);
 848			/*
 849			 * We don't return the error if sid == NULL. The
 850			 * task was unhashed; the caller must notice this.
 851			 */
 852			if (!sid || sid == task_session(current))
 853				break;
 854			fallthrough;
 855		default:
 856			return -EPERM;
 857		}
 858	}
 859
 860	return security_task_kill(t, info, sig, NULL);
 861}
 862
 863/**
 864 * ptrace_trap_notify - schedule trap to notify ptracer
 865 * @t: tracee wanting to notify tracer
 866 *
 867 * This function schedules sticky ptrace trap which is cleared on the next
 868 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 869 * ptracer.
 870 *
 871 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 872 * ptracer is listening for events, tracee is woken up so that it can
 873 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 874 * eventually taken without returning to userland after the existing traps
 875 * are finished by PTRACE_CONT.
 876 *
 877 * CONTEXT:
 878 * Must be called with @task->sighand->siglock held.
 879 */
 880static void ptrace_trap_notify(struct task_struct *t)
 881{
 882	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 883	assert_spin_locked(&t->sighand->siglock);
 884
 885	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 886	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 887}
 888
 889/*
 890 * Handle magic process-wide effects of stop/continue signals. Unlike
 891 * the signal actions, these happen immediately at signal-generation
 892 * time regardless of blocking, ignoring, or handling.  This does the
 893 * actual continuing for SIGCONT, but not the actual stopping for stop
 894 * signals. The process stop is done as a signal action for SIG_DFL.
 895 *
 896 * Returns true if the signal should be actually delivered, otherwise
 897 * it should be dropped.
 898 */
 899static bool prepare_signal(int sig, struct task_struct *p, bool force)
 900{
 901	struct signal_struct *signal = p->signal;
 902	struct task_struct *t;
 903	sigset_t flush;
 904
 905	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
 906		if (!(signal->flags & SIGNAL_GROUP_EXIT))
 907			return sig == SIGKILL;
 908		/*
 909		 * The process is in the middle of dying, nothing to do.
 910		 */
 911	} else if (sig_kernel_stop(sig)) {
 912		/*
 913		 * This is a stop signal.  Remove SIGCONT from all queues.
 914		 */
 915		siginitset(&flush, sigmask(SIGCONT));
 916		flush_sigqueue_mask(&flush, &signal->shared_pending);
 917		for_each_thread(p, t)
 918			flush_sigqueue_mask(&flush, &t->pending);
 919	} else if (sig == SIGCONT) {
 920		unsigned int why;
 921		/*
 922		 * Remove all stop signals from all queues, wake all threads.
 923		 */
 924		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 925		flush_sigqueue_mask(&flush, &signal->shared_pending);
 926		for_each_thread(p, t) {
 927			flush_sigqueue_mask(&flush, &t->pending);
 928			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 929			if (likely(!(t->ptrace & PT_SEIZED)))
 930				wake_up_state(t, __TASK_STOPPED);
 931			else
 932				ptrace_trap_notify(t);
 933		}
 934
 935		/*
 936		 * Notify the parent with CLD_CONTINUED if we were stopped.
 937		 *
 938		 * If we were in the middle of a group stop, we pretend it
 939		 * was already finished, and then continued. Since SIGCHLD
 940		 * doesn't queue we report only CLD_STOPPED, as if the next
 941	 * doesn't queue, we report only CLD_STOPPED, as if the next
 942		 */
 943		why = 0;
 944		if (signal->flags & SIGNAL_STOP_STOPPED)
 945			why |= SIGNAL_CLD_CONTINUED;
 946		else if (signal->group_stop_count)
 947			why |= SIGNAL_CLD_STOPPED;
 948
 949		if (why) {
 950			/*
 951			 * The first thread which returns from do_signal_stop()
 952			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 953			 * notify its parent. See get_signal().
 954			 */
 955			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 956			signal->group_stop_count = 0;
 957			signal->group_exit_code = 0;
 958		}
 959	}
 960
 961	return !sig_ignored(p, sig, force);
 962}
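/*
 * Illustrative aside (not part of signal.c): the process-wide SIGCONT
 * side effect implemented above happens at generation time, so a
 * stopped child resumes as soon as SIGCONT is sent, whether or not the
 * signal is blocked or ignored.  A sketch observing this with
 * waitpid(2):
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		raise(SIGSTOP);		/* child stops itself */
		_exit(0);
	}

	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("child continued\n");

	waitpid(pid, &status, 0);	/* reap the exit */
	return 0;
}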
 963
 964/*
 965 * Test if P wants to take SIG.  After we've checked all threads with this,
 966 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 967 * blocking SIG were ruled out because they are not running and already
 968 * have pending signals.  Such threads will dequeue from the shared queue
 969 * as soon as they're available, so putting the signal on the shared queue
 970 * will be equivalent to sending it to one such thread.
 971 */
 972static inline bool wants_signal(int sig, struct task_struct *p)
 973{
 974	if (sigismember(&p->blocked, sig))
 975		return false;
 976
 977	if (p->flags & PF_EXITING)
 978		return false;
 979
 980	if (sig == SIGKILL)
 981		return true;
 982
 983	if (task_is_stopped_or_traced(p))
 984		return false;
 985
 986	return task_curr(p) || !signal_pending(p);
 987}
 988
 989static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 990{
 991	struct signal_struct *signal = p->signal;
 992	struct task_struct *t;
 993
 994	/*
 995	 * Now find a thread we can wake up to take the signal off the queue.
 996	 *
 997	 * If the main thread wants the signal, it gets first crack.
 998	 * Probably the least surprising to the average bear.
 999	 */
1000	if (wants_signal(sig, p))
1001		t = p;
1002	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1003		/*
1004		 * There is just one thread and it does not need to be woken.
1005		 * It will dequeue unblocked signals before it runs again.
1006		 */
1007		return;
1008	else {
1009		/*
1010		 * Otherwise try to find a suitable thread.
1011		 */
1012		t = signal->curr_target;
1013		while (!wants_signal(sig, t)) {
1014			t = next_thread(t);
1015			if (t == signal->curr_target)
1016				/*
1017				 * No thread needs to be woken.
1018				 * Any eligible threads will see
1019				 * the signal in the queue soon.
1020				 */
1021				return;
1022		}
1023		signal->curr_target = t;
1024	}
1025
1026	/*
1027	 * Found a killable thread.  If the signal will be fatal,
1028	 * then start taking the whole group down immediately.
1029	 */
1030	if (sig_fatal(p, sig) &&
1031	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
1032	    !sigismember(&t->real_blocked, sig) &&
1033	    (sig == SIGKILL || !p->ptrace)) {
1034		/*
1035		 * This signal will be fatal to the whole group.
1036		 */
1037		if (!sig_kernel_coredump(sig)) {
1038			/*
1039			 * Start a group exit and wake everybody up.
1040			 * This way we don't have other threads
1041			 * running and doing things after a slower
1042			 * thread has the fatal signal pending.
1043			 */
1044			signal->flags = SIGNAL_GROUP_EXIT;
1045			signal->group_exit_code = sig;
1046			signal->group_stop_count = 0;
1047			t = p;
1048			do {
1049				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1050				sigaddset(&t->pending.signal, SIGKILL);
1051				signal_wake_up(t, 1);
1052			} while_each_thread(p, t);
1053			return;
1054		}
1055	}
1056
1057	/*
1058	 * The signal is already in the shared-pending queue.
1059	 * Tell the chosen thread to wake up and dequeue it.
1060	 */
1061	signal_wake_up(t, sig == SIGKILL);
1062	return;
1063}
1064
1065static inline bool legacy_queue(struct sigpending *signals, int sig)
1066{
1067	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1068}
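/*
 * Illustrative aside (not part of signal.c): legacy_queue() is why
 * classic signals coalesce while blocked but real-time signals do not.
 * Raising SIGUSR1 three times while blocked yields one delivery;
 * queueing SIGRTMIN three times yields three:
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t usr1, rt;

static void h_usr1(int sig) { usr1++; }
static void h_rt(int sig)   { rt++; }

int main(void)
{
	union sigval v = { .sival_int = 0 };
	sigset_t set;
	int i;

	signal(SIGUSR1, h_usr1);
	signal(SIGRTMIN, h_rt);

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (i = 0; i < 3; i++) {
		raise(SIGUSR1);			 /* coalesces into one pending bit */
		sigqueue(getpid(), SIGRTMIN, v); /* queues a fresh entry each time */
	}

	sigprocmask(SIG_UNBLOCK, &set, NULL);
	printf("SIGUSR1: %d, SIGRTMIN: %d\n", (int)usr1, (int)rt); /* 1, 3 */
	return 0;
}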
1069
1070static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1071			enum pid_type type, bool force)
1072{
1073	struct sigpending *pending;
1074	struct sigqueue *q;
1075	int override_rlimit;
1076	int ret = 0, result;
1077
1078	assert_spin_locked(&t->sighand->siglock);
1079
1080	result = TRACE_SIGNAL_IGNORED;
1081	if (!prepare_signal(sig, t, force))
1082		goto ret;
1083
1084	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1085	/*
1086	 * Short-circuit ignored signals and support queuing
1087	 * exactly one non-rt signal, so that we can get more
1088	 * detailed information about the cause of the signal.
1089	 */
1090	result = TRACE_SIGNAL_ALREADY_PENDING;
1091	if (legacy_queue(pending, sig))
1092		goto ret;
1093
1094	result = TRACE_SIGNAL_DELIVERED;
1095	/*
1096	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1097	 */
1098	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1099		goto out_set;
1100
1101	/*
1102	 * Real-time signals must be queued if sent by sigqueue, or
1103	 * some other real-time mechanism.  It is implementation
1104	 * defined whether kill() does so.  We attempt to do so, on
1105	 * the principle of least surprise, but since kill is not
1106	 * allowed to fail with EAGAIN when low on memory we just
1107	 * make sure at least one signal gets delivered and don't
1108	 * pass on the info struct.
1109	 */
1110	if (sig < SIGRTMIN)
1111		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1112	else
1113		override_rlimit = 0;
1114
1115	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1116	if (q) {
1117		list_add_tail(&q->list, &pending->list);
1118		switch ((unsigned long) info) {
1119		case (unsigned long) SEND_SIG_NOINFO:
1120			clear_siginfo(&q->info);
1121			q->info.si_signo = sig;
1122			q->info.si_errno = 0;
1123			q->info.si_code = SI_USER;
1124			q->info.si_pid = task_tgid_nr_ns(current,
1125							task_active_pid_ns(t));
1126			rcu_read_lock();
1127			q->info.si_uid =
1128				from_kuid_munged(task_cred_xxx(t, user_ns),
1129						 current_uid());
1130			rcu_read_unlock();
1131			break;
1132		case (unsigned long) SEND_SIG_PRIV:
1133			clear_siginfo(&q->info);
1134			q->info.si_signo = sig;
1135			q->info.si_errno = 0;
1136			q->info.si_code = SI_KERNEL;
1137			q->info.si_pid = 0;
1138			q->info.si_uid = 0;
1139			break;
1140		default:
1141			copy_siginfo(&q->info, info);
1142			break;
1143		}
1144	} else if (!is_si_special(info) &&
1145		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1146		/*
1147		 * Queue overflow, abort.  We may abort if the
1148		 * signal was rt and sent by user using something
1149		 * other than kill().
1150		 */
1151		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1152		ret = -EAGAIN;
1153		goto ret;
1154	} else {
1155		/*
1156		 * This is a silent loss of information.  We still
1157		 * send the signal, but the *info bits are lost.
1158		 */
1159		result = TRACE_SIGNAL_LOSE_INFO;
1160	}
1161
1162out_set:
1163	signalfd_notify(t, sig);
1164	sigaddset(&pending->signal, sig);
1165
1166	/* Let multiprocess signals appear after on-going forks */
1167	if (type > PIDTYPE_TGID) {
1168		struct multiprocess_signals *delayed;
1169		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1170			sigset_t *signal = &delayed->signal;
1171			/* Can't queue both a stop and a continue signal */
1172			if (sig == SIGCONT)
1173				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1174			else if (sig_kernel_stop(sig))
1175				sigdelset(signal, SIGCONT);
1176			sigaddset(signal, sig);
1177		}
1178	}
1179
1180	complete_signal(sig, t, type);
1181ret:
1182	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1183	return ret;
1184}
1185
1186static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1187{
1188	bool ret = false;
1189	switch (siginfo_layout(info->si_signo, info->si_code)) {
1190	case SIL_KILL:
1191	case SIL_CHLD:
1192	case SIL_RT:
1193		ret = true;
1194		break;
1195	case SIL_TIMER:
1196	case SIL_POLL:
1197	case SIL_FAULT:
1198	case SIL_FAULT_MCEERR:
1199	case SIL_FAULT_BNDERR:
1200	case SIL_FAULT_PKUERR:
1201	case SIL_SYS:
1202		ret = false;
1203		break;
1204	}
1205	return ret;
1206}
1207
1208static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1209			enum pid_type type)
1210{
1211	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1212	bool force = false;
1213
1214	if (info == SEND_SIG_NOINFO) {
1215		/* Force if sent from an ancestor pid namespace */
1216		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1217	} else if (info == SEND_SIG_PRIV) {
1218		/* Don't ignore kernel generated signals */
1219		force = true;
1220	} else if (has_si_pid_and_uid(info)) {
1221		/* SIGKILL and SIGSTOP are special, or the siginfo carries ids */
1222		struct user_namespace *t_user_ns;
1223
1224		rcu_read_lock();
1225		t_user_ns = task_cred_xxx(t, user_ns);
1226		if (current_user_ns() != t_user_ns) {
1227			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1228			info->si_uid = from_kuid_munged(t_user_ns, uid);
1229		}
1230		rcu_read_unlock();
1231
1232		/* A kernel generated signal? */
1233		force = (info->si_code == SI_KERNEL);
1234
1235		/* From an ancestor pid namespace? */
1236		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1237			info->si_pid = 0;
1238			force = true;
1239		}
1240	}
1241	return __send_signal(sig, info, t, type, force);
1242}
1243
1244static void print_fatal_signal(int signr)
1245{
1246	struct pt_regs *regs = signal_pt_regs();
1247	pr_info("potentially unexpected fatal signal %d.\n", signr);
1248
1249#if defined(__i386__) && !defined(__arch_um__)
1250	pr_info("code at %08lx: ", regs->ip);
1251	{
1252		int i;
1253		for (i = 0; i < 16; i++) {
1254			unsigned char insn;
1255
1256			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1257				break;
1258			pr_cont("%02x ", insn);
1259		}
1260	}
1261	pr_cont("\n");
1262#endif
1263	preempt_disable();
1264	show_regs(regs);
1265	preempt_enable();
1266}
1267
1268static int __init setup_print_fatal_signals(char *str)
1269{
1270	get_option(&str, &print_fatal_signals);
1271
1272	return 1;
1273}
1274
1275__setup("print-fatal-signals=", setup_print_fatal_signals);
1276
1277int
1278__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1279{
1280	return send_signal(sig, info, p, PIDTYPE_TGID);
1281}
1282
1283int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1284			enum pid_type type)
1285{
1286	unsigned long flags;
1287	int ret = -ESRCH;
1288
1289	if (lock_task_sighand(p, &flags)) {
1290		ret = send_signal(sig, info, p, type);
1291		unlock_task_sighand(p, &flags);
1292	}
1293
1294	return ret;
1295}
1296
1297/*
1298 * Force a signal that the process can't ignore: if necessary
1299 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1300 *
1301 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1302 * since we do not want to have a signal handler that was blocked
1303 * be invoked when user space had explicitly blocked it.
1304 *
1305 * We don't want to have recursive SIGSEGV's etc, for example,
1306 * that is why we also clear SIGNAL_UNKILLABLE.
1307 */
1308static int
1309force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1310{
1311	unsigned long flags;
1312	int ret, blocked, ignored;
1313	struct k_sigaction *action;
1314	int sig = info->si_signo;
1315
1316	spin_lock_irqsave(&t->sighand->siglock, flags);
1317	action = &t->sighand->action[sig-1];
1318	ignored = action->sa.sa_handler == SIG_IGN;
1319	blocked = sigismember(&t->blocked, sig);
1320	if (blocked || ignored) {
1321		action->sa.sa_handler = SIG_DFL;
1322		if (blocked) {
1323			sigdelset(&t->blocked, sig);
1324			recalc_sigpending_and_wake(t);
1325		}
1326	}
1327	/*
1328	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1329	 * debugging to leave init killable.
1330	 */
1331	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1332		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1333	ret = send_signal(sig, info, t, PIDTYPE_PID);
1334	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1335
1336	return ret;
1337}
1338
1339int force_sig_info(struct kernel_siginfo *info)
1340{
1341	return force_sig_info_to_task(info, current);
1342}
1343
1344/*
1345 * Nuke all other threads in the group.
1346 */
1347int zap_other_threads(struct task_struct *p)
1348{
1349	struct task_struct *t = p;
1350	int count = 0;
1351
1352	p->signal->group_stop_count = 0;
1353
1354	while_each_thread(p, t) {
1355		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1356		count++;
1357
1358		/* Don't bother with already dead threads */
1359		if (t->exit_state)
1360			continue;
1361		sigaddset(&t->pending.signal, SIGKILL);
1362		signal_wake_up(t, 1);
1363	}
1364
1365	return count;
1366}
1367
1368struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1369					   unsigned long *flags)
1370{
1371	struct sighand_struct *sighand;
1372
1373	rcu_read_lock();
1374	for (;;) {
1375		sighand = rcu_dereference(tsk->sighand);
1376		if (unlikely(sighand == NULL))
1377			break;
1378
1379		/*
1380		 * This sighand can be already freed and even reused, but
1381		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1382		 * initializes ->siglock: this slab can't go away, it has
1383		 * the same object type, ->siglock can't be reinitialized.
1384		 *
1385		 * We need to ensure that tsk->sighand is still the same
1386		 * after we take the lock, we can race with de_thread() or
1387		 * __exit_signal(). In the latter case the next iteration
1388		 * must see ->sighand == NULL.
1389		 */
1390		spin_lock_irqsave(&sighand->siglock, *flags);
1391		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1392			break;
1393		spin_unlock_irqrestore(&sighand->siglock, *flags);
1394	}
1395	rcu_read_unlock();
1396
1397	return sighand;
1398}
1399
1400/*
1401 * send signal info to all the members of a group
1402 */
1403int group_send_sig_info(int sig, struct kernel_siginfo *info,
1404			struct task_struct *p, enum pid_type type)
1405{
1406	int ret;
1407
1408	rcu_read_lock();
1409	ret = check_kill_permission(sig, info, p);
1410	rcu_read_unlock();
1411
1412	if (!ret && sig)
1413		ret = do_send_sig_info(sig, info, p, type);
1414
1415	return ret;
1416}
1417
1418/*
1419 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1420 * control characters do (^C, ^Z etc)
1421 * - the caller must hold at least a readlock on tasklist_lock
1422 */
1423int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1424{
1425	struct task_struct *p = NULL;
1426	int retval, success;
1427
1428	success = 0;
1429	retval = -ESRCH;
1430	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1431		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1432		success |= !err;
1433		retval = err;
1434	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1435	return success ? 0 : retval;
1436}
1437
1438int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1439{
1440	int error = -ESRCH;
1441	struct task_struct *p;
1442
1443	for (;;) {
1444		rcu_read_lock();
1445		p = pid_task(pid, PIDTYPE_PID);
1446		if (p)
1447			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1448		rcu_read_unlock();
1449		if (likely(!p || error != -ESRCH))
1450			return error;
1451
1452		/*
1453		 * The task was unhashed in between, try again.  If it
1454		 * is dead, pid_task() will return NULL, if we race with
1455		 * de_thread() it will find the new leader.
1456		 */
1457	}
1458}
1459
1460static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1461{
1462	int error;
1463	rcu_read_lock();
1464	error = kill_pid_info(sig, info, find_vpid(pid));
1465	rcu_read_unlock();
1466	return error;
1467}
1468
1469static inline bool kill_as_cred_perm(const struct cred *cred,
1470				     struct task_struct *target)
1471{
1472	const struct cred *pcred = __task_cred(target);
1473
1474	return uid_eq(cred->euid, pcred->suid) ||
1475	       uid_eq(cred->euid, pcred->uid) ||
1476	       uid_eq(cred->uid, pcred->suid) ||
1477	       uid_eq(cred->uid, pcred->uid);
1478}
1479
1480/*
1481 * The usb asyncio usage of siginfo is wrong.  The glibc support
1482 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1483 * AKA after the generic fields:
1484 *	kernel_pid_t	si_pid;
1485 *	kernel_uid32_t	si_uid;
1486 *	sigval_t	si_value;
1487 *
1488 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1489 * after the generic fields is:
1490 *	void __user 	*si_addr;
1491 *
1492 * This is a practical problem when there is a 64bit big endian kernel
1493 * and a 32bit userspace, as the 32bit address is encoded in the low
1494 * 32bits of the pointer.  Those low 32bits are stored at a higher
1495 * address than a 32bit pointer expects, so userspace will not
1496 * see the address it was expecting for its completions.
1497 *
1498 * There is nothing in the encoding that can allow
1499 * copy_siginfo_to_user32 to detect this confusion of formats, so
1500 * handle this by requiring the caller of kill_pid_usb_asyncio to
1501 * notice when this situation takes place and to store the 32bit
1502 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1503 * parameter.
1504 */
1505int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1506			 struct pid *pid, const struct cred *cred)
1507{
1508	struct kernel_siginfo info;
1509	struct task_struct *p;
1510	unsigned long flags;
1511	int ret = -EINVAL;
1512
1513	if (!valid_signal(sig))
1514		return ret;
1515
1516	clear_siginfo(&info);
1517	info.si_signo = sig;
1518	info.si_errno = errno;
1519	info.si_code = SI_ASYNCIO;
1520	*((sigval_t *)&info.si_pid) = addr;
1521
1522	rcu_read_lock();
1523	p = pid_task(pid, PIDTYPE_PID);
1524	if (!p) {
1525		ret = -ESRCH;
1526		goto out_unlock;
1527	}
1528	if (!kill_as_cred_perm(cred, p)) {
1529		ret = -EPERM;
1530		goto out_unlock;
1531	}
1532	ret = security_task_kill(p, &info, sig, cred);
1533	if (ret)
1534		goto out_unlock;
1535
1536	if (sig) {
1537		if (lock_task_sighand(p, &flags)) {
1538			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1539			unlock_task_sighand(p, &flags);
1540		} else
1541			ret = -ESRCH;
1542	}
1543out_unlock:
1544	rcu_read_unlock();
1545	return ret;
1546}
1547EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1548
1549/*
1550 * kill_something_info() interprets pid in interesting ways just like kill(2).
1551 *
1552 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1553 * is probably wrong.  Should make it like BSD or SYSV.
1554 */
1555
1556static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1557{
1558	int ret;
1559
1560	if (pid > 0)
1561		return kill_proc_info(sig, info, pid);
1562
1563	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1564	if (pid == INT_MIN)
1565		return -ESRCH;
1566
1567	read_lock(&tasklist_lock);
1568	if (pid != -1) {
1569		ret = __kill_pgrp_info(sig, info,
1570				pid ? find_vpid(-pid) : task_pgrp(current));
1571	} else {
1572		int retval = 0, count = 0;
1573		struct task_struct *p;
1574
1575		for_each_process(p) {
1576			if (task_pid_vnr(p) > 1 &&
1577					!same_thread_group(p, current)) {
1578				int err = group_send_sig_info(sig, info, p,
1579							      PIDTYPE_MAX);
1580				++count;
1581				if (err != -EPERM)
1582					retval = err;
1583			}
1584		}
1585		ret = count ? retval : -ESRCH;
1586	}
1587	read_unlock(&tasklist_lock);
1588
1589	return ret;
1590}
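/*
 * Illustrative aside (not part of signal.c): the pid encodings that
 * kill_something_info() interprets, as seen from kill(2).  Signal 0
 * performs only the existence/permission check, so this probes each
 * case without side effects:
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	printf("pid  >  0: %d\n", kill(getpid(), 0));	/* one process */
	printf("pid ==  0: %d\n", kill(0, 0));		/* caller's process group */
	printf("pid <  -1: %d\n", kill(-getpgrp(), 0));	/* process group -pid */
	/* pid == -1 would probe every process we may signal; skipped here. */
	return 0;
}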
1591
1592/*
1593 * These are for backward compatibility with the rest of the kernel source.
1594 */
1595
1596int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1597{
1598	/*
1599	 * Make sure legacy kernel users don't send in bad values
1600	 * (normal paths check this in check_kill_permission).
1601	 */
1602	if (!valid_signal(sig))
1603		return -EINVAL;
1604
1605	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1606}
1607EXPORT_SYMBOL(send_sig_info);
1608
1609#define __si_special(priv) \
1610	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1611
1612int
1613send_sig(int sig, struct task_struct *p, int priv)
1614{
1615	return send_sig_info(sig, __si_special(priv), p);
1616}
1617EXPORT_SYMBOL(send_sig);
1618
1619void force_sig(int sig)
1620{
1621	struct kernel_siginfo info;
1622
1623	clear_siginfo(&info);
1624	info.si_signo = sig;
1625	info.si_errno = 0;
1626	info.si_code = SI_KERNEL;
1627	info.si_pid = 0;
1628	info.si_uid = 0;
1629	force_sig_info(&info);
1630}
1631EXPORT_SYMBOL(force_sig);
1632
1633/*
1634 * When things go south during signal handling, we
1635 * will force a SIGSEGV. And if the signal that caused
1636 * the problem was already a SIGSEGV, we'll want to
1637 * make sure we don't even try to deliver the signal..
1638 */
1639void force_sigsegv(int sig)
1640{
1641	struct task_struct *p = current;
1642
1643	if (sig == SIGSEGV) {
1644		unsigned long flags;
1645		spin_lock_irqsave(&p->sighand->siglock, flags);
1646		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1647		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1648	}
1649	force_sig(SIGSEGV);
1650}
1651
1652int force_sig_fault_to_task(int sig, int code, void __user *addr
1653	___ARCH_SI_TRAPNO(int trapno)
1654	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1655	, struct task_struct *t)
1656{
1657	struct kernel_siginfo info;
1658
1659	clear_siginfo(&info);
1660	info.si_signo = sig;
1661	info.si_errno = 0;
1662	info.si_code  = code;
1663	info.si_addr  = addr;
1664#ifdef __ARCH_SI_TRAPNO
1665	info.si_trapno = trapno;
1666#endif
1667#ifdef __ia64__
1668	info.si_imm = imm;
1669	info.si_flags = flags;
1670	info.si_isr = isr;
1671#endif
1672	return force_sig_info_to_task(&info, t);
1673}
1674
1675int force_sig_fault(int sig, int code, void __user *addr
1676	___ARCH_SI_TRAPNO(int trapno)
1677	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1678{
1679	return force_sig_fault_to_task(sig, code, addr
1680				       ___ARCH_SI_TRAPNO(trapno)
1681				       ___ARCH_SI_IA64(imm, flags, isr), current);
1682}
1683
1684int send_sig_fault(int sig, int code, void __user *addr
1685	___ARCH_SI_TRAPNO(int trapno)
1686	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1687	, struct task_struct *t)
1688{
1689	struct kernel_siginfo info;
1690
1691	clear_siginfo(&info);
1692	info.si_signo = sig;
1693	info.si_errno = 0;
1694	info.si_code  = code;
1695	info.si_addr  = addr;
1696#ifdef __ARCH_SI_TRAPNO
1697	info.si_trapno = trapno;
1698#endif
1699#ifdef __ia64__
1700	info.si_imm = imm;
1701	info.si_flags = flags;
1702	info.si_isr = isr;
1703#endif
1704	return send_sig_info(info.si_signo, &info, t);
1705}
1706
1707int force_sig_mceerr(int code, void __user *addr, short lsb)
1708{
1709	struct kernel_siginfo info;
1710
1711	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1712	clear_siginfo(&info);
1713	info.si_signo = SIGBUS;
1714	info.si_errno = 0;
1715	info.si_code = code;
1716	info.si_addr = addr;
1717	info.si_addr_lsb = lsb;
1718	return force_sig_info(&info);
1719}
1720
1721int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1722{
1723	struct kernel_siginfo info;
1724
1725	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1726	clear_siginfo(&info);
1727	info.si_signo = SIGBUS;
1728	info.si_errno = 0;
1729	info.si_code = code;
1730	info.si_addr = addr;
1731	info.si_addr_lsb = lsb;
1732	return send_sig_info(info.si_signo, &info, t);
1733}
1734EXPORT_SYMBOL(send_sig_mceerr);
1735
1736int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1737{
1738	struct kernel_siginfo info;
1739
1740	clear_siginfo(&info);
1741	info.si_signo = SIGSEGV;
1742	info.si_errno = 0;
1743	info.si_code  = SEGV_BNDERR;
1744	info.si_addr  = addr;
1745	info.si_lower = lower;
1746	info.si_upper = upper;
1747	return force_sig_info(&info);
1748}
1749
1750#ifdef SEGV_PKUERR
1751int force_sig_pkuerr(void __user *addr, u32 pkey)
1752{
1753	struct kernel_siginfo info;
1754
1755	clear_siginfo(&info);
1756	info.si_signo = SIGSEGV;
1757	info.si_errno = 0;
1758	info.si_code  = SEGV_PKUERR;
1759	info.si_addr  = addr;
1760	info.si_pkey  = pkey;
1761	return force_sig_info(&info);
1762}
1763#endif
1764
1765/* For the crazy architectures that include trap information in
1766 * the errno field, instead of an actual errno value.
1767 */
1768int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1769{
1770	struct kernel_siginfo info;
1771
1772	clear_siginfo(&info);
1773	info.si_signo = SIGTRAP;
1774	info.si_errno = errno;
1775	info.si_code  = TRAP_HWBKPT;
1776	info.si_addr  = addr;
1777	return force_sig_info(&info);
1778}
1779
1780int kill_pgrp(struct pid *pid, int sig, int priv)
1781{
1782	int ret;
1783
1784	read_lock(&tasklist_lock);
1785	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1786	read_unlock(&tasklist_lock);
1787
1788	return ret;
1789}
1790EXPORT_SYMBOL(kill_pgrp);
1791
1792int kill_pid(struct pid *pid, int sig, int priv)
1793{
1794	return kill_pid_info(sig, __si_special(priv), pid);
1795}
1796EXPORT_SYMBOL(kill_pid);
1797
1798/*
1799 * These functions support sending signals using preallocated sigqueue
1800 * structures.  This is needed "because realtime applications cannot
1801 * afford to lose notifications of asynchronous events, like timer
1802 * expirations or I/O completions".  In the case of POSIX Timers
1803 * we allocate the sigqueue structure in timer_create().  If this
1804 * allocation fails we are able to report the failure to the application
1805 * with an EAGAIN error.
1806 */
1807struct sigqueue *sigqueue_alloc(void)
1808{
1809	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1810
1811	if (q)
1812		q->flags |= SIGQUEUE_PREALLOC;
1813
1814	return q;
1815}
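/*
 * Illustrative aside (not part of signal.c): the preallocation above is
 * what lets timer_create(2) with SIGEV_SIGNAL report EAGAIN up front
 * instead of dropping an expiration later.  A sketch (may need -lrt on
 * older glibc):
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,
	};
	timer_t id;

	/* The timer's sigqueue is allocated here, not at expiry time. */
	if (timer_create(CLOCK_MONOTONIC, &sev, &id)) {
		perror("timer_create");	/* EAGAIN if the quota is exhausted */
		return 1;
	}
	timer_delete(id);
	return 0;
}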
1816
1817void sigqueue_free(struct sigqueue *q)
1818{
1819	unsigned long flags;
1820	spinlock_t *lock = &current->sighand->siglock;
1821
1822	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1823	/*
1824	 * We must hold ->siglock while testing q->list
1825	 * to serialize with collect_signal() or with
1826	 * __exit_signal()->flush_sigqueue().
1827	 */
1828	spin_lock_irqsave(lock, flags);
1829	q->flags &= ~SIGQUEUE_PREALLOC;
1830	/*
1831	 * If it is queued it will be freed when dequeued,
1832	 * like the "regular" sigqueue.
1833	 */
1834	if (!list_empty(&q->list))
1835		q = NULL;
1836	spin_unlock_irqrestore(lock, flags);
1837
1838	if (q)
1839		__sigqueue_free(q);
1840}
1841
1842int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1843{
1844	int sig = q->info.si_signo;
1845	struct sigpending *pending;
1846	struct task_struct *t;
1847	unsigned long flags;
1848	int ret, result;
1849
1850	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1851
1852	ret = -1;
1853	rcu_read_lock();
1854	t = pid_task(pid, type);
1855	if (!t || !likely(lock_task_sighand(t, &flags)))
1856		goto ret;
1857
1858	ret = 1; /* the signal is ignored */
1859	result = TRACE_SIGNAL_IGNORED;
1860	if (!prepare_signal(sig, t, false))
1861		goto out;
1862
1863	ret = 0;
1864	if (unlikely(!list_empty(&q->list))) {
1865		/*
1866		 * If an SI_TIMER entry is already queued, just increment
1867		 * the overrun count.
1868		 */
1869		BUG_ON(q->info.si_code != SI_TIMER);
1870		q->info.si_overrun++;
1871		result = TRACE_SIGNAL_ALREADY_PENDING;
1872		goto out;
1873	}
1874	q->info.si_overrun = 0;
1875
1876	signalfd_notify(t, sig);
1877	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1878	list_add_tail(&q->list, &pending->list);
1879	sigaddset(&pending->signal, sig);
1880	complete_signal(sig, t, type);
1881	result = TRACE_SIGNAL_DELIVERED;
1882out:
1883	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1884	unlock_task_sighand(t, &flags);
1885ret:
1886	rcu_read_unlock();
1887	return ret;
1888}
1889
1890static void do_notify_pidfd(struct task_struct *task)
1891{
1892	struct pid *pid;
1893
1894	WARN_ON(task->exit_state == 0);
1895	pid = task_pid(task);
1896	wake_up_all(&pid->wait_pidfd);
1897}
1898
1899/*
1900 * Let a parent know about the death of a child.
1901 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1902 *
1903 * Returns true if our parent ignored us and so we've switched to
1904 * self-reaping.
1905 */
1906bool do_notify_parent(struct task_struct *tsk, int sig)
1907{
1908	struct kernel_siginfo info;
1909	unsigned long flags;
1910	struct sighand_struct *psig;
1911	bool autoreap = false;
1912	u64 utime, stime;
1913
1914	BUG_ON(sig == -1);
1915
1916 	/* do_notify_parent_cldstop should have been called instead.  */
1917 	BUG_ON(task_is_stopped_or_traced(tsk));
1918
1919	BUG_ON(!tsk->ptrace &&
1920	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1921
1922	/* Wake up all pidfd waiters */
1923	do_notify_pidfd(tsk);
1924
1925	if (sig != SIGCHLD) {
1926		/*
1927		 * This is only possible if parent == real_parent.
1928		 * Check if it has changed security domain.
1929		 */
1930		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1931			sig = SIGCHLD;
1932	}
1933
1934	clear_siginfo(&info);
1935	info.si_signo = sig;
1936	info.si_errno = 0;
1937	/*
1938	 * We are under tasklist_lock here so our parent is tied to
1939	 * us and cannot change.
1940	 *
1941	 * task_active_pid_ns will always return the same pid namespace
1942	 * until a task passes through release_task.
1943	 *
1944	 * write_lock() currently calls preempt_disable() which is the
1945	 * same as rcu_read_lock(), but according to Oleg it is not
1946	 * correct to rely on this.
1947	 */
1948	rcu_read_lock();
1949	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1950	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1951				       task_uid(tsk));
1952	rcu_read_unlock();
1953
1954	task_cputime(tsk, &utime, &stime);
1955	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1956	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1957
1958	info.si_status = tsk->exit_code & 0x7f;
1959	if (tsk->exit_code & 0x80)
1960		info.si_code = CLD_DUMPED;
1961	else if (tsk->exit_code & 0x7f)
1962		info.si_code = CLD_KILLED;
1963	else {
1964		info.si_code = CLD_EXITED;
1965		info.si_status = tsk->exit_code >> 8;
1966	}
1967
1968	psig = tsk->parent->sighand;
1969	spin_lock_irqsave(&psig->siglock, flags);
1970	if (!tsk->ptrace && sig == SIGCHLD &&
1971	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1972	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1973		/*
1974		 * We are exiting and our parent doesn't care.  POSIX.1
1975		 * defines special semantics for setting SIGCHLD to SIG_IGN
1976		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1977		 * automatically and not left for our parent's wait4 call.
1978		 * Rather than having the parent do it as a magic kind of
1979		 * signal handler, we just set this to tell do_exit that we
1980		 * can be cleaned up without becoming a zombie.  Note that
1981		 * we still call __wake_up_parent in this case, because a
1982		 * blocked sys_wait4 might now return -ECHILD.
1983		 *
1984		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1985		 * is implementation-defined: we do (if you don't want
1986		 * it, just use SIG_IGN instead).
1987		 */
1988		autoreap = true;
1989		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1990			sig = 0;
1991	}
1992	/*
1993	 * Send with __send_signal as si_pid and si_uid are in the
1994	 * parent's namespaces.
1995	 */
1996	if (valid_signal(sig) && sig)
1997		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
1998	__wake_up_parent(tsk, tsk->parent);
1999	spin_unlock_irqrestore(&psig->siglock, flags);
2000
2001	return autoreap;
2002}
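/*
 * A minimal userspace sketch (illustrative, not part of this file): the
 * autoreap path above is visible from userspace as POSIX's SIG_IGN
 * semantics for SIGCHLD: no zombies, and a blocked wait() eventually
 * failing with ECHILD.
 */
#if 0	/* userspace illustration only */
#include <errno.h>
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* request automatic reaping */
	if (fork() == 0)
		_exit(0);
	/* Blocks until the child is gone, then fails: no zombie exists. */
	return (wait(NULL) < 0 && errno == ECHILD) ? 0 : 1;
}
#endif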
2003
2004/**
2005 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2006 * @tsk: task reporting the state change
2007 * @for_ptracer: the notification is for ptracer
2008 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2009 *
2010 * Notify @tsk's parent that the stopped/continued state has changed.  If
2011 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2012 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2013 *
2014 * CONTEXT:
2015 * Must be called with tasklist_lock at least read locked.
2016 */
2017static void do_notify_parent_cldstop(struct task_struct *tsk,
2018				     bool for_ptracer, int why)
2019{
2020	struct kernel_siginfo info;
2021	unsigned long flags;
2022	struct task_struct *parent;
2023	struct sighand_struct *sighand;
2024	u64 utime, stime;
2025
2026	if (for_ptracer) {
2027		parent = tsk->parent;
2028	} else {
2029		tsk = tsk->group_leader;
2030		parent = tsk->real_parent;
2031	}
2032
2033	clear_siginfo(&info);
2034	info.si_signo = SIGCHLD;
2035	info.si_errno = 0;
2036	/*
2037	 * see comment in do_notify_parent() about the following 4 lines
2038	 */
2039	rcu_read_lock();
2040	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2041	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2042	rcu_read_unlock();
2043
2044	task_cputime(tsk, &utime, &stime);
2045	info.si_utime = nsec_to_clock_t(utime);
2046	info.si_stime = nsec_to_clock_t(stime);
2047
2048 	info.si_code = why;
2049 	switch (why) {
2050 	case CLD_CONTINUED:
2051 		info.si_status = SIGCONT;
2052 		break;
2053 	case CLD_STOPPED:
2054 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2055 		break;
2056 	case CLD_TRAPPED:
2057 		info.si_status = tsk->exit_code & 0x7f;
2058 		break;
2059 	default:
2060 		BUG();
2061 	}
2062
2063	sighand = parent->sighand;
2064	spin_lock_irqsave(&sighand->siglock, flags);
2065	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2066	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2067		__group_send_sig_info(SIGCHLD, &info, parent);
2068	/*
2069	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2070	 */
2071	__wake_up_parent(tsk, parent);
2072	spin_unlock_irqrestore(&sighand->siglock, flags);
2073}
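/*
 * A minimal userspace sketch (illustrative, not part of this file): a
 * parent observes the notifications built above through waitid(), which
 * reports the same CLD_STOPPED/CLD_CONTINUED si_code values, unless the
 * parent opted out with SA_NOCLDSTOP.
 */
#if 0	/* userspace illustration only */
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

int stop_or_cont(pid_t child)
{
	siginfo_t si;

	if (waitid(P_PID, child, &si, WSTOPPED | WCONTINUED | WNOWAIT) < 0)
		return -1;
	return si.si_code;	/* CLD_STOPPED or CLD_CONTINUED */
}
#endif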
2074
2075static inline bool may_ptrace_stop(void)
2076{
2077	if (!likely(current->ptrace))
2078		return false;
2079	/*
2080	 * Are we in the middle of do_coredump?
2081	 * If so and our tracer is also part of the coredump stopping
2082	 * is a deadlock situation, and pointless because our tracer
2083	 * is dead so don't allow us to stop.
2084	 * If SIGKILL was already sent before the caller unlocked
2085	 * ->siglock we must see ->core_state != NULL. Otherwise it
2086	 * is safe to enter schedule().
2087	 *
2088	 * This is almost outdated, a task with the pending SIGKILL can't
2089	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2090	 * after SIGKILL was already dequeued.
2091	 */
2092	if (unlikely(current->mm->core_state) &&
2093	    unlikely(current->mm == current->parent->mm))
2094		return false;
2095
2096	return true;
2097}
2098
2099/*
2100 * Return non-zero if there is a SIGKILL that should be waking us up.
2101 * Called with the siglock held.
2102 */
2103static bool sigkill_pending(struct task_struct *tsk)
2104{
2105	return sigismember(&tsk->pending.signal, SIGKILL) ||
2106	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2107}
2108
2109/*
2110 * This must be called with current->sighand->siglock held.
2111 *
2112 * This should be the path for all ptrace stops.
2113 * We always set current->last_siginfo while stopped here.
2114 * That makes it a way to test a stopped process for
2115 * being ptrace-stopped vs being job-control-stopped.
2116 *
2117 * If we actually decide not to stop at all because the tracer
2118 * is gone, we keep current->exit_code unless clear_code.
2119 */
2120static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2121	__releases(&current->sighand->siglock)
2122	__acquires(&current->sighand->siglock)
2123{
2124	bool gstop_done = false;
2125
2126	if (arch_ptrace_stop_needed(exit_code, info)) {
2127		/*
2128		 * The arch code has something special to do before a
2129		 * ptrace stop.  This is allowed to block, e.g. for faults
2130		 * on user stack pages.  We can't keep the siglock while
2131		 * calling arch_ptrace_stop, so we must release it now.
2132		 * To preserve proper semantics, we must do this before
2133		 * any signal bookkeeping like checking group_stop_count.
2134		 * Meanwhile, a SIGKILL could come in before we retake the
2135		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
2136		 * So after regaining the lock, we must check for SIGKILL.
2137		 */
2138		spin_unlock_irq(&current->sighand->siglock);
2139		arch_ptrace_stop(exit_code, info);
2140		spin_lock_irq(&current->sighand->siglock);
2141		if (sigkill_pending(current))
2142			return;
2143	}
2144
2145	set_special_state(TASK_TRACED);
2146
2147	/*
2148	 * We're committing to trapping.  TRACED should be visible before
2149	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2150	 * Also, transition to TRACED and updates to ->jobctl should be
2151	 * atomic with respect to siglock and should be done after the arch
2152	 * hook as siglock is released and regrabbed across it.
2153	 *
2154	 *     TRACER				    TRACEE
2155	 *
2156	 *     ptrace_attach()
2157	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2158	 *     do_wait()
2159	 *       set_current_state()                smp_wmb();
2160	 *       ptrace_do_wait()
2161	 *         wait_task_stopped()
2162	 *           task_stopped_code()
2163	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2164	 */
2165	smp_wmb();
2166
2167	current->last_siginfo = info;
2168	current->exit_code = exit_code;
2169
2170	/*
2171	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2172	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2173	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2174	 * could be clear now.  We act as if SIGCONT is received after
2175	 * TASK_TRACED is entered - ignore it.
2176	 */
2177	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2178		gstop_done = task_participate_group_stop(current);
2179
2180	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2181	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2182	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2183		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2184
2185	/* entering a trap, clear TRAPPING */
2186	task_clear_jobctl_trapping(current);
2187
2188	spin_unlock_irq(&current->sighand->siglock);
2189	read_lock(&tasklist_lock);
2190	if (may_ptrace_stop()) {
2191		/*
2192		 * Notify parents of the stop.
2193		 *
2194		 * While ptraced, there are two parents - the ptracer and
2195		 * the real_parent of the group_leader.  The ptracer should
2196		 * know about every stop while the real parent is only
2197		 * interested in the completion of group stop.  The states
2198		 * for the two don't interact with each other.  Notify
2199		 * separately unless they're gonna be duplicates.
2200		 */
2201		do_notify_parent_cldstop(current, true, why);
2202		if (gstop_done && ptrace_reparented(current))
2203			do_notify_parent_cldstop(current, false, why);
2204
2205		/*
2206		 * Don't want to allow preemption here, because
2207		 * sys_ptrace() needs this task to be inactive.
2208		 *
2209		 * XXX: implement read_unlock_no_resched().
2210		 */
2211		preempt_disable();
2212		read_unlock(&tasklist_lock);
2213		cgroup_enter_frozen();
2214		preempt_enable_no_resched();
2215		freezable_schedule();
2216		cgroup_leave_frozen(true);
2217	} else {
2218		/*
2219		 * By the time we got the lock, our tracer went away.
2220		 * Don't drop the lock yet, another tracer may come.
2221		 *
2222		 * If @gstop_done, the ptracer went away between group stop
2223		 * completion and here.  During detach, it would have set
2224		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2225		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2226		 * the real parent of the group stop completion is enough.
2227		 */
2228		if (gstop_done)
2229			do_notify_parent_cldstop(current, false, why);
2230
2231		/* tasklist protects us from ptrace_freeze_traced() */
2232		__set_current_state(TASK_RUNNING);
2233		if (clear_code)
2234			current->exit_code = 0;
2235		read_unlock(&tasklist_lock);
2236	}
2237
2238	/*
2239	 * We are back.  Now reacquire the siglock before touching
2240	 * last_siginfo, so that we are sure to have synchronized with
2241	 * any signal-sending on another CPU that wants to examine it.
2242	 */
2243	spin_lock_irq(&current->sighand->siglock);
2244	current->last_siginfo = NULL;
2245
2246	/* LISTENING can be set only during STOP traps, clear it */
2247	current->jobctl &= ~JOBCTL_LISTENING;
2248
2249	/*
2250	 * Queued signals ignored us while we were stopped for tracing.
2251	 * So check for any that we should take before resuming user mode.
2252	 * This sets TIF_SIGPENDING, but never clears it.
2253	 */
2254	recalc_sigpending_tsk(current);
2255}
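/*
 * A minimal userspace sketch (illustrative, not part of this file): the
 * tracer side of the trap above.  waitpid() returns once the tracee has
 * parked itself in TASK_TRACED inside ptrace_stop().
 */
#if 0	/* userspace illustration only */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int attach_and_resume(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0)
		return -1;
	if (waitpid(pid, &status, 0) < 0)	/* tracee now trapped */
		return -1;
	return ptrace(PTRACE_CONT, pid, NULL, NULL);
}
#endif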
2256
2257static void ptrace_do_notify(int signr, int exit_code, int why)
2258{
2259	kernel_siginfo_t info;
2260
2261	clear_siginfo(&info);
2262	info.si_signo = signr;
2263	info.si_code = exit_code;
2264	info.si_pid = task_pid_vnr(current);
2265	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2266
2267	/* Let the debugger run.  */
2268	ptrace_stop(exit_code, why, 1, &info);
2269}
2270
2271void ptrace_notify(int exit_code)
2272{
2273	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2274	if (unlikely(current->task_works))
2275		task_work_run();
2276
2277	spin_lock_irq(&current->sighand->siglock);
2278	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2279	spin_unlock_irq(&current->sighand->siglock);
2280}
2281
2282/**
2283 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2284 * @signr: signr causing group stop if initiating
2285 *
2286 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2287 * and participate in it.  If already set, participate in the existing
2288 * group stop.  If participated in a group stop (and thus slept), %true is
2289 * returned with siglock released.
2290 *
2291 * If ptraced, this function doesn't handle stop itself.  Instead,
2292 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2293 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2294 * place afterwards.
2295 *
2296 * CONTEXT:
2297 * Must be called with @current->sighand->siglock held, which is released
2298 * on %true return.
2299 *
2300 * RETURNS:
2301 * %false if group stop is already cancelled or ptrace trap is scheduled.
2302 * %true if participated in group stop.
2303 */
2304static bool do_signal_stop(int signr)
2305	__releases(&current->sighand->siglock)
2306{
2307	struct signal_struct *sig = current->signal;
2308
2309	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2310		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2311		struct task_struct *t;
2312
2313		/* signr will be recorded in task->jobctl for retries */
2314		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2315
2316		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2317		    unlikely(signal_group_exit(sig)))
2318			return false;
2319		/*
2320		 * There is no group stop already in progress.  We must
2321		 * initiate one now.
2322		 *
2323		 * While ptraced, a task may be resumed while group stop is
2324		 * still in effect and then receive a stop signal and
2325		 * initiate another group stop.  This deviates from the
2326		 * usual behavior as two consecutive stop signals can't
2327		 * cause two group stops when !ptraced.  That is why we
2328		 * also check !task_is_stopped(t) below.
2329		 *
2330		 * The condition can be distinguished by testing whether
2331		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2332		 * group_exit_code in such a case.
2333		 *
2334		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2335		 * an intervening stop signal is required to cause two
2336		 * continued events regardless of ptrace.
2337		 */
2338		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2339			sig->group_exit_code = signr;
2340
2341		sig->group_stop_count = 0;
2342
2343		if (task_set_jobctl_pending(current, signr | gstop))
2344			sig->group_stop_count++;
2345
2346		t = current;
2347		while_each_thread(current, t) {
2348			/*
2349			 * Setting state to TASK_STOPPED for a group
2350			 * stop is always done with the siglock held,
2351			 * so this check has no races.
2352			 */
2353			if (!task_is_stopped(t) &&
2354			    task_set_jobctl_pending(t, signr | gstop)) {
2355				sig->group_stop_count++;
2356				if (likely(!(t->ptrace & PT_SEIZED)))
2357					signal_wake_up(t, 0);
2358				else
2359					ptrace_trap_notify(t);
2360			}
2361		}
2362	}
2363
2364	if (likely(!current->ptrace)) {
2365		int notify = 0;
2366
2367		/*
2368		 * If there are no other threads in the group, or if there
2369		 * is a group stop in progress and we are the last to stop,
2370		 * report to the parent.
2371		 */
2372		if (task_participate_group_stop(current))
2373			notify = CLD_STOPPED;
2374
2375		set_special_state(TASK_STOPPED);
2376		spin_unlock_irq(&current->sighand->siglock);
2377
2378		/*
2379		 * Notify the parent of the group stop completion.  Because
2380		 * we're not holding either the siglock or tasklist_lock
2381		 * here, ptracer may attach in between; however, this is for
2382		 * group stop and should always be delivered to the real
2383		 * parent of the group leader.  The new ptracer will get
2384		 * its notification when this task transitions into
2385		 * TASK_TRACED.
2386		 */
2387		if (notify) {
2388			read_lock(&tasklist_lock);
2389			do_notify_parent_cldstop(current, false, notify);
2390			read_unlock(&tasklist_lock);
2391		}
2392
2393		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2394		cgroup_enter_frozen();
2395		freezable_schedule();
2396		return true;
2397	} else {
2398		/*
2399		 * While ptraced, group stop is handled by STOP trap.
2400		 * Schedule it and let the caller deal with it.
2401		 */
2402		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2403		return false;
2404	}
2405}
2406
2407/**
2408 * do_jobctl_trap - take care of ptrace jobctl traps
2409 *
2410 * When PT_SEIZED, it's used for both group stop and explicit
2411 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2412 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2413 * the stop signal; otherwise, %SIGTRAP.
2414 *
2415 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2416 * number as exit_code and no siginfo.
2417 *
2418 * CONTEXT:
2419 * Must be called with @current->sighand->siglock held, which may be
2420 * released and re-acquired before returning with intervening sleep.
2421 */
2422static void do_jobctl_trap(void)
2423{
2424	struct signal_struct *signal = current->signal;
2425	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2426
2427	if (current->ptrace & PT_SEIZED) {
2428		if (!signal->group_stop_count &&
2429		    !(signal->flags & SIGNAL_STOP_STOPPED))
2430			signr = SIGTRAP;
2431		WARN_ON_ONCE(!signr);
2432		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2433				 CLD_STOPPED);
2434	} else {
2435		WARN_ON_ONCE(!signr);
2436		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2437		current->exit_code = 0;
2438	}
2439}
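/*
 * A minimal userspace sketch (illustrative, not part of this file): with
 * PTRACE_SEIZE the trap taken above is reported as a PTRACE_EVENT_STOP,
 * encoded in bits 16 and up of the waitpid() status.  Assumes headers
 * that define PTRACE_SEIZE, PTRACE_INTERRUPT and PTRACE_EVENT_STOP.
 */
#if 0	/* userspace illustration only */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int seize_and_interrupt(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_SEIZE, pid, NULL, NULL) < 0 ||
	    ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) < 0)
		return -1;
	if (waitpid(pid, &status, 0) < 0)
		return -1;
	return (status >> 16) == PTRACE_EVENT_STOP;	/* jobctl trap? */
}
#endif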
2440
2441/**
2442 * do_freezer_trap - handle the freezer jobctl trap
2443 *
2444 * Puts the task into frozen state, unless the task is about to quit.
2445 * In that case it drops JOBCTL_TRAP_FREEZE.
2446 *
2447 * CONTEXT:
2448 * Must be called with @current->sighand->siglock held,
2449 * which is always released before returning.
2450 */
2451static void do_freezer_trap(void)
2452	__releases(&current->sighand->siglock)
2453{
2454	/*
2455	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2456	 * let's make another loop to give it a chance to be handled.
2457	 * In any case, we'll return.
2458	 */
2459	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2460	     JOBCTL_TRAP_FREEZE) {
2461		spin_unlock_irq(&current->sighand->siglock);
2462		return;
2463	}
2464
2465	/*
2466	 * Now we're sure that there is no pending fatal signal and no
2467	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2468	 * immediately (if there is a non-fatal signal pending), and
2469	 * put the task into sleep.
2470	 */
2471	__set_current_state(TASK_INTERRUPTIBLE);
2472	clear_thread_flag(TIF_SIGPENDING);
2473	spin_unlock_irq(&current->sighand->siglock);
2474	cgroup_enter_frozen();
2475	freezable_schedule();
2476}
2477
2478static int ptrace_signal(int signr, kernel_siginfo_t *info)
2479{
2480	/*
2481	 * We do not check sig_kernel_stop(signr) but set this marker
2482	 * unconditionally because we do not know whether debugger will
2483	 * change signr. This flag has no meaning unless we are going
2484	 * to stop after return from ptrace_stop(). In this case it will
2485	 * be checked in do_signal_stop(), we should only stop if it was
2486	 * not cleared by SIGCONT while we were sleeping. See also the
2487	 * comment in dequeue_signal().
2488	 */
2489	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2490	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2491
2492	/* We're back.  Did the debugger cancel the sig?  */
2493	signr = current->exit_code;
2494	if (signr == 0)
2495		return signr;
2496
2497	current->exit_code = 0;
2498
2499	/*
2500	 * Update the siginfo structure if the signal has
2501	 * changed.  If the debugger wanted something
2502	 * specific in the siginfo structure then it should
2503	 * have updated *info via PTRACE_SETSIGINFO.
2504	 */
2505	if (signr != info->si_signo) {
2506		clear_siginfo(info);
2507		info->si_signo = signr;
2508		info->si_errno = 0;
2509		info->si_code = SI_USER;
2510		rcu_read_lock();
2511		info->si_pid = task_pid_vnr(current->parent);
2512		info->si_uid = from_kuid_munged(current_user_ns(),
2513						task_uid(current->parent));
2514		rcu_read_unlock();
2515	}
2516
2517	/* If the (new) signal is now blocked, requeue it.  */
2518	if (sigismember(&current->blocked, signr)) {
2519		send_signal(signr, info, current, PIDTYPE_PID);
2520		signr = 0;
2521	}
2522
2523	return signr;
2524}
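/*
 * A minimal userspace sketch (illustrative, not part of this file): the
 * "did the debugger cancel the sig?" logic above is driven by the signal
 * number the tracer passes when resuming; 0 suppresses delivery.
 */
#if 0	/* userspace illustration only */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Resume a signal-delivery-stopped tracee, swallowing SIGWINCH but
 * forwarding everything else unchanged. */
int resume_filtered(pid_t pid, int status)
{
	int sig = WIFSTOPPED(status) ? WSTOPSIG(status) : 0;

	if (sig == SIGWINCH)
		sig = 0;	/* ptrace_signal() will see exit_code == 0 */
	return ptrace(PTRACE_CONT, pid, NULL, (void *)(long)sig);
}
#endif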
2525
2526bool get_signal(struct ksignal *ksig)
2527{
2528	struct sighand_struct *sighand = current->sighand;
2529	struct signal_struct *signal = current->signal;
2530	int signr;
2531
2532	if (unlikely(uprobe_deny_signal()))
2533		return false;
2534
2535	/*
2536	 * Do this once, we can't return to user-mode if freezing() == T.
2537	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2538	 * thus do not need another check after return.
2539	 */
2540	try_to_freeze();
2541
2542relock:
2543	spin_lock_irq(&sighand->siglock);
2544	/*
2545	 * Make sure we can safely read ->jobctl in task_work_add(). As Oleg
2546	 * states:
2547	 *
2548	 * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
2549	 * roughly have
2550	 *
2551	 *	task_work_add:				get_signal:
2552	 *	STORE(task->task_works, new_work);	STORE(task->jobctl);
2553	 *	mb();					mb();
2554	 *	LOAD(task->jobctl);			LOAD(task->task_works);
2555	 *
2556	 * and we can rely on STORE-MB-LOAD [in task_work_add].
2557	 */
2558	smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
2559	if (unlikely(current->task_works)) {
2560		spin_unlock_irq(&sighand->siglock);
2561		task_work_run();
2562		goto relock;
2563	}
2564
2565	/*
2566	 * Every stopped thread goes here after wakeup. Check to see if
2567	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2568	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2569	 */
2570	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2571		int why;
2572
2573		if (signal->flags & SIGNAL_CLD_CONTINUED)
2574			why = CLD_CONTINUED;
2575		else
2576			why = CLD_STOPPED;
2577
2578		signal->flags &= ~SIGNAL_CLD_MASK;
2579
2580		spin_unlock_irq(&sighand->siglock);
2581
2582		/*
2583		 * Notify the parent that we're continuing.  This event is
2584		 * always per-process and doesn't make a whole lot of sense
2585		 * for ptracers, who shouldn't consume the state via
2586		 * wait(2) either, but, for backward compatibility, notify
2587		 * the ptracer of the group leader too unless it's gonna be
2588		 * a duplicate.
2589		 */
2590		read_lock(&tasklist_lock);
2591		do_notify_parent_cldstop(current, false, why);
2592
2593		if (ptrace_reparented(current->group_leader))
2594			do_notify_parent_cldstop(current->group_leader,
2595						true, why);
2596		read_unlock(&tasklist_lock);
2597
2598		goto relock;
2599	}
2600
2601	/* Has this task already been marked for death? */
2602	if (signal_group_exit(signal)) {
2603		ksig->info.si_signo = signr = SIGKILL;
2604		sigdelset(&current->pending.signal, SIGKILL);
2605		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2606				&sighand->action[SIGKILL - 1]);
2607		recalc_sigpending();
2608		goto fatal;
2609	}
2610
2611	for (;;) {
2612		struct k_sigaction *ka;
2613
2614		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2615		    do_signal_stop(0))
2616			goto relock;
2617
2618		if (unlikely(current->jobctl &
2619			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2620			if (current->jobctl & JOBCTL_TRAP_MASK) {
2621				do_jobctl_trap();
2622				spin_unlock_irq(&sighand->siglock);
2623			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2624				do_freezer_trap();
2625
2626			goto relock;
2627		}
2628
2629		/*
2630		 * If the task is leaving the frozen state, let's update
2631		 * cgroup counters and reset the frozen bit.
2632		 */
2633		if (unlikely(cgroup_task_frozen(current))) {
2634			spin_unlock_irq(&sighand->siglock);
2635			cgroup_leave_frozen(false);
2636			goto relock;
2637		}
2638
2639		/*
2640		 * Signals generated by the execution of an instruction
2641		 * need to be delivered before any other pending signals
2642		 * so that the instruction pointer in the signal stack
2643		 * frame points to the faulting instruction.
2644		 */
2645		signr = dequeue_synchronous_signal(&ksig->info);
2646		if (!signr)
2647			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2648
2649		if (!signr)
2650			break; /* will return 0 */
2651
2652		if (unlikely(current->ptrace) && signr != SIGKILL) {
2653			signr = ptrace_signal(signr, &ksig->info);
2654			if (!signr)
2655				continue;
2656		}
2657
2658		ka = &sighand->action[signr-1];
2659
2660		/* Trace actually delivered signals. */
2661		trace_signal_deliver(signr, &ksig->info, ka);
2662
2663		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2664			continue;
2665		if (ka->sa.sa_handler != SIG_DFL) {
2666			/* Run the handler.  */
2667			ksig->ka = *ka;
2668
2669			if (ka->sa.sa_flags & SA_ONESHOT)
2670				ka->sa.sa_handler = SIG_DFL;
2671
2672			break; /* will return non-zero "signr" value */
2673		}
2674
2675		/*
2676		 * Now we are doing the default action for this signal.
2677		 */
2678		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2679			continue;
2680
2681		/*
2682		 * Global init gets no signals it doesn't want.
2683		 * Container-init gets no signals it doesn't want from same
2684		 * container.
2685		 *
2686		 * Note that if global/container-init sees a sig_kernel_only()
2687		 * signal here, the signal must have been generated internally
2688		 * or must have come from an ancestor namespace. In either
2689		 * case, the signal cannot be dropped.
2690		 */
2691		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2692				!sig_kernel_only(signr))
2693			continue;
2694
2695		if (sig_kernel_stop(signr)) {
2696			/*
2697			 * The default action is to stop all threads in
2698			 * the thread group.  The job control signals
2699			 * do nothing in an orphaned pgrp, but SIGSTOP
2700			 * always works.  Note that siglock needs to be
2701			 * dropped during the call to is_orphaned_pgrp()
2702			 * because of lock ordering with tasklist_lock.
2703			 * This allows an intervening SIGCONT to be posted.
2704			 * We need to check for that and bail out if necessary.
2705			 */
2706			if (signr != SIGSTOP) {
2707				spin_unlock_irq(&sighand->siglock);
2708
2709				/* signals can be posted during this window */
2710
2711				if (is_current_pgrp_orphaned())
2712					goto relock;
2713
2714				spin_lock_irq(&sighand->siglock);
2715			}
2716
2717			if (likely(do_signal_stop(ksig->info.si_signo))) {
2718				/* It released the siglock.  */
2719				goto relock;
2720			}
2721
2722			/*
2723			 * We didn't actually stop, due to a race
2724			 * with SIGCONT or something like that.
2725			 */
2726			continue;
2727		}
2728
2729	fatal:
2730		spin_unlock_irq(&sighand->siglock);
2731		if (unlikely(cgroup_task_frozen(current)))
2732			cgroup_leave_frozen(true);
2733
2734		/*
2735		 * Anything else is fatal, maybe with a core dump.
2736		 */
2737		current->flags |= PF_SIGNALED;
2738
2739		if (sig_kernel_coredump(signr)) {
2740			if (print_fatal_signals)
2741				print_fatal_signal(ksig->info.si_signo);
2742			proc_coredump_connector(current);
2743			/*
2744			 * If it was able to dump core, this kills all
2745			 * other threads in the group and synchronizes with
2746			 * their demise.  If we lost the race with another
2747			 * thread getting here, it set group_exit_code
2748			 * first and our do_group_exit call below will use
2749			 * that value and ignore the one we pass it.
2750			 */
2751			do_coredump(&ksig->info);
2752		}
2753
2754		/*
2755		 * Death signals, no core dump.
2756		 */
2757		do_group_exit(ksig->info.si_signo);
2758		/* NOTREACHED */
2759	}
2760	spin_unlock_irq(&sighand->siglock);
2761
2762	ksig->sig = signr;
2763	return ksig->sig > 0;
2764}
2765
2766/**
2767 * signal_delivered - update blocked mask after a signal was delivered
2768 * @ksig:		kernel signal struct
2769 * @stepping:		nonzero if debugger single-step or block-step in use
2770 *
2771 * This function should be called when a signal has successfully been
2772 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2773 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2774 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2775 */
2776static void signal_delivered(struct ksignal *ksig, int stepping)
2777{
2778	sigset_t blocked;
2779
2780	/* A signal was successfully delivered, and the
2781	   saved sigmask was stored on the signal frame,
2782	   and will be restored by sigreturn.  So we can
2783	   simply clear the restore sigmask flag.  */
2784	clear_restore_sigmask();
2785
2786	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2787	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2788		sigaddset(&blocked, ksig->sig);
2789	set_current_blocked(&blocked);
2790	tracehook_signal_handler(stepping);
2791}
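/*
 * A minimal userspace sketch (illustrative, not part of this file): the
 * mask computed above is how sa_mask takes effect while a handler runs,
 * and how SA_NODEFER exempts the delivered signal itself.
 */
#if 0	/* userspace illustration only */
#include <signal.h>

void install_handler(void (*fn)(int))
{
	struct sigaction sa = { 0 };

	sa.sa_handler = fn;
	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGTERM);	/* also blocked inside fn */
	sa.sa_flags = SA_NODEFER;		/* but SIGUSR1 itself is not */
	sigaction(SIGUSR1, &sa, NULL);
}
#endif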
2792
2793void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2794{
2795	if (failed)
2796		force_sigsegv(ksig->sig);
2797	else
2798		signal_delivered(ksig, stepping);
2799}
2800
2801/*
2802 * It could be that complete_signal() picked us to notify about the
2803 * group-wide signal. Other threads should be notified now to take
2804 * the shared signals in @which since we will not.
2805 */
2806static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2807{
2808	sigset_t retarget;
2809	struct task_struct *t;
2810
2811	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2812	if (sigisemptyset(&retarget))
2813		return;
2814
2815	t = tsk;
2816	while_each_thread(tsk, t) {
2817		if (t->flags & PF_EXITING)
2818			continue;
2819
2820		if (!has_pending_signals(&retarget, &t->blocked))
2821			continue;
2822		/* Remove the signals this thread can handle. */
2823		sigandsets(&retarget, &retarget, &t->blocked);
2824
2825		if (!signal_pending(t))
2826			signal_wake_up(t, 0);
2827
2828		if (sigisemptyset(&retarget))
2829			break;
2830	}
2831}
2832
2833void exit_signals(struct task_struct *tsk)
2834{
2835	int group_stop = 0;
2836	sigset_t unblocked;
2837
2838	/*
2839	 * @tsk is about to have PF_EXITING set - lock out users which
2840	 * expect stable threadgroup.
2841	 */
2842	cgroup_threadgroup_change_begin(tsk);
2843
2844	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2845		tsk->flags |= PF_EXITING;
2846		cgroup_threadgroup_change_end(tsk);
2847		return;
2848	}
2849
2850	spin_lock_irq(&tsk->sighand->siglock);
2851	/*
2852	 * From now this task is not visible for group-wide signals,
2853	 * see wants_signal(), do_signal_stop().
2854	 */
2855	tsk->flags |= PF_EXITING;
2856
2857	cgroup_threadgroup_change_end(tsk);
2858
2859	if (!signal_pending(tsk))
2860		goto out;
2861
2862	unblocked = tsk->blocked;
2863	signotset(&unblocked);
2864	retarget_shared_pending(tsk, &unblocked);
2865
2866	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2867	    task_participate_group_stop(tsk))
2868		group_stop = CLD_STOPPED;
2869out:
2870	spin_unlock_irq(&tsk->sighand->siglock);
2871
2872	/*
2873	 * If group stop has completed, deliver the notification.  This
2874	 * should always go to the real parent of the group leader.
2875	 */
2876	if (unlikely(group_stop)) {
2877		read_lock(&tasklist_lock);
2878		do_notify_parent_cldstop(tsk, false, group_stop);
2879		read_unlock(&tasklist_lock);
2880	}
2881}
2882
2883/*
2884 * System call entry points.
2885 */
2886
2887/**
2888 *  sys_restart_syscall - restart a system call
2889 */
2890SYSCALL_DEFINE0(restart_syscall)
2891{
2892	struct restart_block *restart = &current->restart_block;
2893	return restart->fn(restart);
2894}
2895
2896long do_no_restart_syscall(struct restart_block *param)
2897{
2898	return -EINTR;
2899}
2900
2901static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2902{
2903	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2904		sigset_t newblocked;
2905		/* A set of now blocked but previously unblocked signals. */
2906		sigandnsets(&newblocked, newset, &current->blocked);
2907		retarget_shared_pending(tsk, &newblocked);
2908	}
2909	tsk->blocked = *newset;
2910	recalc_sigpending();
2911}
2912
2913/**
2914 * set_current_blocked - change current->blocked mask
2915 * @newset: new mask
2916 *
2917 * It is wrong to change ->blocked directly, this helper should be used
2918 * to ensure the process can't miss a shared signal we are going to block.
2919 */
2920void set_current_blocked(sigset_t *newset)
2921{
2922	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2923	__set_current_blocked(newset);
2924}
2925
2926void __set_current_blocked(const sigset_t *newset)
2927{
2928	struct task_struct *tsk = current;
2929
2930	/*
2931	 * In case the signal mask hasn't changed, there is nothing we need
2932	 * to do. The current->blocked shouldn't be modified by other task.
2933	 */
2934	if (sigequalsets(&tsk->blocked, newset))
2935		return;
2936
2937	spin_lock_irq(&tsk->sighand->siglock);
2938	__set_task_blocked(tsk, newset);
2939	spin_unlock_irq(&tsk->sighand->siglock);
2940}
2941
2942/*
2943 * This is also useful for kernel threads that want to temporarily
2944 * (or permanently) block certain signals.
2945 *
2946 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2947 * interface happily blocks "unblockable" signals like SIGKILL
2948 * and friends.
2949 */
2950int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2951{
2952	struct task_struct *tsk = current;
2953	sigset_t newset;
2954
2955	/* Lockless, only current can change ->blocked, never from irq */
2956	if (oldset)
2957		*oldset = tsk->blocked;
2958
2959	switch (how) {
2960	case SIG_BLOCK:
2961		sigorsets(&newset, &tsk->blocked, set);
2962		break;
2963	case SIG_UNBLOCK:
2964		sigandnsets(&newset, &tsk->blocked, set);
2965		break;
2966	case SIG_SETMASK:
2967		newset = *set;
2968		break;
2969	default:
2970		return -EINVAL;
2971	}
2972
2973	__set_current_blocked(&newset);
2974	return 0;
2975}
2976EXPORT_SYMBOL(sigprocmask);
2977
2978/*
2979 * This API helps set app-provided sigmasks.
2980 *
2981 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2982 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2983 *
2984 * Note that it does set_restore_sigmask() in advance, so it must always
2985 * be paired with restore_saved_sigmask_unless() before return from syscall.
2986 */
2987int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2988{
2989	sigset_t kmask;
2990
2991	if (!umask)
2992		return 0;
2993	if (sigsetsize != sizeof(sigset_t))
2994		return -EINVAL;
2995	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2996		return -EFAULT;
2997
2998	set_restore_sigmask();
2999	current->saved_sigmask = current->blocked;
3000	set_current_blocked(&kmask);
3001
3002	return 0;
3003}
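/*
 * A minimal userspace sketch (illustrative, not part of this file):
 * ppoll(2) is one consumer of set_user_sigmask(); installing the caller's
 * mask and sleeping happen atomically with respect to signal delivery.
 */
#if 0	/* userspace illustration only */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>

int wait_fd_or_sigint(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	sigset_t allow;

	sigfillset(&allow);
	sigdelset(&allow, SIGINT);	/* only SIGINT may interrupt us */
	return ppoll(&pfd, 1, NULL, &allow);
}
#endif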
3004
3005#ifdef CONFIG_COMPAT
3006int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3007			    size_t sigsetsize)
3008{
3009	sigset_t kmask;
3010
3011	if (!umask)
3012		return 0;
3013	if (sigsetsize != sizeof(compat_sigset_t))
3014		return -EINVAL;
3015	if (get_compat_sigset(&kmask, umask))
3016		return -EFAULT;
3017
3018	set_restore_sigmask();
3019	current->saved_sigmask = current->blocked;
3020	set_current_blocked(&kmask);
3021
3022	return 0;
3023}
3024#endif
3025
3026/**
3027 *  sys_rt_sigprocmask - change the list of currently blocked signals
3028 *  @how: whether to add, remove, or set signals
3029 *  @nset: new set of signals to block, or NULL to leave the mask unchanged
3030 *  @oset: previous value of signal mask if non-null
3031 *  @sigsetsize: size of sigset_t type
3032 */
3033SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3034		sigset_t __user *, oset, size_t, sigsetsize)
3035{
3036	sigset_t old_set, new_set;
3037	int error;
3038
3039	/* XXX: Don't preclude handling different sized sigset_t's.  */
3040	if (sigsetsize != sizeof(sigset_t))
3041		return -EINVAL;
3042
3043	old_set = current->blocked;
3044
3045	if (nset) {
3046		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3047			return -EFAULT;
3048		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3049
3050		error = sigprocmask(how, &new_set, NULL);
3051		if (error)
3052			return error;
3053	}
3054
3055	if (oset) {
3056		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3057			return -EFAULT;
3058	}
3059
3060	return 0;
3061}
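/*
 * A minimal userspace sketch (illustrative, not part of this file): the
 * SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK arithmetic mirrors the switch in
 * sigprocmask() above; attempts to block SIGKILL/SIGSTOP are silently
 * dropped by the kernel.
 */
#if 0	/* userspace illustration only */
#include <signal.h>

int block_sigusr1(sigset_t *old)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	return sigprocmask(SIG_BLOCK, &set, old);	/* *old = previous mask */
}
#endif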
3062
3063#ifdef CONFIG_COMPAT
3064COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3065		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3066{
3067	sigset_t old_set = current->blocked;
3068
3069	/* XXX: Don't preclude handling different sized sigset_t's.  */
3070	if (sigsetsize != sizeof(sigset_t))
3071		return -EINVAL;
3072
3073	if (nset) {
3074		sigset_t new_set;
3075		int error;
3076		if (get_compat_sigset(&new_set, nset))
3077			return -EFAULT;
3078		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3079
3080		error = sigprocmask(how, &new_set, NULL);
3081		if (error)
3082			return error;
3083	}
3084	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3085}
3086#endif
3087
3088static void do_sigpending(sigset_t *set)
3089{
3090	spin_lock_irq(&current->sighand->siglock);
3091	sigorsets(set, &current->pending.signal,
3092		  &current->signal->shared_pending.signal);
3093	spin_unlock_irq(&current->sighand->siglock);
3094
3095	/* Outside the lock because only this thread touches it.  */
3096	sigandsets(set, &current->blocked, set);
3097}
3098
3099/**
3100 *  sys_rt_sigpending - examine a pending signal that has been raised
3101 *			while blocked
3102 *  @uset: stores pending signals
3103 *  @sigsetsize: size of sigset_t type or smaller
3104 */
3105SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3106{
3107	sigset_t set;
3108
3109	if (sigsetsize > sizeof(*uset))
3110		return -EINVAL;
3111
3112	do_sigpending(&set);
3113
3114	if (copy_to_user(uset, &set, sigsetsize))
3115		return -EFAULT;
3116
3117	return 0;
3118}
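/*
 * A minimal userspace sketch (illustrative, not part of this file):
 * sigpending(2) returns exactly the intersection computed above: signals
 * raised while blocked, whether thread-private or process-wide.
 */
#if 0	/* userspace illustration only */
#include <signal.h>

int sigusr1_is_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) < 0)
		return -1;
	return sigismember(&pending, SIGUSR1);
}
#endif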
3119
3120#ifdef CONFIG_COMPAT
3121COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3122		compat_size_t, sigsetsize)
3123{
3124	sigset_t set;
3125
3126	if (sigsetsize > sizeof(*uset))
3127		return -EINVAL;
3128
3129	do_sigpending(&set);
3130
3131	return put_compat_sigset(uset, &set, sigsetsize);
3132}
3133#endif
3134
3135static const struct {
3136	unsigned char limit, layout;
3137} sig_sicodes[] = {
3138	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3139	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3140	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3141	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3142	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3143#if defined(SIGEMT)
3144	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3145#endif
3146	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3147	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3148	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3149};
3150
3151static bool known_siginfo_layout(unsigned sig, int si_code)
3152{
3153	if (si_code == SI_KERNEL)
3154		return true;
3155	else if ((si_code > SI_USER)) {
3156		if (sig_specific_sicodes(sig)) {
3157			if (si_code <= sig_sicodes[sig].limit)
3158				return true;
3159		}
3160		else if (si_code <= NSIGPOLL)
3161			return true;
3162	}
3163	else if (si_code >= SI_DETHREAD)
3164		return true;
3165	else if (si_code == SI_ASYNCNL)
3166		return true;
3167	return false;
3168}
3169
3170enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3171{
3172	enum siginfo_layout layout = SIL_KILL;
3173	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3174		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3175		    (si_code <= sig_sicodes[sig].limit)) {
3176			layout = sig_sicodes[sig].layout;
3177			/* Handle the exceptions */
3178			if ((sig == SIGBUS) &&
3179			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3180				layout = SIL_FAULT_MCEERR;
3181			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3182				layout = SIL_FAULT_BNDERR;
3183#ifdef SEGV_PKUERR
3184			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3185				layout = SIL_FAULT_PKUERR;
3186#endif
3187		}
3188		else if (si_code <= NSIGPOLL)
3189			layout = SIL_POLL;
3190	} else {
3191		if (si_code == SI_TIMER)
3192			layout = SIL_TIMER;
3193		else if (si_code == SI_SIGIO)
3194			layout = SIL_POLL;
3195		else if (si_code < 0)
3196			layout = SIL_RT;
3197	}
3198	return layout;
3199}
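/*
 * A minimal userspace sketch (illustrative, not part of this file): the
 * layout chosen above determines which siginfo fields a handler may read;
 * SIGSEGV uses a SIL_FAULT layout, so si_addr is meaningful.
 */
#if 0	/* userspace illustration only */
#include <signal.h>
#include <unistd.h>

static void on_segv(int sig, siginfo_t *si, void *uctx)
{
	(void)sig; (void)uctx;
	void *fault_addr = si->si_addr;	/* valid for SIL_FAULT signals */

	(void)fault_addr;
	_exit(1);
}

void install_segv_handler(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = on_segv;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);
}
#endif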
3200
3201static inline char __user *si_expansion(const siginfo_t __user *info)
3202{
3203	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3204}
3205
3206int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3207{
3208	char __user *expansion = si_expansion(to);
3209	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3210		return -EFAULT;
3211	if (clear_user(expansion, SI_EXPANSION_SIZE))
3212		return -EFAULT;
3213	return 0;
3214}
3215
3216static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3217				       const siginfo_t __user *from)
3218{
3219	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3220		char __user *expansion = si_expansion(from);
3221		char buf[SI_EXPANSION_SIZE];
3222		int i;
3223		/*
3224		 * An unknown si_code might need more than
3225		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3226		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3227		 * will return this data to userspace exactly.
3228		 */
3229		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3230			return -EFAULT;
3231		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3232			if (buf[i] != 0)
3233				return -E2BIG;
3234		}
3235	}
3236	return 0;
3237}
3238
3239static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3240				    const siginfo_t __user *from)
3241{
3242	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3243		return -EFAULT;
3244	to->si_signo = signo;
3245	return post_copy_siginfo_from_user(to, from);
3246}
3247
3248int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3249{
3250	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3251		return -EFAULT;
3252	return post_copy_siginfo_from_user(to, from);
3253}
3254
3255#ifdef CONFIG_COMPAT
3256/**
3257 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3258 * @to: compat siginfo destination
3259 * @from: kernel siginfo source
3260 *
3261 * Note: This function does not work properly for SIGCHLD on x32, but
3262 * fortunately it doesn't have to.  The only valid callers for this function are
3263 * copy_siginfo_to_user32 (which is overridden for x32) and the coredump code.
3264 * The latter does not care because SIGCHLD will never cause a coredump.
3265 */
3266void copy_siginfo_to_external32(struct compat_siginfo *to,
3267		const struct kernel_siginfo *from)
3268{
3269	memset(to, 0, sizeof(*to));
3270
3271	to->si_signo = from->si_signo;
3272	to->si_errno = from->si_errno;
3273	to->si_code  = from->si_code;
3274	switch(siginfo_layout(from->si_signo, from->si_code)) {
3275	case SIL_KILL:
3276		to->si_pid = from->si_pid;
3277		to->si_uid = from->si_uid;
3278		break;
3279	case SIL_TIMER:
3280		to->si_tid     = from->si_tid;
3281		to->si_overrun = from->si_overrun;
3282		to->si_int     = from->si_int;
3283		break;
3284	case SIL_POLL:
3285		to->si_band = from->si_band;
3286		to->si_fd   = from->si_fd;
3287		break;
3288	case SIL_FAULT:
3289		to->si_addr = ptr_to_compat(from->si_addr);
3290#ifdef __ARCH_SI_TRAPNO
3291		to->si_trapno = from->si_trapno;
3292#endif
3293		break;
3294	case SIL_FAULT_MCEERR:
3295		to->si_addr = ptr_to_compat(from->si_addr);
3296#ifdef __ARCH_SI_TRAPNO
3297		to->si_trapno = from->si_trapno;
3298#endif
3299		to->si_addr_lsb = from->si_addr_lsb;
3300		break;
3301	case SIL_FAULT_BNDERR:
3302		to->si_addr = ptr_to_compat(from->si_addr);
3303#ifdef __ARCH_SI_TRAPNO
3304		to->si_trapno = from->si_trapno;
3305#endif
3306		to->si_lower = ptr_to_compat(from->si_lower);
3307		to->si_upper = ptr_to_compat(from->si_upper);
3308		break;
3309	case SIL_FAULT_PKUERR:
3310		to->si_addr = ptr_to_compat(from->si_addr);
3311#ifdef __ARCH_SI_TRAPNO
3312		to->si_trapno = from->si_trapno;
3313#endif
3314		to->si_pkey = from->si_pkey;
3315		break;
3316	case SIL_CHLD:
3317		to->si_pid = from->si_pid;
3318		to->si_uid = from->si_uid;
3319		to->si_status = from->si_status;
3320		to->si_utime = from->si_utime;
3321		to->si_stime = from->si_stime;
3322		break;
3323	case SIL_RT:
3324		to->si_pid = from->si_pid;
3325		to->si_uid = from->si_uid;
3326		to->si_int = from->si_int;
3327		break;
3328	case SIL_SYS:
3329		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3330		to->si_syscall   = from->si_syscall;
3331		to->si_arch      = from->si_arch;
3332		break;
3333	}
3334}
3335
3336int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3337			   const struct kernel_siginfo *from)
3338{
3339	struct compat_siginfo new;
3340
3341	copy_siginfo_to_external32(&new, from);
3342	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3343		return -EFAULT;
3344	return 0;
3345}
3346
3347static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3348					 const struct compat_siginfo *from)
3349{
3350	clear_siginfo(to);
3351	to->si_signo = from->si_signo;
3352	to->si_errno = from->si_errno;
3353	to->si_code  = from->si_code;
3354	switch(siginfo_layout(from->si_signo, from->si_code)) {
3355	case SIL_KILL:
3356		to->si_pid = from->si_pid;
3357		to->si_uid = from->si_uid;
3358		break;
3359	case SIL_TIMER:
3360		to->si_tid     = from->si_tid;
3361		to->si_overrun = from->si_overrun;
3362		to->si_int     = from->si_int;
3363		break;
3364	case SIL_POLL:
3365		to->si_band = from->si_band;
3366		to->si_fd   = from->si_fd;
3367		break;
3368	case SIL_FAULT:
3369		to->si_addr = compat_ptr(from->si_addr);
3370#ifdef __ARCH_SI_TRAPNO
3371		to->si_trapno = from->si_trapno;
3372#endif
3373		break;
3374	case SIL_FAULT_MCEERR:
3375		to->si_addr = compat_ptr(from->si_addr);
3376#ifdef __ARCH_SI_TRAPNO
3377		to->si_trapno = from->si_trapno;
3378#endif
3379		to->si_addr_lsb = from->si_addr_lsb;
3380		break;
3381	case SIL_FAULT_BNDERR:
3382		to->si_addr = compat_ptr(from->si_addr);
3383#ifdef __ARCH_SI_TRAPNO
3384		to->si_trapno = from->si_trapno;
3385#endif
3386		to->si_lower = compat_ptr(from->si_lower);
3387		to->si_upper = compat_ptr(from->si_upper);
3388		break;
3389	case SIL_FAULT_PKUERR:
3390		to->si_addr = compat_ptr(from->si_addr);
3391#ifdef __ARCH_SI_TRAPNO
3392		to->si_trapno = from->si_trapno;
3393#endif
3394		to->si_pkey = from->si_pkey;
3395		break;
3396	case SIL_CHLD:
3397		to->si_pid    = from->si_pid;
3398		to->si_uid    = from->si_uid;
3399		to->si_status = from->si_status;
3400#ifdef CONFIG_X86_X32_ABI
3401		if (in_x32_syscall()) {
3402			to->si_utime = from->_sifields._sigchld_x32._utime;
3403			to->si_stime = from->_sifields._sigchld_x32._stime;
3404		} else
3405#endif
3406		{
3407			to->si_utime = from->si_utime;
3408			to->si_stime = from->si_stime;
3409		}
3410		break;
3411	case SIL_RT:
3412		to->si_pid = from->si_pid;
3413		to->si_uid = from->si_uid;
3414		to->si_int = from->si_int;
3415		break;
3416	case SIL_SYS:
3417		to->si_call_addr = compat_ptr(from->si_call_addr);
3418		to->si_syscall   = from->si_syscall;
3419		to->si_arch      = from->si_arch;
3420		break;
3421	}
3422	return 0;
3423}
3424
3425static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3426				      const struct compat_siginfo __user *ufrom)
3427{
3428	struct compat_siginfo from;
3429
3430	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3431		return -EFAULT;
3432
3433	from.si_signo = signo;
3434	return post_copy_siginfo_from_user32(to, &from);
3435}
3436
3437int copy_siginfo_from_user32(struct kernel_siginfo *to,
3438			     const struct compat_siginfo __user *ufrom)
3439{
3440	struct compat_siginfo from;
3441
3442	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3443		return -EFAULT;
3444
3445	return post_copy_siginfo_from_user32(to, &from);
3446}
3447#endif /* CONFIG_COMPAT */
3448
3449/**
3450 *  do_sigtimedwait - wait for queued signals specified in @which
3451 *  @which: queued signals to wait for
3452 *  @info: if non-null, the signal's siginfo is returned here
3453 *  @ts: upper bound on process time suspension
3454 */
3455static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3456		    const struct timespec64 *ts)
3457{
3458	ktime_t *to = NULL, timeout = KTIME_MAX;
3459	struct task_struct *tsk = current;
3460	sigset_t mask = *which;
3461	int sig, ret = 0;
3462
3463	if (ts) {
3464		if (!timespec64_valid(ts))
3465			return -EINVAL;
3466		timeout = timespec64_to_ktime(*ts);
3467		to = &timeout;
3468	}
3469
3470	/*
3471	 * Invert the set of allowed signals to get those we want to block.
3472	 */
3473	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3474	signotset(&mask);
3475
3476	spin_lock_irq(&tsk->sighand->siglock);
3477	sig = dequeue_signal(tsk, &mask, info);
3478	if (!sig && timeout) {
3479		/*
3480		 * None ready, temporarily unblock those we're interested in
3481		 * while we are sleeping so that we'll be awakened when
3482		 * they arrive. Unblocking is always fine, we can avoid
3483		 * set_current_blocked().
3484		 */
3485		tsk->real_blocked = tsk->blocked;
3486		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3487		recalc_sigpending();
3488		spin_unlock_irq(&tsk->sighand->siglock);
3489
3490		__set_current_state(TASK_INTERRUPTIBLE);
3491		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3492							 HRTIMER_MODE_REL);
3493		spin_lock_irq(&tsk->sighand->siglock);
3494		__set_task_blocked(tsk, &tsk->real_blocked);
3495		sigemptyset(&tsk->real_blocked);
3496		sig = dequeue_signal(tsk, &mask, info);
3497	}
3498	spin_unlock_irq(&tsk->sighand->siglock);
3499
3500	if (sig)
3501		return sig;
3502	return ret ? -EINTR : -EAGAIN;
3503}
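/*
 * A minimal userspace sketch (illustrative, not part of this file): the
 * block/sleep/dequeue sequence above backs sigtimedwait(2).  The awaited
 * signals must already be blocked, or they may be delivered to a handler
 * instead of being dequeued here.
 */
#if 0	/* userspace illustration only */
#include <signal.h>
#include <time.h>

int wait_sigusr1_1s(siginfo_t *si)
{
	sigset_t set;
	struct timespec ts = { .tv_sec = 1 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep it queued for us */
	return sigtimedwait(&set, si, &ts);	/* -1/EAGAIN on timeout */
}
#endif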
3504
3505/**
3506 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3507 *			in @uthese
3508 *  @uthese: queued signals to wait for
3509 *  @uinfo: if non-null, the signal's siginfo is returned here
3510 *  @uts: upper bound on process time suspension
3511 *  @sigsetsize: size of sigset_t type
3512 */
3513SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3514		siginfo_t __user *, uinfo,
3515		const struct __kernel_timespec __user *, uts,
3516		size_t, sigsetsize)
3517{
3518	sigset_t these;
3519	struct timespec64 ts;
3520	kernel_siginfo_t info;
3521	int ret;
3522
3523	/* XXX: Don't preclude handling different sized sigset_t's.  */
3524	if (sigsetsize != sizeof(sigset_t))
3525		return -EINVAL;
3526
3527	if (copy_from_user(&these, uthese, sizeof(these)))
3528		return -EFAULT;
3529
3530	if (uts) {
3531		if (get_timespec64(&ts, uts))
3532			return -EFAULT;
3533	}
3534
3535	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3536
3537	if (ret > 0 && uinfo) {
3538		if (copy_siginfo_to_user(uinfo, &info))
3539			ret = -EFAULT;
3540	}
3541
3542	return ret;
3543}
3544
3545#ifdef CONFIG_COMPAT_32BIT_TIME
3546SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3547		siginfo_t __user *, uinfo,
3548		const struct old_timespec32 __user *, uts,
3549		size_t, sigsetsize)
3550{
3551	sigset_t these;
3552	struct timespec64 ts;
3553	kernel_siginfo_t info;
3554	int ret;
3555
3556	if (sigsetsize != sizeof(sigset_t))
3557		return -EINVAL;
3558
3559	if (copy_from_user(&these, uthese, sizeof(these)))
3560		return -EFAULT;
3561
3562	if (uts) {
3563		if (get_old_timespec32(&ts, uts))
3564			return -EFAULT;
3565	}
3566
3567	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3568
3569	if (ret > 0 && uinfo) {
3570		if (copy_siginfo_to_user(uinfo, &info))
3571			ret = -EFAULT;
3572	}
3573
3574	return ret;
3575}
3576#endif
3577
3578#ifdef CONFIG_COMPAT
3579COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3580		struct compat_siginfo __user *, uinfo,
3581		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3582{
3583	sigset_t s;
3584	struct timespec64 t;
3585	kernel_siginfo_t info;
3586	long ret;
3587
3588	if (sigsetsize != sizeof(sigset_t))
3589		return -EINVAL;
3590
3591	if (get_compat_sigset(&s, uthese))
3592		return -EFAULT;
3593
3594	if (uts) {
3595		if (get_timespec64(&t, uts))
3596			return -EFAULT;
3597	}
3598
3599	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3600
3601	if (ret > 0 && uinfo) {
3602		if (copy_siginfo_to_user32(uinfo, &info))
3603			ret = -EFAULT;
3604	}
3605
3606	return ret;
3607}
3608
3609#ifdef CONFIG_COMPAT_32BIT_TIME
3610COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3611		struct compat_siginfo __user *, uinfo,
3612		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3613{
3614	sigset_t s;
3615	struct timespec64 t;
3616	kernel_siginfo_t info;
3617	long ret;
3618
3619	if (sigsetsize != sizeof(sigset_t))
3620		return -EINVAL;
3621
3622	if (get_compat_sigset(&s, uthese))
3623		return -EFAULT;
3624
3625	if (uts) {
3626		if (get_old_timespec32(&t, uts))
3627			return -EFAULT;
3628	}
3629
3630	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3631
3632	if (ret > 0 && uinfo) {
3633		if (copy_siginfo_to_user32(uinfo, &info))
3634			ret = -EFAULT;
3635	}
3636
3637	return ret;
3638}
3639#endif
3640#endif
3641
3642static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3643{
3644	clear_siginfo(info);
3645	info->si_signo = sig;
3646	info->si_errno = 0;
3647	info->si_code = SI_USER;
3648	info->si_pid = task_tgid_vnr(current);
3649	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3650}
3651
3652/**
3653 *  sys_kill - send a signal to a process
3654 *  @pid: the PID of the process
3655 *  @sig: signal to be sent
3656 */
3657SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3658{
3659	struct kernel_siginfo info;
3660
3661	prepare_kill_siginfo(sig, &info);
3662
3663	return kill_something_info(sig, &info, pid);
3664}
3665
3666/*
3667 * Verify that the signaler and signalee either are in the same pid namespace
3668 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3669 * namespace.
3670 */
3671static bool access_pidfd_pidns(struct pid *pid)
3672{
3673	struct pid_namespace *active = task_active_pid_ns(current);
3674	struct pid_namespace *p = ns_of_pid(pid);
3675
3676	for (;;) {
3677		if (!p)
3678			return false;
3679		if (p == active)
3680			break;
3681		p = p->parent;
3682	}
3683
3684	return true;
3685}
3686
3687static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3688{
3689#ifdef CONFIG_COMPAT
3690	/*
3691	 * Avoid hooking up compat syscalls and instead handle necessary
3692	 * conversions here. Note, this is a stop-gap measure and should not be
3693	 * considered a generic solution.
3694	 */
3695	if (in_compat_syscall())
3696		return copy_siginfo_from_user32(
3697			kinfo, (struct compat_siginfo __user *)info);
3698#endif
3699	return copy_siginfo_from_user(kinfo, info);
3700}
3701
3702static struct pid *pidfd_to_pid(const struct file *file)
3703{
3704	struct pid *pid;
3705
3706	pid = pidfd_pid(file);
3707	if (!IS_ERR(pid))
3708		return pid;
3709
3710	return tgid_pidfd_to_pid(file);
3711}
3712
3713/**
3714 * sys_pidfd_send_signal - Signal a process through a pidfd
3715 * @pidfd:  file descriptor of the process
3716 * @sig:    signal to send
3717 * @info:   signal info
3718 * @flags:  future flags
3719 *
3720 * The syscall currently only signals via PIDTYPE_PID which covers
3721 * kill(<positive-pid>, <signal>). It does not signal threads or process
3722 * groups.
3723 * In order to extend the syscall to threads and process groups the @flags
3724 * argument should be used. In essence, the @flags argument will determine
3725 * what is signaled and not the file descriptor itself. In other words,
3726 * grouping is a property of the flags argument not a property of the file
3727 * descriptor.
3728 *
3729 * Return: 0 on success, negative errno on failure
3730 */
3731SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3732		siginfo_t __user *, info, unsigned int, flags)
3733{
3734	int ret;
3735	struct fd f;
3736	struct pid *pid;
3737	kernel_siginfo_t kinfo;
3738
3739	/* Enforce that flags is 0 until we add an extension. */
3740	if (flags)
3741		return -EINVAL;
3742
3743	f = fdget(pidfd);
3744	if (!f.file)
3745		return -EBADF;
3746
3747	/* Is this a pidfd? */
3748	pid = pidfd_to_pid(f.file);
3749	if (IS_ERR(pid)) {
3750		ret = PTR_ERR(pid);
3751		goto err;
3752	}
3753
3754	ret = -EINVAL;
3755	if (!access_pidfd_pidns(pid))
3756		goto err;
3757
3758	if (info) {
3759		ret = copy_siginfo_from_user_any(&kinfo, info);
3760		if (unlikely(ret))
3761			goto err;
3762
3763		ret = -EINVAL;
3764		if (unlikely(sig != kinfo.si_signo))
3765			goto err;
3766
3767		/* Only allow sending arbitrary signals to yourself. */
3768		ret = -EPERM;
3769		if ((task_pid(current) != pid) &&
3770		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3771			goto err;
3772	} else {
3773		prepare_kill_siginfo(sig, &kinfo);
3774	}
3775
3776	ret = kill_pid_info(sig, &kinfo, pid);
3777
3778err:
3779	fdput(f);
3780	return ret;
3781}
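
/*
 * Example (userspace sketch, assuming kernel headers that define
 * __NR_pidfd_send_signal): an open /proc/<pid> directory doubles as
 * a pidfd here, per pidfd_to_pid() above.  PID 1234 is illustrative.
 */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int pidfd = open("/proc/1234", O_RDONLY | O_DIRECTORY | O_CLOEXEC);

	if (pidfd < 0)
		return 1;
	/* NULL info: the kernel fills in SI_USER details, as in kill(2). */
	if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
		perror("pidfd_send_signal");
	close(pidfd);
	return 0;
}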
3782
3783static int
3784do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3785{
3786	struct task_struct *p;
3787	int error = -ESRCH;
3788
3789	rcu_read_lock();
3790	p = find_task_by_vpid(pid);
3791	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3792		error = check_kill_permission(sig, info, p);
3793		/*
3794		 * The null signal is a permissions and process existence
3795		 * probe.  No signal is actually delivered.
3796		 */
3797		if (!error && sig) {
3798			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3799			/*
3800			 * If lock_task_sighand() failed we pretend the task
3801			 * dies after receiving the signal. The window is tiny,
3802			 * and the signal is private anyway.
3803			 */
3804			if (unlikely(error == -ESRCH))
3805				error = 0;
3806		}
3807	}
3808	rcu_read_unlock();
3809
3810	return error;
3811}
3812
3813static int do_tkill(pid_t tgid, pid_t pid, int sig)
3814{
3815	struct kernel_siginfo info;
3816
3817	clear_siginfo(&info);
3818	info.si_signo = sig;
3819	info.si_errno = 0;
3820	info.si_code = SI_TKILL;
3821	info.si_pid = task_tgid_vnr(current);
3822	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3823
3824	return do_send_specific(tgid, pid, sig, &info);
3825}
3826
3827/**
3828 *  sys_tgkill - send signal to one specific thread
3829 *  @tgid: the thread group ID of the thread
3830 *  @pid: the PID of the thread
3831 *  @sig: signal to be sent
3832 *
3833 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3834 *  exists but no longer belongs to the target process. This
3835 *  method solves the problem of threads exiting and PIDs getting reused.
3836 */
3837SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3838{
3839	/* This is only valid for single tasks */
3840	if (pid <= 0 || tgid <= 0)
3841		return -EINVAL;
3842
3843	return do_tkill(tgid, pid, sig);
3844}
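
/*
 * Example (userspace sketch): tgkill via a raw syscall, since libc
 * wrappers vary by version.  signal_thread() is an illustrative
 * helper and tid is a kernel thread id (e.g. from SYS_gettid).
 */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int signal_thread(pid_t tid, int sig)
{
	/* Pinning the tgid guards against tid reuse, per the comment
	 * above. */
	return syscall(SYS_tgkill, getpid(), tid, sig);
}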
3845
3846/**
3847 *  sys_tkill - send signal to one specific task
3848 *  @pid: the PID of the task
3849 *  @sig: signal to be sent
3850 *
3851 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3852 */
3853SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3854{
3855	/* This is only valid for single tasks */
3856	if (pid <= 0)
3857		return -EINVAL;
3858
3859	return do_tkill(0, pid, sig);
3860}
3861
3862static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3863{
3864	/* Not even root can pretend to send signals from the kernel.
3865	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3866	 */
3867	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3868	    (task_pid_vnr(current) != pid))
3869		return -EPERM;
3870
3871	/* POSIX.1b doesn't mention process groups.  */
3872	return kill_proc_info(sig, info, pid);
3873}
3874
3875/**
3876 *  sys_rt_sigqueueinfo - queue a signal and data to a process
3877 *  @pid: the PID of the process
3878 *  @sig: signal to be sent
3879 *  @uinfo: signal info to be sent
3880 */
3881SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3882		siginfo_t __user *, uinfo)
3883{
3884	kernel_siginfo_t info;
3885	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3886	if (unlikely(ret))
3887		return ret;
3888	return do_rt_sigqueueinfo(pid, sig, &info);
3889}
3890
3891#ifdef CONFIG_COMPAT
3892COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3893			compat_pid_t, pid,
3894			int, sig,
3895			struct compat_siginfo __user *, uinfo)
3896{
3897	kernel_siginfo_t info;
3898	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3899	if (unlikely(ret))
3900		return ret;
3901	return do_rt_sigqueueinfo(pid, sig, &info);
3902}
3903#endif
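
/*
 * Example (userspace sketch): sigqueue(3) is the portable front end
 * to rt_sigqueueinfo; send_value() is an illustrative helper.
 */
#include <signal.h>
#include <sys/types.h>

static int send_value(pid_t pid, int val)
{
	union sigval sv = { .sival_int = val };

	/* glibc passes si_code = SI_QUEUE (< 0), so the "no kernel
	 * impersonation" check above permits it. */
	return sigqueue(pid, SIGRTMIN, sv);
}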
3904
3905static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3906{
3907	/* This is only valid for single tasks */
3908	if (pid <= 0 || tgid <= 0)
3909		return -EINVAL;
3910
3911	/* Not even root can pretend to send signals from the kernel.
3912	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3913	 */
3914	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3915	    (task_pid_vnr(current) != pid))
3916		return -EPERM;
3917
3918	return do_send_specific(tgid, pid, sig, info);
3919}
3920
3921SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3922		siginfo_t __user *, uinfo)
3923{
3924	kernel_siginfo_t info;
3925	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3926	if (unlikely(ret))
3927		return ret;
3928	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3929}
3930
3931#ifdef CONFIG_COMPAT
3932COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3933			compat_pid_t, tgid,
3934			compat_pid_t, pid,
3935			int, sig,
3936			struct compat_siginfo __user *, uinfo)
3937{
3938	kernel_siginfo_t info;
3939	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3940	if (unlikely(ret))
3941		return ret;
3942	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3943}
3944#endif
3945
3946/*
3947 * For kthreads only; must not be used if cloned with CLONE_SIGHAND.
3948 */
3949void kernel_sigaction(int sig, __sighandler_t action)
3950{
3951	spin_lock_irq(&current->sighand->siglock);
3952	current->sighand->action[sig - 1].sa.sa_handler = action;
3953	if (action == SIG_IGN) {
3954		sigset_t mask;
3955
3956		sigemptyset(&mask);
3957		sigaddset(&mask, sig);
3958
3959		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3960		flush_sigqueue_mask(&mask, &current->pending);
3961		recalc_sigpending();
3962	}
3963	spin_unlock_irq(&current->sighand->siglock);
3964}
3965EXPORT_SYMBOL(kernel_sigaction);
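
/*
 * Example (in-kernel sketch, assuming the allow_signal() helper from
 * <linux/signal.h>, which funnels into kernel_sigaction()): a kthread
 * opting in to SIGTERM.  demo_thread() is illustrative.
 */
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>

static int demo_thread(void *unused)
{
	allow_signal(SIGTERM);	/* kthreads ignore signals by default */

	while (!kthread_should_stop()) {
		if (signal_pending(current))
			break;	/* no handler runs; kthreads must poll */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}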
3966
3967void __weak sigaction_compat_abi(struct k_sigaction *act,
3968		struct k_sigaction *oact)
3969{
3970}
3971
3972int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3973{
3974	struct task_struct *p = current, *t;
3975	struct k_sigaction *k;
3976	sigset_t mask;
3977
3978	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3979		return -EINVAL;
3980
3981	k = &p->sighand->action[sig-1];
3982
3983	spin_lock_irq(&p->sighand->siglock);
3984	if (oact)
3985		*oact = *k;
3986
3987	sigaction_compat_abi(act, oact);
3988
3989	if (act) {
3990		sigdelsetmask(&act->sa.sa_mask,
3991			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3992		*k = *act;
3993		/*
3994		 * POSIX 3.3.1.3:
3995		 *  "Setting a signal action to SIG_IGN for a signal that is
3996		 *   pending shall cause the pending signal to be discarded,
3997		 *   whether or not it is blocked."
3998		 *
3999		 *  "Setting a signal action to SIG_DFL for a signal that is
4000		 *   pending and whose default action is to ignore the signal
4001		 *   (for example, SIGCHLD), shall cause the pending signal to
4002		 *   be discarded, whether or not it is blocked"
4003		 */
4004		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4005			sigemptyset(&mask);
4006			sigaddset(&mask, sig);
4007			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4008			for_each_thread(p, t)
4009				flush_sigqueue_mask(&mask, &t->pending);
4010		}
4011	}
4012
4013	spin_unlock_irq(&p->sighand->siglock);
4014	return 0;
4015}
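
/*
 * Example (userspace sketch) of the POSIX rule quoted above: setting
 * SIG_IGN discards an already-pending instance of the signal, even
 * while it is blocked.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;
	struct sigaction sa;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);			/* now pending and blocked */

	sa.sa_handler = SIG_IGN;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGUSR1, &sa, NULL);	/* flushes the pending instance */

	sigpending(&pend);
	printf("still pending: %d\n", sigismember(&pend, SIGUSR1)); /* 0 */
	return 0;
}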
4016
4017static int
4018do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4019		size_t min_ss_size)
4020{
4021	struct task_struct *t = current;
4022
4023	if (oss) {
4024		memset(oss, 0, sizeof(stack_t));
4025		oss->ss_sp = (void __user *) t->sas_ss_sp;
4026		oss->ss_size = t->sas_ss_size;
4027		oss->ss_flags = sas_ss_flags(sp) |
4028			(current->sas_ss_flags & SS_FLAG_BITS);
4029	}
4030
4031	if (ss) {
4032		void __user *ss_sp = ss->ss_sp;
4033		size_t ss_size = ss->ss_size;
4034		unsigned ss_flags = ss->ss_flags;
4035		int ss_mode;
4036
4037		if (unlikely(on_sig_stack(sp)))
4038			return -EPERM;
4039
4040		ss_mode = ss_flags & ~SS_FLAG_BITS;
4041		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4042				ss_mode != 0))
4043			return -EINVAL;
4044
4045		if (ss_mode == SS_DISABLE) {
4046			ss_size = 0;
4047			ss_sp = NULL;
4048		} else {
4049			if (unlikely(ss_size < min_ss_size))
4050				return -ENOMEM;
4051		}
4052
4053		t->sas_ss_sp = (unsigned long) ss_sp;
4054		t->sas_ss_size = ss_size;
4055		t->sas_ss_flags = ss_flags;
4056	}
4057	return 0;
4058}
4059
4060SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4061{
4062	stack_t new, old;
4063	int err;
4064	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4065		return -EFAULT;
4066	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4067			      current_user_stack_pointer(),
4068			      MINSIGSTKSZ);
4069	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4070		err = -EFAULT;
4071	return err;
4072}
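
/*
 * Example (userspace sketch, error checking elided): installing an
 * alternate stack so a SIGSEGV handler can still run after the main
 * stack overflows.  install_altstack() is an illustrative helper.
 */
#include <signal.h>
#include <stdlib.h>

static void install_altstack(void (*handler)(int))
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);	/* must be >= MINSIGSTKSZ */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_ONSTACK;	/* run the handler on ss */
	sigaction(SIGSEGV, &sa, NULL);
}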
4073
4074int restore_altstack(const stack_t __user *uss)
4075{
4076	stack_t new;
4077	if (copy_from_user(&new, uss, sizeof(stack_t)))
4078		return -EFAULT;
4079	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4080			     MINSIGSTKSZ);
4081	/* squash all but EFAULT for now */
4082	return 0;
4083}
4084
4085int __save_altstack(stack_t __user *uss, unsigned long sp)
4086{
4087	struct task_struct *t = current;
4088	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4089		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4090		__put_user(t->sas_ss_size, &uss->ss_size);
4091	if (err)
4092		return err;
4093	if (t->sas_ss_flags & SS_AUTODISARM)
4094		sas_ss_reset(t);
4095	return 0;
4096}
4097
4098#ifdef CONFIG_COMPAT
4099static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4100				 compat_stack_t __user *uoss_ptr)
4101{
4102	stack_t uss, uoss;
4103	int ret;
4104
4105	if (uss_ptr) {
4106		compat_stack_t uss32;
4107		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4108			return -EFAULT;
4109		uss.ss_sp = compat_ptr(uss32.ss_sp);
4110		uss.ss_flags = uss32.ss_flags;
4111		uss.ss_size = uss32.ss_size;
4112	}
4113	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4114			     compat_user_stack_pointer(),
4115			     COMPAT_MINSIGSTKSZ);
4116	if (ret >= 0 && uoss_ptr) {
4117		compat_stack_t old;
4118		memset(&old, 0, sizeof(old));
4119		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4120		old.ss_flags = uoss.ss_flags;
4121		old.ss_size = uoss.ss_size;
4122		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4123			ret = -EFAULT;
4124	}
4125	return ret;
4126}
4127
4128COMPAT_SYSCALL_DEFINE2(sigaltstack,
4129			const compat_stack_t __user *, uss_ptr,
4130			compat_stack_t __user *, uoss_ptr)
4131{
4132	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4133}
4134
4135int compat_restore_altstack(const compat_stack_t __user *uss)
4136{
4137	int err = do_compat_sigaltstack(uss, NULL);
4138	/* squash all but -EFAULT for now */
4139	return err == -EFAULT ? err : 0;
4140}
4141
4142int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4143{
4144	int err;
4145	struct task_struct *t = current;
4146	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4147			 &uss->ss_sp) |
4148		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4149		__put_user(t->sas_ss_size, &uss->ss_size);
4150	if (err)
4151		return err;
4152	if (t->sas_ss_flags & SS_AUTODISARM)
4153		sas_ss_reset(t);
4154	return 0;
4155}
4156#endif
4157
4158#ifdef __ARCH_WANT_SYS_SIGPENDING
4159
4160/**
4161 *  sys_sigpending - examine pending signals
4162 *  @uset: where the mask of pending signals is returned
4163 */
4164SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4165{
4166	sigset_t set;
4167
4168	if (sizeof(old_sigset_t) > sizeof(*uset))
4169		return -EINVAL;
4170
4171	do_sigpending(&set);
4172
4173	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4174		return -EFAULT;
4175
4176	return 0;
4177}
4178
4179#ifdef CONFIG_COMPAT
4180COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4181{
4182	sigset_t set;
4183
4184	do_sigpending(&set);
4185
4186	return put_user(set.sig[0], set32);
4187}
4188#endif
4189
4190#endif
4191
4192#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4193/**
4194 *  sys_sigprocmask - examine and change blocked signals
4195 *  @how: whether to add, remove, or set signals
4196 *  @nset: signals to add or remove (if non-null)
4197 *  @oset: previous value of signal mask if non-null
4198 *
4199 * Some platforms have their own version with special arguments;
4200 * others support only sys_rt_sigprocmask.
4201 */
4202
4203SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4204		old_sigset_t __user *, oset)
4205{
4206	old_sigset_t old_set, new_set;
4207	sigset_t new_blocked;
4208
4209	old_set = current->blocked.sig[0];
4210
4211	if (nset) {
4212		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4213			return -EFAULT;
4214
4215		new_blocked = current->blocked;
4216
4217		switch (how) {
4218		case SIG_BLOCK:
4219			sigaddsetmask(&new_blocked, new_set);
4220			break;
4221		case SIG_UNBLOCK:
4222			sigdelsetmask(&new_blocked, new_set);
4223			break;
4224		case SIG_SETMASK:
4225			new_blocked.sig[0] = new_set;
4226			break;
4227		default:
4228			return -EINVAL;
4229		}
4230
4231		set_current_blocked(&new_blocked);
4232	}
4233
4234	if (oset) {
4235		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4236			return -EFAULT;
4237	}
4238
4239	return 0;
4240}
4241#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
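
/*
 * Example (userspace sketch) of the three "how" modes dispatched
 * above; block_sigint() is an illustrative helper.
 */
#include <signal.h>

static void block_sigint(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	sigprocmask(SIG_BLOCK, &set, &old);	/* union with current mask */
	/* ... critical section: SIGINT stays pending ... */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore exactly */
}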
4242
4243#ifndef CONFIG_ODD_RT_SIGACTION
4244/**
4245 *  sys_rt_sigaction - alter an action taken by a process
4246 *  @sig: signal whose action is to be changed
4247 *  @act: new sigaction
4248 *  @oact: used to save the previous sigaction
4249 *  @sigsetsize: size of sigset_t type
4250 */
4251SYSCALL_DEFINE4(rt_sigaction, int, sig,
4252		const struct sigaction __user *, act,
4253		struct sigaction __user *, oact,
4254		size_t, sigsetsize)
4255{
4256	struct k_sigaction new_sa, old_sa;
4257	int ret;
4258
4259	/* XXX: Don't preclude handling different sized sigset_t's.  */
4260	if (sigsetsize != sizeof(sigset_t))
4261		return -EINVAL;
4262
4263	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4264		return -EFAULT;
4265
4266	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4267	if (ret)
4268		return ret;
4269
4270	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4271		return -EFAULT;
4272
4273	return 0;
4274}
4275#ifdef CONFIG_COMPAT
4276COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4277		const struct compat_sigaction __user *, act,
4278		struct compat_sigaction __user *, oact,
4279		compat_size_t, sigsetsize)
4280{
4281	struct k_sigaction new_ka, old_ka;
4282#ifdef __ARCH_HAS_SA_RESTORER
4283	compat_uptr_t restorer;
4284#endif
4285	int ret;
4286
4287	/* XXX: Don't preclude handling different sized sigset_t's.  */
4288	if (sigsetsize != sizeof(compat_sigset_t))
4289		return -EINVAL;
4290
4291	if (act) {
4292		compat_uptr_t handler;
4293		ret = get_user(handler, &act->sa_handler);
4294		new_ka.sa.sa_handler = compat_ptr(handler);
4295#ifdef __ARCH_HAS_SA_RESTORER
4296		ret |= get_user(restorer, &act->sa_restorer);
4297		new_ka.sa.sa_restorer = compat_ptr(restorer);
4298#endif
4299		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4300		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4301		if (ret)
4302			return -EFAULT;
4303	}
4304
4305	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4306	if (!ret && oact) {
4307		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4308			       &oact->sa_handler);
4309		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4310					 sizeof(oact->sa_mask));
4311		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4312#ifdef __ARCH_HAS_SA_RESTORER
4313		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4314				&oact->sa_restorer);
4315#endif
4316	}
4317	return ret;
4318}
4319#endif
4320#endif /* !CONFIG_ODD_RT_SIGACTION */
4321
4322#ifdef CONFIG_OLD_SIGACTION
4323SYSCALL_DEFINE3(sigaction, int, sig,
4324		const struct old_sigaction __user *, act,
4325	        struct old_sigaction __user *, oact)
4326{
4327	struct k_sigaction new_ka, old_ka;
4328	int ret;
4329
4330	if (act) {
4331		old_sigset_t mask;
4332		if (!access_ok(act, sizeof(*act)) ||
4333		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4334		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4335		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4336		    __get_user(mask, &act->sa_mask))
4337			return -EFAULT;
4338#ifdef __ARCH_HAS_KA_RESTORER
4339		new_ka.ka_restorer = NULL;
4340#endif
4341		siginitset(&new_ka.sa.sa_mask, mask);
4342	}
4343
4344	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4345
4346	if (!ret && oact) {
4347		if (!access_ok(oact, sizeof(*oact)) ||
4348		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4349		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4350		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4351		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4352			return -EFAULT;
4353	}
4354
4355	return ret;
4356}
4357#endif
4358#ifdef CONFIG_COMPAT_OLD_SIGACTION
4359COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4360		const struct compat_old_sigaction __user *, act,
4361	        struct compat_old_sigaction __user *, oact)
4362{
4363	struct k_sigaction new_ka, old_ka;
4364	int ret;
4365	compat_old_sigset_t mask;
4366	compat_uptr_t handler, restorer;
4367
4368	if (act) {
4369		if (!access_ok(act, sizeof(*act)) ||
4370		    __get_user(handler, &act->sa_handler) ||
4371		    __get_user(restorer, &act->sa_restorer) ||
4372		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4373		    __get_user(mask, &act->sa_mask))
4374			return -EFAULT;
4375
4376#ifdef __ARCH_HAS_KA_RESTORER
4377		new_ka.ka_restorer = NULL;
4378#endif
4379		new_ka.sa.sa_handler = compat_ptr(handler);
4380		new_ka.sa.sa_restorer = compat_ptr(restorer);
4381		siginitset(&new_ka.sa.sa_mask, mask);
4382	}
4383
4384	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4385
4386	if (!ret && oact) {
4387		if (!access_ok(oact, sizeof(*oact)) ||
4388		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4389			       &oact->sa_handler) ||
4390		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4391			       &oact->sa_restorer) ||
4392		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4393		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4394			return -EFAULT;
4395	}
4396	return ret;
4397}
4398#endif
4399
4400#ifdef CONFIG_SGETMASK_SYSCALL
4401
4402/*
4403 * For backwards compatibility.  Functionality superseded by sigprocmask.
4404 */
4405SYSCALL_DEFINE0(sgetmask)
4406{
4407	/* SMP safe */
4408	return current->blocked.sig[0];
4409}
4410
4411SYSCALL_DEFINE1(ssetmask, int, newmask)
4412{
4413	int old = current->blocked.sig[0];
4414	sigset_t newset;
4415
4416	siginitset(&newset, newmask);
4417	set_current_blocked(&newset);
4418
4419	return old;
4420}
4421#endif /* CONFIG_SGETMASK_SYSCALL */
4422
4423#ifdef __ARCH_WANT_SYS_SIGNAL
4424/*
4425 * For backwards compatibility.  Functionality superseded by sigaction.
4426 */
4427SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4428{
4429	struct k_sigaction new_sa, old_sa;
4430	int ret;
4431
4432	new_sa.sa.sa_handler = handler;
4433	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4434	sigemptyset(&new_sa.sa.sa_mask);
4435
4436	ret = do_sigaction(sig, &new_sa, &old_sa);
4437
4438	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4439}
4440#endif /* __ARCH_WANT_SYS_SIGNAL */
4441
4442#ifdef __ARCH_WANT_SYS_PAUSE
4443
4444SYSCALL_DEFINE0(pause)
4445{
4446	while (!signal_pending(current)) {
4447		__set_current_state(TASK_INTERRUPTIBLE);
4448		schedule();
4449	}
4450	return -ERESTARTNOHAND;
4451}
4452
4453#endif
4454
4455static int sigsuspend(sigset_t *set)
4456{
4457	current->saved_sigmask = current->blocked;
4458	set_current_blocked(set);
4459
4460	while (!signal_pending(current)) {
4461		__set_current_state(TASK_INTERRUPTIBLE);
4462		schedule();
4463	}
4464	set_restore_sigmask();
4465	return -ERESTARTNOHAND;
4466}
4467
4468/**
4469 *  sys_rt_sigsuspend - replace the signal mask with the @unewset
4470 *	value until a signal is received
4471 *  @unewset: new signal mask value
4472 *  @sigsetsize: size of sigset_t type
4473 */
4474SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4475{
4476	sigset_t newset;
4477
4478	/* XXX: Don't preclude handling different sized sigset_t's.  */
4479	if (sigsetsize != sizeof(sigset_t))
4480		return -EINVAL;
4481
4482	if (copy_from_user(&newset, unewset, sizeof(newset)))
4483		return -EFAULT;
4484	return sigsuspend(&newset);
4485}
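
/*
 * Example (userspace sketch) of the race sigsuspend() closes:
 * atomically unmask SIGCHLD and sleep, so a signal arriving between
 * the flag check and the wait cannot be lost.  Assumes on_chld() has
 * been installed as the SIGCHLD handler.
 */
#include <signal.h>

static volatile sig_atomic_t got_child;

static void on_chld(int sig)
{
	(void)sig;
	got_child = 1;
}

static void wait_for_child(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGCHLD);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */

	while (!got_child)
		sigsuspend(&old);	/* atomically unmask and sleep */

	sigprocmask(SIG_SETMASK, &old, NULL);
}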
4486 
4487#ifdef CONFIG_COMPAT
4488COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4489{
4490	sigset_t newset;
4491
4492	/* XXX: Don't preclude handling different sized sigset_t's.  */
4493	if (sigsetsize != sizeof(sigset_t))
4494		return -EINVAL;
4495
4496	if (get_compat_sigset(&newset, unewset))
4497		return -EFAULT;
4498	return sigsuspend(&newset);
4499}
4500#endif
4501
4502#ifdef CONFIG_OLD_SIGSUSPEND
4503SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4504{
4505	sigset_t blocked;
4506	siginitset(&blocked, mask);
4507	return sigsuspend(&blocked);
4508}
4509#endif
4510#ifdef CONFIG_OLD_SIGSUSPEND3
4511SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4512{
4513	sigset_t blocked;
4514	siginitset(&blocked, mask);
4515	return sigsuspend(&blocked);
4516}
4517#endif
4518
4519__weak const char *arch_vma_name(struct vm_area_struct *vma)
4520{
4521	return NULL;
4522}
4523
4524static inline void siginfo_buildtime_checks(void)
4525{
4526	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4527
4528	/* Verify the offsets in the two siginfos match */
4529#define CHECK_OFFSET(field) \
4530	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4531
4532	/* kill */
4533	CHECK_OFFSET(si_pid);
4534	CHECK_OFFSET(si_uid);
4535
4536	/* timer */
4537	CHECK_OFFSET(si_tid);
4538	CHECK_OFFSET(si_overrun);
4539	CHECK_OFFSET(si_value);
4540
4541	/* rt */
4542	CHECK_OFFSET(si_pid);
4543	CHECK_OFFSET(si_uid);
4544	CHECK_OFFSET(si_value);
4545
4546	/* sigchld */
4547	CHECK_OFFSET(si_pid);
4548	CHECK_OFFSET(si_uid);
4549	CHECK_OFFSET(si_status);
4550	CHECK_OFFSET(si_utime);
4551	CHECK_OFFSET(si_stime);
4552
4553	/* sigfault */
4554	CHECK_OFFSET(si_addr);
4555	CHECK_OFFSET(si_addr_lsb);
4556	CHECK_OFFSET(si_lower);
4557	CHECK_OFFSET(si_upper);
4558	CHECK_OFFSET(si_pkey);
4559
4560	/* sigpoll */
4561	CHECK_OFFSET(si_band);
4562	CHECK_OFFSET(si_fd);
4563
4564	/* sigsys */
4565	CHECK_OFFSET(si_call_addr);
4566	CHECK_OFFSET(si_syscall);
4567	CHECK_OFFSET(si_arch);
4568#undef CHECK_OFFSET
4569
4570	/* usb asyncio */
4571	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4572		     offsetof(struct siginfo, si_addr));
4573	if (sizeof(int) == sizeof(void __user *)) {
4574		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4575			     sizeof(void __user *));
4576	} else {
4577		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4578			      sizeof_field(struct siginfo, si_uid)) !=
4579			     sizeof(void __user *));
4580		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4581			     offsetof(struct siginfo, si_uid));
4582	}
4583#ifdef CONFIG_COMPAT
4584	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4585		     offsetof(struct compat_siginfo, si_addr));
4586	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4587		     sizeof(compat_uptr_t));
4588	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4589		     sizeof_field(struct siginfo, si_pid));
4590#endif
4591}
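
/*
 * Example (standalone sketch of the same compile-time idiom, using
 * C11 static_assert; the struct names are illustrative): keep two
 * layouts' common prefix in sync, as CHECK_OFFSET does above.
 */
#include <assert.h>
#include <stddef.h>

struct wire_v1 { int pid; int uid; };
struct wire_v2 { int pid; int uid; long extra; };

static_assert(offsetof(struct wire_v1, uid) ==
	      offsetof(struct wire_v2, uid),
	      "wire layouts diverged");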
4592
4593void __init signals_init(void)
4594{
4595	siginfo_buildtime_checks();
4596
4597	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4598}
4599
4600#ifdef CONFIG_KGDB_KDB
4601#include <linux/kdb.h>
4602/*
4603 * kdb_send_sig - Allows kdb to send signals without exposing
4604 * signal internals.  This function checks if the required locks are
4605 * available before calling the main signal code, to avoid kdb
4606 * deadlocks.
4607 */
4608void kdb_send_sig(struct task_struct *t, int sig)
4609{
4610	static struct task_struct *kdb_prev_t;
4611	int new_t, ret;
4612	if (!spin_trylock(&t->sighand->siglock)) {
4613		kdb_printf("Can't do kill command now.\n"
4614			   "The sigmask lock is held somewhere else in "
4615			   "the kernel; try again later\n");
4616		return;
4617	}
4618	new_t = kdb_prev_t != t;
4619	kdb_prev_t = t;
4620	if (t->state != TASK_RUNNING && new_t) {
4621		spin_unlock(&t->sighand->siglock);
4622		kdb_printf("Process is not RUNNING, sending a signal from "
4623			   "kdb risks deadlocking\n"
4624			   "on the run queue locks. "
4625			   "The signal has _not_ been sent.\n"
4626			   "Reissue the kill command if you want to risk "
4627			   "the deadlock.\n");
4628		return;
4629	}
4630	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4631	spin_unlock(&t->sighand->siglock);
4632	if (ret)
4633		kdb_printf("Failed to deliver signal %d to process %d.\n",
4634			   sig, t->pid);
4635	else
4636		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4637}
4638#endif	/* CONFIG_KGDB_KDB */