Linux Audio

Check our new training course

Loading...
v4.10.11
 
   1/*
   2 *  linux/kernel/signal.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *
   6 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   7 *
   8 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
   9 *		Changes to use preallocated sigqueue structures
  10 *		to allow signals to be sent reliably.
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/sched.h>
 
 
 
 
 
 
  17#include <linux/fs.h>
 
  18#include <linux/tty.h>
  19#include <linux/binfmts.h>
  20#include <linux/coredump.h>
  21#include <linux/security.h>
  22#include <linux/syscalls.h>
  23#include <linux/ptrace.h>
  24#include <linux/signal.h>
  25#include <linux/signalfd.h>
  26#include <linux/ratelimit.h>
  27#include <linux/tracehook.h>
  28#include <linux/capability.h>
  29#include <linux/freezer.h>
  30#include <linux/pid_namespace.h>
  31#include <linux/nsproxy.h>
  32#include <linux/user_namespace.h>
  33#include <linux/uprobes.h>
  34#include <linux/compat.h>
  35#include <linux/cn_proc.h>
  36#include <linux/compiler.h>
 
 
 
  37
  38#define CREATE_TRACE_POINTS
  39#include <trace/events/signal.h>
  40
  41#include <asm/param.h>
  42#include <linux/uaccess.h>
  43#include <asm/unistd.h>
  44#include <asm/siginfo.h>
  45#include <asm/cacheflush.h>
  46#include "audit.h"	/* audit_signal_info() */
  47
  48/*
  49 * SLAB caches for signal bits.
  50 */
  51
  52static struct kmem_cache *sigqueue_cachep;
  53
  54int print_fatal_signals __read_mostly;
  55
  56static void __user *sig_handler(struct task_struct *t, int sig)
  57{
  58	return t->sighand->action[sig - 1].sa.sa_handler;
  59}
  60
  61static int sig_handler_ignored(void __user *handler, int sig)
  62{
  63	/* Is it explicitly or implicitly ignored? */
  64	return handler == SIG_IGN ||
  65		(handler == SIG_DFL && sig_kernel_ignore(sig));
  66}
  67
  68static int sig_task_ignored(struct task_struct *t, int sig, bool force)
  69{
  70	void __user *handler;
  71
  72	handler = sig_handler(t, sig);
  73
 
 
 
 
  74	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  75			handler == SIG_DFL && !force)
  76		return 1;
 
 
 
 
 
  77
  78	return sig_handler_ignored(handler, sig);
  79}
  80
  81static int sig_ignored(struct task_struct *t, int sig, bool force)
  82{
  83	/*
  84	 * Blocked signals are never ignored, since the
  85	 * signal handler may change by the time it is
  86	 * unblocked.
  87	 */
  88	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
  89		return 0;
  90
  91	if (!sig_task_ignored(t, sig, force))
  92		return 0;
  93
  94	/*
  95	 * Tracers may want to know about even ignored signals.
 
 
  96	 */
  97	return !t->ptrace;
 
 
 
  98}
  99
 100/*
 101 * Re-calculate pending state from the set of locally pending
 102 * signals, globally pending signals, and blocked signals.
 103 */
 104static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 105{
 106	unsigned long ready;
 107	long i;
 108
 109	switch (_NSIG_WORDS) {
 110	default:
 111		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 112			ready |= signal->sig[i] &~ blocked->sig[i];
 113		break;
 114
 115	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 116		ready |= signal->sig[2] &~ blocked->sig[2];
 117		ready |= signal->sig[1] &~ blocked->sig[1];
 118		ready |= signal->sig[0] &~ blocked->sig[0];
 119		break;
 120
 121	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 122		ready |= signal->sig[0] &~ blocked->sig[0];
 123		break;
 124
 125	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 126	}
 127	return ready !=	0;
 128}
 129
 130#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 131
 132static int recalc_sigpending_tsk(struct task_struct *t)
 133{
 134	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
 135	    PENDING(&t->pending, &t->blocked) ||
 136	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 
 137		set_tsk_thread_flag(t, TIF_SIGPENDING);
 138		return 1;
 139	}
 
 140	/*
 141	 * We must never clear the flag in another thread, or in current
 142	 * when it's possible the current syscall is returning -ERESTART*.
 143	 * So we don't clear it here, and only callers who know they should do.
 144	 */
 145	return 0;
 146}
 147
 148/*
 149 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 150 * This is superfluous when called on current, the wakeup is a harmless no-op.
 151 */
 152void recalc_sigpending_and_wake(struct task_struct *t)
 153{
 154	if (recalc_sigpending_tsk(t))
 155		signal_wake_up(t, 0);
 156}
 157
 158void recalc_sigpending(void)
 159{
 160	if (!recalc_sigpending_tsk(current) && !freezing(current))
 161		clear_thread_flag(TIF_SIGPENDING);
 162
 163}
 
 
 
 
 
 
 
 
 
 
 
 
 164
 165/* Given the mask, find the first available signal that should be serviced. */
 166
 167#define SYNCHRONOUS_MASK \
 168	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 169	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 170
 171int next_signal(struct sigpending *pending, sigset_t *mask)
 172{
 173	unsigned long i, *s, *m, x;
 174	int sig = 0;
 175
 176	s = pending->signal.sig;
 177	m = mask->sig;
 178
 179	/*
 180	 * Handle the first word specially: it contains the
 181	 * synchronous signals that need to be dequeued first.
 182	 */
 183	x = *s &~ *m;
 184	if (x) {
 185		if (x & SYNCHRONOUS_MASK)
 186			x &= SYNCHRONOUS_MASK;
 187		sig = ffz(~x) + 1;
 188		return sig;
 189	}
 190
 191	switch (_NSIG_WORDS) {
 192	default:
 193		for (i = 1; i < _NSIG_WORDS; ++i) {
 194			x = *++s &~ *++m;
 195			if (!x)
 196				continue;
 197			sig = ffz(~x) + i*_NSIG_BPW + 1;
 198			break;
 199		}
 200		break;
 201
 202	case 2:
 203		x = s[1] &~ m[1];
 204		if (!x)
 205			break;
 206		sig = ffz(~x) + _NSIG_BPW + 1;
 207		break;
 208
 209	case 1:
 210		/* Nothing to do */
 211		break;
 212	}
 213
 214	return sig;
 215}
 216
 217static inline void print_dropped_signal(int sig)
 218{
 219	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 220
 221	if (!print_fatal_signals)
 222		return;
 223
 224	if (!__ratelimit(&ratelimit_state))
 225		return;
 226
 227	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 228				current->comm, current->pid, sig);
 229}
 230
 231/**
 232 * task_set_jobctl_pending - set jobctl pending bits
 233 * @task: target task
 234 * @mask: pending bits to set
 235 *
 236 * Clear @mask from @task->jobctl.  @mask must be subset of
 237 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 238 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 239 * cleared.  If @task is already being killed or exiting, this function
 240 * becomes noop.
 241 *
 242 * CONTEXT:
 243 * Must be called with @task->sighand->siglock held.
 244 *
 245 * RETURNS:
 246 * %true if @mask is set, %false if made noop because @task was dying.
 247 */
 248bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 249{
 250	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 251			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 252	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 253
 254	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 255		return false;
 256
 257	if (mask & JOBCTL_STOP_SIGMASK)
 258		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 259
 260	task->jobctl |= mask;
 261	return true;
 262}
 263
 264/**
 265 * task_clear_jobctl_trapping - clear jobctl trapping bit
 266 * @task: target task
 267 *
 268 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 269 * Clear it and wake up the ptracer.  Note that we don't need any further
 270 * locking.  @task->siglock guarantees that @task->parent points to the
 271 * ptracer.
 272 *
 273 * CONTEXT:
 274 * Must be called with @task->sighand->siglock held.
 275 */
 276void task_clear_jobctl_trapping(struct task_struct *task)
 277{
 278	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 279		task->jobctl &= ~JOBCTL_TRAPPING;
 280		smp_mb();	/* advised by wake_up_bit() */
 281		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 282	}
 283}
 284
 285/**
 286 * task_clear_jobctl_pending - clear jobctl pending bits
 287 * @task: target task
 288 * @mask: pending bits to clear
 289 *
 290 * Clear @mask from @task->jobctl.  @mask must be subset of
 291 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 292 * STOP bits are cleared together.
 293 *
 294 * If clearing of @mask leaves no stop or trap pending, this function calls
 295 * task_clear_jobctl_trapping().
 296 *
 297 * CONTEXT:
 298 * Must be called with @task->sighand->siglock held.
 299 */
 300void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 301{
 302	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 303
 304	if (mask & JOBCTL_STOP_PENDING)
 305		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 306
 307	task->jobctl &= ~mask;
 308
 309	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 310		task_clear_jobctl_trapping(task);
 311}
 312
 313/**
 314 * task_participate_group_stop - participate in a group stop
 315 * @task: task participating in a group stop
 316 *
 317 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 318 * Group stop states are cleared and the group stop count is consumed if
 319 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 320 * stop, the appropriate %SIGNAL_* flags are set.
 321 *
 322 * CONTEXT:
 323 * Must be called with @task->sighand->siglock held.
 324 *
 325 * RETURNS:
 326 * %true if group stop completion should be notified to the parent, %false
 327 * otherwise.
 328 */
 329static bool task_participate_group_stop(struct task_struct *task)
 330{
 331	struct signal_struct *sig = task->signal;
 332	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 333
 334	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 335
 336	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 337
 338	if (!consume)
 339		return false;
 340
 341	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 342		sig->group_stop_count--;
 343
 344	/*
 345	 * Tell the caller to notify completion iff we are entering into a
 346	 * fresh group stop.  Read comment in do_signal_stop() for details.
 347	 */
 348	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 349		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 350		return true;
 351	}
 352	return false;
 353}
 354
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 355/*
 356 * allocate a new signal queue record
 357 * - this may be called without locks if and only if t == current, otherwise an
 358 *   appropriate lock must be held to stop the target task from exiting
 359 */
 360static struct sigqueue *
 361__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 
 362{
 363	struct sigqueue *q = NULL;
 364	struct user_struct *user;
 
 365
 366	/*
 367	 * Protect access to @t credentials. This can go away when all
 368	 * callers hold rcu read lock.
 
 
 
 
 369	 */
 370	rcu_read_lock();
 371	user = get_uid(__task_cred(t)->user);
 372	atomic_inc(&user->sigpending);
 373	rcu_read_unlock();
 
 
 374
 375	if (override_rlimit ||
 376	    atomic_read(&user->sigpending) <=
 377			task_rlimit(t, RLIMIT_SIGPENDING)) {
 378		q = kmem_cache_alloc(sigqueue_cachep, flags);
 379	} else {
 380		print_dropped_signal(sig);
 381	}
 382
 383	if (unlikely(q == NULL)) {
 384		atomic_dec(&user->sigpending);
 385		free_uid(user);
 386	} else {
 387		INIT_LIST_HEAD(&q->list);
 388		q->flags = 0;
 389		q->user = user;
 390	}
 391
 392	return q;
 393}
 394
 395static void __sigqueue_free(struct sigqueue *q)
 396{
 397	if (q->flags & SIGQUEUE_PREALLOC)
 398		return;
 399	atomic_dec(&q->user->sigpending);
 400	free_uid(q->user);
 
 
 401	kmem_cache_free(sigqueue_cachep, q);
 402}
 403
 404void flush_sigqueue(struct sigpending *queue)
 405{
 406	struct sigqueue *q;
 407
 408	sigemptyset(&queue->signal);
 409	while (!list_empty(&queue->list)) {
 410		q = list_entry(queue->list.next, struct sigqueue , list);
 411		list_del_init(&q->list);
 412		__sigqueue_free(q);
 413	}
 414}
 415
 416/*
 417 * Flush all pending signals for this kthread.
 418 */
 419void flush_signals(struct task_struct *t)
 420{
 421	unsigned long flags;
 422
 423	spin_lock_irqsave(&t->sighand->siglock, flags);
 424	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 425	flush_sigqueue(&t->pending);
 426	flush_sigqueue(&t->signal->shared_pending);
 427	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 428}
 
 429
 430#ifdef CONFIG_POSIX_TIMERS
 431static void __flush_itimer_signals(struct sigpending *pending)
 432{
 433	sigset_t signal, retain;
 434	struct sigqueue *q, *n;
 435
 436	signal = pending->signal;
 437	sigemptyset(&retain);
 438
 439	list_for_each_entry_safe(q, n, &pending->list, list) {
 440		int sig = q->info.si_signo;
 441
 442		if (likely(q->info.si_code != SI_TIMER)) {
 443			sigaddset(&retain, sig);
 444		} else {
 445			sigdelset(&signal, sig);
 446			list_del_init(&q->list);
 447			__sigqueue_free(q);
 448		}
 449	}
 450
 451	sigorsets(&pending->signal, &signal, &retain);
 452}
 453
 454void flush_itimer_signals(void)
 455{
 456	struct task_struct *tsk = current;
 457	unsigned long flags;
 458
 459	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 460	__flush_itimer_signals(&tsk->pending);
 461	__flush_itimer_signals(&tsk->signal->shared_pending);
 462	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 463}
 464#endif
 465
 466void ignore_signals(struct task_struct *t)
 467{
 468	int i;
 469
 470	for (i = 0; i < _NSIG; ++i)
 471		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 472
 473	flush_signals(t);
 474}
 475
 476/*
 477 * Flush all handlers for a task.
 478 */
 479
 480void
 481flush_signal_handlers(struct task_struct *t, int force_default)
 482{
 483	int i;
 484	struct k_sigaction *ka = &t->sighand->action[0];
 485	for (i = _NSIG ; i != 0 ; i--) {
 486		if (force_default || ka->sa.sa_handler != SIG_IGN)
 487			ka->sa.sa_handler = SIG_DFL;
 488		ka->sa.sa_flags = 0;
 489#ifdef __ARCH_HAS_SA_RESTORER
 490		ka->sa.sa_restorer = NULL;
 491#endif
 492		sigemptyset(&ka->sa.sa_mask);
 493		ka++;
 494	}
 495}
 496
 497int unhandled_signal(struct task_struct *tsk, int sig)
 498{
 499	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 500	if (is_global_init(tsk))
 501		return 1;
 
 502	if (handler != SIG_IGN && handler != SIG_DFL)
 503		return 0;
 
 504	/* if ptraced, let the tracer determine */
 505	return !tsk->ptrace;
 506}
 507
 508static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 
 509{
 510	struct sigqueue *q, *first = NULL;
 511
 512	/*
 513	 * Collect the siginfo appropriate to this signal.  Check if
 514	 * there is another siginfo for the same signal.
 515	*/
 516	list_for_each_entry(q, &list->list, list) {
 517		if (q->info.si_signo == sig) {
 518			if (first)
 519				goto still_pending;
 520			first = q;
 521		}
 522	}
 523
 524	sigdelset(&list->signal, sig);
 525
 526	if (first) {
 527still_pending:
 528		list_del_init(&first->list);
 529		copy_siginfo(info, &first->info);
 
 
 
 
 
 
 530		__sigqueue_free(first);
 531	} else {
 532		/*
 533		 * Ok, it wasn't in the queue.  This must be
 534		 * a fast-pathed signal or we must have been
 535		 * out of queue space.  So zero out the info.
 536		 */
 
 537		info->si_signo = sig;
 538		info->si_errno = 0;
 539		info->si_code = SI_USER;
 540		info->si_pid = 0;
 541		info->si_uid = 0;
 542	}
 543}
 544
 545static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 546			siginfo_t *info)
 547{
 548	int sig = next_signal(pending, mask);
 549
 550	if (sig)
 551		collect_signal(sig, pending, info);
 552	return sig;
 553}
 554
 555/*
 556 * Dequeue a signal and return the element to the caller, which is
 557 * expected to free it.
 558 *
 559 * All callers have to hold the siglock.
 560 */
 561int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 
 562{
 
 563	int signr;
 564
 565	/* We only dequeue private signals from ourselves, we don't let
 566	 * signalfd steal them
 567	 */
 568	signr = __dequeue_signal(&tsk->pending, mask, info);
 
 569	if (!signr) {
 
 570		signr = __dequeue_signal(&tsk->signal->shared_pending,
 571					 mask, info);
 572#ifdef CONFIG_POSIX_TIMERS
 573		/*
 574		 * itimer signal ?
 575		 *
 576		 * itimers are process shared and we restart periodic
 577		 * itimers in the signal delivery path to prevent DoS
 578		 * attacks in the high resolution timer case. This is
 579		 * compliant with the old way of self-restarting
 580		 * itimers, as the SIGALRM is a legacy signal and only
 581		 * queued once. Changing the restart behaviour to
 582		 * restart the timer in the signal dequeue path is
 583		 * reducing the timer noise on heavy loaded !highres
 584		 * systems too.
 585		 */
 586		if (unlikely(signr == SIGALRM)) {
 587			struct hrtimer *tmr = &tsk->signal->real_timer;
 588
 589			if (!hrtimer_is_queued(tmr) &&
 590			    tsk->signal->it_real_incr != 0) {
 591				hrtimer_forward(tmr, tmr->base->get_time(),
 592						tsk->signal->it_real_incr);
 593				hrtimer_restart(tmr);
 594			}
 595		}
 596#endif
 597	}
 598
 599	recalc_sigpending();
 600	if (!signr)
 601		return 0;
 602
 603	if (unlikely(sig_kernel_stop(signr))) {
 604		/*
 605		 * Set a marker that we have dequeued a stop signal.  Our
 606		 * caller might release the siglock and then the pending
 607		 * stop signal it is about to process is no longer in the
 608		 * pending bitmasks, but must still be cleared by a SIGCONT
 609		 * (and overruled by a SIGKILL).  So those cases clear this
 610		 * shared flag after we've set it.  Note that this flag may
 611		 * remain set after the signal we return is ignored or
 612		 * handled.  That doesn't matter because its only purpose
 613		 * is to alert stop-signal processing code when another
 614		 * processor has come along and cleared the flag.
 615		 */
 616		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 617	}
 618#ifdef CONFIG_POSIX_TIMERS
 619	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 620		/*
 621		 * Release the siglock to ensure proper locking order
 622		 * of timer locks outside of siglocks.  Note, we leave
 623		 * irqs disabled here, since the posix-timers code is
 624		 * about to disable them again anyway.
 625		 */
 626		spin_unlock(&tsk->sighand->siglock);
 627		do_schedule_next_timer(info);
 628		spin_lock(&tsk->sighand->siglock);
 
 
 
 629	}
 630#endif
 631	return signr;
 632}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 633
 634/*
 635 * Tell a process that it has a new active signal..
 636 *
 637 * NOTE! we rely on the previous spin_lock to
 638 * lock interrupts for us! We can only be called with
 639 * "siglock" held, and the local interrupt must
 640 * have been disabled when that got acquired!
 641 *
 642 * No need to set need_resched since signal event passing
 643 * goes through ->blocked
 644 */
 645void signal_wake_up_state(struct task_struct *t, unsigned int state)
 646{
 
 
 647	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 648	/*
 649	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 650	 * case. We don't check t->state here because there is a race with it
 651	 * executing another processor and just now entering stopped state.
 652	 * By using wake_up_state, we ensure the process will wake up and
 653	 * handle its death signal.
 654	 */
 655	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 656		kick_process(t);
 657}
 658
 659/*
 660 * Remove signals in mask from the pending set and queue.
 661 * Returns 1 if any signals were found.
 662 *
 663 * All callers must be holding the siglock.
 664 */
 665static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 666{
 667	struct sigqueue *q, *n;
 668	sigset_t m;
 669
 670	sigandsets(&m, mask, &s->signal);
 671	if (sigisemptyset(&m))
 672		return 0;
 673
 674	sigandnsets(&s->signal, &s->signal, mask);
 675	list_for_each_entry_safe(q, n, &s->list, list) {
 676		if (sigismember(mask, q->info.si_signo)) {
 677			list_del_init(&q->list);
 678			__sigqueue_free(q);
 679		}
 680	}
 681	return 1;
 682}
 683
 684static inline int is_si_special(const struct siginfo *info)
 685{
 686	return info <= SEND_SIG_FORCED;
 687}
 688
 689static inline bool si_fromuser(const struct siginfo *info)
 690{
 691	return info == SEND_SIG_NOINFO ||
 692		(!is_si_special(info) && SI_FROMUSER(info));
 693}
 694
 695/*
 696 * called with RCU read lock from check_kill_permission()
 697 */
 698static int kill_ok_by_cred(struct task_struct *t)
 699{
 700	const struct cred *cred = current_cred();
 701	const struct cred *tcred = __task_cred(t);
 702
 703	if (uid_eq(cred->euid, tcred->suid) ||
 704	    uid_eq(cred->euid, tcred->uid)  ||
 705	    uid_eq(cred->uid,  tcred->suid) ||
 706	    uid_eq(cred->uid,  tcred->uid))
 707		return 1;
 708
 709	if (ns_capable(tcred->user_ns, CAP_KILL))
 710		return 1;
 711
 712	return 0;
 713}
 714
 715/*
 716 * Bad permissions for sending the signal
 717 * - the caller must hold the RCU read lock
 718 */
 719static int check_kill_permission(int sig, struct siginfo *info,
 720				 struct task_struct *t)
 721{
 722	struct pid *sid;
 723	int error;
 724
 725	if (!valid_signal(sig))
 726		return -EINVAL;
 727
 728	if (!si_fromuser(info))
 729		return 0;
 730
 731	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 732	if (error)
 733		return error;
 734
 735	if (!same_thread_group(current, t) &&
 736	    !kill_ok_by_cred(t)) {
 737		switch (sig) {
 738		case SIGCONT:
 739			sid = task_session(t);
 740			/*
 741			 * We don't return the error if sid == NULL. The
 742			 * task was unhashed, the caller must notice this.
 743			 */
 744			if (!sid || sid == task_session(current))
 745				break;
 
 746		default:
 747			return -EPERM;
 748		}
 749	}
 750
 751	return security_task_kill(t, info, sig, 0);
 752}
 753
 754/**
 755 * ptrace_trap_notify - schedule trap to notify ptracer
 756 * @t: tracee wanting to notify tracer
 757 *
 758 * This function schedules sticky ptrace trap which is cleared on the next
 759 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 760 * ptracer.
 761 *
 762 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 763 * ptracer is listening for events, tracee is woken up so that it can
 764 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 765 * eventually taken without returning to userland after the existing traps
 766 * are finished by PTRACE_CONT.
 767 *
 768 * CONTEXT:
 769 * Must be called with @task->sighand->siglock held.
 770 */
 771static void ptrace_trap_notify(struct task_struct *t)
 772{
 773	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 774	assert_spin_locked(&t->sighand->siglock);
 775
 776	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 777	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 778}
 779
 780/*
 781 * Handle magic process-wide effects of stop/continue signals. Unlike
 782 * the signal actions, these happen immediately at signal-generation
 783 * time regardless of blocking, ignoring, or handling.  This does the
 784 * actual continuing for SIGCONT, but not the actual stopping for stop
 785 * signals. The process stop is done as a signal action for SIG_DFL.
 786 *
 787 * Returns true if the signal should be actually delivered, otherwise
 788 * it should be dropped.
 789 */
 790static bool prepare_signal(int sig, struct task_struct *p, bool force)
 791{
 792	struct signal_struct *signal = p->signal;
 793	struct task_struct *t;
 794	sigset_t flush;
 795
 796	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
 797		if (!(signal->flags & SIGNAL_GROUP_EXIT))
 798			return sig == SIGKILL;
 799		/*
 800		 * The process is in the middle of dying, nothing to do.
 801		 */
 
 802	} else if (sig_kernel_stop(sig)) {
 803		/*
 804		 * This is a stop signal.  Remove SIGCONT from all queues.
 805		 */
 806		siginitset(&flush, sigmask(SIGCONT));
 807		flush_sigqueue_mask(&flush, &signal->shared_pending);
 808		for_each_thread(p, t)
 809			flush_sigqueue_mask(&flush, &t->pending);
 810	} else if (sig == SIGCONT) {
 811		unsigned int why;
 812		/*
 813		 * Remove all stop signals from all queues, wake all threads.
 814		 */
 815		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 816		flush_sigqueue_mask(&flush, &signal->shared_pending);
 817		for_each_thread(p, t) {
 818			flush_sigqueue_mask(&flush, &t->pending);
 819			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 820			if (likely(!(t->ptrace & PT_SEIZED)))
 
 821				wake_up_state(t, __TASK_STOPPED);
 822			else
 823				ptrace_trap_notify(t);
 824		}
 825
 826		/*
 827		 * Notify the parent with CLD_CONTINUED if we were stopped.
 828		 *
 829		 * If we were in the middle of a group stop, we pretend it
 830		 * was already finished, and then continued. Since SIGCHLD
 831		 * doesn't queue we report only CLD_STOPPED, as if the next
 832		 * CLD_CONTINUED was dropped.
 833		 */
 834		why = 0;
 835		if (signal->flags & SIGNAL_STOP_STOPPED)
 836			why |= SIGNAL_CLD_CONTINUED;
 837		else if (signal->group_stop_count)
 838			why |= SIGNAL_CLD_STOPPED;
 839
 840		if (why) {
 841			/*
 842			 * The first thread which returns from do_signal_stop()
 843			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 844			 * notify its parent. See get_signal_to_deliver().
 845			 */
 846			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 847			signal->group_stop_count = 0;
 848			signal->group_exit_code = 0;
 849		}
 850	}
 851
 852	return !sig_ignored(p, sig, force);
 853}
 854
 855/*
 856 * Test if P wants to take SIG.  After we've checked all threads with this,
 857 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 858 * blocking SIG were ruled out because they are not running and already
 859 * have pending signals.  Such threads will dequeue from the shared queue
 860 * as soon as they're available, so putting the signal on the shared queue
 861 * will be equivalent to sending it to one such thread.
 862 */
 863static inline int wants_signal(int sig, struct task_struct *p)
 864{
 865	if (sigismember(&p->blocked, sig))
 866		return 0;
 
 867	if (p->flags & PF_EXITING)
 868		return 0;
 
 869	if (sig == SIGKILL)
 870		return 1;
 
 871	if (task_is_stopped_or_traced(p))
 872		return 0;
 873	return task_curr(p) || !signal_pending(p);
 
 874}
 875
 876static void complete_signal(int sig, struct task_struct *p, int group)
 877{
 878	struct signal_struct *signal = p->signal;
 879	struct task_struct *t;
 880
 881	/*
 882	 * Now find a thread we can wake up to take the signal off the queue.
 883	 *
 884	 * If the main thread wants the signal, it gets first crack.
 885	 * Probably the least surprising to the average bear.
 886	 */
 887	if (wants_signal(sig, p))
 888		t = p;
 889	else if (!group || thread_group_empty(p))
 890		/*
 891		 * There is just one thread and it does not need to be woken.
 892		 * It will dequeue unblocked signals before it runs again.
 893		 */
 894		return;
 895	else {
 896		/*
 897		 * Otherwise try to find a suitable thread.
 898		 */
 899		t = signal->curr_target;
 900		while (!wants_signal(sig, t)) {
 901			t = next_thread(t);
 902			if (t == signal->curr_target)
 903				/*
 904				 * No thread needs to be woken.
 905				 * Any eligible threads will see
 906				 * the signal in the queue soon.
 907				 */
 908				return;
 909		}
 910		signal->curr_target = t;
 911	}
 912
 913	/*
 914	 * Found a killable thread.  If the signal will be fatal,
 915	 * then start taking the whole group down immediately.
 916	 */
 917	if (sig_fatal(p, sig) &&
 918	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
 919	    !sigismember(&t->real_blocked, sig) &&
 920	    (sig == SIGKILL || !t->ptrace)) {
 921		/*
 922		 * This signal will be fatal to the whole group.
 923		 */
 924		if (!sig_kernel_coredump(sig)) {
 925			/*
 926			 * Start a group exit and wake everybody up.
 927			 * This way we don't have other threads
 928			 * running and doing things after a slower
 929			 * thread has the fatal signal pending.
 930			 */
 931			signal->flags = SIGNAL_GROUP_EXIT;
 932			signal->group_exit_code = sig;
 933			signal->group_stop_count = 0;
 934			t = p;
 935			do {
 936				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 937				sigaddset(&t->pending.signal, SIGKILL);
 938				signal_wake_up(t, 1);
 939			} while_each_thread(p, t);
 940			return;
 941		}
 942	}
 943
 944	/*
 945	 * The signal is already in the shared-pending queue.
 946	 * Tell the chosen thread to wake up and dequeue it.
 947	 */
 948	signal_wake_up(t, sig == SIGKILL);
 949	return;
 950}
 951
 952static inline int legacy_queue(struct sigpending *signals, int sig)
 953{
 954	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 955}
 956
 957#ifdef CONFIG_USER_NS
 958static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
 959{
 960	if (current_user_ns() == task_cred_xxx(t, user_ns))
 961		return;
 962
 963	if (SI_FROMKERNEL(info))
 964		return;
 965
 966	rcu_read_lock();
 967	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
 968					make_kuid(current_user_ns(), info->si_uid));
 969	rcu_read_unlock();
 970}
 971#else
 972static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
 973{
 974	return;
 975}
 976#endif
 977
 978static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 979			int group, int from_ancestor_ns)
 980{
 981	struct sigpending *pending;
 982	struct sigqueue *q;
 983	int override_rlimit;
 984	int ret = 0, result;
 985
 986	assert_spin_locked(&t->sighand->siglock);
 987
 988	result = TRACE_SIGNAL_IGNORED;
 989	if (!prepare_signal(sig, t,
 990			from_ancestor_ns || (info == SEND_SIG_FORCED)))
 991		goto ret;
 992
 993	pending = group ? &t->signal->shared_pending : &t->pending;
 994	/*
 995	 * Short-circuit ignored signals and support queuing
 996	 * exactly one non-rt signal, so that we can get more
 997	 * detailed information about the cause of the signal.
 998	 */
 999	result = TRACE_SIGNAL_ALREADY_PENDING;
1000	if (legacy_queue(pending, sig))
1001		goto ret;
1002
1003	result = TRACE_SIGNAL_DELIVERED;
1004	/*
1005	 * fast-pathed signals for kernel-internal things like SIGSTOP
1006	 * or SIGKILL.
1007	 */
1008	if (info == SEND_SIG_FORCED)
1009		goto out_set;
1010
1011	/*
1012	 * Real-time signals must be queued if sent by sigqueue, or
1013	 * some other real-time mechanism.  It is implementation
1014	 * defined whether kill() does so.  We attempt to do so, on
1015	 * the principle of least surprise, but since kill is not
1016	 * allowed to fail with EAGAIN when low on memory we just
1017	 * make sure at least one signal gets delivered and don't
1018	 * pass on the info struct.
1019	 */
1020	if (sig < SIGRTMIN)
1021		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1022	else
1023		override_rlimit = 0;
1024
1025	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1026		override_rlimit);
1027	if (q) {
1028		list_add_tail(&q->list, &pending->list);
1029		switch ((unsigned long) info) {
1030		case (unsigned long) SEND_SIG_NOINFO:
 
1031			q->info.si_signo = sig;
1032			q->info.si_errno = 0;
1033			q->info.si_code = SI_USER;
1034			q->info.si_pid = task_tgid_nr_ns(current,
1035							task_active_pid_ns(t));
1036			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
 
 
 
 
1037			break;
1038		case (unsigned long) SEND_SIG_PRIV:
 
1039			q->info.si_signo = sig;
1040			q->info.si_errno = 0;
1041			q->info.si_code = SI_KERNEL;
1042			q->info.si_pid = 0;
1043			q->info.si_uid = 0;
1044			break;
1045		default:
1046			copy_siginfo(&q->info, info);
1047			if (from_ancestor_ns)
1048				q->info.si_pid = 0;
1049			break;
1050		}
1051
1052		userns_fixup_signal_uid(&q->info, t);
1053
1054	} else if (!is_si_special(info)) {
1055		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1056			/*
1057			 * Queue overflow, abort.  We may abort if the
1058			 * signal was rt and sent by user using something
1059			 * other than kill().
1060			 */
1061			result = TRACE_SIGNAL_OVERFLOW_FAIL;
1062			ret = -EAGAIN;
1063			goto ret;
1064		} else {
1065			/*
1066			 * This is a silent loss of information.  We still
1067			 * send the signal, but the *info bits are lost.
1068			 */
1069			result = TRACE_SIGNAL_LOSE_INFO;
1070		}
1071	}
1072
1073out_set:
1074	signalfd_notify(t, sig);
1075	sigaddset(&pending->signal, sig);
1076	complete_signal(sig, t, group);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1077ret:
1078	trace_signal_generate(sig, info, t, group, result);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1079	return ret;
1080}
1081
1082static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1083			int group)
1084{
1085	int from_ancestor_ns = 0;
 
1086
1087#ifdef CONFIG_PID_NS
1088	from_ancestor_ns = si_fromuser(info) &&
1089			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1090#endif
 
 
 
 
 
1091
1092	return __send_signal(sig, info, t, group, from_ancestor_ns);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1093}
1094
1095static void print_fatal_signal(int signr)
1096{
1097	struct pt_regs *regs = signal_pt_regs();
1098	pr_info("potentially unexpected fatal signal %d.\n", signr);
1099
1100#if defined(__i386__) && !defined(__arch_um__)
1101	pr_info("code at %08lx: ", regs->ip);
1102	{
1103		int i;
1104		for (i = 0; i < 16; i++) {
1105			unsigned char insn;
1106
1107			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1108				break;
1109			pr_cont("%02x ", insn);
1110		}
1111	}
1112	pr_cont("\n");
1113#endif
1114	preempt_disable();
1115	show_regs(regs);
1116	preempt_enable();
1117}
1118
1119static int __init setup_print_fatal_signals(char *str)
1120{
1121	get_option (&str, &print_fatal_signals);
1122
1123	return 1;
1124}
1125
1126__setup("print-fatal-signals=", setup_print_fatal_signals);
1127
1128int
1129__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1130{
1131	return send_signal(sig, info, p, 1);
1132}
1133
1134static int
1135specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1136{
1137	return send_signal(sig, info, t, 0);
1138}
1139
1140int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1141			bool group)
1142{
1143	unsigned long flags;
1144	int ret = -ESRCH;
1145
1146	if (lock_task_sighand(p, &flags)) {
1147		ret = send_signal(sig, info, p, group);
1148		unlock_task_sighand(p, &flags);
1149	}
1150
1151	return ret;
1152}
1153
 
 
 
 
 
 
1154/*
1155 * Force a signal that the process can't ignore: if necessary
1156 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1157 *
1158 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1159 * since we do not want to have a signal handler that was blocked
1160 * be invoked when user space had explicitly blocked it.
1161 *
1162 * We don't want to have recursive SIGSEGV's etc, for example,
1163 * that is why we also clear SIGNAL_UNKILLABLE.
1164 */
1165int
1166force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 
1167{
1168	unsigned long int flags;
1169	int ret, blocked, ignored;
1170	struct k_sigaction *action;
 
1171
1172	spin_lock_irqsave(&t->sighand->siglock, flags);
1173	action = &t->sighand->action[sig-1];
1174	ignored = action->sa.sa_handler == SIG_IGN;
1175	blocked = sigismember(&t->blocked, sig);
1176	if (blocked || ignored) {
1177		action->sa.sa_handler = SIG_DFL;
 
 
1178		if (blocked) {
1179			sigdelset(&t->blocked, sig);
1180			recalc_sigpending_and_wake(t);
1181		}
1182	}
1183	if (action->sa.sa_handler == SIG_DFL)
 
 
 
 
 
1184		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1185	ret = specific_send_sig_info(sig, info, t);
1186	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1187
1188	return ret;
1189}
1190
 
 
 
 
 
1191/*
1192 * Nuke all other threads in the group.
1193 */
1194int zap_other_threads(struct task_struct *p)
1195{
1196	struct task_struct *t = p;
1197	int count = 0;
1198
1199	p->signal->group_stop_count = 0;
1200
1201	while_each_thread(p, t) {
1202		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1203		count++;
1204
1205		/* Don't bother with already dead threads */
1206		if (t->exit_state)
1207			continue;
1208		sigaddset(&t->pending.signal, SIGKILL);
1209		signal_wake_up(t, 1);
1210	}
1211
1212	return count;
1213}
1214
1215struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1216					   unsigned long *flags)
1217{
1218	struct sighand_struct *sighand;
1219
 
1220	for (;;) {
1221		/*
1222		 * Disable interrupts early to avoid deadlocks.
1223		 * See rcu_read_unlock() comment header for details.
1224		 */
1225		local_irq_save(*flags);
1226		rcu_read_lock();
1227		sighand = rcu_dereference(tsk->sighand);
1228		if (unlikely(sighand == NULL)) {
1229			rcu_read_unlock();
1230			local_irq_restore(*flags);
1231			break;
1232		}
1233		/*
1234		 * This sighand can be already freed and even reused, but
1235		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
1236		 * initializes ->siglock: this slab can't go away, it has
1237		 * the same object type, ->siglock can't be reinitialized.
1238		 *
1239		 * We need to ensure that tsk->sighand is still the same
1240		 * after we take the lock, we can race with de_thread() or
1241		 * __exit_signal(). In the latter case the next iteration
1242		 * must see ->sighand == NULL.
1243		 */
1244		spin_lock(&sighand->siglock);
1245		if (likely(sighand == tsk->sighand)) {
1246			rcu_read_unlock();
1247			break;
1248		}
1249		spin_unlock(&sighand->siglock);
1250		rcu_read_unlock();
1251		local_irq_restore(*flags);
1252	}
 
1253
1254	return sighand;
1255}
1256
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1257/*
1258 * send signal info to all the members of a group
1259 */
1260int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 
1261{
1262	int ret;
1263
1264	rcu_read_lock();
1265	ret = check_kill_permission(sig, info, p);
1266	rcu_read_unlock();
1267
1268	if (!ret && sig)
1269		ret = do_send_sig_info(sig, info, p, true);
1270
1271	return ret;
1272}
1273
1274/*
1275 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1276 * control characters do (^C, ^Z etc)
1277 * - the caller must hold at least a readlock on tasklist_lock
1278 */
1279int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1280{
1281	struct task_struct *p = NULL;
1282	int retval, success;
1283
1284	success = 0;
1285	retval = -ESRCH;
1286	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1287		int err = group_send_sig_info(sig, info, p);
1288		success |= !err;
1289		retval = err;
1290	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1291	return success ? 0 : retval;
1292}
1293
1294int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1295{
1296	int error = -ESRCH;
1297	struct task_struct *p;
1298
1299	for (;;) {
1300		rcu_read_lock();
1301		p = pid_task(pid, PIDTYPE_PID);
1302		if (p)
1303			error = group_send_sig_info(sig, info, p);
1304		rcu_read_unlock();
1305		if (likely(!p || error != -ESRCH))
1306			return error;
1307
1308		/*
1309		 * The task was unhashed in between, try again.  If it
1310		 * is dead, pid_task() will return NULL, if we race with
1311		 * de_thread() it will find the new leader.
1312		 */
1313	}
1314}
1315
1316int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1317{
1318	int error;
1319	rcu_read_lock();
1320	error = kill_pid_info(sig, info, find_vpid(pid));
1321	rcu_read_unlock();
1322	return error;
1323}
1324
1325static int kill_as_cred_perm(const struct cred *cred,
1326			     struct task_struct *target)
1327{
1328	const struct cred *pcred = __task_cred(target);
1329	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1330	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
1331		return 0;
1332	return 1;
 
1333}
1334
1335/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1336int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1337			 const struct cred *cred, u32 secid)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1338{
1339	int ret = -EINVAL;
1340	struct task_struct *p;
1341	unsigned long flags;
 
1342
1343	if (!valid_signal(sig))
1344		return ret;
1345
 
 
 
 
 
 
1346	rcu_read_lock();
1347	p = pid_task(pid, PIDTYPE_PID);
1348	if (!p) {
1349		ret = -ESRCH;
1350		goto out_unlock;
1351	}
1352	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1353		ret = -EPERM;
1354		goto out_unlock;
1355	}
1356	ret = security_task_kill(p, info, sig, secid);
1357	if (ret)
1358		goto out_unlock;
1359
1360	if (sig) {
1361		if (lock_task_sighand(p, &flags)) {
1362			ret = __send_signal(sig, info, p, 1, 0);
1363			unlock_task_sighand(p, &flags);
1364		} else
1365			ret = -ESRCH;
1366	}
1367out_unlock:
1368	rcu_read_unlock();
1369	return ret;
1370}
1371EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1372
1373/*
1374 * kill_something_info() interprets pid in interesting ways just like kill(2).
1375 *
1376 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1377 * is probably wrong.  Should make it like BSD or SYSV.
1378 */
1379
1380static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1381{
1382	int ret;
1383
1384	if (pid > 0) {
1385		rcu_read_lock();
1386		ret = kill_pid_info(sig, info, find_vpid(pid));
1387		rcu_read_unlock();
1388		return ret;
1389	}
1390
1391	read_lock(&tasklist_lock);
1392	if (pid != -1) {
1393		ret = __kill_pgrp_info(sig, info,
1394				pid ? find_vpid(-pid) : task_pgrp(current));
1395	} else {
1396		int retval = 0, count = 0;
1397		struct task_struct * p;
1398
1399		for_each_process(p) {
1400			if (task_pid_vnr(p) > 1 &&
1401					!same_thread_group(p, current)) {
1402				int err = group_send_sig_info(sig, info, p);
 
1403				++count;
1404				if (err != -EPERM)
1405					retval = err;
1406			}
1407		}
1408		ret = count ? retval : -ESRCH;
1409	}
1410	read_unlock(&tasklist_lock);
1411
1412	return ret;
1413}
1414
1415/*
1416 * These are for backward compatibility with the rest of the kernel source.
1417 */
1418
1419int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1420{
1421	/*
1422	 * Make sure legacy kernel users don't send in bad values
1423	 * (normal paths check this in check_kill_permission).
1424	 */
1425	if (!valid_signal(sig))
1426		return -EINVAL;
1427
1428	return do_send_sig_info(sig, info, p, false);
1429}
 
1430
1431#define __si_special(priv) \
1432	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1433
1434int
1435send_sig(int sig, struct task_struct *p, int priv)
1436{
1437	return send_sig_info(sig, __si_special(priv), p);
1438}
 
1439
1440void
1441force_sig(int sig, struct task_struct *p)
1442{
1443	force_sig_info(sig, SEND_SIG_PRIV, p);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1444}
1445
1446/*
1447 * When things go south during signal handling, we
1448 * will force a SIGSEGV. And if the signal that caused
1449 * the problem was already a SIGSEGV, we'll want to
1450 * make sure we don't even try to deliver the signal..
1451 */
1452int
1453force_sigsegv(int sig, struct task_struct *p)
1454{
1455	if (sig == SIGSEGV) {
1456		unsigned long flags;
1457		spin_lock_irqsave(&p->sighand->siglock, flags);
1458		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1459		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1460	}
1461	force_sig(SIGSEGV, p);
1462	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1463}
1464
1465int kill_pgrp(struct pid *pid, int sig, int priv)
1466{
1467	int ret;
1468
1469	read_lock(&tasklist_lock);
1470	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1471	read_unlock(&tasklist_lock);
1472
1473	return ret;
1474}
1475EXPORT_SYMBOL(kill_pgrp);
1476
1477int kill_pid(struct pid *pid, int sig, int priv)
1478{
1479	return kill_pid_info(sig, __si_special(priv), pid);
1480}
1481EXPORT_SYMBOL(kill_pid);
1482
1483/*
1484 * These functions support sending signals using preallocated sigqueue
1485 * structures.  This is needed "because realtime applications cannot
1486 * afford to lose notifications of asynchronous events, like timer
1487 * expirations or I/O completions".  In the case of POSIX Timers
1488 * we allocate the sigqueue structure from the timer_create.  If this
1489 * allocation fails we are able to report the failure to the application
1490 * with an EAGAIN error.
1491 */
1492struct sigqueue *sigqueue_alloc(void)
1493{
1494	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1495
1496	if (q)
1497		q->flags |= SIGQUEUE_PREALLOC;
1498
1499	return q;
1500}
1501
1502void sigqueue_free(struct sigqueue *q)
1503{
1504	unsigned long flags;
1505	spinlock_t *lock = &current->sighand->siglock;
1506
1507	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1508	/*
1509	 * We must hold ->siglock while testing q->list
1510	 * to serialize with collect_signal() or with
1511	 * __exit_signal()->flush_sigqueue().
1512	 */
1513	spin_lock_irqsave(lock, flags);
1514	q->flags &= ~SIGQUEUE_PREALLOC;
1515	/*
1516	 * If it is queued it will be freed when dequeued,
1517	 * like the "regular" sigqueue.
1518	 */
1519	if (!list_empty(&q->list))
1520		q = NULL;
1521	spin_unlock_irqrestore(lock, flags);
1522
1523	if (q)
1524		__sigqueue_free(q);
1525}
1526
1527int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1528{
1529	int sig = q->info.si_signo;
1530	struct sigpending *pending;
 
1531	unsigned long flags;
1532	int ret, result;
1533
1534	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1535
1536	ret = -1;
1537	if (!likely(lock_task_sighand(t, &flags)))
 
 
1538		goto ret;
1539
1540	ret = 1; /* the signal is ignored */
1541	result = TRACE_SIGNAL_IGNORED;
1542	if (!prepare_signal(sig, t, false))
1543		goto out;
1544
1545	ret = 0;
1546	if (unlikely(!list_empty(&q->list))) {
1547		/*
1548		 * If an SI_TIMER entry is already queue just increment
1549		 * the overrun count.
1550		 */
1551		BUG_ON(q->info.si_code != SI_TIMER);
1552		q->info.si_overrun++;
1553		result = TRACE_SIGNAL_ALREADY_PENDING;
1554		goto out;
1555	}
1556	q->info.si_overrun = 0;
1557
1558	signalfd_notify(t, sig);
1559	pending = group ? &t->signal->shared_pending : &t->pending;
1560	list_add_tail(&q->list, &pending->list);
1561	sigaddset(&pending->signal, sig);
1562	complete_signal(sig, t, group);
1563	result = TRACE_SIGNAL_DELIVERED;
1564out:
1565	trace_signal_generate(sig, &q->info, t, group, result);
1566	unlock_task_sighand(t, &flags);
1567ret:
 
1568	return ret;
1569}
1570
 
 
 
 
 
 
 
 
 
1571/*
1572 * Let a parent know about the death of a child.
1573 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1574 *
1575 * Returns true if our parent ignored us and so we've switched to
1576 * self-reaping.
1577 */
1578bool do_notify_parent(struct task_struct *tsk, int sig)
1579{
1580	struct siginfo info;
1581	unsigned long flags;
1582	struct sighand_struct *psig;
1583	bool autoreap = false;
1584	cputime_t utime, stime;
1585
1586	BUG_ON(sig == -1);
1587
1588 	/* do_notify_parent_cldstop should have been called instead.  */
1589 	BUG_ON(task_is_stopped_or_traced(tsk));
1590
1591	BUG_ON(!tsk->ptrace &&
1592	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1593
 
 
 
1594	if (sig != SIGCHLD) {
1595		/*
1596		 * This is only possible if parent == real_parent.
1597		 * Check if it has changed security domain.
1598		 */
1599		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1600			sig = SIGCHLD;
1601	}
1602
 
1603	info.si_signo = sig;
1604	info.si_errno = 0;
1605	/*
1606	 * We are under tasklist_lock here so our parent is tied to
1607	 * us and cannot change.
1608	 *
1609	 * task_active_pid_ns will always return the same pid namespace
1610	 * until a task passes through release_task.
1611	 *
1612	 * write_lock() currently calls preempt_disable() which is the
1613	 * same as rcu_read_lock(), but according to Oleg, this is not
1614	 * correct to rely on this
1615	 */
1616	rcu_read_lock();
1617	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1618	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1619				       task_uid(tsk));
1620	rcu_read_unlock();
1621
1622	task_cputime(tsk, &utime, &stime);
1623	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1624	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1625
1626	info.si_status = tsk->exit_code & 0x7f;
1627	if (tsk->exit_code & 0x80)
1628		info.si_code = CLD_DUMPED;
1629	else if (tsk->exit_code & 0x7f)
1630		info.si_code = CLD_KILLED;
1631	else {
1632		info.si_code = CLD_EXITED;
1633		info.si_status = tsk->exit_code >> 8;
1634	}
1635
1636	psig = tsk->parent->sighand;
1637	spin_lock_irqsave(&psig->siglock, flags);
1638	if (!tsk->ptrace && sig == SIGCHLD &&
1639	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1640	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1641		/*
1642		 * We are exiting and our parent doesn't care.  POSIX.1
1643		 * defines special semantics for setting SIGCHLD to SIG_IGN
1644		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1645		 * automatically and not left for our parent's wait4 call.
1646		 * Rather than having the parent do it as a magic kind of
1647		 * signal handler, we just set this to tell do_exit that we
1648		 * can be cleaned up without becoming a zombie.  Note that
1649		 * we still call __wake_up_parent in this case, because a
1650		 * blocked sys_wait4 might now return -ECHILD.
1651		 *
1652		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1653		 * is implementation-defined: we do (if you don't want
1654		 * it, just use SIG_IGN instead).
1655		 */
1656		autoreap = true;
1657		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1658			sig = 0;
1659	}
 
 
 
 
1660	if (valid_signal(sig) && sig)
1661		__group_send_sig_info(sig, &info, tsk->parent);
1662	__wake_up_parent(tsk, tsk->parent);
1663	spin_unlock_irqrestore(&psig->siglock, flags);
1664
1665	return autoreap;
1666}
1667
1668/**
1669 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1670 * @tsk: task reporting the state change
1671 * @for_ptracer: the notification is for ptracer
1672 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1673 *
1674 * Notify @tsk's parent that the stopped/continued state has changed.  If
1675 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1676 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1677 *
1678 * CONTEXT:
1679 * Must be called with tasklist_lock at least read locked.
1680 */
1681static void do_notify_parent_cldstop(struct task_struct *tsk,
1682				     bool for_ptracer, int why)
1683{
1684	struct siginfo info;
1685	unsigned long flags;
1686	struct task_struct *parent;
1687	struct sighand_struct *sighand;
1688	cputime_t utime, stime;
1689
1690	if (for_ptracer) {
1691		parent = tsk->parent;
1692	} else {
1693		tsk = tsk->group_leader;
1694		parent = tsk->real_parent;
1695	}
1696
 
1697	info.si_signo = SIGCHLD;
1698	info.si_errno = 0;
1699	/*
1700	 * see comment in do_notify_parent() about the following 4 lines
1701	 */
1702	rcu_read_lock();
1703	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1704	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1705	rcu_read_unlock();
1706
1707	task_cputime(tsk, &utime, &stime);
1708	info.si_utime = cputime_to_clock_t(utime);
1709	info.si_stime = cputime_to_clock_t(stime);
1710
1711 	info.si_code = why;
1712 	switch (why) {
1713 	case CLD_CONTINUED:
1714 		info.si_status = SIGCONT;
1715 		break;
1716 	case CLD_STOPPED:
1717 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1718 		break;
1719 	case CLD_TRAPPED:
1720 		info.si_status = tsk->exit_code & 0x7f;
1721 		break;
1722 	default:
1723 		BUG();
1724 	}
1725
1726	sighand = parent->sighand;
1727	spin_lock_irqsave(&sighand->siglock, flags);
1728	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1729	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1730		__group_send_sig_info(SIGCHLD, &info, parent);
1731	/*
1732	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1733	 */
1734	__wake_up_parent(tsk, parent);
1735	spin_unlock_irqrestore(&sighand->siglock, flags);
1736}
1737
1738static inline int may_ptrace_stop(void)
1739{
1740	if (!likely(current->ptrace))
1741		return 0;
1742	/*
1743	 * Are we in the middle of do_coredump?
1744	 * If so and our tracer is also part of the coredump stopping
1745	 * is a deadlock situation, and pointless because our tracer
1746	 * is dead so don't allow us to stop.
1747	 * If SIGKILL was already sent before the caller unlocked
1748	 * ->siglock we must see ->core_state != NULL. Otherwise it
1749	 * is safe to enter schedule().
1750	 *
1751	 * This is almost outdated, a task with the pending SIGKILL can't
1752	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1753	 * after SIGKILL was already dequeued.
1754	 */
1755	if (unlikely(current->mm->core_state) &&
1756	    unlikely(current->mm == current->parent->mm))
1757		return 0;
1758
1759	return 1;
1760}
1761
1762/*
1763 * Return non-zero if there is a SIGKILL that should be waking us up.
1764 * Called with the siglock held.
1765 */
1766static int sigkill_pending(struct task_struct *tsk)
1767{
1768	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1769		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1770}
1771
1772/*
1773 * This must be called with current->sighand->siglock held.
1774 *
1775 * This should be the path for all ptrace stops.
1776 * We always set current->last_siginfo while stopped here.
1777 * That makes it a way to test a stopped process for
1778 * being ptrace-stopped vs being job-control-stopped.
1779 *
1780 * If we actually decide not to stop at all because the tracer
1781 * is gone, we keep current->exit_code unless clear_code.
 
1782 */
1783static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 
1784	__releases(&current->sighand->siglock)
1785	__acquires(&current->sighand->siglock)
1786{
1787	bool gstop_done = false;
1788
1789	if (arch_ptrace_stop_needed(exit_code, info)) {
1790		/*
1791		 * The arch code has something special to do before a
1792		 * ptrace stop.  This is allowed to block, e.g. for faults
1793		 * on user stack pages.  We can't keep the siglock while
1794		 * calling arch_ptrace_stop, so we must release it now.
1795		 * To preserve proper semantics, we must do this before
1796		 * any signal bookkeeping like checking group_stop_count.
1797		 * Meanwhile, a SIGKILL could come in before we retake the
1798		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1799		 * So after regaining the lock, we must check for SIGKILL.
1800		 */
1801		spin_unlock_irq(&current->sighand->siglock);
1802		arch_ptrace_stop(exit_code, info);
1803		spin_lock_irq(&current->sighand->siglock);
1804		if (sigkill_pending(current))
1805			return;
1806	}
1807
1808	/*
 
 
 
 
 
 
 
 
 
 
 
 
1809	 * We're committing to trapping.  TRACED should be visible before
1810	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1811	 * Also, transition to TRACED and updates to ->jobctl should be
1812	 * atomic with respect to siglock and should be done after the arch
1813	 * hook as siglock is released and regrabbed across it.
 
 
 
 
 
 
 
 
 
 
 
1814	 */
1815	set_current_state(TASK_TRACED);
1816
 
1817	current->last_siginfo = info;
1818	current->exit_code = exit_code;
1819
1820	/*
1821	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1822	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
1823	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1824	 * could be clear now.  We act as if SIGCONT is received after
1825	 * TASK_TRACED is entered - ignore it.
1826	 */
1827	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1828		gstop_done = task_participate_group_stop(current);
1829
1830	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1831	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1832	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1833		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1834
1835	/* entering a trap, clear TRAPPING */
1836	task_clear_jobctl_trapping(current);
1837
1838	spin_unlock_irq(&current->sighand->siglock);
1839	read_lock(&tasklist_lock);
1840	if (may_ptrace_stop()) {
1841		/*
1842		 * Notify parents of the stop.
1843		 *
1844		 * While ptraced, there are two parents - the ptracer and
1845		 * the real_parent of the group_leader.  The ptracer should
1846		 * know about every stop while the real parent is only
1847		 * interested in the completion of group stop.  The states
1848		 * for the two don't interact with each other.  Notify
1849		 * separately unless they're gonna be duplicates.
1850		 */
1851		do_notify_parent_cldstop(current, true, why);
1852		if (gstop_done && ptrace_reparented(current))
1853			do_notify_parent_cldstop(current, false, why);
1854
1855		/*
1856		 * Don't want to allow preemption here, because
1857		 * sys_ptrace() needs this task to be inactive.
1858		 *
1859		 * XXX: implement read_unlock_no_resched().
1860		 */
1861		preempt_disable();
1862		read_unlock(&tasklist_lock);
1863		preempt_enable_no_resched();
1864		freezable_schedule();
1865	} else {
1866		/*
1867		 * By the time we got the lock, our tracer went away.
1868		 * Don't drop the lock yet, another tracer may come.
1869		 *
1870		 * If @gstop_done, the ptracer went away between group stop
1871		 * completion and here.  During detach, it would have set
1872		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1873		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1874		 * the real parent of the group stop completion is enough.
1875		 */
1876		if (gstop_done)
1877			do_notify_parent_cldstop(current, false, why);
1878
1879		/* tasklist protects us from ptrace_freeze_traced() */
1880		__set_current_state(TASK_RUNNING);
1881		if (clear_code)
1882			current->exit_code = 0;
1883		read_unlock(&tasklist_lock);
1884	}
1885
1886	/*
1887	 * We are back.  Now reacquire the siglock before touching
1888	 * last_siginfo, so that we are sure to have synchronized with
1889	 * any signal-sending on another CPU that wants to examine it.
1890	 */
1891	spin_lock_irq(&current->sighand->siglock);
 
1892	current->last_siginfo = NULL;
 
 
1893
1894	/* LISTENING can be set only during STOP traps, clear it */
1895	current->jobctl &= ~JOBCTL_LISTENING;
1896
1897	/*
1898	 * Queued signals ignored us while we were stopped for tracing.
1899	 * So check for any that we should take before resuming user mode.
1900	 * This sets TIF_SIGPENDING, but never clears it.
1901	 */
1902	recalc_sigpending_tsk(current);
 
1903}
1904
1905static void ptrace_do_notify(int signr, int exit_code, int why)
1906{
1907	siginfo_t info;
1908
1909	memset(&info, 0, sizeof info);
1910	info.si_signo = signr;
1911	info.si_code = exit_code;
1912	info.si_pid = task_pid_vnr(current);
1913	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1914
1915	/* Let the debugger run.  */
1916	ptrace_stop(exit_code, why, 1, &info);
1917}
1918
1919void ptrace_notify(int exit_code)
1920{
 
 
1921	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1922	if (unlikely(current->task_works))
1923		task_work_run();
1924
1925	spin_lock_irq(&current->sighand->siglock);
1926	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1927	spin_unlock_irq(&current->sighand->siglock);
 
1928}
1929
1930/**
1931 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1932 * @signr: signr causing group stop if initiating
1933 *
1934 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1935 * and participate in it.  If already set, participate in the existing
1936 * group stop.  If participated in a group stop (and thus slept), %true is
1937 * returned with siglock released.
1938 *
1939 * If ptraced, this function doesn't handle stop itself.  Instead,
1940 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1941 * untouched.  The caller must ensure that INTERRUPT trap handling takes
1942 * places afterwards.
1943 *
1944 * CONTEXT:
1945 * Must be called with @current->sighand->siglock held, which is released
1946 * on %true return.
1947 *
1948 * RETURNS:
1949 * %false if group stop is already cancelled or ptrace trap is scheduled.
1950 * %true if participated in group stop.
1951 */
1952static bool do_signal_stop(int signr)
1953	__releases(&current->sighand->siglock)
1954{
1955	struct signal_struct *sig = current->signal;
1956
1957	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1958		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1959		struct task_struct *t;
1960
1961		/* signr will be recorded in task->jobctl for retries */
1962		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1963
1964		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1965		    unlikely(signal_group_exit(sig)))
 
1966			return false;
1967		/*
1968		 * There is no group stop already in progress.  We must
1969		 * initiate one now.
1970		 *
1971		 * While ptraced, a task may be resumed while group stop is
1972		 * still in effect and then receive a stop signal and
1973		 * initiate another group stop.  This deviates from the
1974		 * usual behavior as two consecutive stop signals can't
1975		 * cause two group stops when !ptraced.  That is why we
1976		 * also check !task_is_stopped(t) below.
1977		 *
1978		 * The condition can be distinguished by testing whether
1979		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
1980		 * group_exit_code in such case.
1981		 *
1982		 * This is not necessary for SIGNAL_STOP_CONTINUED because
1983		 * an intervening stop signal is required to cause two
1984		 * continued events regardless of ptrace.
1985		 */
1986		if (!(sig->flags & SIGNAL_STOP_STOPPED))
1987			sig->group_exit_code = signr;
1988
1989		sig->group_stop_count = 0;
1990
1991		if (task_set_jobctl_pending(current, signr | gstop))
1992			sig->group_stop_count++;
1993
1994		t = current;
1995		while_each_thread(current, t) {
1996			/*
1997			 * Setting state to TASK_STOPPED for a group
1998			 * stop is always done with the siglock held,
1999			 * so this check has no races.
2000			 */
2001			if (!task_is_stopped(t) &&
2002			    task_set_jobctl_pending(t, signr | gstop)) {
2003				sig->group_stop_count++;
2004				if (likely(!(t->ptrace & PT_SEIZED)))
2005					signal_wake_up(t, 0);
2006				else
2007					ptrace_trap_notify(t);
2008			}
2009		}
2010	}
2011
2012	if (likely(!current->ptrace)) {
2013		int notify = 0;
2014
2015		/*
2016		 * If there are no other threads in the group, or if there
2017		 * is a group stop in progress and we are the last to stop,
2018		 * report to the parent.
2019		 */
2020		if (task_participate_group_stop(current))
2021			notify = CLD_STOPPED;
2022
2023		__set_current_state(TASK_STOPPED);
 
2024		spin_unlock_irq(&current->sighand->siglock);
2025
2026		/*
2027		 * Notify the parent of the group stop completion.  Because
2028		 * we're not holding either the siglock or tasklist_lock
2029		 * here, ptracer may attach inbetween; however, this is for
2030		 * group stop and should always be delivered to the real
2031		 * parent of the group leader.  The new ptracer will get
2032		 * its notification when this task transitions into
2033		 * TASK_TRACED.
2034		 */
2035		if (notify) {
2036			read_lock(&tasklist_lock);
2037			do_notify_parent_cldstop(current, false, notify);
2038			read_unlock(&tasklist_lock);
2039		}
2040
2041		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2042		freezable_schedule();
 
2043		return true;
2044	} else {
2045		/*
2046		 * While ptraced, group stop is handled by STOP trap.
2047		 * Schedule it and let the caller deal with it.
2048		 */
2049		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2050		return false;
2051	}
2052}
2053
2054/**
2055 * do_jobctl_trap - take care of ptrace jobctl traps
2056 *
2057 * When PT_SEIZED, it's used for both group stop and explicit
2058 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2059 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2060 * the stop signal; otherwise, %SIGTRAP.
2061 *
2062 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2063 * number as exit_code and no siginfo.
2064 *
2065 * CONTEXT:
2066 * Must be called with @current->sighand->siglock held, which may be
2067 * released and re-acquired before returning with intervening sleep.
2068 */
2069static void do_jobctl_trap(void)
2070{
2071	struct signal_struct *signal = current->signal;
2072	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2073
2074	if (current->ptrace & PT_SEIZED) {
2075		if (!signal->group_stop_count &&
2076		    !(signal->flags & SIGNAL_STOP_STOPPED))
2077			signr = SIGTRAP;
2078		WARN_ON_ONCE(!signr);
2079		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2080				 CLD_STOPPED);
2081	} else {
2082		WARN_ON_ONCE(!signr);
2083		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2084		current->exit_code = 0;
2085	}
2086}
2087
2088static int ptrace_signal(int signr, siginfo_t *info)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2089{
2090	ptrace_signal_deliver();
2091	/*
2092	 * We do not check sig_kernel_stop(signr) but set this marker
2093	 * unconditionally because we do not know whether debugger will
2094	 * change signr. This flag has no meaning unless we are going
2095	 * to stop after return from ptrace_stop(). In this case it will
2096	 * be checked in do_signal_stop(), we should only stop if it was
2097	 * not cleared by SIGCONT while we were sleeping. See also the
2098	 * comment in dequeue_signal().
2099	 */
2100	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2101	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2102
2103	/* We're back.  Did the debugger cancel the sig?  */
2104	signr = current->exit_code;
2105	if (signr == 0)
2106		return signr;
2107
2108	current->exit_code = 0;
2109
2110	/*
2111	 * Update the siginfo structure if the signal has
2112	 * changed.  If the debugger wanted something
2113	 * specific in the siginfo structure then it should
2114	 * have updated *info via PTRACE_SETSIGINFO.
2115	 */
2116	if (signr != info->si_signo) {
 
2117		info->si_signo = signr;
2118		info->si_errno = 0;
2119		info->si_code = SI_USER;
2120		rcu_read_lock();
2121		info->si_pid = task_pid_vnr(current->parent);
2122		info->si_uid = from_kuid_munged(current_user_ns(),
2123						task_uid(current->parent));
2124		rcu_read_unlock();
2125	}
2126
2127	/* If the (new) signal is now blocked, requeue it.  */
2128	if (sigismember(&current->blocked, signr)) {
2129		specific_send_sig_info(signr, info, current);
 
2130		signr = 0;
2131	}
2132
2133	return signr;
2134}
2135
2136int get_signal(struct ksignal *ksig)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2137{
2138	struct sighand_struct *sighand = current->sighand;
2139	struct signal_struct *signal = current->signal;
2140	int signr;
2141
2142	if (unlikely(current->task_works))
 
2143		task_work_run();
2144
 
 
 
2145	if (unlikely(uprobe_deny_signal()))
2146		return 0;
2147
2148	/*
2149	 * Do this once, we can't return to user-mode if freezing() == T.
2150	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2151	 * thus do not need another check after return.
2152	 */
2153	try_to_freeze();
2154
2155relock:
2156	spin_lock_irq(&sighand->siglock);
 
2157	/*
2158	 * Every stopped thread goes here after wakeup. Check to see if
2159	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2160	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2161	 */
2162	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2163		int why;
2164
2165		if (signal->flags & SIGNAL_CLD_CONTINUED)
2166			why = CLD_CONTINUED;
2167		else
2168			why = CLD_STOPPED;
2169
2170		signal->flags &= ~SIGNAL_CLD_MASK;
2171
2172		spin_unlock_irq(&sighand->siglock);
2173
2174		/*
2175		 * Notify the parent that we're continuing.  This event is
2176		 * always per-process and doesn't make whole lot of sense
2177		 * for ptracers, who shouldn't consume the state via
2178		 * wait(2) either, but, for backward compatibility, notify
2179		 * the ptracer of the group leader too unless it's gonna be
2180		 * a duplicate.
2181		 */
2182		read_lock(&tasklist_lock);
2183		do_notify_parent_cldstop(current, false, why);
2184
2185		if (ptrace_reparented(current->group_leader))
2186			do_notify_parent_cldstop(current->group_leader,
2187						true, why);
2188		read_unlock(&tasklist_lock);
2189
2190		goto relock;
2191	}
2192
2193	for (;;) {
2194		struct k_sigaction *ka;
 
 
 
 
 
 
 
 
 
 
 
 
 
2195
2196		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2197		    do_signal_stop(0))
2198			goto relock;
2199
2200		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2201			do_jobctl_trap();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2202			spin_unlock_irq(&sighand->siglock);
 
2203			goto relock;
2204		}
2205
2206		signr = dequeue_signal(current, &current->blocked, &ksig->info);
 
 
 
 
 
 
 
 
 
 
2207
2208		if (!signr)
2209			break; /* will return 0 */
2210
2211		if (unlikely(current->ptrace) && signr != SIGKILL) {
2212			signr = ptrace_signal(signr, &ksig->info);
 
2213			if (!signr)
2214				continue;
2215		}
2216
2217		ka = &sighand->action[signr-1];
2218
2219		/* Trace actually delivered signals. */
2220		trace_signal_deliver(signr, &ksig->info, ka);
2221
2222		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2223			continue;
2224		if (ka->sa.sa_handler != SIG_DFL) {
2225			/* Run the handler.  */
2226			ksig->ka = *ka;
2227
2228			if (ka->sa.sa_flags & SA_ONESHOT)
2229				ka->sa.sa_handler = SIG_DFL;
2230
2231			break; /* will return non-zero "signr" value */
2232		}
2233
2234		/*
2235		 * Now we are doing the default action for this signal.
2236		 */
2237		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2238			continue;
2239
2240		/*
2241		 * Global init gets no signals it doesn't want.
2242		 * Container-init gets no signals it doesn't want from same
2243		 * container.
2244		 *
2245		 * Note that if global/container-init sees a sig_kernel_only()
2246		 * signal here, the signal must have been generated internally
2247		 * or must have come from an ancestor namespace. In either
2248		 * case, the signal cannot be dropped.
2249		 */
2250		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2251				!sig_kernel_only(signr))
2252			continue;
2253
2254		if (sig_kernel_stop(signr)) {
2255			/*
2256			 * The default action is to stop all threads in
2257			 * the thread group.  The job control signals
2258			 * do nothing in an orphaned pgrp, but SIGSTOP
2259			 * always works.  Note that siglock needs to be
2260			 * dropped during the call to is_orphaned_pgrp()
2261			 * because of lock ordering with tasklist_lock.
2262			 * This allows an intervening SIGCONT to be posted.
2263			 * We need to check for that and bail out if necessary.
2264			 */
2265			if (signr != SIGSTOP) {
2266				spin_unlock_irq(&sighand->siglock);
2267
2268				/* signals can be posted during this window */
2269
2270				if (is_current_pgrp_orphaned())
2271					goto relock;
2272
2273				spin_lock_irq(&sighand->siglock);
2274			}
2275
2276			if (likely(do_signal_stop(ksig->info.si_signo))) {
2277				/* It released the siglock.  */
2278				goto relock;
2279			}
2280
2281			/*
2282			 * We didn't actually stop, due to a race
2283			 * with SIGCONT or something like that.
2284			 */
2285			continue;
2286		}
2287
 
2288		spin_unlock_irq(&sighand->siglock);
 
 
2289
2290		/*
2291		 * Anything else is fatal, maybe with a core dump.
2292		 */
2293		current->flags |= PF_SIGNALED;
2294
2295		if (sig_kernel_coredump(signr)) {
2296			if (print_fatal_signals)
2297				print_fatal_signal(ksig->info.si_signo);
2298			proc_coredump_connector(current);
2299			/*
2300			 * If it was able to dump core, this kills all
2301			 * other threads in the group and synchronizes with
2302			 * their demise.  If we lost the race with another
2303			 * thread getting here, it set group_exit_code
2304			 * first and our do_group_exit call below will use
2305			 * that value and ignore the one we pass it.
2306			 */
2307			do_coredump(&ksig->info);
2308		}
2309
2310		/*
 
 
 
 
 
 
 
 
2311		 * Death signals, no core dump.
2312		 */
2313		do_group_exit(ksig->info.si_signo);
2314		/* NOTREACHED */
2315	}
2316	spin_unlock_irq(&sighand->siglock);
2317
2318	ksig->sig = signr;
 
 
 
 
2319	return ksig->sig > 0;
2320}
2321
2322/**
2323 * signal_delivered - 
2324 * @ksig:		kernel signal struct
2325 * @stepping:		nonzero if debugger single-step or block-step in use
2326 *
2327 * This function should be called when a signal has successfully been
2328 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2329 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2330 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
2331 */
2332static void signal_delivered(struct ksignal *ksig, int stepping)
2333{
2334	sigset_t blocked;
2335
2336	/* A signal was successfully delivered, and the
2337	   saved sigmask was stored on the signal frame,
2338	   and will be restored by sigreturn.  So we can
2339	   simply clear the restore sigmask flag.  */
2340	clear_restore_sigmask();
2341
2342	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2343	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2344		sigaddset(&blocked, ksig->sig);
2345	set_current_blocked(&blocked);
2346	tracehook_signal_handler(stepping);
 
 
 
2347}
2348
2349void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2350{
2351	if (failed)
2352		force_sigsegv(ksig->sig, current);
2353	else
2354		signal_delivered(ksig, stepping);
2355}
2356
2357/*
2358 * It could be that complete_signal() picked us to notify about the
2359 * group-wide signal. Other threads should be notified now to take
2360 * the shared signals in @which since we will not.
2361 */
2362static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2363{
2364	sigset_t retarget;
2365	struct task_struct *t;
2366
2367	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2368	if (sigisemptyset(&retarget))
2369		return;
2370
2371	t = tsk;
2372	while_each_thread(tsk, t) {
2373		if (t->flags & PF_EXITING)
2374			continue;
2375
2376		if (!has_pending_signals(&retarget, &t->blocked))
2377			continue;
2378		/* Remove the signals this thread can handle. */
2379		sigandsets(&retarget, &retarget, &t->blocked);
2380
2381		if (!signal_pending(t))
2382			signal_wake_up(t, 0);
2383
2384		if (sigisemptyset(&retarget))
2385			break;
2386	}
2387}
2388
2389void exit_signals(struct task_struct *tsk)
2390{
2391	int group_stop = 0;
2392	sigset_t unblocked;
2393
2394	/*
2395	 * @tsk is about to have PF_EXITING set - lock out users which
2396	 * expect stable threadgroup.
2397	 */
2398	threadgroup_change_begin(tsk);
2399
2400	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2401		tsk->flags |= PF_EXITING;
2402		threadgroup_change_end(tsk);
2403		return;
2404	}
2405
2406	spin_lock_irq(&tsk->sighand->siglock);
2407	/*
2408	 * From now this task is not visible for group-wide signals,
2409	 * see wants_signal(), do_signal_stop().
2410	 */
2411	tsk->flags |= PF_EXITING;
2412
2413	threadgroup_change_end(tsk);
2414
2415	if (!signal_pending(tsk))
2416		goto out;
2417
2418	unblocked = tsk->blocked;
2419	signotset(&unblocked);
2420	retarget_shared_pending(tsk, &unblocked);
2421
2422	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2423	    task_participate_group_stop(tsk))
2424		group_stop = CLD_STOPPED;
2425out:
2426	spin_unlock_irq(&tsk->sighand->siglock);
2427
2428	/*
2429	 * If group stop has completed, deliver the notification.  This
2430	 * should always go to the real parent of the group leader.
2431	 */
2432	if (unlikely(group_stop)) {
2433		read_lock(&tasklist_lock);
2434		do_notify_parent_cldstop(tsk, false, group_stop);
2435		read_unlock(&tasklist_lock);
2436	}
2437}
2438
2439EXPORT_SYMBOL(recalc_sigpending);
2440EXPORT_SYMBOL_GPL(dequeue_signal);
2441EXPORT_SYMBOL(flush_signals);
2442EXPORT_SYMBOL(force_sig);
2443EXPORT_SYMBOL(send_sig);
2444EXPORT_SYMBOL(send_sig_info);
2445EXPORT_SYMBOL(sigprocmask);
2446
2447/*
2448 * System call entry points.
2449 */
2450
2451/**
2452 *  sys_restart_syscall - restart a system call
2453 */
2454SYSCALL_DEFINE0(restart_syscall)
2455{
2456	struct restart_block *restart = &current->restart_block;
2457	return restart->fn(restart);
2458}
2459
2460long do_no_restart_syscall(struct restart_block *param)
2461{
2462	return -EINTR;
2463}
2464
2465static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2466{
2467	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2468		sigset_t newblocked;
2469		/* A set of now blocked but previously unblocked signals. */
2470		sigandnsets(&newblocked, newset, &current->blocked);
2471		retarget_shared_pending(tsk, &newblocked);
2472	}
2473	tsk->blocked = *newset;
2474	recalc_sigpending();
2475}
2476
2477/**
2478 * set_current_blocked - change current->blocked mask
2479 * @newset: new mask
2480 *
2481 * It is wrong to change ->blocked directly, this helper should be used
2482 * to ensure the process can't miss a shared signal we are going to block.
2483 */
2484void set_current_blocked(sigset_t *newset)
2485{
2486	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2487	__set_current_blocked(newset);
2488}
2489
2490void __set_current_blocked(const sigset_t *newset)
2491{
2492	struct task_struct *tsk = current;
2493
2494	/*
2495	 * In case the signal mask hasn't changed, there is nothing we need
2496	 * to do. The current->blocked shouldn't be modified by other task.
2497	 */
2498	if (sigequalsets(&tsk->blocked, newset))
2499		return;
2500
2501	spin_lock_irq(&tsk->sighand->siglock);
2502	__set_task_blocked(tsk, newset);
2503	spin_unlock_irq(&tsk->sighand->siglock);
2504}
2505
2506/*
2507 * This is also useful for kernel threads that want to temporarily
2508 * (or permanently) block certain signals.
2509 *
2510 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2511 * interface happily blocks "unblockable" signals like SIGKILL
2512 * and friends.
2513 */
2514int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2515{
2516	struct task_struct *tsk = current;
2517	sigset_t newset;
2518
2519	/* Lockless, only current can change ->blocked, never from irq */
2520	if (oldset)
2521		*oldset = tsk->blocked;
2522
2523	switch (how) {
2524	case SIG_BLOCK:
2525		sigorsets(&newset, &tsk->blocked, set);
2526		break;
2527	case SIG_UNBLOCK:
2528		sigandnsets(&newset, &tsk->blocked, set);
2529		break;
2530	case SIG_SETMASK:
2531		newset = *set;
2532		break;
2533	default:
2534		return -EINVAL;
2535	}
2536
2537	__set_current_blocked(&newset);
2538	return 0;
2539}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2540
2541/**
2542 *  sys_rt_sigprocmask - change the list of currently blocked signals
2543 *  @how: whether to add, remove, or set signals
2544 *  @nset: stores pending signals
2545 *  @oset: previous value of signal mask if non-null
2546 *  @sigsetsize: size of sigset_t type
2547 */
2548SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2549		sigset_t __user *, oset, size_t, sigsetsize)
2550{
2551	sigset_t old_set, new_set;
2552	int error;
2553
2554	/* XXX: Don't preclude handling different sized sigset_t's.  */
2555	if (sigsetsize != sizeof(sigset_t))
2556		return -EINVAL;
2557
2558	old_set = current->blocked;
2559
2560	if (nset) {
2561		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2562			return -EFAULT;
2563		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2564
2565		error = sigprocmask(how, &new_set, NULL);
2566		if (error)
2567			return error;
2568	}
2569
2570	if (oset) {
2571		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2572			return -EFAULT;
2573	}
2574
2575	return 0;
2576}
2577
2578#ifdef CONFIG_COMPAT
2579COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2580		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2581{
2582#ifdef __BIG_ENDIAN
2583	sigset_t old_set = current->blocked;
2584
2585	/* XXX: Don't preclude handling different sized sigset_t's.  */
2586	if (sigsetsize != sizeof(sigset_t))
2587		return -EINVAL;
2588
2589	if (nset) {
2590		compat_sigset_t new32;
2591		sigset_t new_set;
2592		int error;
2593		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2594			return -EFAULT;
2595
2596		sigset_from_compat(&new_set, &new32);
2597		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2598
2599		error = sigprocmask(how, &new_set, NULL);
2600		if (error)
2601			return error;
2602	}
2603	if (oset) {
2604		compat_sigset_t old32;
2605		sigset_to_compat(&old32, &old_set);
2606		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2607			return -EFAULT;
2608	}
2609	return 0;
2610#else
2611	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2612				  (sigset_t __user *)oset, sigsetsize);
2613#endif
2614}
2615#endif
2616
2617static int do_sigpending(void *set, unsigned long sigsetsize)
2618{
2619	if (sigsetsize > sizeof(sigset_t))
2620		return -EINVAL;
2621
2622	spin_lock_irq(&current->sighand->siglock);
2623	sigorsets(set, &current->pending.signal,
2624		  &current->signal->shared_pending.signal);
2625	spin_unlock_irq(&current->sighand->siglock);
2626
2627	/* Outside the lock because only this thread touches it.  */
2628	sigandsets(set, &current->blocked, set);
2629	return 0;
2630}
2631
2632/**
2633 *  sys_rt_sigpending - examine a pending signal that has been raised
2634 *			while blocked
2635 *  @uset: stores pending signals
2636 *  @sigsetsize: size of sigset_t type or larger
2637 */
2638SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2639{
2640	sigset_t set;
2641	int err = do_sigpending(&set, sigsetsize);
2642	if (!err && copy_to_user(uset, &set, sigsetsize))
2643		err = -EFAULT;
2644	return err;
 
 
 
 
 
 
2645}
2646
2647#ifdef CONFIG_COMPAT
2648COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2649		compat_size_t, sigsetsize)
2650{
2651#ifdef __BIG_ENDIAN
2652	sigset_t set;
2653	int err = do_sigpending(&set, sigsetsize);
2654	if (!err) {
2655		compat_sigset_t set32;
2656		sigset_to_compat(&set32, &set);
2657		/* we can get here only if sigsetsize <= sizeof(set) */
2658		if (copy_to_user(uset, &set32, sigsetsize))
2659			err = -EFAULT;
2660	}
2661	return err;
2662#else
2663	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2664#endif
2665}
2666#endif
2667
2668#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2669
2670int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2671{
2672	int err;
2673
2674	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2675		return -EFAULT;
2676	if (from->si_code < 0)
2677		return __copy_to_user(to, from, sizeof(siginfo_t))
2678			? -EFAULT : 0;
2679	/*
2680	 * If you change siginfo_t structure, please be sure
2681	 * this code is fixed accordingly.
2682	 * Please remember to update the signalfd_copyinfo() function
2683	 * inside fs/signalfd.c too, in case siginfo_t changes.
2684	 * It should never copy any pad contained in the structure
2685	 * to avoid security leaks, but must copy the generic
2686	 * 3 ints plus the relevant union member.
2687	 */
2688	err = __put_user(from->si_signo, &to->si_signo);
2689	err |= __put_user(from->si_errno, &to->si_errno);
2690	err |= __put_user((short)from->si_code, &to->si_code);
2691	switch (from->si_code & __SI_MASK) {
2692	case __SI_KILL:
2693		err |= __put_user(from->si_pid, &to->si_pid);
2694		err |= __put_user(from->si_uid, &to->si_uid);
2695		break;
2696	case __SI_TIMER:
2697		 err |= __put_user(from->si_tid, &to->si_tid);
2698		 err |= __put_user(from->si_overrun, &to->si_overrun);
2699		 err |= __put_user(from->si_ptr, &to->si_ptr);
2700		break;
2701	case __SI_POLL:
2702		err |= __put_user(from->si_band, &to->si_band);
2703		err |= __put_user(from->si_fd, &to->si_fd);
2704		break;
2705	case __SI_FAULT:
2706		err |= __put_user(from->si_addr, &to->si_addr);
2707#ifdef __ARCH_SI_TRAPNO
2708		err |= __put_user(from->si_trapno, &to->si_trapno);
2709#endif
2710#ifdef BUS_MCEERR_AO
2711		/*
2712		 * Other callers might not initialize the si_lsb field,
2713		 * so check explicitly for the right codes here.
2714		 */
2715		if (from->si_signo == SIGBUS &&
2716		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2717			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2718#endif
2719#ifdef SEGV_BNDERR
2720		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2721			err |= __put_user(from->si_lower, &to->si_lower);
2722			err |= __put_user(from->si_upper, &to->si_upper);
2723		}
2724#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2725#ifdef SEGV_PKUERR
2726		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2727			err |= __put_user(from->si_pkey, &to->si_pkey);
2728#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2729		break;
2730	case __SI_CHLD:
2731		err |= __put_user(from->si_pid, &to->si_pid);
2732		err |= __put_user(from->si_uid, &to->si_uid);
2733		err |= __put_user(from->si_status, &to->si_status);
2734		err |= __put_user(from->si_utime, &to->si_utime);
2735		err |= __put_user(from->si_stime, &to->si_stime);
2736		break;
2737	case __SI_RT: /* This is not generated by the kernel as of now. */
2738	case __SI_MESGQ: /* But this is */
2739		err |= __put_user(from->si_pid, &to->si_pid);
2740		err |= __put_user(from->si_uid, &to->si_uid);
2741		err |= __put_user(from->si_ptr, &to->si_ptr);
2742		break;
2743#ifdef __ARCH_SIGSYS
2744	case __SI_SYS:
2745		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2746		err |= __put_user(from->si_syscall, &to->si_syscall);
2747		err |= __put_user(from->si_arch, &to->si_arch);
2748		break;
2749#endif
2750	default: /* this is just in case for now ... */
2751		err |= __put_user(from->si_pid, &to->si_pid);
2752		err |= __put_user(from->si_uid, &to->si_uid);
2753		break;
2754	}
2755	return err;
2756}
2757
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2758#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2759
2760/**
2761 *  do_sigtimedwait - wait for queued signals specified in @which
2762 *  @which: queued signals to wait for
2763 *  @info: if non-null, the signal's siginfo is returned here
2764 *  @ts: upper bound on process time suspension
2765 */
2766int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2767		    const struct timespec *ts)
2768{
2769	ktime_t *to = NULL, timeout = KTIME_MAX;
2770	struct task_struct *tsk = current;
2771	sigset_t mask = *which;
 
2772	int sig, ret = 0;
2773
2774	if (ts) {
2775		if (!timespec_valid(ts))
2776			return -EINVAL;
2777		timeout = timespec_to_ktime(*ts);
2778		to = &timeout;
2779	}
2780
2781	/*
2782	 * Invert the set of allowed signals to get those we want to block.
2783	 */
2784	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2785	signotset(&mask);
2786
2787	spin_lock_irq(&tsk->sighand->siglock);
2788	sig = dequeue_signal(tsk, &mask, info);
2789	if (!sig && timeout) {
2790		/*
2791		 * None ready, temporarily unblock those we're interested
2792		 * while we are sleeping in so that we'll be awakened when
2793		 * they arrive. Unblocking is always fine, we can avoid
2794		 * set_current_blocked().
2795		 */
2796		tsk->real_blocked = tsk->blocked;
2797		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2798		recalc_sigpending();
2799		spin_unlock_irq(&tsk->sighand->siglock);
2800
2801		__set_current_state(TASK_INTERRUPTIBLE);
2802		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2803							 HRTIMER_MODE_REL);
2804		spin_lock_irq(&tsk->sighand->siglock);
2805		__set_task_blocked(tsk, &tsk->real_blocked);
2806		sigemptyset(&tsk->real_blocked);
2807		sig = dequeue_signal(tsk, &mask, info);
2808	}
2809	spin_unlock_irq(&tsk->sighand->siglock);
2810
2811	if (sig)
2812		return sig;
2813	return ret ? -EINTR : -EAGAIN;
2814}
2815
2816/**
2817 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2818 *			in @uthese
2819 *  @uthese: queued signals to wait for
2820 *  @uinfo: if non-null, the signal's siginfo is returned here
2821 *  @uts: upper bound on process time suspension
2822 *  @sigsetsize: size of sigset_t type
2823 */
2824SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2825		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
 
2826		size_t, sigsetsize)
2827{
2828	sigset_t these;
2829	struct timespec ts;
2830	siginfo_t info;
2831	int ret;
2832
2833	/* XXX: Don't preclude handling different sized sigset_t's.  */
2834	if (sigsetsize != sizeof(sigset_t))
2835		return -EINVAL;
2836
2837	if (copy_from_user(&these, uthese, sizeof(these)))
2838		return -EFAULT;
2839
2840	if (uts) {
2841		if (copy_from_user(&ts, uts, sizeof(ts)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2842			return -EFAULT;
2843	}
2844
2845	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2846
2847	if (ret > 0 && uinfo) {
2848		if (copy_siginfo_to_user(uinfo, &info))
2849			ret = -EFAULT;
2850	}
2851
2852	return ret;
2853}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2854
2855/**
2856 *  sys_kill - send a signal to a process
2857 *  @pid: the PID of the process
2858 *  @sig: signal to be sent
2859 */
2860SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2861{
2862	struct siginfo info;
2863
2864	info.si_signo = sig;
2865	info.si_errno = 0;
2866	info.si_code = SI_USER;
2867	info.si_pid = task_tgid_vnr(current);
2868	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2869
2870	return kill_something_info(sig, &info, pid);
2871}
2872
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2873static int
2874do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2875{
2876	struct task_struct *p;
2877	int error = -ESRCH;
2878
2879	rcu_read_lock();
2880	p = find_task_by_vpid(pid);
2881	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2882		error = check_kill_permission(sig, info, p);
2883		/*
2884		 * The null signal is a permissions and process existence
2885		 * probe.  No signal is actually delivered.
2886		 */
2887		if (!error && sig) {
2888			error = do_send_sig_info(sig, info, p, false);
2889			/*
2890			 * If lock_task_sighand() failed we pretend the task
2891			 * dies after receiving the signal. The window is tiny,
2892			 * and the signal is private anyway.
2893			 */
2894			if (unlikely(error == -ESRCH))
2895				error = 0;
2896		}
2897	}
2898	rcu_read_unlock();
2899
2900	return error;
2901}
2902
2903static int do_tkill(pid_t tgid, pid_t pid, int sig)
2904{
2905	struct siginfo info = {};
2906
 
2907	info.si_signo = sig;
2908	info.si_errno = 0;
2909	info.si_code = SI_TKILL;
2910	info.si_pid = task_tgid_vnr(current);
2911	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2912
2913	return do_send_specific(tgid, pid, sig, &info);
2914}
2915
2916/**
2917 *  sys_tgkill - send signal to one specific thread
2918 *  @tgid: the thread group ID of the thread
2919 *  @pid: the PID of the thread
2920 *  @sig: signal to be sent
2921 *
2922 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2923 *  exists but it's not belonging to the target process anymore. This
2924 *  method solves the problem of threads exiting and PIDs getting reused.
2925 */
2926SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2927{
2928	/* This is only valid for single tasks */
2929	if (pid <= 0 || tgid <= 0)
2930		return -EINVAL;
2931
2932	return do_tkill(tgid, pid, sig);
2933}
2934
2935/**
2936 *  sys_tkill - send signal to one specific task
2937 *  @pid: the PID of the task
2938 *  @sig: signal to be sent
2939 *
2940 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2941 */
2942SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2943{
2944	/* This is only valid for single tasks */
2945	if (pid <= 0)
2946		return -EINVAL;
2947
2948	return do_tkill(0, pid, sig);
2949}
2950
2951static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2952{
2953	/* Not even root can pretend to send signals from the kernel.
2954	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2955	 */
2956	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2957	    (task_pid_vnr(current) != pid))
2958		return -EPERM;
2959
2960	info->si_signo = sig;
2961
2962	/* POSIX.1b doesn't mention process groups.  */
2963	return kill_proc_info(sig, info, pid);
2964}
2965
2966/**
2967 *  sys_rt_sigqueueinfo - send signal information to a signal
2968 *  @pid: the PID of the thread
2969 *  @sig: signal to be sent
2970 *  @uinfo: signal info to be sent
2971 */
2972SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2973		siginfo_t __user *, uinfo)
2974{
2975	siginfo_t info;
2976	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2977		return -EFAULT;
 
2978	return do_rt_sigqueueinfo(pid, sig, &info);
2979}
2980
2981#ifdef CONFIG_COMPAT
2982COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
2983			compat_pid_t, pid,
2984			int, sig,
2985			struct compat_siginfo __user *, uinfo)
2986{
2987	siginfo_t info = {};
2988	int ret = copy_siginfo_from_user32(&info, uinfo);
2989	if (unlikely(ret))
2990		return ret;
2991	return do_rt_sigqueueinfo(pid, sig, &info);
2992}
2993#endif
2994
2995static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2996{
2997	/* This is only valid for single tasks */
2998	if (pid <= 0 || tgid <= 0)
2999		return -EINVAL;
3000
3001	/* Not even root can pretend to send signals from the kernel.
3002	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3003	 */
3004	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3005	    (task_pid_vnr(current) != pid))
3006		return -EPERM;
3007
3008	info->si_signo = sig;
3009
3010	return do_send_specific(tgid, pid, sig, info);
3011}
3012
3013SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3014		siginfo_t __user *, uinfo)
3015{
3016	siginfo_t info;
3017
3018	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3019		return -EFAULT;
3020
3021	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3022}
3023
3024#ifdef CONFIG_COMPAT
3025COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3026			compat_pid_t, tgid,
3027			compat_pid_t, pid,
3028			int, sig,
3029			struct compat_siginfo __user *, uinfo)
3030{
3031	siginfo_t info = {};
3032
3033	if (copy_siginfo_from_user32(&info, uinfo))
3034		return -EFAULT;
3035	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3036}
3037#endif
3038
3039/*
3040 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3041 */
3042void kernel_sigaction(int sig, __sighandler_t action)
3043{
3044	spin_lock_irq(&current->sighand->siglock);
3045	current->sighand->action[sig - 1].sa.sa_handler = action;
3046	if (action == SIG_IGN) {
3047		sigset_t mask;
3048
3049		sigemptyset(&mask);
3050		sigaddset(&mask, sig);
3051
3052		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3053		flush_sigqueue_mask(&mask, &current->pending);
3054		recalc_sigpending();
3055	}
3056	spin_unlock_irq(&current->sighand->siglock);
3057}
3058EXPORT_SYMBOL(kernel_sigaction);
3059
3060void __weak sigaction_compat_abi(struct k_sigaction *act,
3061		struct k_sigaction *oact)
3062{
3063}
3064
3065int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3066{
3067	struct task_struct *p = current, *t;
3068	struct k_sigaction *k;
3069	sigset_t mask;
3070
3071	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3072		return -EINVAL;
3073
3074	k = &p->sighand->action[sig-1];
3075
3076	spin_lock_irq(&p->sighand->siglock);
 
 
 
 
3077	if (oact)
3078		*oact = *k;
3079
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3080	sigaction_compat_abi(act, oact);
3081
3082	if (act) {
3083		sigdelsetmask(&act->sa.sa_mask,
3084			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3085		*k = *act;
3086		/*
3087		 * POSIX 3.3.1.3:
3088		 *  "Setting a signal action to SIG_IGN for a signal that is
3089		 *   pending shall cause the pending signal to be discarded,
3090		 *   whether or not it is blocked."
3091		 *
3092		 *  "Setting a signal action to SIG_DFL for a signal that is
3093		 *   pending and whose default action is to ignore the signal
3094		 *   (for example, SIGCHLD), shall cause the pending signal to
3095		 *   be discarded, whether or not it is blocked"
3096		 */
3097		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3098			sigemptyset(&mask);
3099			sigaddset(&mask, sig);
3100			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3101			for_each_thread(p, t)
3102				flush_sigqueue_mask(&mask, &t->pending);
3103		}
3104	}
3105
3106	spin_unlock_irq(&p->sighand->siglock);
3107	return 0;
3108}
3109
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3110static int
3111do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
 
3112{
3113	stack_t oss;
3114	int error;
3115
3116	oss.ss_sp = (void __user *) current->sas_ss_sp;
3117	oss.ss_size = current->sas_ss_size;
3118	oss.ss_flags = sas_ss_flags(sp) |
3119		(current->sas_ss_flags & SS_FLAG_BITS);
3120
3121	if (uss) {
3122		void __user *ss_sp;
3123		size_t ss_size;
3124		unsigned ss_flags;
3125		int ss_mode;
3126
3127		error = -EFAULT;
3128		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3129			goto out;
3130		error = __get_user(ss_sp, &uss->ss_sp) |
3131			__get_user(ss_flags, &uss->ss_flags) |
3132			__get_user(ss_size, &uss->ss_size);
3133		if (error)
3134			goto out;
3135
3136		error = -EPERM;
3137		if (on_sig_stack(sp))
3138			goto out;
3139
3140		ss_mode = ss_flags & ~SS_FLAG_BITS;
3141		error = -EINVAL;
3142		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3143				ss_mode != 0)
3144			goto out;
 
 
 
 
 
 
 
 
3145
 
3146		if (ss_mode == SS_DISABLE) {
3147			ss_size = 0;
3148			ss_sp = NULL;
3149		} else {
3150			error = -ENOMEM;
3151			if (ss_size < MINSIGSTKSZ)
3152				goto out;
 
3153		}
3154
3155		current->sas_ss_sp = (unsigned long) ss_sp;
3156		current->sas_ss_size = ss_size;
3157		current->sas_ss_flags = ss_flags;
3158	}
3159
3160	error = 0;
3161	if (uoss) {
3162		error = -EFAULT;
3163		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3164			goto out;
3165		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3166			__put_user(oss.ss_size, &uoss->ss_size) |
3167			__put_user(oss.ss_flags, &uoss->ss_flags);
3168	}
3169
3170out:
3171	return error;
3172}
 
3173SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3174{
3175	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
 
 
 
 
 
 
 
 
 
3176}
3177
3178int restore_altstack(const stack_t __user *uss)
3179{
3180	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
 
 
 
 
3181	/* squash all but EFAULT for now */
3182	return err == -EFAULT ? err : 0;
3183}
3184
3185int __save_altstack(stack_t __user *uss, unsigned long sp)
3186{
3187	struct task_struct *t = current;
3188	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3189		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3190		__put_user(t->sas_ss_size, &uss->ss_size);
3191	if (err)
3192		return err;
3193	if (t->sas_ss_flags & SS_AUTODISARM)
3194		sas_ss_reset(t);
3195	return 0;
3196}
3197
3198#ifdef CONFIG_COMPAT
3199COMPAT_SYSCALL_DEFINE2(sigaltstack,
3200			const compat_stack_t __user *, uss_ptr,
3201			compat_stack_t __user *, uoss_ptr)
3202{
3203	stack_t uss, uoss;
3204	int ret;
3205	mm_segment_t seg;
3206
3207	if (uss_ptr) {
3208		compat_stack_t uss32;
3209
3210		memset(&uss, 0, sizeof(stack_t));
3211		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3212			return -EFAULT;
3213		uss.ss_sp = compat_ptr(uss32.ss_sp);
3214		uss.ss_flags = uss32.ss_flags;
3215		uss.ss_size = uss32.ss_size;
3216	}
3217	seg = get_fs();
3218	set_fs(KERNEL_DS);
3219	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3220			     (stack_t __force __user *) &uoss,
3221			     compat_user_stack_pointer());
3222	set_fs(seg);
3223	if (ret >= 0 && uoss_ptr)  {
3224		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3225		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3226		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3227		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
 
 
3228			ret = -EFAULT;
3229	}
3230	return ret;
3231}
3232
 
 
 
 
 
 
 
3233int compat_restore_altstack(const compat_stack_t __user *uss)
3234{
3235	int err = compat_sys_sigaltstack(uss, NULL);
3236	/* squash all but -EFAULT for now */
3237	return err == -EFAULT ? err : 0;
3238}
3239
3240int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3241{
3242	int err;
3243	struct task_struct *t = current;
3244	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3245			 &uss->ss_sp) |
3246		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3247		__put_user(t->sas_ss_size, &uss->ss_size);
3248	if (err)
3249		return err;
3250	if (t->sas_ss_flags & SS_AUTODISARM)
3251		sas_ss_reset(t);
3252	return 0;
3253}
3254#endif
3255
3256#ifdef __ARCH_WANT_SYS_SIGPENDING
3257
3258/**
3259 *  sys_sigpending - examine pending signals
3260 *  @set: where mask of pending signal is returned
3261 */
3262SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3263{
3264	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); 
 
 
 
 
 
 
 
 
 
 
3265}
3266
 
 
 
 
 
 
 
 
 
 
 
3267#endif
3268
3269#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3270/**
3271 *  sys_sigprocmask - examine and change blocked signals
3272 *  @how: whether to add, remove, or set signals
3273 *  @nset: signals to add or remove (if non-null)
3274 *  @oset: previous value of signal mask if non-null
3275 *
3276 * Some platforms have their own version with special arguments;
3277 * others support only sys_rt_sigprocmask.
3278 */
3279
3280SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3281		old_sigset_t __user *, oset)
3282{
3283	old_sigset_t old_set, new_set;
3284	sigset_t new_blocked;
3285
3286	old_set = current->blocked.sig[0];
3287
3288	if (nset) {
3289		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3290			return -EFAULT;
3291
3292		new_blocked = current->blocked;
3293
3294		switch (how) {
3295		case SIG_BLOCK:
3296			sigaddsetmask(&new_blocked, new_set);
3297			break;
3298		case SIG_UNBLOCK:
3299			sigdelsetmask(&new_blocked, new_set);
3300			break;
3301		case SIG_SETMASK:
3302			new_blocked.sig[0] = new_set;
3303			break;
3304		default:
3305			return -EINVAL;
3306		}
3307
3308		set_current_blocked(&new_blocked);
3309	}
3310
3311	if (oset) {
3312		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3313			return -EFAULT;
3314	}
3315
3316	return 0;
3317}
3318#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3319
3320#ifndef CONFIG_ODD_RT_SIGACTION
3321/**
3322 *  sys_rt_sigaction - alter an action taken by a process
3323 *  @sig: signal to be sent
3324 *  @act: new sigaction
3325 *  @oact: used to save the previous sigaction
3326 *  @sigsetsize: size of sigset_t type
3327 */
3328SYSCALL_DEFINE4(rt_sigaction, int, sig,
3329		const struct sigaction __user *, act,
3330		struct sigaction __user *, oact,
3331		size_t, sigsetsize)
3332{
3333	struct k_sigaction new_sa, old_sa;
3334	int ret = -EINVAL;
3335
3336	/* XXX: Don't preclude handling different sized sigset_t's.  */
3337	if (sigsetsize != sizeof(sigset_t))
3338		goto out;
3339
3340	if (act) {
3341		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3342			return -EFAULT;
3343	}
3344
3345	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
 
 
3346
3347	if (!ret && oact) {
3348		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3349			return -EFAULT;
3350	}
3351out:
3352	return ret;
3353}
3354#ifdef CONFIG_COMPAT
3355COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3356		const struct compat_sigaction __user *, act,
3357		struct compat_sigaction __user *, oact,
3358		compat_size_t, sigsetsize)
3359{
3360	struct k_sigaction new_ka, old_ka;
3361	compat_sigset_t mask;
3362#ifdef __ARCH_HAS_SA_RESTORER
3363	compat_uptr_t restorer;
3364#endif
3365	int ret;
3366
3367	/* XXX: Don't preclude handling different sized sigset_t's.  */
3368	if (sigsetsize != sizeof(compat_sigset_t))
3369		return -EINVAL;
3370
3371	if (act) {
3372		compat_uptr_t handler;
3373		ret = get_user(handler, &act->sa_handler);
3374		new_ka.sa.sa_handler = compat_ptr(handler);
3375#ifdef __ARCH_HAS_SA_RESTORER
3376		ret |= get_user(restorer, &act->sa_restorer);
3377		new_ka.sa.sa_restorer = compat_ptr(restorer);
3378#endif
3379		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3380		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3381		if (ret)
3382			return -EFAULT;
3383		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3384	}
3385
3386	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3387	if (!ret && oact) {
3388		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3389		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
3390			       &oact->sa_handler);
3391		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
 
3392		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3393#ifdef __ARCH_HAS_SA_RESTORER
3394		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3395				&oact->sa_restorer);
3396#endif
3397	}
3398	return ret;
3399}
3400#endif
3401#endif /* !CONFIG_ODD_RT_SIGACTION */
3402
3403#ifdef CONFIG_OLD_SIGACTION
3404SYSCALL_DEFINE3(sigaction, int, sig,
3405		const struct old_sigaction __user *, act,
3406	        struct old_sigaction __user *, oact)
3407{
3408	struct k_sigaction new_ka, old_ka;
3409	int ret;
3410
3411	if (act) {
3412		old_sigset_t mask;
3413		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3414		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3415		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3416		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3417		    __get_user(mask, &act->sa_mask))
3418			return -EFAULT;
3419#ifdef __ARCH_HAS_KA_RESTORER
3420		new_ka.ka_restorer = NULL;
3421#endif
3422		siginitset(&new_ka.sa.sa_mask, mask);
3423	}
3424
3425	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3426
3427	if (!ret && oact) {
3428		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3429		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3430		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3431		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3432		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3433			return -EFAULT;
3434	}
3435
3436	return ret;
3437}
3438#endif
3439#ifdef CONFIG_COMPAT_OLD_SIGACTION
3440COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3441		const struct compat_old_sigaction __user *, act,
3442	        struct compat_old_sigaction __user *, oact)
3443{
3444	struct k_sigaction new_ka, old_ka;
3445	int ret;
3446	compat_old_sigset_t mask;
3447	compat_uptr_t handler, restorer;
3448
3449	if (act) {
3450		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3451		    __get_user(handler, &act->sa_handler) ||
3452		    __get_user(restorer, &act->sa_restorer) ||
3453		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3454		    __get_user(mask, &act->sa_mask))
3455			return -EFAULT;
3456
3457#ifdef __ARCH_HAS_KA_RESTORER
3458		new_ka.ka_restorer = NULL;
3459#endif
3460		new_ka.sa.sa_handler = compat_ptr(handler);
3461		new_ka.sa.sa_restorer = compat_ptr(restorer);
3462		siginitset(&new_ka.sa.sa_mask, mask);
3463	}
3464
3465	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3466
3467	if (!ret && oact) {
3468		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3469		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3470			       &oact->sa_handler) ||
3471		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3472			       &oact->sa_restorer) ||
3473		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3474		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3475			return -EFAULT;
3476	}
3477	return ret;
3478}
3479#endif
3480
3481#ifdef CONFIG_SGETMASK_SYSCALL
3482
3483/*
3484 * For backwards compatibility.  Functionality superseded by sigprocmask.
3485 */
3486SYSCALL_DEFINE0(sgetmask)
3487{
3488	/* SMP safe */
3489	return current->blocked.sig[0];
3490}
3491
3492SYSCALL_DEFINE1(ssetmask, int, newmask)
3493{
3494	int old = current->blocked.sig[0];
3495	sigset_t newset;
3496
3497	siginitset(&newset, newmask);
3498	set_current_blocked(&newset);
3499
3500	return old;
3501}
3502#endif /* CONFIG_SGETMASK_SYSCALL */
3503
3504#ifdef __ARCH_WANT_SYS_SIGNAL
3505/*
3506 * For backwards compatibility.  Functionality superseded by sigaction.
3507 */
3508SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3509{
3510	struct k_sigaction new_sa, old_sa;
3511	int ret;
3512
3513	new_sa.sa.sa_handler = handler;
3514	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3515	sigemptyset(&new_sa.sa.sa_mask);
3516
3517	ret = do_sigaction(sig, &new_sa, &old_sa);
3518
3519	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3520}
3521#endif /* __ARCH_WANT_SYS_SIGNAL */
3522
3523#ifdef __ARCH_WANT_SYS_PAUSE
3524
3525SYSCALL_DEFINE0(pause)
3526{
3527	while (!signal_pending(current)) {
3528		__set_current_state(TASK_INTERRUPTIBLE);
3529		schedule();
3530	}
3531	return -ERESTARTNOHAND;
3532}
3533
3534#endif
3535
3536static int sigsuspend(sigset_t *set)
3537{
3538	current->saved_sigmask = current->blocked;
3539	set_current_blocked(set);
3540
3541	while (!signal_pending(current)) {
3542		__set_current_state(TASK_INTERRUPTIBLE);
3543		schedule();
3544	}
3545	set_restore_sigmask();
3546	return -ERESTARTNOHAND;
3547}
3548
3549/**
3550 *  sys_rt_sigsuspend - replace the signal mask for a value with the
3551 *	@unewset value until a signal is received
3552 *  @unewset: new signal mask value
3553 *  @sigsetsize: size of sigset_t type
3554 */
3555SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3556{
3557	sigset_t newset;
3558
3559	/* XXX: Don't preclude handling different sized sigset_t's.  */
3560	if (sigsetsize != sizeof(sigset_t))
3561		return -EINVAL;
3562
3563	if (copy_from_user(&newset, unewset, sizeof(newset)))
3564		return -EFAULT;
3565	return sigsuspend(&newset);
3566}
3567 
3568#ifdef CONFIG_COMPAT
3569COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3570{
3571#ifdef __BIG_ENDIAN
3572	sigset_t newset;
3573	compat_sigset_t newset32;
3574
3575	/* XXX: Don't preclude handling different sized sigset_t's.  */
3576	if (sigsetsize != sizeof(sigset_t))
3577		return -EINVAL;
3578
3579	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3580		return -EFAULT;
3581	sigset_from_compat(&newset, &newset32);
3582	return sigsuspend(&newset);
3583#else
3584	/* on little-endian bitmaps don't care about granularity */
3585	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3586#endif
3587}
3588#endif
3589
3590#ifdef CONFIG_OLD_SIGSUSPEND
3591SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3592{
3593	sigset_t blocked;
3594	siginitset(&blocked, mask);
3595	return sigsuspend(&blocked);
3596}
3597#endif
3598#ifdef CONFIG_OLD_SIGSUSPEND3
3599SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3600{
3601	sigset_t blocked;
3602	siginitset(&blocked, mask);
3603	return sigsuspend(&blocked);
3604}
3605#endif
3606
3607__weak const char *arch_vma_name(struct vm_area_struct *vma)
3608{
3609	return NULL;
3610}
3611
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3612void __init signals_init(void)
3613{
3614	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3615	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3616		!= offsetof(struct siginfo, _sifields._pad));
3617
3618	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3619}
3620
3621#ifdef CONFIG_KGDB_KDB
3622#include <linux/kdb.h>
3623/*
3624 * kdb_send_sig_info - Allows kdb to send signals without exposing
3625 * signal internals.  This function checks if the required locks are
3626 * available before calling the main signal code, to avoid kdb
3627 * deadlocks.
3628 */
3629void
3630kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3631{
3632	static struct task_struct *kdb_prev_t;
3633	int sig, new_t;
3634	if (!spin_trylock(&t->sighand->siglock)) {
3635		kdb_printf("Can't do kill command now.\n"
3636			   "The sigmask lock is held somewhere else in "
3637			   "kernel, try again later\n");
3638		return;
3639	}
3640	spin_unlock(&t->sighand->siglock);
3641	new_t = kdb_prev_t != t;
3642	kdb_prev_t = t;
3643	if (t->state != TASK_RUNNING && new_t) {
 
3644		kdb_printf("Process is not RUNNING, sending a signal from "
3645			   "kdb risks deadlock\n"
3646			   "on the run queue locks. "
3647			   "The signal has _not_ been sent.\n"
3648			   "Reissue the kill command if you want to risk "
3649			   "the deadlock.\n");
3650		return;
3651	}
3652	sig = info->si_signo;
3653	if (send_sig_info(sig, info, t))
 
3654		kdb_printf("Fail to deliver Signal %d to process %d.\n",
3655			   sig, t->pid);
3656	else
3657		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3658}
3659#endif	/* CONFIG_KGDB_KDB */
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/kernel/signal.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
   8 *
   9 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
  10 *		Changes to use preallocated sigqueue structures
  11 *		to allow signals to be sent reliably.
  12 */
  13
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/init.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/user.h>
  19#include <linux/sched/debug.h>
  20#include <linux/sched/task.h>
  21#include <linux/sched/task_stack.h>
  22#include <linux/sched/cputime.h>
  23#include <linux/file.h>
  24#include <linux/fs.h>
  25#include <linux/proc_fs.h>
  26#include <linux/tty.h>
  27#include <linux/binfmts.h>
  28#include <linux/coredump.h>
  29#include <linux/security.h>
  30#include <linux/syscalls.h>
  31#include <linux/ptrace.h>
  32#include <linux/signal.h>
  33#include <linux/signalfd.h>
  34#include <linux/ratelimit.h>
  35#include <linux/task_work.h>
  36#include <linux/capability.h>
  37#include <linux/freezer.h>
  38#include <linux/pid_namespace.h>
  39#include <linux/nsproxy.h>
  40#include <linux/user_namespace.h>
  41#include <linux/uprobes.h>
  42#include <linux/compat.h>
  43#include <linux/cn_proc.h>
  44#include <linux/compiler.h>
  45#include <linux/posix-timers.h>
  46#include <linux/cgroup.h>
  47#include <linux/audit.h>
  48
  49#define CREATE_TRACE_POINTS
  50#include <trace/events/signal.h>
  51
  52#include <asm/param.h>
  53#include <linux/uaccess.h>
  54#include <asm/unistd.h>
  55#include <asm/siginfo.h>
  56#include <asm/cacheflush.h>
  57#include <asm/syscall.h>	/* for syscall_get_* */
  58
  59/*
  60 * SLAB caches for signal bits.
  61 */
  62
  63static struct kmem_cache *sigqueue_cachep;
  64
  65int print_fatal_signals __read_mostly;
  66
  67static void __user *sig_handler(struct task_struct *t, int sig)
  68{
  69	return t->sighand->action[sig - 1].sa.sa_handler;
  70}
  71
  72static inline bool sig_handler_ignored(void __user *handler, int sig)
  73{
  74	/* Is it explicitly or implicitly ignored? */
  75	return handler == SIG_IGN ||
  76	       (handler == SIG_DFL && sig_kernel_ignore(sig));
  77}
  78
  79static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
  80{
  81	void __user *handler;
  82
  83	handler = sig_handler(t, sig);
  84
  85	/* SIGKILL and SIGSTOP may not be sent to the global init */
  86	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
  87		return true;
  88
  89	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
  90	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
  91		return true;
  92
  93	/* Only allow kernel generated signals to this kthread */
  94	if (unlikely((t->flags & PF_KTHREAD) &&
  95		     (handler == SIG_KTHREAD_KERNEL) && !force))
  96		return true;
  97
  98	return sig_handler_ignored(handler, sig);
  99}
 100
 101static bool sig_ignored(struct task_struct *t, int sig, bool force)
 102{
 103	/*
 104	 * Blocked signals are never ignored, since the
 105	 * signal handler may change by the time it is
 106	 * unblocked.
 107	 */
 108	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 109		return false;
 
 
 
 110
 111	/*
 112	 * Tracers may want to know about even ignored signal unless it
 113	 * is SIGKILL which can't be reported anyway but can be ignored
 114	 * by SIGNAL_UNKILLABLE task.
 115	 */
 116	if (t->ptrace && sig != SIGKILL)
 117		return false;
 118
 119	return sig_task_ignored(t, sig, force);
 120}
 121
 122/*
 123 * Re-calculate pending state from the set of locally pending
 124 * signals, globally pending signals, and blocked signals.
 125 */
 126static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 127{
 128	unsigned long ready;
 129	long i;
 130
 131	switch (_NSIG_WORDS) {
 132	default:
 133		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 134			ready |= signal->sig[i] &~ blocked->sig[i];
 135		break;
 136
 137	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 138		ready |= signal->sig[2] &~ blocked->sig[2];
 139		ready |= signal->sig[1] &~ blocked->sig[1];
 140		ready |= signal->sig[0] &~ blocked->sig[0];
 141		break;
 142
 143	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 144		ready |= signal->sig[0] &~ blocked->sig[0];
 145		break;
 146
 147	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 148	}
 149	return ready !=	0;
 150}
 151
 152#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 153
 154static bool recalc_sigpending_tsk(struct task_struct *t)
 155{
 156	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
 157	    PENDING(&t->pending, &t->blocked) ||
 158	    PENDING(&t->signal->shared_pending, &t->blocked) ||
 159	    cgroup_task_frozen(t)) {
 160		set_tsk_thread_flag(t, TIF_SIGPENDING);
 161		return true;
 162	}
 163
 164	/*
 165	 * We must never clear the flag in another thread, or in current
 166	 * when it's possible the current syscall is returning -ERESTART*.
 167	 * So we don't clear it here, and only callers who know they should do.
 168	 */
 169	return false;
 170}
 171
 172/*
 173 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 174 * This is superfluous when called on current, the wakeup is a harmless no-op.
 175 */
 176void recalc_sigpending_and_wake(struct task_struct *t)
 177{
 178	if (recalc_sigpending_tsk(t))
 179		signal_wake_up(t, 0);
 180}
 181
 182void recalc_sigpending(void)
 183{
 184	if (!recalc_sigpending_tsk(current) && !freezing(current))
 185		clear_thread_flag(TIF_SIGPENDING);
 186
 187}
 188EXPORT_SYMBOL(recalc_sigpending);
 189
 190void calculate_sigpending(void)
 191{
 192	/* Have any signals or users of TIF_SIGPENDING been delayed
 193	 * until after fork?
 194	 */
 195	spin_lock_irq(&current->sighand->siglock);
 196	set_tsk_thread_flag(current, TIF_SIGPENDING);
 197	recalc_sigpending();
 198	spin_unlock_irq(&current->sighand->siglock);
 199}
 200
 201/* Given the mask, find the first available signal that should be serviced. */
 202
 203#define SYNCHRONOUS_MASK \
 204	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
 205	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
 206
 207int next_signal(struct sigpending *pending, sigset_t *mask)
 208{
 209	unsigned long i, *s, *m, x;
 210	int sig = 0;
 211
 212	s = pending->signal.sig;
 213	m = mask->sig;
 214
 215	/*
 216	 * Handle the first word specially: it contains the
 217	 * synchronous signals that need to be dequeued first.
 218	 */
 219	x = *s &~ *m;
 220	if (x) {
 221		if (x & SYNCHRONOUS_MASK)
 222			x &= SYNCHRONOUS_MASK;
 223		sig = ffz(~x) + 1;
 224		return sig;
 225	}
 226
 227	switch (_NSIG_WORDS) {
 228	default:
 229		for (i = 1; i < _NSIG_WORDS; ++i) {
 230			x = *++s &~ *++m;
 231			if (!x)
 232				continue;
 233			sig = ffz(~x) + i*_NSIG_BPW + 1;
 234			break;
 235		}
 236		break;
 237
 238	case 2:
 239		x = s[1] &~ m[1];
 240		if (!x)
 241			break;
 242		sig = ffz(~x) + _NSIG_BPW + 1;
 243		break;
 244
 245	case 1:
 246		/* Nothing to do */
 247		break;
 248	}
 249
 250	return sig;
 251}
 252
 253static inline void print_dropped_signal(int sig)
 254{
 255	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 256
 257	if (!print_fatal_signals)
 258		return;
 259
 260	if (!__ratelimit(&ratelimit_state))
 261		return;
 262
 263	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 264				current->comm, current->pid, sig);
 265}
 266
 267/**
 268 * task_set_jobctl_pending - set jobctl pending bits
 269 * @task: target task
 270 * @mask: pending bits to set
 271 *
 272 * Clear @mask from @task->jobctl.  @mask must be subset of
 273 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 274 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 275 * cleared.  If @task is already being killed or exiting, this function
 276 * becomes noop.
 277 *
 278 * CONTEXT:
 279 * Must be called with @task->sighand->siglock held.
 280 *
 281 * RETURNS:
 282 * %true if @mask is set, %false if made noop because @task was dying.
 283 */
 284bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 285{
 286	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
 287			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 288	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 289
 290	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
 291		return false;
 292
 293	if (mask & JOBCTL_STOP_SIGMASK)
 294		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
 295
 296	task->jobctl |= mask;
 297	return true;
 298}
 299
 300/**
 301 * task_clear_jobctl_trapping - clear jobctl trapping bit
 302 * @task: target task
 303 *
 304 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 305 * Clear it and wake up the ptracer.  Note that we don't need any further
 306 * locking.  @task->siglock guarantees that @task->parent points to the
 307 * ptracer.
 308 *
 309 * CONTEXT:
 310 * Must be called with @task->sighand->siglock held.
 311 */
 312void task_clear_jobctl_trapping(struct task_struct *task)
 313{
 314	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
 315		task->jobctl &= ~JOBCTL_TRAPPING;
 316		smp_mb();	/* advised by wake_up_bit() */
 317		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 318	}
 319}
 320
 321/**
 322 * task_clear_jobctl_pending - clear jobctl pending bits
 323 * @task: target task
 324 * @mask: pending bits to clear
 325 *
 326 * Clear @mask from @task->jobctl.  @mask must be subset of
 327 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 328 * STOP bits are cleared together.
 329 *
 330 * If clearing of @mask leaves no stop or trap pending, this function calls
 331 * task_clear_jobctl_trapping().
 332 *
 333 * CONTEXT:
 334 * Must be called with @task->sighand->siglock held.
 335 */
 336void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 337{
 338	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 339
 340	if (mask & JOBCTL_STOP_PENDING)
 341		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
 342
 343	task->jobctl &= ~mask;
 344
 345	if (!(task->jobctl & JOBCTL_PENDING_MASK))
 346		task_clear_jobctl_trapping(task);
 347}
 348
 349/**
 350 * task_participate_group_stop - participate in a group stop
 351 * @task: task participating in a group stop
 352 *
 353 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 354 * Group stop states are cleared and the group stop count is consumed if
 355 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 356 * stop, the appropriate `SIGNAL_*` flags are set.
 357 *
 358 * CONTEXT:
 359 * Must be called with @task->sighand->siglock held.
 360 *
 361 * RETURNS:
 362 * %true if group stop completion should be notified to the parent, %false
 363 * otherwise.
 364 */
 365static bool task_participate_group_stop(struct task_struct *task)
 366{
 367	struct signal_struct *sig = task->signal;
 368	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 369
 370	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 371
 372	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 373
 374	if (!consume)
 375		return false;
 376
 377	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
 378		sig->group_stop_count--;
 379
 380	/*
 381	 * Tell the caller to notify completion iff we are entering into a
 382	 * fresh group stop.  Read comment in do_signal_stop() for details.
 383	 */
 384	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
 385		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 386		return true;
 387	}
 388	return false;
 389}
 390
 391void task_join_group_stop(struct task_struct *task)
 392{
 393	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
 394	struct signal_struct *sig = current->signal;
 395
 396	if (sig->group_stop_count) {
 397		sig->group_stop_count++;
 398		mask |= JOBCTL_STOP_CONSUME;
 399	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
 400		return;
 401
 402	/* Have the new thread join an on-going signal group stop */
 403	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 404}
 405
 406/*
 407 * allocate a new signal queue record
 408 * - this may be called without locks if and only if t == current, otherwise an
 409 *   appropriate lock must be held to stop the target task from exiting
 410 */
 411static struct sigqueue *
 412__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 413		 int override_rlimit, const unsigned int sigqueue_flags)
 414{
 415	struct sigqueue *q = NULL;
 416	struct ucounts *ucounts = NULL;
 417	long sigpending;
 418
 419	/*
 420	 * Protect access to @t credentials. This can go away when all
 421	 * callers hold rcu read lock.
 422	 *
 423	 * NOTE! A pending signal will hold on to the user refcount,
 424	 * and we get/put the refcount only when the sigpending count
 425	 * changes from/to zero.
 426	 */
 427	rcu_read_lock();
 428	ucounts = task_ucounts(t);
 429	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 430	rcu_read_unlock();
 431	if (!sigpending)
 432		return NULL;
 433
 434	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
 435		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
 
 
 436	} else {
 437		print_dropped_signal(sig);
 438	}
 439
 440	if (unlikely(q == NULL)) {
 441		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 
 442	} else {
 443		INIT_LIST_HEAD(&q->list);
 444		q->flags = sigqueue_flags;
 445		q->ucounts = ucounts;
 446	}
 
 447	return q;
 448}
 449
 450static void __sigqueue_free(struct sigqueue *q)
 451{
 452	if (q->flags & SIGQUEUE_PREALLOC)
 453		return;
 454	if (q->ucounts) {
 455		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
 456		q->ucounts = NULL;
 457	}
 458	kmem_cache_free(sigqueue_cachep, q);
 459}
 460
 461void flush_sigqueue(struct sigpending *queue)
 462{
 463	struct sigqueue *q;
 464
 465	sigemptyset(&queue->signal);
 466	while (!list_empty(&queue->list)) {
 467		q = list_entry(queue->list.next, struct sigqueue , list);
 468		list_del_init(&q->list);
 469		__sigqueue_free(q);
 470	}
 471}
 472
 473/*
 474 * Flush all pending signals for this kthread.
 475 */
 476void flush_signals(struct task_struct *t)
 477{
 478	unsigned long flags;
 479
 480	spin_lock_irqsave(&t->sighand->siglock, flags);
 481	clear_tsk_thread_flag(t, TIF_SIGPENDING);
 482	flush_sigqueue(&t->pending);
 483	flush_sigqueue(&t->signal->shared_pending);
 484	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 485}
 486EXPORT_SYMBOL(flush_signals);
 487
 488#ifdef CONFIG_POSIX_TIMERS
 489static void __flush_itimer_signals(struct sigpending *pending)
 490{
 491	sigset_t signal, retain;
 492	struct sigqueue *q, *n;
 493
 494	signal = pending->signal;
 495	sigemptyset(&retain);
 496
 497	list_for_each_entry_safe(q, n, &pending->list, list) {
 498		int sig = q->info.si_signo;
 499
 500		if (likely(q->info.si_code != SI_TIMER)) {
 501			sigaddset(&retain, sig);
 502		} else {
 503			sigdelset(&signal, sig);
 504			list_del_init(&q->list);
 505			__sigqueue_free(q);
 506		}
 507	}
 508
 509	sigorsets(&pending->signal, &signal, &retain);
 510}
 511
 512void flush_itimer_signals(void)
 513{
 514	struct task_struct *tsk = current;
 515	unsigned long flags;
 516
 517	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 518	__flush_itimer_signals(&tsk->pending);
 519	__flush_itimer_signals(&tsk->signal->shared_pending);
 520	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 521}
 522#endif
 523
 524void ignore_signals(struct task_struct *t)
 525{
 526	int i;
 527
 528	for (i = 0; i < _NSIG; ++i)
 529		t->sighand->action[i].sa.sa_handler = SIG_IGN;
 530
 531	flush_signals(t);
 532}
 533
 534/*
 535 * Flush all handlers for a task.
 536 */
 537
 538void
 539flush_signal_handlers(struct task_struct *t, int force_default)
 540{
 541	int i;
 542	struct k_sigaction *ka = &t->sighand->action[0];
 543	for (i = _NSIG ; i != 0 ; i--) {
 544		if (force_default || ka->sa.sa_handler != SIG_IGN)
 545			ka->sa.sa_handler = SIG_DFL;
 546		ka->sa.sa_flags = 0;
 547#ifdef __ARCH_HAS_SA_RESTORER
 548		ka->sa.sa_restorer = NULL;
 549#endif
 550		sigemptyset(&ka->sa.sa_mask);
 551		ka++;
 552	}
 553}
 554
 555bool unhandled_signal(struct task_struct *tsk, int sig)
 556{
 557	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 558	if (is_global_init(tsk))
 559		return true;
 560
 561	if (handler != SIG_IGN && handler != SIG_DFL)
 562		return false;
 563
 564	/* if ptraced, let the tracer determine */
 565	return !tsk->ptrace;
 566}
 567
 568static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
 569			   bool *resched_timer)
 570{
 571	struct sigqueue *q, *first = NULL;
 572
 573	/*
 574	 * Collect the siginfo appropriate to this signal.  Check if
 575	 * there is another siginfo for the same signal.
 576	*/
 577	list_for_each_entry(q, &list->list, list) {
 578		if (q->info.si_signo == sig) {
 579			if (first)
 580				goto still_pending;
 581			first = q;
 582		}
 583	}
 584
 585	sigdelset(&list->signal, sig);
 586
 587	if (first) {
 588still_pending:
 589		list_del_init(&first->list);
 590		copy_siginfo(info, &first->info);
 591
 592		*resched_timer =
 593			(first->flags & SIGQUEUE_PREALLOC) &&
 594			(info->si_code == SI_TIMER) &&
 595			(info->si_sys_private);
 596
 597		__sigqueue_free(first);
 598	} else {
 599		/*
 600		 * Ok, it wasn't in the queue.  This must be
 601		 * a fast-pathed signal or we must have been
 602		 * out of queue space.  So zero out the info.
 603		 */
 604		clear_siginfo(info);
 605		info->si_signo = sig;
 606		info->si_errno = 0;
 607		info->si_code = SI_USER;
 608		info->si_pid = 0;
 609		info->si_uid = 0;
 610	}
 611}
 612
 613static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 614			kernel_siginfo_t *info, bool *resched_timer)
 615{
 616	int sig = next_signal(pending, mask);
 617
 618	if (sig)
 619		collect_signal(sig, pending, info, resched_timer);
 620	return sig;
 621}
 622
 623/*
 624 * Dequeue a signal and return the element to the caller, which is
 625 * expected to free it.
 626 *
 627 * All callers have to hold the siglock.
 628 */
 629int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
 630		   kernel_siginfo_t *info, enum pid_type *type)
 631{
 632	bool resched_timer = false;
 633	int signr;
 634
 635	/* We only dequeue private signals from ourselves, we don't let
 636	 * signalfd steal them
 637	 */
 638	*type = PIDTYPE_PID;
 639	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 640	if (!signr) {
 641		*type = PIDTYPE_TGID;
 642		signr = __dequeue_signal(&tsk->signal->shared_pending,
 643					 mask, info, &resched_timer);
 644#ifdef CONFIG_POSIX_TIMERS
 645		/*
 646		 * itimer signal ?
 647		 *
 648		 * itimers are process shared and we restart periodic
 649		 * itimers in the signal delivery path to prevent DoS
 650		 * attacks in the high resolution timer case. This is
 651		 * compliant with the old way of self-restarting
 652		 * itimers, as the SIGALRM is a legacy signal and only
 653		 * queued once. Changing the restart behaviour to
 654		 * restart the timer in the signal dequeue path is
 655		 * reducing the timer noise on heavy loaded !highres
 656		 * systems too.
 657		 */
 658		if (unlikely(signr == SIGALRM)) {
 659			struct hrtimer *tmr = &tsk->signal->real_timer;
 660
 661			if (!hrtimer_is_queued(tmr) &&
 662			    tsk->signal->it_real_incr != 0) {
 663				hrtimer_forward(tmr, tmr->base->get_time(),
 664						tsk->signal->it_real_incr);
 665				hrtimer_restart(tmr);
 666			}
 667		}
 668#endif
 669	}
 670
 671	recalc_sigpending();
 672	if (!signr)
 673		return 0;
 674
 675	if (unlikely(sig_kernel_stop(signr))) {
 676		/*
 677		 * Set a marker that we have dequeued a stop signal.  Our
 678		 * caller might release the siglock and then the pending
 679		 * stop signal it is about to process is no longer in the
 680		 * pending bitmasks, but must still be cleared by a SIGCONT
 681		 * (and overruled by a SIGKILL).  So those cases clear this
 682		 * shared flag after we've set it.  Note that this flag may
 683		 * remain set after the signal we return is ignored or
 684		 * handled.  That doesn't matter because its only purpose
 685		 * is to alert stop-signal processing code when another
 686		 * processor has come along and cleared the flag.
 687		 */
 688		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 689	}
 690#ifdef CONFIG_POSIX_TIMERS
 691	if (resched_timer) {
 692		/*
 693		 * Release the siglock to ensure proper locking order
 694		 * of timer locks outside of siglocks.  Note, we leave
 695		 * irqs disabled here, since the posix-timers code is
 696		 * about to disable them again anyway.
 697		 */
 698		spin_unlock(&tsk->sighand->siglock);
 699		posixtimer_rearm(info);
 700		spin_lock(&tsk->sighand->siglock);
 701
 702		/* Don't expose the si_sys_private value to userspace */
 703		info->si_sys_private = 0;
 704	}
 705#endif
 706	return signr;
 707}
 708EXPORT_SYMBOL_GPL(dequeue_signal);
 709
 710static int dequeue_synchronous_signal(kernel_siginfo_t *info)
 711{
 712	struct task_struct *tsk = current;
 713	struct sigpending *pending = &tsk->pending;
 714	struct sigqueue *q, *sync = NULL;
 715
 716	/*
 717	 * Might a synchronous signal be in the queue?
 718	 */
 719	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
 720		return 0;
 721
 722	/*
 723	 * Return the first synchronous signal in the queue.
 724	 */
 725	list_for_each_entry(q, &pending->list, list) {
 726		/* Synchronous signals have a positive si_code */
 727		if ((q->info.si_code > SI_USER) &&
 728		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
 729			sync = q;
 730			goto next;
 731		}
 732	}
 733	return 0;
 734next:
 735	/*
 736	 * Check if there is another siginfo for the same signal.
 737	 */
 738	list_for_each_entry_continue(q, &pending->list, list) {
 739		if (q->info.si_signo == sync->info.si_signo)
 740			goto still_pending;
 741	}
 742
 743	sigdelset(&pending->signal, sync->info.si_signo);
 744	recalc_sigpending();
 745still_pending:
 746	list_del_init(&sync->list);
 747	copy_siginfo(info, &sync->info);
 748	__sigqueue_free(sync);
 749	return info->si_signo;
 750}
 751
 752/*
 753 * Tell a process that it has a new active signal..
 754 *
 755 * NOTE! we rely on the previous spin_lock to
 756 * lock interrupts for us! We can only be called with
 757 * "siglock" held, and the local interrupt must
 758 * have been disabled when that got acquired!
 759 *
 760 * No need to set need_resched since signal event passing
 761 * goes through ->blocked
 762 */
 763void signal_wake_up_state(struct task_struct *t, unsigned int state)
 764{
 765	lockdep_assert_held(&t->sighand->siglock);
 766
 767	set_tsk_thread_flag(t, TIF_SIGPENDING);
 768
 769	/*
 770	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 771	 * case. We don't check t->state here because there is a race with it
 772	 * executing another processor and just now entering stopped state.
 773	 * By using wake_up_state, we ensure the process will wake up and
 774	 * handle its death signal.
 775	 */
 776	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 777		kick_process(t);
 778}
 779
 780/*
 781 * Remove signals in mask from the pending set and queue.
 782 * Returns 1 if any signals were found.
 783 *
 784 * All callers must be holding the siglock.
 785 */
 786static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 787{
 788	struct sigqueue *q, *n;
 789	sigset_t m;
 790
 791	sigandsets(&m, mask, &s->signal);
 792	if (sigisemptyset(&m))
 793		return;
 794
 795	sigandnsets(&s->signal, &s->signal, mask);
 796	list_for_each_entry_safe(q, n, &s->list, list) {
 797		if (sigismember(mask, q->info.si_signo)) {
 798			list_del_init(&q->list);
 799			__sigqueue_free(q);
 800		}
 801	}
 
 802}
 803
 804static inline int is_si_special(const struct kernel_siginfo *info)
 805{
 806	return info <= SEND_SIG_PRIV;
 807}
 808
 809static inline bool si_fromuser(const struct kernel_siginfo *info)
 810{
 811	return info == SEND_SIG_NOINFO ||
 812		(!is_si_special(info) && SI_FROMUSER(info));
 813}
 814
 815/*
 816 * called with RCU read lock from check_kill_permission()
 817 */
 818static bool kill_ok_by_cred(struct task_struct *t)
 819{
 820	const struct cred *cred = current_cred();
 821	const struct cred *tcred = __task_cred(t);
 822
 823	return uid_eq(cred->euid, tcred->suid) ||
 824	       uid_eq(cred->euid, tcred->uid) ||
 825	       uid_eq(cred->uid, tcred->suid) ||
 826	       uid_eq(cred->uid, tcred->uid) ||
 827	       ns_capable(tcred->user_ns, CAP_KILL);
 
 
 
 
 
 828}
 829
 830/*
 831 * Bad permissions for sending the signal
 832 * - the caller must hold the RCU read lock
 833 */
 834static int check_kill_permission(int sig, struct kernel_siginfo *info,
 835				 struct task_struct *t)
 836{
 837	struct pid *sid;
 838	int error;
 839
 840	if (!valid_signal(sig))
 841		return -EINVAL;
 842
 843	if (!si_fromuser(info))
 844		return 0;
 845
 846	error = audit_signal_info(sig, t); /* Let audit system see the signal */
 847	if (error)
 848		return error;
 849
 850	if (!same_thread_group(current, t) &&
 851	    !kill_ok_by_cred(t)) {
 852		switch (sig) {
 853		case SIGCONT:
 854			sid = task_session(t);
 855			/*
 856			 * We don't return the error if sid == NULL. The
 857			 * task was unhashed, the caller must notice this.
 858			 */
 859			if (!sid || sid == task_session(current))
 860				break;
 861			fallthrough;
 862		default:
 863			return -EPERM;
 864		}
 865	}
 866
 867	return security_task_kill(t, info, sig, NULL);
 868}
 869
 870/**
 871 * ptrace_trap_notify - schedule trap to notify ptracer
 872 * @t: tracee wanting to notify tracer
 873 *
 874 * This function schedules sticky ptrace trap which is cleared on the next
 875 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 876 * ptracer.
 877 *
 878 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 879 * ptracer is listening for events, tracee is woken up so that it can
 880 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 881 * eventually taken without returning to userland after the existing traps
 882 * are finished by PTRACE_CONT.
 883 *
 884 * CONTEXT:
 885 * Must be called with @task->sighand->siglock held.
 886 */
 887static void ptrace_trap_notify(struct task_struct *t)
 888{
 889	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
 890	lockdep_assert_held(&t->sighand->siglock);
 891
 892	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
 893	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 894}
 895
 896/*
 897 * Handle magic process-wide effects of stop/continue signals. Unlike
 898 * the signal actions, these happen immediately at signal-generation
 899 * time regardless of blocking, ignoring, or handling.  This does the
 900 * actual continuing for SIGCONT, but not the actual stopping for stop
 901 * signals. The process stop is done as a signal action for SIG_DFL.
 902 *
 903 * Returns true if the signal should be actually delivered, otherwise
 904 * it should be dropped.
 905 */
 906static bool prepare_signal(int sig, struct task_struct *p, bool force)
 907{
 908	struct signal_struct *signal = p->signal;
 909	struct task_struct *t;
 910	sigset_t flush;
 911
 912	if (signal->flags & SIGNAL_GROUP_EXIT) {
 913		if (signal->core_state)
 914			return sig == SIGKILL;
 915		/*
 916		 * The process is in the middle of dying, drop the signal.
 917		 */
 918		return false;
 919	} else if (sig_kernel_stop(sig)) {
 920		/*
 921		 * This is a stop signal.  Remove SIGCONT from all queues.
 922		 */
 923		siginitset(&flush, sigmask(SIGCONT));
 924		flush_sigqueue_mask(&flush, &signal->shared_pending);
 925		for_each_thread(p, t)
 926			flush_sigqueue_mask(&flush, &t->pending);
 927	} else if (sig == SIGCONT) {
 928		unsigned int why;
 929		/*
 930		 * Remove all stop signals from all queues, wake all threads.
 931		 */
 932		siginitset(&flush, SIG_KERNEL_STOP_MASK);
 933		flush_sigqueue_mask(&flush, &signal->shared_pending);
 934		for_each_thread(p, t) {
 935			flush_sigqueue_mask(&flush, &t->pending);
 936			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 937			if (likely(!(t->ptrace & PT_SEIZED))) {
 938				t->jobctl &= ~JOBCTL_STOPPED;
 939				wake_up_state(t, __TASK_STOPPED);
 940			} else
 941				ptrace_trap_notify(t);
 942		}
 943
 944		/*
 945		 * Notify the parent with CLD_CONTINUED if we were stopped.
 946		 *
 947		 * If we were in the middle of a group stop, we pretend it
 948		 * was already finished, and then continued. Since SIGCHLD
 949		 * doesn't queue we report only CLD_STOPPED, as if the next
 950		 * CLD_CONTINUED was dropped.
 951		 */
 952		why = 0;
 953		if (signal->flags & SIGNAL_STOP_STOPPED)
 954			why |= SIGNAL_CLD_CONTINUED;
 955		else if (signal->group_stop_count)
 956			why |= SIGNAL_CLD_STOPPED;
 957
 958		if (why) {
 959			/*
 960			 * The first thread which returns from do_signal_stop()
 961			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 962			 * notify its parent. See get_signal().
 963			 */
 964			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 965			signal->group_stop_count = 0;
 966			signal->group_exit_code = 0;
 967		}
 968	}
 969
 970	return !sig_ignored(p, sig, force);
 971}
 972
 973/*
 974 * Test if P wants to take SIG.  After we've checked all threads with this,
 975 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 976 * blocking SIG were ruled out because they are not running and already
 977 * have pending signals.  Such threads will dequeue from the shared queue
 978 * as soon as they're available, so putting the signal on the shared queue
 979 * will be equivalent to sending it to one such thread.
 980 */
 981static inline bool wants_signal(int sig, struct task_struct *p)
 982{
 983	if (sigismember(&p->blocked, sig))
 984		return false;
 985
 986	if (p->flags & PF_EXITING)
 987		return false;
 988
 989	if (sig == SIGKILL)
 990		return true;
 991
 992	if (task_is_stopped_or_traced(p))
 993		return false;
 994
 995	return task_curr(p) || !task_sigpending(p);
 996}
 997
 998static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 999{
1000	struct signal_struct *signal = p->signal;
1001	struct task_struct *t;
1002
1003	/*
1004	 * Now find a thread we can wake up to take the signal off the queue.
1005	 *
1006	 * If the main thread wants the signal, it gets first crack.
1007	 * Probably the least surprising to the average bear.
1008	 */
1009	if (wants_signal(sig, p))
1010		t = p;
1011	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1012		/*
1013		 * There is just one thread and it does not need to be woken.
1014		 * It will dequeue unblocked signals before it runs again.
1015		 */
1016		return;
1017	else {
1018		/*
1019		 * Otherwise try to find a suitable thread.
1020		 */
1021		t = signal->curr_target;
1022		while (!wants_signal(sig, t)) {
1023			t = next_thread(t);
1024			if (t == signal->curr_target)
1025				/*
1026				 * No thread needs to be woken.
1027				 * Any eligible threads will see
1028				 * the signal in the queue soon.
1029				 */
1030				return;
1031		}
1032		signal->curr_target = t;
1033	}
1034
1035	/*
1036	 * Found a killable thread.  If the signal will be fatal,
1037	 * then start taking the whole group down immediately.
1038	 */
1039	if (sig_fatal(p, sig) &&
1040	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1041	    !sigismember(&t->real_blocked, sig) &&
1042	    (sig == SIGKILL || !p->ptrace)) {
1043		/*
1044		 * This signal will be fatal to the whole group.
1045		 */
1046		if (!sig_kernel_coredump(sig)) {
1047			/*
1048			 * Start a group exit and wake everybody up.
1049			 * This way we don't have other threads
1050			 * running and doing things after a slower
1051			 * thread has the fatal signal pending.
1052			 */
1053			signal->flags = SIGNAL_GROUP_EXIT;
1054			signal->group_exit_code = sig;
1055			signal->group_stop_count = 0;
1056			t = p;
1057			do {
1058				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1059				sigaddset(&t->pending.signal, SIGKILL);
1060				signal_wake_up(t, 1);
1061			} while_each_thread(p, t);
1062			return;
1063		}
1064	}
1065
1066	/*
1067	 * The signal is already in the shared-pending queue.
1068	 * Tell the chosen thread to wake up and dequeue it.
1069	 */
1070	signal_wake_up(t, sig == SIGKILL);
1071	return;
1072}
1073
1074static inline bool legacy_queue(struct sigpending *signals, int sig)
1075{
1076	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1077}
1078
1079static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1080				struct task_struct *t, enum pid_type type, bool force)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1081{
1082	struct sigpending *pending;
1083	struct sigqueue *q;
1084	int override_rlimit;
1085	int ret = 0, result;
1086
1087	lockdep_assert_held(&t->sighand->siglock);
1088
1089	result = TRACE_SIGNAL_IGNORED;
1090	if (!prepare_signal(sig, t, force))
 
1091		goto ret;
1092
1093	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1094	/*
1095	 * Short-circuit ignored signals and support queuing
1096	 * exactly one non-rt signal, so that we can get more
1097	 * detailed information about the cause of the signal.
1098	 */
1099	result = TRACE_SIGNAL_ALREADY_PENDING;
1100	if (legacy_queue(pending, sig))
1101		goto ret;
1102
1103	result = TRACE_SIGNAL_DELIVERED;
1104	/*
1105	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
 
1106	 */
1107	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1108		goto out_set;
1109
1110	/*
1111	 * Real-time signals must be queued if sent by sigqueue, or
1112	 * some other real-time mechanism.  It is implementation
1113	 * defined whether kill() does so.  We attempt to do so, on
1114	 * the principle of least surprise, but since kill is not
1115	 * allowed to fail with EAGAIN when low on memory we just
1116	 * make sure at least one signal gets delivered and don't
1117	 * pass on the info struct.
1118	 */
1119	if (sig < SIGRTMIN)
1120		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1121	else
1122		override_rlimit = 0;
1123
1124	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1125
1126	if (q) {
1127		list_add_tail(&q->list, &pending->list);
1128		switch ((unsigned long) info) {
1129		case (unsigned long) SEND_SIG_NOINFO:
1130			clear_siginfo(&q->info);
1131			q->info.si_signo = sig;
1132			q->info.si_errno = 0;
1133			q->info.si_code = SI_USER;
1134			q->info.si_pid = task_tgid_nr_ns(current,
1135							task_active_pid_ns(t));
1136			rcu_read_lock();
1137			q->info.si_uid =
1138				from_kuid_munged(task_cred_xxx(t, user_ns),
1139						 current_uid());
1140			rcu_read_unlock();
1141			break;
1142		case (unsigned long) SEND_SIG_PRIV:
1143			clear_siginfo(&q->info);
1144			q->info.si_signo = sig;
1145			q->info.si_errno = 0;
1146			q->info.si_code = SI_KERNEL;
1147			q->info.si_pid = 0;
1148			q->info.si_uid = 0;
1149			break;
1150		default:
1151			copy_siginfo(&q->info, info);
 
 
1152			break;
1153		}
1154	} else if (!is_si_special(info) &&
1155		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1156		/*
1157		 * Queue overflow, abort.  We may abort if the
1158		 * signal was rt and sent by user using something
1159		 * other than kill().
1160		 */
1161		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1162		ret = -EAGAIN;
1163		goto ret;
1164	} else {
1165		/*
1166		 * This is a silent loss of information.  We still
1167		 * send the signal, but the *info bits are lost.
1168		 */
1169		result = TRACE_SIGNAL_LOSE_INFO;
 
 
 
 
1170	}
1171
1172out_set:
1173	signalfd_notify(t, sig);
1174	sigaddset(&pending->signal, sig);
1175
1176	/* Let multiprocess signals appear after on-going forks */
1177	if (type > PIDTYPE_TGID) {
1178		struct multiprocess_signals *delayed;
1179		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1180			sigset_t *signal = &delayed->signal;
1181			/* Can't queue both a stop and a continue signal */
1182			if (sig == SIGCONT)
1183				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1184			else if (sig_kernel_stop(sig))
1185				sigdelset(signal, SIGCONT);
1186			sigaddset(signal, sig);
1187		}
1188	}
1189
1190	complete_signal(sig, t, type);
1191ret:
1192	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1193	return ret;
1194}
1195
1196static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1197{
1198	bool ret = false;
1199	switch (siginfo_layout(info->si_signo, info->si_code)) {
1200	case SIL_KILL:
1201	case SIL_CHLD:
1202	case SIL_RT:
1203		ret = true;
1204		break;
1205	case SIL_TIMER:
1206	case SIL_POLL:
1207	case SIL_FAULT:
1208	case SIL_FAULT_TRAPNO:
1209	case SIL_FAULT_MCEERR:
1210	case SIL_FAULT_BNDERR:
1211	case SIL_FAULT_PKUERR:
1212	case SIL_FAULT_PERF_EVENT:
1213	case SIL_SYS:
1214		ret = false;
1215		break;
1216	}
1217	return ret;
1218}
1219
1220int send_signal_locked(int sig, struct kernel_siginfo *info,
1221		       struct task_struct *t, enum pid_type type)
1222{
1223	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1224	bool force = false;
1225
1226	if (info == SEND_SIG_NOINFO) {
1227		/* Force if sent from an ancestor pid namespace */
1228		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1229	} else if (info == SEND_SIG_PRIV) {
1230		/* Don't ignore kernel generated signals */
1231		force = true;
1232	} else if (has_si_pid_and_uid(info)) {
1233		/* SIGKILL and SIGSTOP is special or has ids */
1234		struct user_namespace *t_user_ns;
1235
1236		rcu_read_lock();
1237		t_user_ns = task_cred_xxx(t, user_ns);
1238		if (current_user_ns() != t_user_ns) {
1239			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1240			info->si_uid = from_kuid_munged(t_user_ns, uid);
1241		}
1242		rcu_read_unlock();
1243
1244		/* A kernel generated signal? */
1245		force = (info->si_code == SI_KERNEL);
1246
1247		/* From an ancestor pid namespace? */
1248		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1249			info->si_pid = 0;
1250			force = true;
1251		}
1252	}
1253	return __send_signal_locked(sig, info, t, type, force);
1254}
1255
1256static void print_fatal_signal(int signr)
1257{
1258	struct pt_regs *regs = task_pt_regs(current);
1259	pr_info("potentially unexpected fatal signal %d.\n", signr);
1260
1261#if defined(__i386__) && !defined(__arch_um__)
1262	pr_info("code at %08lx: ", regs->ip);
1263	{
1264		int i;
1265		for (i = 0; i < 16; i++) {
1266			unsigned char insn;
1267
1268			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1269				break;
1270			pr_cont("%02x ", insn);
1271		}
1272	}
1273	pr_cont("\n");
1274#endif
1275	preempt_disable();
1276	show_regs(regs);
1277	preempt_enable();
1278}
1279
1280static int __init setup_print_fatal_signals(char *str)
1281{
1282	get_option (&str, &print_fatal_signals);
1283
1284	return 1;
1285}
1286
1287__setup("print-fatal-signals=", setup_print_fatal_signals);
1288
1289int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1290			enum pid_type type)
 
 
 
 
 
 
 
 
 
 
 
 
1291{
1292	unsigned long flags;
1293	int ret = -ESRCH;
1294
1295	if (lock_task_sighand(p, &flags)) {
1296		ret = send_signal_locked(sig, info, p, type);
1297		unlock_task_sighand(p, &flags);
1298	}
1299
1300	return ret;
1301}
1302
1303enum sig_handler {
1304	HANDLER_CURRENT, /* If reachable use the current handler */
1305	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1306	HANDLER_EXIT,	 /* Only visible as the process exit code */
1307};
1308
1309/*
1310 * Force a signal that the process can't ignore: if necessary
1311 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1312 *
1313 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1314 * since we do not want to have a signal handler that was blocked
1315 * be invoked when user space had explicitly blocked it.
1316 *
1317 * We don't want to have recursive SIGSEGV's etc, for example,
1318 * that is why we also clear SIGNAL_UNKILLABLE.
1319 */
1320static int
1321force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1322	enum sig_handler handler)
1323{
1324	unsigned long int flags;
1325	int ret, blocked, ignored;
1326	struct k_sigaction *action;
1327	int sig = info->si_signo;
1328
1329	spin_lock_irqsave(&t->sighand->siglock, flags);
1330	action = &t->sighand->action[sig-1];
1331	ignored = action->sa.sa_handler == SIG_IGN;
1332	blocked = sigismember(&t->blocked, sig);
1333	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1334		action->sa.sa_handler = SIG_DFL;
1335		if (handler == HANDLER_EXIT)
1336			action->sa.sa_flags |= SA_IMMUTABLE;
1337		if (blocked) {
1338			sigdelset(&t->blocked, sig);
1339			recalc_sigpending_and_wake(t);
1340		}
1341	}
1342	/*
1343	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1344	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1345	 */
1346	if (action->sa.sa_handler == SIG_DFL &&
1347	    (!t->ptrace || (handler == HANDLER_EXIT)))
1348		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1349	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1350	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1351
1352	return ret;
1353}
1354
1355int force_sig_info(struct kernel_siginfo *info)
1356{
1357	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1358}
1359
1360/*
1361 * Nuke all other threads in the group.
1362 */
1363int zap_other_threads(struct task_struct *p)
1364{
1365	struct task_struct *t = p;
1366	int count = 0;
1367
1368	p->signal->group_stop_count = 0;
1369
1370	while_each_thread(p, t) {
1371		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1372		count++;
1373
1374		/* Don't bother with already dead threads */
1375		if (t->exit_state)
1376			continue;
1377		sigaddset(&t->pending.signal, SIGKILL);
1378		signal_wake_up(t, 1);
1379	}
1380
1381	return count;
1382}
1383
1384struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1385					   unsigned long *flags)
1386{
1387	struct sighand_struct *sighand;
1388
1389	rcu_read_lock();
1390	for (;;) {
 
 
 
 
 
 
1391		sighand = rcu_dereference(tsk->sighand);
1392		if (unlikely(sighand == NULL))
 
 
1393			break;
1394
1395		/*
1396		 * This sighand can be already freed and even reused, but
1397		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1398		 * initializes ->siglock: this slab can't go away, it has
1399		 * the same object type, ->siglock can't be reinitialized.
1400		 *
1401		 * We need to ensure that tsk->sighand is still the same
1402		 * after we take the lock, we can race with de_thread() or
1403		 * __exit_signal(). In the latter case the next iteration
1404		 * must see ->sighand == NULL.
1405		 */
1406		spin_lock_irqsave(&sighand->siglock, *flags);
1407		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
 
1408			break;
1409		spin_unlock_irqrestore(&sighand->siglock, *flags);
 
 
 
1410	}
1411	rcu_read_unlock();
1412
1413	return sighand;
1414}
1415
1416#ifdef CONFIG_LOCKDEP
1417void lockdep_assert_task_sighand_held(struct task_struct *task)
1418{
1419	struct sighand_struct *sighand;
1420
1421	rcu_read_lock();
1422	sighand = rcu_dereference(task->sighand);
1423	if (sighand)
1424		lockdep_assert_held(&sighand->siglock);
1425	else
1426		WARN_ON_ONCE(1);
1427	rcu_read_unlock();
1428}
1429#endif
1430
1431/*
1432 * send signal info to all the members of a group
1433 */
1434int group_send_sig_info(int sig, struct kernel_siginfo *info,
1435			struct task_struct *p, enum pid_type type)
1436{
1437	int ret;
1438
1439	rcu_read_lock();
1440	ret = check_kill_permission(sig, info, p);
1441	rcu_read_unlock();
1442
1443	if (!ret && sig)
1444		ret = do_send_sig_info(sig, info, p, type);
1445
1446	return ret;
1447}
1448
1449/*
1450 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1451 * control characters do (^C, ^Z etc)
1452 * - the caller must hold at least a readlock on tasklist_lock
1453 */
1454int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1455{
1456	struct task_struct *p = NULL;
1457	int retval, success;
1458
1459	success = 0;
1460	retval = -ESRCH;
1461	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1462		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1463		success |= !err;
1464		retval = err;
1465	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1466	return success ? 0 : retval;
1467}
1468
1469int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1470{
1471	int error = -ESRCH;
1472	struct task_struct *p;
1473
1474	for (;;) {
1475		rcu_read_lock();
1476		p = pid_task(pid, PIDTYPE_PID);
1477		if (p)
1478			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1479		rcu_read_unlock();
1480		if (likely(!p || error != -ESRCH))
1481			return error;
1482
1483		/*
1484		 * The task was unhashed in between, try again.  If it
1485		 * is dead, pid_task() will return NULL, if we race with
1486		 * de_thread() it will find the new leader.
1487		 */
1488	}
1489}
1490
1491static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1492{
1493	int error;
1494	rcu_read_lock();
1495	error = kill_pid_info(sig, info, find_vpid(pid));
1496	rcu_read_unlock();
1497	return error;
1498}
1499
1500static inline bool kill_as_cred_perm(const struct cred *cred,
1501				     struct task_struct *target)
1502{
1503	const struct cred *pcred = __task_cred(target);
1504
1505	return uid_eq(cred->euid, pcred->suid) ||
1506	       uid_eq(cred->euid, pcred->uid) ||
1507	       uid_eq(cred->uid, pcred->suid) ||
1508	       uid_eq(cred->uid, pcred->uid);
1509}
1510
1511/*
1512 * The usb asyncio usage of siginfo is wrong.  The glibc support
1513 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1514 * AKA after the generic fields:
1515 *	kernel_pid_t	si_pid;
1516 *	kernel_uid32_t	si_uid;
1517 *	sigval_t	si_value;
1518 *
1519 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1520 * after the generic fields is:
1521 *	void __user 	*si_addr;
1522 *
1523 * This is a practical problem when there is a 64bit big endian kernel
1524 * and a 32bit userspace.  As the 32bit address will encoded in the low
1525 * 32bits of the pointer.  Those low 32bits will be stored at higher
1526 * address than appear in a 32 bit pointer.  So userspace will not
1527 * see the address it was expecting for it's completions.
1528 *
1529 * There is nothing in the encoding that can allow
1530 * copy_siginfo_to_user32 to detect this confusion of formats, so
1531 * handle this by requiring the caller of kill_pid_usb_asyncio to
1532 * notice when this situration takes place and to store the 32bit
1533 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1534 * parameter.
1535 */
1536int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1537			 struct pid *pid, const struct cred *cred)
1538{
1539	struct kernel_siginfo info;
1540	struct task_struct *p;
1541	unsigned long flags;
1542	int ret = -EINVAL;
1543
1544	if (!valid_signal(sig))
1545		return ret;
1546
1547	clear_siginfo(&info);
1548	info.si_signo = sig;
1549	info.si_errno = errno;
1550	info.si_code = SI_ASYNCIO;
1551	*((sigval_t *)&info.si_pid) = addr;
1552
1553	rcu_read_lock();
1554	p = pid_task(pid, PIDTYPE_PID);
1555	if (!p) {
1556		ret = -ESRCH;
1557		goto out_unlock;
1558	}
1559	if (!kill_as_cred_perm(cred, p)) {
1560		ret = -EPERM;
1561		goto out_unlock;
1562	}
1563	ret = security_task_kill(p, &info, sig, cred);
1564	if (ret)
1565		goto out_unlock;
1566
1567	if (sig) {
1568		if (lock_task_sighand(p, &flags)) {
1569			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1570			unlock_task_sighand(p, &flags);
1571		} else
1572			ret = -ESRCH;
1573	}
1574out_unlock:
1575	rcu_read_unlock();
1576	return ret;
1577}
1578EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1579
1580/*
1581 * kill_something_info() interprets pid in interesting ways just like kill(2).
1582 *
1583 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1584 * is probably wrong.  Should make it like BSD or SYSV.
1585 */
1586
1587static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1588{
1589	int ret;
1590
1591	if (pid > 0)
1592		return kill_proc_info(sig, info, pid);
1593
1594	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1595	if (pid == INT_MIN)
1596		return -ESRCH;
1597
1598	read_lock(&tasklist_lock);
1599	if (pid != -1) {
1600		ret = __kill_pgrp_info(sig, info,
1601				pid ? find_vpid(-pid) : task_pgrp(current));
1602	} else {
1603		int retval = 0, count = 0;
1604		struct task_struct * p;
1605
1606		for_each_process(p) {
1607			if (task_pid_vnr(p) > 1 &&
1608					!same_thread_group(p, current)) {
1609				int err = group_send_sig_info(sig, info, p,
1610							      PIDTYPE_MAX);
1611				++count;
1612				if (err != -EPERM)
1613					retval = err;
1614			}
1615		}
1616		ret = count ? retval : -ESRCH;
1617	}
1618	read_unlock(&tasklist_lock);
1619
1620	return ret;
1621}
1622
1623/*
1624 * These are for backward compatibility with the rest of the kernel source.
1625 */
1626
1627int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1628{
1629	/*
1630	 * Make sure legacy kernel users don't send in bad values
1631	 * (normal paths check this in check_kill_permission).
1632	 */
1633	if (!valid_signal(sig))
1634		return -EINVAL;
1635
1636	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1637}
1638EXPORT_SYMBOL(send_sig_info);
1639
1640#define __si_special(priv) \
1641	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1642
1643int
1644send_sig(int sig, struct task_struct *p, int priv)
1645{
1646	return send_sig_info(sig, __si_special(priv), p);
1647}
1648EXPORT_SYMBOL(send_sig);
1649
1650void force_sig(int sig)
 
1651{
1652	struct kernel_siginfo info;
1653
1654	clear_siginfo(&info);
1655	info.si_signo = sig;
1656	info.si_errno = 0;
1657	info.si_code = SI_KERNEL;
1658	info.si_pid = 0;
1659	info.si_uid = 0;
1660	force_sig_info(&info);
1661}
1662EXPORT_SYMBOL(force_sig);
1663
1664void force_fatal_sig(int sig)
1665{
1666	struct kernel_siginfo info;
1667
1668	clear_siginfo(&info);
1669	info.si_signo = sig;
1670	info.si_errno = 0;
1671	info.si_code = SI_KERNEL;
1672	info.si_pid = 0;
1673	info.si_uid = 0;
1674	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1675}
1676
1677void force_exit_sig(int sig)
1678{
1679	struct kernel_siginfo info;
1680
1681	clear_siginfo(&info);
1682	info.si_signo = sig;
1683	info.si_errno = 0;
1684	info.si_code = SI_KERNEL;
1685	info.si_pid = 0;
1686	info.si_uid = 0;
1687	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1688}
1689
1690/*
1691 * When things go south during signal handling, we
1692 * will force a SIGSEGV. And if the signal that caused
1693 * the problem was already a SIGSEGV, we'll want to
1694 * make sure we don't even try to deliver the signal..
1695 */
1696void force_sigsegv(int sig)
 
1697{
1698	if (sig == SIGSEGV)
1699		force_fatal_sig(SIGSEGV);
1700	else
1701		force_sig(SIGSEGV);
1702}
1703
1704int force_sig_fault_to_task(int sig, int code, void __user *addr
1705	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1706	, struct task_struct *t)
1707{
1708	struct kernel_siginfo info;
1709
1710	clear_siginfo(&info);
1711	info.si_signo = sig;
1712	info.si_errno = 0;
1713	info.si_code  = code;
1714	info.si_addr  = addr;
1715#ifdef __ia64__
1716	info.si_imm = imm;
1717	info.si_flags = flags;
1718	info.si_isr = isr;
1719#endif
1720	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1721}
1722
1723int force_sig_fault(int sig, int code, void __user *addr
1724	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1725{
1726	return force_sig_fault_to_task(sig, code, addr
1727				       ___ARCH_SI_IA64(imm, flags, isr), current);
1728}
1729
1730int send_sig_fault(int sig, int code, void __user *addr
1731	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1732	, struct task_struct *t)
1733{
1734	struct kernel_siginfo info;
1735
1736	clear_siginfo(&info);
1737	info.si_signo = sig;
1738	info.si_errno = 0;
1739	info.si_code  = code;
1740	info.si_addr  = addr;
1741#ifdef __ia64__
1742	info.si_imm = imm;
1743	info.si_flags = flags;
1744	info.si_isr = isr;
1745#endif
1746	return send_sig_info(info.si_signo, &info, t);
1747}
1748
1749int force_sig_mceerr(int code, void __user *addr, short lsb)
1750{
1751	struct kernel_siginfo info;
1752
1753	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1754	clear_siginfo(&info);
1755	info.si_signo = SIGBUS;
1756	info.si_errno = 0;
1757	info.si_code = code;
1758	info.si_addr = addr;
1759	info.si_addr_lsb = lsb;
1760	return force_sig_info(&info);
1761}
1762
1763int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1764{
1765	struct kernel_siginfo info;
1766
1767	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1768	clear_siginfo(&info);
1769	info.si_signo = SIGBUS;
1770	info.si_errno = 0;
1771	info.si_code = code;
1772	info.si_addr = addr;
1773	info.si_addr_lsb = lsb;
1774	return send_sig_info(info.si_signo, &info, t);
1775}
1776EXPORT_SYMBOL(send_sig_mceerr);
1777
1778int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1779{
1780	struct kernel_siginfo info;
1781
1782	clear_siginfo(&info);
1783	info.si_signo = SIGSEGV;
1784	info.si_errno = 0;
1785	info.si_code  = SEGV_BNDERR;
1786	info.si_addr  = addr;
1787	info.si_lower = lower;
1788	info.si_upper = upper;
1789	return force_sig_info(&info);
1790}
1791
1792#ifdef SEGV_PKUERR
1793int force_sig_pkuerr(void __user *addr, u32 pkey)
1794{
1795	struct kernel_siginfo info;
1796
1797	clear_siginfo(&info);
1798	info.si_signo = SIGSEGV;
1799	info.si_errno = 0;
1800	info.si_code  = SEGV_PKUERR;
1801	info.si_addr  = addr;
1802	info.si_pkey  = pkey;
1803	return force_sig_info(&info);
1804}
1805#endif
1806
1807int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1808{
1809	struct kernel_siginfo info;
1810
1811	clear_siginfo(&info);
1812	info.si_signo     = SIGTRAP;
1813	info.si_errno     = 0;
1814	info.si_code      = TRAP_PERF;
1815	info.si_addr      = addr;
1816	info.si_perf_data = sig_data;
1817	info.si_perf_type = type;
1818
1819	/*
1820	 * Signals generated by perf events should not terminate the whole
1821	 * process if SIGTRAP is blocked, however, delivering the signal
1822	 * asynchronously is better than not delivering at all. But tell user
1823	 * space if the signal was asynchronous, so it can clearly be
1824	 * distinguished from normal synchronous ones.
1825	 */
1826	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1827				     TRAP_PERF_FLAG_ASYNC :
1828				     0;
1829
1830	return send_sig_info(info.si_signo, &info, current);
1831}
1832
1833/**
1834 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1835 * @syscall: syscall number to send to userland
1836 * @reason: filter-supplied reason code to send to userland (via si_errno)
1837 * @force_coredump: true to trigger a coredump
1838 *
1839 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1840 */
1841int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1842{
1843	struct kernel_siginfo info;
1844
1845	clear_siginfo(&info);
1846	info.si_signo = SIGSYS;
1847	info.si_code = SYS_SECCOMP;
1848	info.si_call_addr = (void __user *)KSTK_EIP(current);
1849	info.si_errno = reason;
1850	info.si_arch = syscall_get_arch(current);
1851	info.si_syscall = syscall;
1852	return force_sig_info_to_task(&info, current,
1853		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1854}
1855
1856/* For the crazy architectures that include trap information in
1857 * the errno field, instead of an actual errno value.
1858 */
1859int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1860{
1861	struct kernel_siginfo info;
1862
1863	clear_siginfo(&info);
1864	info.si_signo = SIGTRAP;
1865	info.si_errno = errno;
1866	info.si_code  = TRAP_HWBKPT;
1867	info.si_addr  = addr;
1868	return force_sig_info(&info);
1869}
1870
1871/* For the rare architectures that include trap information using
1872 * si_trapno.
1873 */
1874int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1875{
1876	struct kernel_siginfo info;
1877
1878	clear_siginfo(&info);
1879	info.si_signo = sig;
1880	info.si_errno = 0;
1881	info.si_code  = code;
1882	info.si_addr  = addr;
1883	info.si_trapno = trapno;
1884	return force_sig_info(&info);
1885}
1886
1887/* For the rare architectures that include trap information using
1888 * si_trapno.
1889 */
1890int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1891			  struct task_struct *t)
1892{
1893	struct kernel_siginfo info;
1894
1895	clear_siginfo(&info);
1896	info.si_signo = sig;
1897	info.si_errno = 0;
1898	info.si_code  = code;
1899	info.si_addr  = addr;
1900	info.si_trapno = trapno;
1901	return send_sig_info(info.si_signo, &info, t);
1902}
1903
1904int kill_pgrp(struct pid *pid, int sig, int priv)
1905{
1906	int ret;
1907
1908	read_lock(&tasklist_lock);
1909	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1910	read_unlock(&tasklist_lock);
1911
1912	return ret;
1913}
1914EXPORT_SYMBOL(kill_pgrp);
1915
1916int kill_pid(struct pid *pid, int sig, int priv)
1917{
1918	return kill_pid_info(sig, __si_special(priv), pid);
1919}
1920EXPORT_SYMBOL(kill_pid);
1921
1922/*
1923 * These functions support sending signals using preallocated sigqueue
1924 * structures.  This is needed "because realtime applications cannot
1925 * afford to lose notifications of asynchronous events, like timer
1926 * expirations or I/O completions".  In the case of POSIX Timers
1927 * we allocate the sigqueue structure from the timer_create.  If this
1928 * allocation fails we are able to report the failure to the application
1929 * with an EAGAIN error.
1930 */
1931struct sigqueue *sigqueue_alloc(void)
1932{
1933	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
 
 
 
 
 
1934}
1935
1936void sigqueue_free(struct sigqueue *q)
1937{
1938	unsigned long flags;
1939	spinlock_t *lock = &current->sighand->siglock;
1940
1941	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1942	/*
1943	 * We must hold ->siglock while testing q->list
1944	 * to serialize with collect_signal() or with
1945	 * __exit_signal()->flush_sigqueue().
1946	 */
1947	spin_lock_irqsave(lock, flags);
1948	q->flags &= ~SIGQUEUE_PREALLOC;
1949	/*
1950	 * If it is queued it will be freed when dequeued,
1951	 * like the "regular" sigqueue.
1952	 */
1953	if (!list_empty(&q->list))
1954		q = NULL;
1955	spin_unlock_irqrestore(lock, flags);
1956
1957	if (q)
1958		__sigqueue_free(q);
1959}
1960
1961int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1962{
1963	int sig = q->info.si_signo;
1964	struct sigpending *pending;
1965	struct task_struct *t;
1966	unsigned long flags;
1967	int ret, result;
1968
1969	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1970
1971	ret = -1;
1972	rcu_read_lock();
1973	t = pid_task(pid, type);
1974	if (!t || !likely(lock_task_sighand(t, &flags)))
1975		goto ret;
1976
1977	ret = 1; /* the signal is ignored */
1978	result = TRACE_SIGNAL_IGNORED;
1979	if (!prepare_signal(sig, t, false))
1980		goto out;
1981
1982	ret = 0;
1983	if (unlikely(!list_empty(&q->list))) {
1984		/*
1985		 * If an SI_TIMER entry is already queue just increment
1986		 * the overrun count.
1987		 */
1988		BUG_ON(q->info.si_code != SI_TIMER);
1989		q->info.si_overrun++;
1990		result = TRACE_SIGNAL_ALREADY_PENDING;
1991		goto out;
1992	}
1993	q->info.si_overrun = 0;
1994
1995	signalfd_notify(t, sig);
1996	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1997	list_add_tail(&q->list, &pending->list);
1998	sigaddset(&pending->signal, sig);
1999	complete_signal(sig, t, type);
2000	result = TRACE_SIGNAL_DELIVERED;
2001out:
2002	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2003	unlock_task_sighand(t, &flags);
2004ret:
2005	rcu_read_unlock();
2006	return ret;
2007}
2008
2009static void do_notify_pidfd(struct task_struct *task)
2010{
2011	struct pid *pid;
2012
2013	WARN_ON(task->exit_state == 0);
2014	pid = task_pid(task);
2015	wake_up_all(&pid->wait_pidfd);
2016}
2017
2018/*
2019 * Let a parent know about the death of a child.
2020 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2021 *
2022 * Returns true if our parent ignored us and so we've switched to
2023 * self-reaping.
2024 */
2025bool do_notify_parent(struct task_struct *tsk, int sig)
2026{
2027	struct kernel_siginfo info;
2028	unsigned long flags;
2029	struct sighand_struct *psig;
2030	bool autoreap = false;
2031	u64 utime, stime;
2032
2033	WARN_ON_ONCE(sig == -1);
2034
2035	/* do_notify_parent_cldstop should have been called instead.  */
2036	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2037
2038	WARN_ON_ONCE(!tsk->ptrace &&
2039	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2040
2041	/* Wake up all pidfd waiters */
2042	do_notify_pidfd(tsk);
2043
2044	if (sig != SIGCHLD) {
2045		/*
2046		 * This is only possible if parent == real_parent.
2047		 * Check if it has changed security domain.
2048		 */
2049		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2050			sig = SIGCHLD;
2051	}
2052
2053	clear_siginfo(&info);
2054	info.si_signo = sig;
2055	info.si_errno = 0;
2056	/*
2057	 * We are under tasklist_lock here so our parent is tied to
2058	 * us and cannot change.
2059	 *
2060	 * task_active_pid_ns will always return the same pid namespace
2061	 * until a task passes through release_task.
2062	 *
2063	 * write_lock() currently calls preempt_disable() which is the
2064	 * same as rcu_read_lock(), but according to Oleg, this is not
2065	 * correct to rely on this
2066	 */
2067	rcu_read_lock();
2068	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2069	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2070				       task_uid(tsk));
2071	rcu_read_unlock();
2072
2073	task_cputime(tsk, &utime, &stime);
2074	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2075	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2076
2077	info.si_status = tsk->exit_code & 0x7f;
2078	if (tsk->exit_code & 0x80)
2079		info.si_code = CLD_DUMPED;
2080	else if (tsk->exit_code & 0x7f)
2081		info.si_code = CLD_KILLED;
2082	else {
2083		info.si_code = CLD_EXITED;
2084		info.si_status = tsk->exit_code >> 8;
2085	}
2086
2087	psig = tsk->parent->sighand;
2088	spin_lock_irqsave(&psig->siglock, flags);
2089	if (!tsk->ptrace && sig == SIGCHLD &&
2090	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2091	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2092		/*
2093		 * We are exiting and our parent doesn't care.  POSIX.1
2094		 * defines special semantics for setting SIGCHLD to SIG_IGN
2095		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2096		 * automatically and not left for our parent's wait4 call.
2097		 * Rather than having the parent do it as a magic kind of
2098		 * signal handler, we just set this to tell do_exit that we
2099		 * can be cleaned up without becoming a zombie.  Note that
2100		 * we still call __wake_up_parent in this case, because a
2101		 * blocked sys_wait4 might now return -ECHILD.
2102		 *
2103		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2104		 * is implementation-defined: we do (if you don't want
2105		 * it, just use SIG_IGN instead).
2106		 */
2107		autoreap = true;
2108		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2109			sig = 0;
2110	}
2111	/*
2112	 * Send with __send_signal as si_pid and si_uid are in the
2113	 * parent's namespaces.
2114	 */
2115	if (valid_signal(sig) && sig)
2116		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2117	__wake_up_parent(tsk, tsk->parent);
2118	spin_unlock_irqrestore(&psig->siglock, flags);
2119
2120	return autoreap;
2121}
2122
2123/**
2124 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2125 * @tsk: task reporting the state change
2126 * @for_ptracer: the notification is for ptracer
2127 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2128 *
2129 * Notify @tsk's parent that the stopped/continued state has changed.  If
2130 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2131 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2132 *
2133 * CONTEXT:
2134 * Must be called with tasklist_lock at least read locked.
2135 */
2136static void do_notify_parent_cldstop(struct task_struct *tsk,
2137				     bool for_ptracer, int why)
2138{
2139	struct kernel_siginfo info;
2140	unsigned long flags;
2141	struct task_struct *parent;
2142	struct sighand_struct *sighand;
2143	u64 utime, stime;
2144
2145	if (for_ptracer) {
2146		parent = tsk->parent;
2147	} else {
2148		tsk = tsk->group_leader;
2149		parent = tsk->real_parent;
2150	}
2151
2152	clear_siginfo(&info);
2153	info.si_signo = SIGCHLD;
2154	info.si_errno = 0;
2155	/*
2156	 * see comment in do_notify_parent() about the following 4 lines
2157	 */
2158	rcu_read_lock();
2159	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2160	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2161	rcu_read_unlock();
2162
2163	task_cputime(tsk, &utime, &stime);
2164	info.si_utime = nsec_to_clock_t(utime);
2165	info.si_stime = nsec_to_clock_t(stime);
2166
2167 	info.si_code = why;
2168 	switch (why) {
2169 	case CLD_CONTINUED:
2170 		info.si_status = SIGCONT;
2171 		break;
2172 	case CLD_STOPPED:
2173 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2174 		break;
2175 	case CLD_TRAPPED:
2176 		info.si_status = tsk->exit_code & 0x7f;
2177 		break;
2178 	default:
2179 		BUG();
2180 	}
2181
2182	sighand = parent->sighand;
2183	spin_lock_irqsave(&sighand->siglock, flags);
2184	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2185	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2186		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2187	/*
2188	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2189	 */
2190	__wake_up_parent(tsk, parent);
2191	spin_unlock_irqrestore(&sighand->siglock, flags);
2192}
2193
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2194/*
2195 * This must be called with current->sighand->siglock held.
2196 *
2197 * This should be the path for all ptrace stops.
2198 * We always set current->last_siginfo while stopped here.
2199 * That makes it a way to test a stopped process for
2200 * being ptrace-stopped vs being job-control-stopped.
2201 *
2202 * Returns the signal the ptracer requested the code resume
2203 * with.  If the code did not stop because the tracer is gone,
2204 * the stop signal remains unchanged unless clear_code.
2205 */
2206static int ptrace_stop(int exit_code, int why, unsigned long message,
2207		       kernel_siginfo_t *info)
2208	__releases(&current->sighand->siglock)
2209	__acquires(&current->sighand->siglock)
2210{
2211	bool gstop_done = false;
2212
2213	if (arch_ptrace_stop_needed()) {
2214		/*
2215		 * The arch code has something special to do before a
2216		 * ptrace stop.  This is allowed to block, e.g. for faults
2217		 * on user stack pages.  We can't keep the siglock while
2218		 * calling arch_ptrace_stop, so we must release it now.
2219		 * To preserve proper semantics, we must do this before
2220		 * any signal bookkeeping like checking group_stop_count.
 
 
 
2221		 */
2222		spin_unlock_irq(&current->sighand->siglock);
2223		arch_ptrace_stop();
2224		spin_lock_irq(&current->sighand->siglock);
 
 
2225	}
2226
2227	/*
2228	 * After this point ptrace_signal_wake_up or signal_wake_up
2229	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2230	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2231	 * signals here to prevent ptrace_stop sleeping in schedule.
2232	 */
2233	if (!current->ptrace || __fatal_signal_pending(current))
2234		return exit_code;
2235
2236	set_special_state(TASK_TRACED);
2237	current->jobctl |= JOBCTL_TRACED;
2238
2239	/*
2240	 * We're committing to trapping.  TRACED should be visible before
2241	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2242	 * Also, transition to TRACED and updates to ->jobctl should be
2243	 * atomic with respect to siglock and should be done after the arch
2244	 * hook as siglock is released and regrabbed across it.
2245	 *
2246	 *     TRACER				    TRACEE
2247	 *
2248	 *     ptrace_attach()
2249	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2250	 *     do_wait()
2251	 *       set_current_state()                smp_wmb();
2252	 *       ptrace_do_wait()
2253	 *         wait_task_stopped()
2254	 *           task_stopped_code()
2255	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2256	 */
2257	smp_wmb();
2258
2259	current->ptrace_message = message;
2260	current->last_siginfo = info;
2261	current->exit_code = exit_code;
2262
2263	/*
2264	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2265	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
2266	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2267	 * could be clear now.  We act as if SIGCONT is received after
2268	 * TASK_TRACED is entered - ignore it.
2269	 */
2270	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2271		gstop_done = task_participate_group_stop(current);
2272
2273	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2274	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2275	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2276		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2277
2278	/* entering a trap, clear TRAPPING */
2279	task_clear_jobctl_trapping(current);
2280
2281	spin_unlock_irq(&current->sighand->siglock);
2282	read_lock(&tasklist_lock);
2283	/*
2284	 * Notify parents of the stop.
2285	 *
2286	 * While ptraced, there are two parents - the ptracer and
2287	 * the real_parent of the group_leader.  The ptracer should
2288	 * know about every stop while the real parent is only
2289	 * interested in the completion of group stop.  The states
2290	 * for the two don't interact with each other.  Notify
2291	 * separately unless they're gonna be duplicates.
2292	 */
2293	if (current->ptrace)
2294		do_notify_parent_cldstop(current, true, why);
2295	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2296		do_notify_parent_cldstop(current, false, why);
2297
2298	/*
2299	 * Don't want to allow preemption here, because
2300	 * sys_ptrace() needs this task to be inactive.
2301	 *
2302	 * XXX: implement read_unlock_no_resched().
2303	 */
2304	preempt_disable();
2305	read_unlock(&tasklist_lock);
2306	cgroup_enter_frozen();
2307	preempt_enable_no_resched();
2308	schedule();
2309	cgroup_leave_frozen(true);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2310
2311	/*
2312	 * We are back.  Now reacquire the siglock before touching
2313	 * last_siginfo, so that we are sure to have synchronized with
2314	 * any signal-sending on another CPU that wants to examine it.
2315	 */
2316	spin_lock_irq(&current->sighand->siglock);
2317	exit_code = current->exit_code;
2318	current->last_siginfo = NULL;
2319	current->ptrace_message = 0;
2320	current->exit_code = 0;
2321
2322	/* LISTENING can be set only during STOP traps, clear it */
2323	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2324
2325	/*
2326	 * Queued signals ignored us while we were stopped for tracing.
2327	 * So check for any that we should take before resuming user mode.
2328	 * This sets TIF_SIGPENDING, but never clears it.
2329	 */
2330	recalc_sigpending_tsk(current);
2331	return exit_code;
2332}
2333
2334static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2335{
2336	kernel_siginfo_t info;
2337
2338	clear_siginfo(&info);
2339	info.si_signo = signr;
2340	info.si_code = exit_code;
2341	info.si_pid = task_pid_vnr(current);
2342	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2343
2344	/* Let the debugger run.  */
2345	return ptrace_stop(exit_code, why, message, &info);
2346}
2347
2348int ptrace_notify(int exit_code, unsigned long message)
2349{
2350	int signr;
2351
2352	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2353	if (unlikely(task_work_pending(current)))
2354		task_work_run();
2355
2356	spin_lock_irq(&current->sighand->siglock);
2357	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2358	spin_unlock_irq(&current->sighand->siglock);
2359	return signr;
2360}
2361
2362/**
2363 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2364 * @signr: signr causing group stop if initiating
2365 *
2366 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2367 * and participate in it.  If already set, participate in the existing
2368 * group stop.  If participated in a group stop (and thus slept), %true is
2369 * returned with siglock released.
2370 *
2371 * If ptraced, this function doesn't handle stop itself.  Instead,
2372 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2373 * untouched.  The caller must ensure that INTERRUPT trap handling takes
2374 * places afterwards.
2375 *
2376 * CONTEXT:
2377 * Must be called with @current->sighand->siglock held, which is released
2378 * on %true return.
2379 *
2380 * RETURNS:
2381 * %false if group stop is already cancelled or ptrace trap is scheduled.
2382 * %true if participated in group stop.
2383 */
2384static bool do_signal_stop(int signr)
2385	__releases(&current->sighand->siglock)
2386{
2387	struct signal_struct *sig = current->signal;
2388
2389	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2390		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2391		struct task_struct *t;
2392
2393		/* signr will be recorded in task->jobctl for retries */
2394		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2395
2396		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2397		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2398		    unlikely(sig->group_exec_task))
2399			return false;
2400		/*
2401		 * There is no group stop already in progress.  We must
2402		 * initiate one now.
2403		 *
2404		 * While ptraced, a task may be resumed while group stop is
2405		 * still in effect and then receive a stop signal and
2406		 * initiate another group stop.  This deviates from the
2407		 * usual behavior as two consecutive stop signals can't
2408		 * cause two group stops when !ptraced.  That is why we
2409		 * also check !task_is_stopped(t) below.
2410		 *
2411		 * The condition can be distinguished by testing whether
2412		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2413		 * group_exit_code in such case.
2414		 *
2415		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2416		 * an intervening stop signal is required to cause two
2417		 * continued events regardless of ptrace.
2418		 */
2419		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2420			sig->group_exit_code = signr;
2421
2422		sig->group_stop_count = 0;
2423
2424		if (task_set_jobctl_pending(current, signr | gstop))
2425			sig->group_stop_count++;
2426
2427		t = current;
2428		while_each_thread(current, t) {
2429			/*
2430			 * Setting state to TASK_STOPPED for a group
2431			 * stop is always done with the siglock held,
2432			 * so this check has no races.
2433			 */
2434			if (!task_is_stopped(t) &&
2435			    task_set_jobctl_pending(t, signr | gstop)) {
2436				sig->group_stop_count++;
2437				if (likely(!(t->ptrace & PT_SEIZED)))
2438					signal_wake_up(t, 0);
2439				else
2440					ptrace_trap_notify(t);
2441			}
2442		}
2443	}
2444
2445	if (likely(!current->ptrace)) {
2446		int notify = 0;
2447
2448		/*
2449		 * If there are no other threads in the group, or if there
2450		 * is a group stop in progress and we are the last to stop,
2451		 * report to the parent.
2452		 */
2453		if (task_participate_group_stop(current))
2454			notify = CLD_STOPPED;
2455
2456		current->jobctl |= JOBCTL_STOPPED;
2457		set_special_state(TASK_STOPPED);
2458		spin_unlock_irq(&current->sighand->siglock);
2459
2460		/*
2461		 * Notify the parent of the group stop completion.  Because
2462		 * we're not holding either the siglock or tasklist_lock
2463		 * here, ptracer may attach inbetween; however, this is for
2464		 * group stop and should always be delivered to the real
2465		 * parent of the group leader.  The new ptracer will get
2466		 * its notification when this task transitions into
2467		 * TASK_TRACED.
2468		 */
2469		if (notify) {
2470			read_lock(&tasklist_lock);
2471			do_notify_parent_cldstop(current, false, notify);
2472			read_unlock(&tasklist_lock);
2473		}
2474
2475		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2476		cgroup_enter_frozen();
2477		schedule();
2478		return true;
2479	} else {
2480		/*
2481		 * While ptraced, group stop is handled by STOP trap.
2482		 * Schedule it and let the caller deal with it.
2483		 */
2484		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2485		return false;
2486	}
2487}
2488
2489/**
2490 * do_jobctl_trap - take care of ptrace jobctl traps
2491 *
2492 * When PT_SEIZED, it's used for both group stop and explicit
2493 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2494 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2495 * the stop signal; otherwise, %SIGTRAP.
2496 *
2497 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2498 * number as exit_code and no siginfo.
2499 *
2500 * CONTEXT:
2501 * Must be called with @current->sighand->siglock held, which may be
2502 * released and re-acquired before returning with intervening sleep.
2503 */
2504static void do_jobctl_trap(void)
2505{
2506	struct signal_struct *signal = current->signal;
2507	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2508
2509	if (current->ptrace & PT_SEIZED) {
2510		if (!signal->group_stop_count &&
2511		    !(signal->flags & SIGNAL_STOP_STOPPED))
2512			signr = SIGTRAP;
2513		WARN_ON_ONCE(!signr);
2514		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2515				 CLD_STOPPED, 0);
2516	} else {
2517		WARN_ON_ONCE(!signr);
2518		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
 
2519	}
2520}
2521
2522/**
2523 * do_freezer_trap - handle the freezer jobctl trap
2524 *
2525 * Puts the task into frozen state, if only the task is not about to quit.
2526 * In this case it drops JOBCTL_TRAP_FREEZE.
2527 *
2528 * CONTEXT:
2529 * Must be called with @current->sighand->siglock held,
2530 * which is always released before returning.
2531 */
2532static void do_freezer_trap(void)
2533	__releases(&current->sighand->siglock)
2534{
2535	/*
2536	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2537	 * let's make another loop to give it a chance to be handled.
2538	 * In any case, we'll return back.
2539	 */
2540	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2541	     JOBCTL_TRAP_FREEZE) {
2542		spin_unlock_irq(&current->sighand->siglock);
2543		return;
2544	}
2545
2546	/*
2547	 * Now we're sure that there is no pending fatal signal and no
2548	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2549	 * immediately (if there is a non-fatal signal pending), and
2550	 * put the task into sleep.
2551	 */
2552	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2553	clear_thread_flag(TIF_SIGPENDING);
2554	spin_unlock_irq(&current->sighand->siglock);
2555	cgroup_enter_frozen();
2556	schedule();
2557}
2558
2559static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2560{
 
2561	/*
2562	 * We do not check sig_kernel_stop(signr) but set this marker
2563	 * unconditionally because we do not know whether debugger will
2564	 * change signr. This flag has no meaning unless we are going
2565	 * to stop after return from ptrace_stop(). In this case it will
2566	 * be checked in do_signal_stop(), we should only stop if it was
2567	 * not cleared by SIGCONT while we were sleeping. See also the
2568	 * comment in dequeue_signal().
2569	 */
2570	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2571	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2572
2573	/* We're back.  Did the debugger cancel the sig?  */
 
2574	if (signr == 0)
2575		return signr;
2576
 
 
2577	/*
2578	 * Update the siginfo structure if the signal has
2579	 * changed.  If the debugger wanted something
2580	 * specific in the siginfo structure then it should
2581	 * have updated *info via PTRACE_SETSIGINFO.
2582	 */
2583	if (signr != info->si_signo) {
2584		clear_siginfo(info);
2585		info->si_signo = signr;
2586		info->si_errno = 0;
2587		info->si_code = SI_USER;
2588		rcu_read_lock();
2589		info->si_pid = task_pid_vnr(current->parent);
2590		info->si_uid = from_kuid_munged(current_user_ns(),
2591						task_uid(current->parent));
2592		rcu_read_unlock();
2593	}
2594
2595	/* If the (new) signal is now blocked, requeue it.  */
2596	if (sigismember(&current->blocked, signr) ||
2597	    fatal_signal_pending(current)) {
2598		send_signal_locked(signr, info, current, type);
2599		signr = 0;
2600	}
2601
2602	return signr;
2603}
2604
2605static void hide_si_addr_tag_bits(struct ksignal *ksig)
2606{
2607	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2608	case SIL_FAULT:
2609	case SIL_FAULT_TRAPNO:
2610	case SIL_FAULT_MCEERR:
2611	case SIL_FAULT_BNDERR:
2612	case SIL_FAULT_PKUERR:
2613	case SIL_FAULT_PERF_EVENT:
2614		ksig->info.si_addr = arch_untagged_si_addr(
2615			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2616		break;
2617	case SIL_KILL:
2618	case SIL_TIMER:
2619	case SIL_POLL:
2620	case SIL_CHLD:
2621	case SIL_RT:
2622	case SIL_SYS:
2623		break;
2624	}
2625}
2626
2627bool get_signal(struct ksignal *ksig)
2628{
2629	struct sighand_struct *sighand = current->sighand;
2630	struct signal_struct *signal = current->signal;
2631	int signr;
2632
2633	clear_notify_signal();
2634	if (unlikely(task_work_pending(current)))
2635		task_work_run();
2636
2637	if (!task_sigpending(current))
2638		return false;
2639
2640	if (unlikely(uprobe_deny_signal()))
2641		return false;
2642
2643	/*
2644	 * Do this once, we can't return to user-mode if freezing() == T.
2645	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2646	 * thus do not need another check after return.
2647	 */
2648	try_to_freeze();
2649
2650relock:
2651	spin_lock_irq(&sighand->siglock);
2652
2653	/*
2654	 * Every stopped thread goes here after wakeup. Check to see if
2655	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2656	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2657	 */
2658	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2659		int why;
2660
2661		if (signal->flags & SIGNAL_CLD_CONTINUED)
2662			why = CLD_CONTINUED;
2663		else
2664			why = CLD_STOPPED;
2665
2666		signal->flags &= ~SIGNAL_CLD_MASK;
2667
2668		spin_unlock_irq(&sighand->siglock);
2669
2670		/*
2671		 * Notify the parent that we're continuing.  This event is
2672		 * always per-process and doesn't make whole lot of sense
2673		 * for ptracers, who shouldn't consume the state via
2674		 * wait(2) either, but, for backward compatibility, notify
2675		 * the ptracer of the group leader too unless it's gonna be
2676		 * a duplicate.
2677		 */
2678		read_lock(&tasklist_lock);
2679		do_notify_parent_cldstop(current, false, why);
2680
2681		if (ptrace_reparented(current->group_leader))
2682			do_notify_parent_cldstop(current->group_leader,
2683						true, why);
2684		read_unlock(&tasklist_lock);
2685
2686		goto relock;
2687	}
2688
2689	for (;;) {
2690		struct k_sigaction *ka;
2691		enum pid_type type;
2692
2693		/* Has this task already been marked for death? */
2694		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2695		     signal->group_exec_task) {
2696			clear_siginfo(&ksig->info);
2697			ksig->info.si_signo = signr = SIGKILL;
2698			sigdelset(&current->pending.signal, SIGKILL);
2699			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2700				&sighand->action[SIGKILL - 1]);
2701			recalc_sigpending();
2702			goto fatal;
2703		}
2704
2705		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2706		    do_signal_stop(0))
2707			goto relock;
2708
2709		if (unlikely(current->jobctl &
2710			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2711			if (current->jobctl & JOBCTL_TRAP_MASK) {
2712				do_jobctl_trap();
2713				spin_unlock_irq(&sighand->siglock);
2714			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2715				do_freezer_trap();
2716
2717			goto relock;
2718		}
2719
2720		/*
2721		 * If the task is leaving the frozen state, let's update
2722		 * cgroup counters and reset the frozen bit.
2723		 */
2724		if (unlikely(cgroup_task_frozen(current))) {
2725			spin_unlock_irq(&sighand->siglock);
2726			cgroup_leave_frozen(false);
2727			goto relock;
2728		}
2729
2730		/*
2731		 * Signals generated by the execution of an instruction
2732		 * need to be delivered before any other pending signals
2733		 * so that the instruction pointer in the signal stack
2734		 * frame points to the faulting instruction.
2735		 */
2736		type = PIDTYPE_PID;
2737		signr = dequeue_synchronous_signal(&ksig->info);
2738		if (!signr)
2739			signr = dequeue_signal(current, &current->blocked,
2740					       &ksig->info, &type);
2741
2742		if (!signr)
2743			break; /* will return 0 */
2744
2745		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2746		    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2747			signr = ptrace_signal(signr, &ksig->info, type);
2748			if (!signr)
2749				continue;
2750		}
2751
2752		ka = &sighand->action[signr-1];
2753
2754		/* Trace actually delivered signals. */
2755		trace_signal_deliver(signr, &ksig->info, ka);
2756
2757		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2758			continue;
2759		if (ka->sa.sa_handler != SIG_DFL) {
2760			/* Run the handler.  */
2761			ksig->ka = *ka;
2762
2763			if (ka->sa.sa_flags & SA_ONESHOT)
2764				ka->sa.sa_handler = SIG_DFL;
2765
2766			break; /* will return non-zero "signr" value */
2767		}
2768
2769		/*
2770		 * Now we are doing the default action for this signal.
2771		 */
2772		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2773			continue;
2774
2775		/*
2776		 * Global init gets no signals it doesn't want.
2777		 * Container-init gets no signals it doesn't want from same
2778		 * container.
2779		 *
2780		 * Note that if global/container-init sees a sig_kernel_only()
2781		 * signal here, the signal must have been generated internally
2782		 * or must have come from an ancestor namespace. In either
2783		 * case, the signal cannot be dropped.
2784		 */
2785		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2786				!sig_kernel_only(signr))
2787			continue;
2788
2789		if (sig_kernel_stop(signr)) {
2790			/*
2791			 * The default action is to stop all threads in
2792			 * the thread group.  The job control signals
2793			 * do nothing in an orphaned pgrp, but SIGSTOP
2794			 * always works.  Note that siglock needs to be
2795			 * dropped during the call to is_orphaned_pgrp()
2796			 * because of lock ordering with tasklist_lock.
2797			 * This allows an intervening SIGCONT to be posted.
2798			 * We need to check for that and bail out if necessary.
2799			 */
2800			if (signr != SIGSTOP) {
2801				spin_unlock_irq(&sighand->siglock);
2802
2803				/* signals can be posted during this window */
2804
2805				if (is_current_pgrp_orphaned())
2806					goto relock;
2807
2808				spin_lock_irq(&sighand->siglock);
2809			}
2810
2811			if (likely(do_signal_stop(ksig->info.si_signo))) {
2812				/* It released the siglock.  */
2813				goto relock;
2814			}
2815
2816			/*
2817			 * We didn't actually stop, due to a race
2818			 * with SIGCONT or something like that.
2819			 */
2820			continue;
2821		}
2822
2823	fatal:
2824		spin_unlock_irq(&sighand->siglock);
2825		if (unlikely(cgroup_task_frozen(current)))
2826			cgroup_leave_frozen(true);
2827
2828		/*
2829		 * Anything else is fatal, maybe with a core dump.
2830		 */
2831		current->flags |= PF_SIGNALED;
2832
2833		if (sig_kernel_coredump(signr)) {
2834			if (print_fatal_signals)
2835				print_fatal_signal(ksig->info.si_signo);
2836			proc_coredump_connector(current);
2837			/*
2838			 * If it was able to dump core, this kills all
2839			 * other threads in the group and synchronizes with
2840			 * their demise.  If we lost the race with another
2841			 * thread getting here, it set group_exit_code
2842			 * first and our do_group_exit call below will use
2843			 * that value and ignore the one we pass it.
2844			 */
2845			do_coredump(&ksig->info);
2846		}
2847
2848		/*
2849		 * PF_IO_WORKER threads will catch and exit on fatal signals
2850		 * themselves. They have cleanup that must be performed, so
2851		 * we cannot call do_exit() on their behalf.
2852		 */
2853		if (current->flags & PF_IO_WORKER)
2854			goto out;
2855
2856		/*
2857		 * Death signals, no core dump.
2858		 */
2859		do_group_exit(ksig->info.si_signo);
2860		/* NOTREACHED */
2861	}
2862	spin_unlock_irq(&sighand->siglock);
2863out:
2864	ksig->sig = signr;
2865
2866	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2867		hide_si_addr_tag_bits(ksig);
2868
2869	return ksig->sig > 0;
2870}
2871
2872/**
2873 * signal_delivered - called after signal delivery to update blocked signals
2874 * @ksig:		kernel signal struct
2875 * @stepping:		nonzero if debugger single-step or block-step in use
2876 *
2877 * This function should be called when a signal has successfully been
2878 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2879 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2880 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
2881 */
2882static void signal_delivered(struct ksignal *ksig, int stepping)
2883{
2884	sigset_t blocked;
2885
2886	/* A signal was successfully delivered, and the
2887	   saved sigmask was stored on the signal frame,
2888	   and will be restored by sigreturn.  So we can
2889	   simply clear the restore sigmask flag.  */
2890	clear_restore_sigmask();
2891
2892	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2893	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2894		sigaddset(&blocked, ksig->sig);
2895	set_current_blocked(&blocked);
2896	if (current->sas_ss_flags & SS_AUTODISARM)
2897		sas_ss_reset(current);
2898	if (stepping)
2899		ptrace_notify(SIGTRAP, 0);
2900}
2901
2902void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2903{
2904	if (failed)
2905		force_sigsegv(ksig->sig);
2906	else
2907		signal_delivered(ksig, stepping);
2908}
2909
2910/*
2911 * It could be that complete_signal() picked us to notify about the
2912 * group-wide signal. Other threads should be notified now to take
2913 * the shared signals in @which since we will not.
2914 */
2915static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2916{
2917	sigset_t retarget;
2918	struct task_struct *t;
2919
2920	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2921	if (sigisemptyset(&retarget))
2922		return;
2923
2924	t = tsk;
2925	while_each_thread(tsk, t) {
2926		if (t->flags & PF_EXITING)
2927			continue;
2928
2929		if (!has_pending_signals(&retarget, &t->blocked))
2930			continue;
2931		/* Remove the signals this thread can handle. */
2932		sigandsets(&retarget, &retarget, &t->blocked);
2933
2934		if (!task_sigpending(t))
2935			signal_wake_up(t, 0);
2936
2937		if (sigisemptyset(&retarget))
2938			break;
2939	}
2940}
2941
2942void exit_signals(struct task_struct *tsk)
2943{
2944	int group_stop = 0;
2945	sigset_t unblocked;
2946
2947	/*
2948	 * @tsk is about to have PF_EXITING set - lock out users which
2949	 * expect stable threadgroup.
2950	 */
2951	cgroup_threadgroup_change_begin(tsk);
2952
2953	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2954		tsk->flags |= PF_EXITING;
2955		cgroup_threadgroup_change_end(tsk);
2956		return;
2957	}
2958
2959	spin_lock_irq(&tsk->sighand->siglock);
2960	/*
2961	 * From now this task is not visible for group-wide signals,
2962	 * see wants_signal(), do_signal_stop().
2963	 */
2964	tsk->flags |= PF_EXITING;
2965
2966	cgroup_threadgroup_change_end(tsk);
2967
2968	if (!task_sigpending(tsk))
2969		goto out;
2970
2971	unblocked = tsk->blocked;
2972	signotset(&unblocked);
2973	retarget_shared_pending(tsk, &unblocked);
2974
2975	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2976	    task_participate_group_stop(tsk))
2977		group_stop = CLD_STOPPED;
2978out:
2979	spin_unlock_irq(&tsk->sighand->siglock);
2980
2981	/*
2982	 * If group stop has completed, deliver the notification.  This
2983	 * should always go to the real parent of the group leader.
2984	 */
2985	if (unlikely(group_stop)) {
2986		read_lock(&tasklist_lock);
2987		do_notify_parent_cldstop(tsk, false, group_stop);
2988		read_unlock(&tasklist_lock);
2989	}
2990}
2991
 
 
 
 
 
 
 
 
2992/*
2993 * System call entry points.
2994 */
2995
2996/**
2997 *  sys_restart_syscall - restart a system call
2998 */
2999SYSCALL_DEFINE0(restart_syscall)
3000{
3001	struct restart_block *restart = &current->restart_block;
3002	return restart->fn(restart);
3003}
3004
3005long do_no_restart_syscall(struct restart_block *param)
3006{
3007	return -EINTR;
3008}
3009
3010static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3011{
3012	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3013		sigset_t newblocked;
3014		/* A set of now blocked but previously unblocked signals. */
3015		sigandnsets(&newblocked, newset, &current->blocked);
3016		retarget_shared_pending(tsk, &newblocked);
3017	}
3018	tsk->blocked = *newset;
3019	recalc_sigpending();
3020}
3021
3022/**
3023 * set_current_blocked - change current->blocked mask
3024 * @newset: new mask
3025 *
3026 * It is wrong to change ->blocked directly, this helper should be used
3027 * to ensure the process can't miss a shared signal we are going to block.
3028 */
3029void set_current_blocked(sigset_t *newset)
3030{
3031	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3032	__set_current_blocked(newset);
3033}
3034
3035void __set_current_blocked(const sigset_t *newset)
3036{
3037	struct task_struct *tsk = current;
3038
3039	/*
3040	 * In case the signal mask hasn't changed, there is nothing we need
3041	 * to do. The current->blocked shouldn't be modified by other task.
3042	 */
3043	if (sigequalsets(&tsk->blocked, newset))
3044		return;
3045
3046	spin_lock_irq(&tsk->sighand->siglock);
3047	__set_task_blocked(tsk, newset);
3048	spin_unlock_irq(&tsk->sighand->siglock);
3049}
3050
3051/*
3052 * This is also useful for kernel threads that want to temporarily
3053 * (or permanently) block certain signals.
3054 *
3055 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3056 * interface happily blocks "unblockable" signals like SIGKILL
3057 * and friends.
3058 */
3059int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3060{
3061	struct task_struct *tsk = current;
3062	sigset_t newset;
3063
3064	/* Lockless, only current can change ->blocked, never from irq */
3065	if (oldset)
3066		*oldset = tsk->blocked;
3067
3068	switch (how) {
3069	case SIG_BLOCK:
3070		sigorsets(&newset, &tsk->blocked, set);
3071		break;
3072	case SIG_UNBLOCK:
3073		sigandnsets(&newset, &tsk->blocked, set);
3074		break;
3075	case SIG_SETMASK:
3076		newset = *set;
3077		break;
3078	default:
3079		return -EINVAL;
3080	}
3081
3082	__set_current_blocked(&newset);
3083	return 0;
3084}
3085EXPORT_SYMBOL(sigprocmask);
3086
3087/*
3088 * The api helps set app-provided sigmasks.
3089 *
3090 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3091 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3092 *
3093 * Note that it does set_restore_sigmask() in advance, so it must be always
3094 * paired with restore_saved_sigmask_unless() before return from syscall.
3095 */
3096int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3097{
3098	sigset_t kmask;
3099
3100	if (!umask)
3101		return 0;
3102	if (sigsetsize != sizeof(sigset_t))
3103		return -EINVAL;
3104	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3105		return -EFAULT;
3106
3107	set_restore_sigmask();
3108	current->saved_sigmask = current->blocked;
3109	set_current_blocked(&kmask);
3110
3111	return 0;
3112}
3113
3114#ifdef CONFIG_COMPAT
3115int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3116			    size_t sigsetsize)
3117{
3118	sigset_t kmask;
3119
3120	if (!umask)
3121		return 0;
3122	if (sigsetsize != sizeof(compat_sigset_t))
3123		return -EINVAL;
3124	if (get_compat_sigset(&kmask, umask))
3125		return -EFAULT;
3126
3127	set_restore_sigmask();
3128	current->saved_sigmask = current->blocked;
3129	set_current_blocked(&kmask);
3130
3131	return 0;
3132}
3133#endif
3134
3135/**
3136 *  sys_rt_sigprocmask - change the list of currently blocked signals
3137 *  @how: whether to add, remove, or set signals
3138 *  @nset: stores pending signals
3139 *  @oset: previous value of signal mask if non-null
3140 *  @sigsetsize: size of sigset_t type
3141 */
3142SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3143		sigset_t __user *, oset, size_t, sigsetsize)
3144{
3145	sigset_t old_set, new_set;
3146	int error;
3147
3148	/* XXX: Don't preclude handling different sized sigset_t's.  */
3149	if (sigsetsize != sizeof(sigset_t))
3150		return -EINVAL;
3151
3152	old_set = current->blocked;
3153
3154	if (nset) {
3155		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3156			return -EFAULT;
3157		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3158
3159		error = sigprocmask(how, &new_set, NULL);
3160		if (error)
3161			return error;
3162	}
3163
3164	if (oset) {
3165		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3166			return -EFAULT;
3167	}
3168
3169	return 0;
3170}
3171
3172#ifdef CONFIG_COMPAT
3173COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3174		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3175{
 
3176	sigset_t old_set = current->blocked;
3177
3178	/* XXX: Don't preclude handling different sized sigset_t's.  */
3179	if (sigsetsize != sizeof(sigset_t))
3180		return -EINVAL;
3181
3182	if (nset) {
 
3183		sigset_t new_set;
3184		int error;
3185		if (get_compat_sigset(&new_set, nset))
3186			return -EFAULT;
 
 
3187		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3188
3189		error = sigprocmask(how, &new_set, NULL);
3190		if (error)
3191			return error;
3192	}
3193	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
 
 
 
 
 
 
 
 
 
 
3194}
3195#endif
3196
3197static void do_sigpending(sigset_t *set)
3198{
 
 
 
3199	spin_lock_irq(&current->sighand->siglock);
3200	sigorsets(set, &current->pending.signal,
3201		  &current->signal->shared_pending.signal);
3202	spin_unlock_irq(&current->sighand->siglock);
3203
3204	/* Outside the lock because only this thread touches it.  */
3205	sigandsets(set, &current->blocked, set);
 
3206}
3207
3208/**
3209 *  sys_rt_sigpending - examine a pending signal that has been raised
3210 *			while blocked
3211 *  @uset: stores pending signals
3212 *  @sigsetsize: size of sigset_t type or larger
3213 */
3214SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3215{
3216	sigset_t set;
3217
3218	if (sigsetsize > sizeof(*uset))
3219		return -EINVAL;
3220
3221	do_sigpending(&set);
3222
3223	if (copy_to_user(uset, &set, sigsetsize))
3224		return -EFAULT;
3225
3226	return 0;
3227}
3228
3229#ifdef CONFIG_COMPAT
3230COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3231		compat_size_t, sigsetsize)
3232{
 
3233	sigset_t set;
3234
3235	if (sigsetsize > sizeof(*uset))
3236		return -EINVAL;
3237
3238	do_sigpending(&set);
3239
3240	return put_compat_sigset(uset, &set, sigsetsize);
 
 
 
 
 
3241}
3242#endif
3243
3244static const struct {
3245	unsigned char limit, layout;
3246} sig_sicodes[] = {
3247	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3248	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3249	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3250	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3251	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3252#if defined(SIGEMT)
3253	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3254#endif
3255	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3256	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3257	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3258};
3259
3260static bool known_siginfo_layout(unsigned sig, int si_code)
3261{
3262	if (si_code == SI_KERNEL)
3263		return true;
3264	else if ((si_code > SI_USER)) {
3265		if (sig_specific_sicodes(sig)) {
3266			if (si_code <= sig_sicodes[sig].limit)
3267				return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3268		}
3269		else if (si_code <= NSIGPOLL)
3270			return true;
3271	}
3272	else if (si_code >= SI_DETHREAD)
3273		return true;
3274	else if (si_code == SI_ASYNCNL)
3275		return true;
3276	return false;
3277}
3278
3279enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3280{
3281	enum siginfo_layout layout = SIL_KILL;
3282	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3283		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3284		    (si_code <= sig_sicodes[sig].limit)) {
3285			layout = sig_sicodes[sig].layout;
3286			/* Handle the exceptions */
3287			if ((sig == SIGBUS) &&
3288			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3289				layout = SIL_FAULT_MCEERR;
3290			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3291				layout = SIL_FAULT_BNDERR;
3292#ifdef SEGV_PKUERR
3293			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3294				layout = SIL_FAULT_PKUERR;
3295#endif
3296			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3297				layout = SIL_FAULT_PERF_EVENT;
3298			else if (IS_ENABLED(CONFIG_SPARC) &&
3299				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3300				layout = SIL_FAULT_TRAPNO;
3301			else if (IS_ENABLED(CONFIG_ALPHA) &&
3302				 ((sig == SIGFPE) ||
3303				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3304				layout = SIL_FAULT_TRAPNO;
3305		}
3306		else if (si_code <= NSIGPOLL)
3307			layout = SIL_POLL;
3308	} else {
3309		if (si_code == SI_TIMER)
3310			layout = SIL_TIMER;
3311		else if (si_code == SI_SIGIO)
3312			layout = SIL_POLL;
3313		else if (si_code < 0)
3314			layout = SIL_RT;
3315	}
3316	return layout;
3317}
3318
3319static inline char __user *si_expansion(const siginfo_t __user *info)
3320{
3321	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3322}
3323
3324int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3325{
3326	char __user *expansion = si_expansion(to);
3327	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3328		return -EFAULT;
3329	if (clear_user(expansion, SI_EXPANSION_SIZE))
3330		return -EFAULT;
3331	return 0;
3332}
3333
3334static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3335				       const siginfo_t __user *from)
3336{
3337	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3338		char __user *expansion = si_expansion(from);
3339		char buf[SI_EXPANSION_SIZE];
3340		int i;
3341		/*
3342		 * An unknown si_code might need more than
3343		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3344		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3345		 * will return this data to userspace exactly.
3346		 */
3347		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3348			return -EFAULT;
3349		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3350			if (buf[i] != 0)
3351				return -E2BIG;
3352		}
3353	}
3354	return 0;
3355}
3356
3357static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3358				    const siginfo_t __user *from)
3359{
3360	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3361		return -EFAULT;
3362	to->si_signo = signo;
3363	return post_copy_siginfo_from_user(to, from);
3364}
3365
3366int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3367{
3368	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3369		return -EFAULT;
3370	return post_copy_siginfo_from_user(to, from);
3371}
3372
3373#ifdef CONFIG_COMPAT
3374/**
3375 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3376 * @to: compat siginfo destination
3377 * @from: kernel siginfo source
3378 *
3379 * Note: This function does not work properly for the SIGCHLD on x32, but
3380 * fortunately it doesn't have to.  The only valid callers for this function are
3381 * copy_siginfo_to_user32, which is overriden for x32 and the coredump code.
3382 * The latter does not care because SIGCHLD will never cause a coredump.
3383 */
3384void copy_siginfo_to_external32(struct compat_siginfo *to,
3385		const struct kernel_siginfo *from)
3386{
3387	memset(to, 0, sizeof(*to));
3388
3389	to->si_signo = from->si_signo;
3390	to->si_errno = from->si_errno;
3391	to->si_code  = from->si_code;
3392	switch(siginfo_layout(from->si_signo, from->si_code)) {
3393	case SIL_KILL:
3394		to->si_pid = from->si_pid;
3395		to->si_uid = from->si_uid;
3396		break;
3397	case SIL_TIMER:
3398		to->si_tid     = from->si_tid;
3399		to->si_overrun = from->si_overrun;
3400		to->si_int     = from->si_int;
3401		break;
3402	case SIL_POLL:
3403		to->si_band = from->si_band;
3404		to->si_fd   = from->si_fd;
3405		break;
3406	case SIL_FAULT:
3407		to->si_addr = ptr_to_compat(from->si_addr);
3408		break;
3409	case SIL_FAULT_TRAPNO:
3410		to->si_addr = ptr_to_compat(from->si_addr);
3411		to->si_trapno = from->si_trapno;
3412		break;
3413	case SIL_FAULT_MCEERR:
3414		to->si_addr = ptr_to_compat(from->si_addr);
3415		to->si_addr_lsb = from->si_addr_lsb;
3416		break;
3417	case SIL_FAULT_BNDERR:
3418		to->si_addr = ptr_to_compat(from->si_addr);
3419		to->si_lower = ptr_to_compat(from->si_lower);
3420		to->si_upper = ptr_to_compat(from->si_upper);
3421		break;
3422	case SIL_FAULT_PKUERR:
3423		to->si_addr = ptr_to_compat(from->si_addr);
3424		to->si_pkey = from->si_pkey;
3425		break;
3426	case SIL_FAULT_PERF_EVENT:
3427		to->si_addr = ptr_to_compat(from->si_addr);
3428		to->si_perf_data = from->si_perf_data;
3429		to->si_perf_type = from->si_perf_type;
3430		to->si_perf_flags = from->si_perf_flags;
3431		break;
3432	case SIL_CHLD:
3433		to->si_pid = from->si_pid;
3434		to->si_uid = from->si_uid;
3435		to->si_status = from->si_status;
3436		to->si_utime = from->si_utime;
3437		to->si_stime = from->si_stime;
3438		break;
3439	case SIL_RT:
3440		to->si_pid = from->si_pid;
3441		to->si_uid = from->si_uid;
3442		to->si_int = from->si_int;
3443		break;
3444	case SIL_SYS:
3445		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3446		to->si_syscall   = from->si_syscall;
3447		to->si_arch      = from->si_arch;
 
3448		break;
3449	}
 
3450}
3451
3452int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3453			   const struct kernel_siginfo *from)
3454{
3455	struct compat_siginfo new;
3456
3457	copy_siginfo_to_external32(&new, from);
3458	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3459		return -EFAULT;
3460	return 0;
3461}
3462
3463static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3464					 const struct compat_siginfo *from)
3465{
3466	clear_siginfo(to);
3467	to->si_signo = from->si_signo;
3468	to->si_errno = from->si_errno;
3469	to->si_code  = from->si_code;
3470	switch(siginfo_layout(from->si_signo, from->si_code)) {
3471	case SIL_KILL:
3472		to->si_pid = from->si_pid;
3473		to->si_uid = from->si_uid;
3474		break;
3475	case SIL_TIMER:
3476		to->si_tid     = from->si_tid;
3477		to->si_overrun = from->si_overrun;
3478		to->si_int     = from->si_int;
3479		break;
3480	case SIL_POLL:
3481		to->si_band = from->si_band;
3482		to->si_fd   = from->si_fd;
3483		break;
3484	case SIL_FAULT:
3485		to->si_addr = compat_ptr(from->si_addr);
3486		break;
3487	case SIL_FAULT_TRAPNO:
3488		to->si_addr = compat_ptr(from->si_addr);
3489		to->si_trapno = from->si_trapno;
3490		break;
3491	case SIL_FAULT_MCEERR:
3492		to->si_addr = compat_ptr(from->si_addr);
3493		to->si_addr_lsb = from->si_addr_lsb;
3494		break;
3495	case SIL_FAULT_BNDERR:
3496		to->si_addr = compat_ptr(from->si_addr);
3497		to->si_lower = compat_ptr(from->si_lower);
3498		to->si_upper = compat_ptr(from->si_upper);
3499		break;
3500	case SIL_FAULT_PKUERR:
3501		to->si_addr = compat_ptr(from->si_addr);
3502		to->si_pkey = from->si_pkey;
3503		break;
3504	case SIL_FAULT_PERF_EVENT:
3505		to->si_addr = compat_ptr(from->si_addr);
3506		to->si_perf_data = from->si_perf_data;
3507		to->si_perf_type = from->si_perf_type;
3508		to->si_perf_flags = from->si_perf_flags;
3509		break;
3510	case SIL_CHLD:
3511		to->si_pid    = from->si_pid;
3512		to->si_uid    = from->si_uid;
3513		to->si_status = from->si_status;
3514#ifdef CONFIG_X86_X32_ABI
3515		if (in_x32_syscall()) {
3516			to->si_utime = from->_sifields._sigchld_x32._utime;
3517			to->si_stime = from->_sifields._sigchld_x32._stime;
3518		} else
3519#endif
3520		{
3521			to->si_utime = from->si_utime;
3522			to->si_stime = from->si_stime;
3523		}
3524		break;
3525	case SIL_RT:
3526		to->si_pid = from->si_pid;
3527		to->si_uid = from->si_uid;
3528		to->si_int = from->si_int;
3529		break;
3530	case SIL_SYS:
3531		to->si_call_addr = compat_ptr(from->si_call_addr);
3532		to->si_syscall   = from->si_syscall;
3533		to->si_arch      = from->si_arch;
3534		break;
3535	}
3536	return 0;
3537}
3538
3539static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3540				      const struct compat_siginfo __user *ufrom)
3541{
3542	struct compat_siginfo from;
3543
3544	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3545		return -EFAULT;
3546
3547	from.si_signo = signo;
3548	return post_copy_siginfo_from_user32(to, &from);
3549}
3550
3551int copy_siginfo_from_user32(struct kernel_siginfo *to,
3552			     const struct compat_siginfo __user *ufrom)
3553{
3554	struct compat_siginfo from;
3555
3556	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3557		return -EFAULT;
3558
3559	return post_copy_siginfo_from_user32(to, &from);
3560}
3561#endif /* CONFIG_COMPAT */
3562
3563/**
3564 *  do_sigtimedwait - wait for queued signals specified in @which
3565 *  @which: queued signals to wait for
3566 *  @info: if non-null, the signal's siginfo is returned here
3567 *  @ts: upper bound on process time suspension
3568 */
3569static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3570		    const struct timespec64 *ts)
3571{
3572	ktime_t *to = NULL, timeout = KTIME_MAX;
3573	struct task_struct *tsk = current;
3574	sigset_t mask = *which;
3575	enum pid_type type;
3576	int sig, ret = 0;
3577
3578	if (ts) {
3579		if (!timespec64_valid(ts))
3580			return -EINVAL;
3581		timeout = timespec64_to_ktime(*ts);
3582		to = &timeout;
3583	}
3584
3585	/*
3586	 * Invert the set of allowed signals to get those we want to block.
3587	 */
3588	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3589	signotset(&mask);
3590
3591	spin_lock_irq(&tsk->sighand->siglock);
3592	sig = dequeue_signal(tsk, &mask, info, &type);
3593	if (!sig && timeout) {
3594		/*
3595		 * None ready, temporarily unblock those we're interested
3596		 * while we are sleeping in so that we'll be awakened when
3597		 * they arrive. Unblocking is always fine, we can avoid
3598		 * set_current_blocked().
3599		 */
3600		tsk->real_blocked = tsk->blocked;
3601		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3602		recalc_sigpending();
3603		spin_unlock_irq(&tsk->sighand->siglock);
3604
3605		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3606		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3607					       HRTIMER_MODE_REL);
3608		spin_lock_irq(&tsk->sighand->siglock);
3609		__set_task_blocked(tsk, &tsk->real_blocked);
3610		sigemptyset(&tsk->real_blocked);
3611		sig = dequeue_signal(tsk, &mask, info, &type);
3612	}
3613	spin_unlock_irq(&tsk->sighand->siglock);
3614
3615	if (sig)
3616		return sig;
3617	return ret ? -EINTR : -EAGAIN;
3618}
3619
3620/**
3621 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3622 *			in @uthese
3623 *  @uthese: queued signals to wait for
3624 *  @uinfo: if non-null, the signal's siginfo is returned here
3625 *  @uts: upper bound on process time suspension
3626 *  @sigsetsize: size of sigset_t type
3627 */
3628SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3629		siginfo_t __user *, uinfo,
3630		const struct __kernel_timespec __user *, uts,
3631		size_t, sigsetsize)
3632{
3633	sigset_t these;
3634	struct timespec64 ts;
3635	kernel_siginfo_t info;
3636	int ret;
3637
3638	/* XXX: Don't preclude handling different sized sigset_t's.  */
3639	if (sigsetsize != sizeof(sigset_t))
3640		return -EINVAL;
3641
3642	if (copy_from_user(&these, uthese, sizeof(these)))
3643		return -EFAULT;
3644
3645	if (uts) {
3646		if (get_timespec64(&ts, uts))
3647			return -EFAULT;
3648	}
3649
3650	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3651
3652	if (ret > 0 && uinfo) {
3653		if (copy_siginfo_to_user(uinfo, &info))
3654			ret = -EFAULT;
3655	}
3656
3657	return ret;
3658}
3659
3660#ifdef CONFIG_COMPAT_32BIT_TIME
3661SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3662		siginfo_t __user *, uinfo,
3663		const struct old_timespec32 __user *, uts,
3664		size_t, sigsetsize)
3665{
3666	sigset_t these;
3667	struct timespec64 ts;
3668	kernel_siginfo_t info;
3669	int ret;
3670
3671	if (sigsetsize != sizeof(sigset_t))
3672		return -EINVAL;
3673
3674	if (copy_from_user(&these, uthese, sizeof(these)))
3675		return -EFAULT;
3676
3677	if (uts) {
3678		if (get_old_timespec32(&ts, uts))
3679			return -EFAULT;
3680	}
3681
3682	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3683
3684	if (ret > 0 && uinfo) {
3685		if (copy_siginfo_to_user(uinfo, &info))
3686			ret = -EFAULT;
3687	}
3688
3689	return ret;
3690}
3691#endif
3692
3693#ifdef CONFIG_COMPAT
3694COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3695		struct compat_siginfo __user *, uinfo,
3696		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3697{
3698	sigset_t s;
3699	struct timespec64 t;
3700	kernel_siginfo_t info;
3701	long ret;
3702
3703	if (sigsetsize != sizeof(sigset_t))
3704		return -EINVAL;
3705
3706	if (get_compat_sigset(&s, uthese))
3707		return -EFAULT;
3708
3709	if (uts) {
3710		if (get_timespec64(&t, uts))
3711			return -EFAULT;
3712	}
3713
3714	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3715
3716	if (ret > 0 && uinfo) {
3717		if (copy_siginfo_to_user32(uinfo, &info))
3718			ret = -EFAULT;
3719	}
3720
3721	return ret;
3722}
3723
3724#ifdef CONFIG_COMPAT_32BIT_TIME
3725COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3726		struct compat_siginfo __user *, uinfo,
3727		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3728{
3729	sigset_t s;
3730	struct timespec64 t;
3731	kernel_siginfo_t info;
3732	long ret;
3733
3734	if (sigsetsize != sizeof(sigset_t))
3735		return -EINVAL;
3736
3737	if (get_compat_sigset(&s, uthese))
3738		return -EFAULT;
3739
3740	if (uts) {
3741		if (get_old_timespec32(&t, uts))
3742			return -EFAULT;
3743	}
3744
3745	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3746
3747	if (ret > 0 && uinfo) {
3748		if (copy_siginfo_to_user32(uinfo, &info))
3749			ret = -EFAULT;
3750	}
3751
3752	return ret;
3753}
3754#endif
3755#endif
3756
3757static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3758{
3759	clear_siginfo(info);
3760	info->si_signo = sig;
3761	info->si_errno = 0;
3762	info->si_code = SI_USER;
3763	info->si_pid = task_tgid_vnr(current);
3764	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3765}
3766
3767/**
3768 *  sys_kill - send a signal to a process
3769 *  @pid: the PID of the process
3770 *  @sig: signal to be sent
3771 */
3772SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3773{
3774	struct kernel_siginfo info;
3775
3776	prepare_kill_siginfo(sig, &info);
 
 
 
 
3777
3778	return kill_something_info(sig, &info, pid);
3779}
3780
3781/*
3782 * Verify that the signaler and signalee either are in the same pid namespace
3783 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3784 * namespace.
3785 */
3786static bool access_pidfd_pidns(struct pid *pid)
3787{
3788	struct pid_namespace *active = task_active_pid_ns(current);
3789	struct pid_namespace *p = ns_of_pid(pid);
3790
3791	for (;;) {
3792		if (!p)
3793			return false;
3794		if (p == active)
3795			break;
3796		p = p->parent;
3797	}
3798
3799	return true;
3800}
3801
3802static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3803		siginfo_t __user *info)
3804{
3805#ifdef CONFIG_COMPAT
3806	/*
3807	 * Avoid hooking up compat syscalls and instead handle necessary
3808	 * conversions here. Note, this is a stop-gap measure and should not be
3809	 * considered a generic solution.
3810	 */
3811	if (in_compat_syscall())
3812		return copy_siginfo_from_user32(
3813			kinfo, (struct compat_siginfo __user *)info);
3814#endif
3815	return copy_siginfo_from_user(kinfo, info);
3816}
3817
3818static struct pid *pidfd_to_pid(const struct file *file)
3819{
3820	struct pid *pid;
3821
3822	pid = pidfd_pid(file);
3823	if (!IS_ERR(pid))
3824		return pid;
3825
3826	return tgid_pidfd_to_pid(file);
3827}
3828
3829/**
3830 * sys_pidfd_send_signal - Signal a process through a pidfd
3831 * @pidfd:  file descriptor of the process
3832 * @sig:    signal to send
3833 * @info:   signal info
3834 * @flags:  future flags
3835 *
3836 * The syscall currently only signals via PIDTYPE_PID which covers
3837 * kill(<positive-pid>, <signal>. It does not signal threads or process
3838 * groups.
3839 * In order to extend the syscall to threads and process groups the @flags
3840 * argument should be used. In essence, the @flags argument will determine
3841 * what is signaled and not the file descriptor itself. Put in other words,
3842 * grouping is a property of the flags argument not a property of the file
3843 * descriptor.
3844 *
3845 * Return: 0 on success, negative errno on failure
3846 */
3847SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3848		siginfo_t __user *, info, unsigned int, flags)
3849{
3850	int ret;
3851	struct fd f;
3852	struct pid *pid;
3853	kernel_siginfo_t kinfo;
3854
3855	/* Enforce flags be set to 0 until we add an extension. */
3856	if (flags)
3857		return -EINVAL;
3858
3859	f = fdget(pidfd);
3860	if (!f.file)
3861		return -EBADF;
3862
3863	/* Is this a pidfd? */
3864	pid = pidfd_to_pid(f.file);
3865	if (IS_ERR(pid)) {
3866		ret = PTR_ERR(pid);
3867		goto err;
3868	}
3869
3870	ret = -EINVAL;
3871	if (!access_pidfd_pidns(pid))
3872		goto err;
3873
3874	if (info) {
3875		ret = copy_siginfo_from_user_any(&kinfo, info);
3876		if (unlikely(ret))
3877			goto err;
3878
3879		ret = -EINVAL;
3880		if (unlikely(sig != kinfo.si_signo))
3881			goto err;
3882
3883		/* Only allow sending arbitrary signals to yourself. */
3884		ret = -EPERM;
3885		if ((task_pid(current) != pid) &&
3886		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3887			goto err;
3888	} else {
3889		prepare_kill_siginfo(sig, &kinfo);
3890	}
3891
3892	ret = kill_pid_info(sig, &kinfo, pid);
3893
3894err:
3895	fdput(f);
3896	return ret;
3897}
3898
3899static int
3900do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3901{
3902	struct task_struct *p;
3903	int error = -ESRCH;
3904
3905	rcu_read_lock();
3906	p = find_task_by_vpid(pid);
3907	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3908		error = check_kill_permission(sig, info, p);
3909		/*
3910		 * The null signal is a permissions and process existence
3911		 * probe.  No signal is actually delivered.
3912		 */
3913		if (!error && sig) {
3914			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3915			/*
3916			 * If lock_task_sighand() failed we pretend the task
3917			 * dies after receiving the signal. The window is tiny,
3918			 * and the signal is private anyway.
3919			 */
3920			if (unlikely(error == -ESRCH))
3921				error = 0;
3922		}
3923	}
3924	rcu_read_unlock();
3925
3926	return error;
3927}
3928
3929static int do_tkill(pid_t tgid, pid_t pid, int sig)
3930{
3931	struct kernel_siginfo info;
3932
3933	clear_siginfo(&info);
3934	info.si_signo = sig;
3935	info.si_errno = 0;
3936	info.si_code = SI_TKILL;
3937	info.si_pid = task_tgid_vnr(current);
3938	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3939
3940	return do_send_specific(tgid, pid, sig, &info);
3941}
3942
3943/**
3944 *  sys_tgkill - send signal to one specific thread
3945 *  @tgid: the thread group ID of the thread
3946 *  @pid: the PID of the thread
3947 *  @sig: signal to be sent
3948 *
3949 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3950 *  exists but it's not belonging to the target process anymore. This
3951 *  method solves the problem of threads exiting and PIDs getting reused.
3952 */
3953SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3954{
3955	/* This is only valid for single tasks */
3956	if (pid <= 0 || tgid <= 0)
3957		return -EINVAL;
3958
3959	return do_tkill(tgid, pid, sig);
3960}
3961
3962/**
3963 *  sys_tkill - send signal to one specific task
3964 *  @pid: the PID of the task
3965 *  @sig: signal to be sent
3966 *
3967 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3968 */
3969SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3970{
3971	/* This is only valid for single tasks */
3972	if (pid <= 0)
3973		return -EINVAL;
3974
3975	return do_tkill(0, pid, sig);
3976}
3977
3978static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3979{
3980	/* Not even root can pretend to send signals from the kernel.
3981	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3982	 */
3983	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3984	    (task_pid_vnr(current) != pid))
3985		return -EPERM;
3986
 
 
3987	/* POSIX.1b doesn't mention process groups.  */
3988	return kill_proc_info(sig, info, pid);
3989}
3990
3991/**
3992 *  sys_rt_sigqueueinfo - send signal information to a signal
3993 *  @pid: the PID of the thread
3994 *  @sig: signal to be sent
3995 *  @uinfo: signal info to be sent
3996 */
3997SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3998		siginfo_t __user *, uinfo)
3999{
4000	kernel_siginfo_t info;
4001	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4002	if (unlikely(ret))
4003		return ret;
4004	return do_rt_sigqueueinfo(pid, sig, &info);
4005}
4006
4007#ifdef CONFIG_COMPAT
4008COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4009			compat_pid_t, pid,
4010			int, sig,
4011			struct compat_siginfo __user *, uinfo)
4012{
4013	kernel_siginfo_t info;
4014	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4015	if (unlikely(ret))
4016		return ret;
4017	return do_rt_sigqueueinfo(pid, sig, &info);
4018}
4019#endif
4020
4021static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4022{
4023	/* This is only valid for single tasks */
4024	if (pid <= 0 || tgid <= 0)
4025		return -EINVAL;
4026
4027	/* Not even root can pretend to send signals from the kernel.
4028	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4029	 */
4030	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4031	    (task_pid_vnr(current) != pid))
4032		return -EPERM;
4033
 
 
4034	return do_send_specific(tgid, pid, sig, info);
4035}
4036
4037SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4038		siginfo_t __user *, uinfo)
4039{
4040	kernel_siginfo_t info;
4041	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4042	if (unlikely(ret))
4043		return ret;
 
4044	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4045}
4046
4047#ifdef CONFIG_COMPAT
4048COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4049			compat_pid_t, tgid,
4050			compat_pid_t, pid,
4051			int, sig,
4052			struct compat_siginfo __user *, uinfo)
4053{
4054	kernel_siginfo_t info;
4055	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4056	if (unlikely(ret))
4057		return ret;
4058	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4059}
4060#endif
4061
4062/*
4063 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4064 */
4065void kernel_sigaction(int sig, __sighandler_t action)
4066{
4067	spin_lock_irq(&current->sighand->siglock);
4068	current->sighand->action[sig - 1].sa.sa_handler = action;
4069	if (action == SIG_IGN) {
4070		sigset_t mask;
4071
4072		sigemptyset(&mask);
4073		sigaddset(&mask, sig);
4074
4075		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4076		flush_sigqueue_mask(&mask, &current->pending);
4077		recalc_sigpending();
4078	}
4079	spin_unlock_irq(&current->sighand->siglock);
4080}
4081EXPORT_SYMBOL(kernel_sigaction);
4082
4083void __weak sigaction_compat_abi(struct k_sigaction *act,
4084		struct k_sigaction *oact)
4085{
4086}
4087
4088int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4089{
4090	struct task_struct *p = current, *t;
4091	struct k_sigaction *k;
4092	sigset_t mask;
4093
4094	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4095		return -EINVAL;
4096
4097	k = &p->sighand->action[sig-1];
4098
4099	spin_lock_irq(&p->sighand->siglock);
4100	if (k->sa.sa_flags & SA_IMMUTABLE) {
4101		spin_unlock_irq(&p->sighand->siglock);
4102		return -EINVAL;
4103	}
4104	if (oact)
4105		*oact = *k;
4106
4107	/*
4108	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4109	 * e.g. by having an architecture use the bit in their uapi.
4110	 */
4111	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4112
4113	/*
4114	 * Clear unknown flag bits in order to allow userspace to detect missing
4115	 * support for flag bits and to allow the kernel to use non-uapi bits
4116	 * internally.
4117	 */
4118	if (act)
4119		act->sa.sa_flags &= UAPI_SA_FLAGS;
4120	if (oact)
4121		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4122
4123	sigaction_compat_abi(act, oact);
4124
4125	if (act) {
4126		sigdelsetmask(&act->sa.sa_mask,
4127			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4128		*k = *act;
4129		/*
4130		 * POSIX 3.3.1.3:
4131		 *  "Setting a signal action to SIG_IGN for a signal that is
4132		 *   pending shall cause the pending signal to be discarded,
4133		 *   whether or not it is blocked."
4134		 *
4135		 *  "Setting a signal action to SIG_DFL for a signal that is
4136		 *   pending and whose default action is to ignore the signal
4137		 *   (for example, SIGCHLD), shall cause the pending signal to
4138		 *   be discarded, whether or not it is blocked"
4139		 */
4140		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4141			sigemptyset(&mask);
4142			sigaddset(&mask, sig);
4143			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4144			for_each_thread(p, t)
4145				flush_sigqueue_mask(&mask, &t->pending);
4146		}
4147	}
4148
4149	spin_unlock_irq(&p->sighand->siglock);
4150	return 0;
4151}
4152
4153#ifdef CONFIG_DYNAMIC_SIGFRAME
4154static inline void sigaltstack_lock(void)
4155	__acquires(&current->sighand->siglock)
4156{
4157	spin_lock_irq(&current->sighand->siglock);
4158}
4159
4160static inline void sigaltstack_unlock(void)
4161	__releases(&current->sighand->siglock)
4162{
4163	spin_unlock_irq(&current->sighand->siglock);
4164}
4165#else
4166static inline void sigaltstack_lock(void) { }
4167static inline void sigaltstack_unlock(void) { }
4168#endif
4169
4170static int
4171do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4172		size_t min_ss_size)
4173{
4174	struct task_struct *t = current;
4175	int ret = 0;
4176
4177	if (oss) {
4178		memset(oss, 0, sizeof(stack_t));
4179		oss->ss_sp = (void __user *) t->sas_ss_sp;
4180		oss->ss_size = t->sas_ss_size;
4181		oss->ss_flags = sas_ss_flags(sp) |
4182			(current->sas_ss_flags & SS_FLAG_BITS);
4183	}
 
 
 
4184
4185	if (ss) {
4186		void __user *ss_sp = ss->ss_sp;
4187		size_t ss_size = ss->ss_size;
4188		unsigned ss_flags = ss->ss_flags;
4189		int ss_mode;
 
 
 
4190
4191		if (unlikely(on_sig_stack(sp)))
4192			return -EPERM;
 
4193
4194		ss_mode = ss_flags & ~SS_FLAG_BITS;
4195		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4196				ss_mode != 0))
4197			return -EINVAL;
4198
4199		/*
4200		 * Return before taking any locks if no actual
4201		 * sigaltstack changes were requested.
4202		 */
4203		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4204		    t->sas_ss_size == ss_size &&
4205		    t->sas_ss_flags == ss_flags)
4206			return 0;
4207
4208		sigaltstack_lock();
4209		if (ss_mode == SS_DISABLE) {
4210			ss_size = 0;
4211			ss_sp = NULL;
4212		} else {
4213			if (unlikely(ss_size < min_ss_size))
4214				ret = -ENOMEM;
4215			if (!sigaltstack_size_valid(ss_size))
4216				ret = -ENOMEM;
4217		}
4218		if (!ret) {
4219			t->sas_ss_sp = (unsigned long) ss_sp;
4220			t->sas_ss_size = ss_size;
4221			t->sas_ss_flags = ss_flags;
4222		}
4223		sigaltstack_unlock();
 
 
 
 
 
 
 
 
4224	}
4225	return ret;
 
 
4226}
4227
4228SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4229{
4230	stack_t new, old;
4231	int err;
4232	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4233		return -EFAULT;
4234	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4235			      current_user_stack_pointer(),
4236			      MINSIGSTKSZ);
4237	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4238		err = -EFAULT;
4239	return err;
4240}
4241
4242int restore_altstack(const stack_t __user *uss)
4243{
4244	stack_t new;
4245	if (copy_from_user(&new, uss, sizeof(stack_t)))
4246		return -EFAULT;
4247	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4248			     MINSIGSTKSZ);
4249	/* squash all but EFAULT for now */
4250	return 0;
4251}
4252
4253int __save_altstack(stack_t __user *uss, unsigned long sp)
4254{
4255	struct task_struct *t = current;
4256	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4257		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4258		__put_user(t->sas_ss_size, &uss->ss_size);
4259	return err;
 
 
 
 
4260}
4261
4262#ifdef CONFIG_COMPAT
4263static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4264				 compat_stack_t __user *uoss_ptr)
 
4265{
4266	stack_t uss, uoss;
4267	int ret;
 
4268
4269	if (uss_ptr) {
4270		compat_stack_t uss32;
 
 
4271		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4272			return -EFAULT;
4273		uss.ss_sp = compat_ptr(uss32.ss_sp);
4274		uss.ss_flags = uss32.ss_flags;
4275		uss.ss_size = uss32.ss_size;
4276	}
4277	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4278			     compat_user_stack_pointer(),
4279			     COMPAT_MINSIGSTKSZ);
 
 
 
4280	if (ret >= 0 && uoss_ptr)  {
4281		compat_stack_t old;
4282		memset(&old, 0, sizeof(old));
4283		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4284		old.ss_flags = uoss.ss_flags;
4285		old.ss_size = uoss.ss_size;
4286		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4287			ret = -EFAULT;
4288	}
4289	return ret;
4290}
4291
4292COMPAT_SYSCALL_DEFINE2(sigaltstack,
4293			const compat_stack_t __user *, uss_ptr,
4294			compat_stack_t __user *, uoss_ptr)
4295{
4296	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4297}
4298
4299int compat_restore_altstack(const compat_stack_t __user *uss)
4300{
4301	int err = do_compat_sigaltstack(uss, NULL);
4302	/* squash all but -EFAULT for now */
4303	return err == -EFAULT ? err : 0;
4304}
4305
4306int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4307{
4308	int err;
4309	struct task_struct *t = current;
4310	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4311			 &uss->ss_sp) |
4312		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4313		__put_user(t->sas_ss_size, &uss->ss_size);
4314	return err;
 
 
 
 
4315}
4316#endif
4317
4318#ifdef __ARCH_WANT_SYS_SIGPENDING
4319
4320/**
4321 *  sys_sigpending - examine pending signals
4322 *  @uset: where mask of pending signal is returned
4323 */
4324SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4325{
4326	sigset_t set;
4327
4328	if (sizeof(old_sigset_t) > sizeof(*uset))
4329		return -EINVAL;
4330
4331	do_sigpending(&set);
4332
4333	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4334		return -EFAULT;
4335
4336	return 0;
4337}
4338
4339#ifdef CONFIG_COMPAT
4340COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4341{
4342	sigset_t set;
4343
4344	do_sigpending(&set);
4345
4346	return put_user(set.sig[0], set32);
4347}
4348#endif
4349
4350#endif
4351
4352#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4353/**
4354 *  sys_sigprocmask - examine and change blocked signals
4355 *  @how: whether to add, remove, or set signals
4356 *  @nset: signals to add or remove (if non-null)
4357 *  @oset: previous value of signal mask if non-null
4358 *
4359 * Some platforms have their own version with special arguments;
4360 * others support only sys_rt_sigprocmask.
4361 */
4362
4363SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4364		old_sigset_t __user *, oset)
4365{
4366	old_sigset_t old_set, new_set;
4367	sigset_t new_blocked;
4368
4369	old_set = current->blocked.sig[0];
4370
4371	if (nset) {
4372		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4373			return -EFAULT;
4374
4375		new_blocked = current->blocked;
4376
4377		switch (how) {
4378		case SIG_BLOCK:
4379			sigaddsetmask(&new_blocked, new_set);
4380			break;
4381		case SIG_UNBLOCK:
4382			sigdelsetmask(&new_blocked, new_set);
4383			break;
4384		case SIG_SETMASK:
4385			new_blocked.sig[0] = new_set;
4386			break;
4387		default:
4388			return -EINVAL;
4389		}
4390
4391		set_current_blocked(&new_blocked);
4392	}
4393
4394	if (oset) {
4395		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4396			return -EFAULT;
4397	}
4398
4399	return 0;
4400}
4401#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4402
4403#ifndef CONFIG_ODD_RT_SIGACTION
4404/**
4405 *  sys_rt_sigaction - alter an action taken by a process
4406 *  @sig: signal to be sent
4407 *  @act: new sigaction
4408 *  @oact: used to save the previous sigaction
4409 *  @sigsetsize: size of sigset_t type
4410 */
4411SYSCALL_DEFINE4(rt_sigaction, int, sig,
4412		const struct sigaction __user *, act,
4413		struct sigaction __user *, oact,
4414		size_t, sigsetsize)
4415{
4416	struct k_sigaction new_sa, old_sa;
4417	int ret;
4418
4419	/* XXX: Don't preclude handling different sized sigset_t's.  */
4420	if (sigsetsize != sizeof(sigset_t))
4421		return -EINVAL;
4422
4423	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4424		return -EFAULT;
 
 
4425
4426	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4427	if (ret)
4428		return ret;
4429
4430	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4431		return -EFAULT;
4432
4433	return 0;
 
 
4434}
4435#ifdef CONFIG_COMPAT
4436COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4437		const struct compat_sigaction __user *, act,
4438		struct compat_sigaction __user *, oact,
4439		compat_size_t, sigsetsize)
4440{
4441	struct k_sigaction new_ka, old_ka;
 
4442#ifdef __ARCH_HAS_SA_RESTORER
4443	compat_uptr_t restorer;
4444#endif
4445	int ret;
4446
4447	/* XXX: Don't preclude handling different sized sigset_t's.  */
4448	if (sigsetsize != sizeof(compat_sigset_t))
4449		return -EINVAL;
4450
4451	if (act) {
4452		compat_uptr_t handler;
4453		ret = get_user(handler, &act->sa_handler);
4454		new_ka.sa.sa_handler = compat_ptr(handler);
4455#ifdef __ARCH_HAS_SA_RESTORER
4456		ret |= get_user(restorer, &act->sa_restorer);
4457		new_ka.sa.sa_restorer = compat_ptr(restorer);
4458#endif
4459		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4460		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4461		if (ret)
4462			return -EFAULT;
 
4463	}
4464
4465	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4466	if (!ret && oact) {
 
4467		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 
4468			       &oact->sa_handler);
4469		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4470					 sizeof(oact->sa_mask));
4471		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4472#ifdef __ARCH_HAS_SA_RESTORER
4473		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4474				&oact->sa_restorer);
4475#endif
4476	}
4477	return ret;
4478}
4479#endif
4480#endif /* !CONFIG_ODD_RT_SIGACTION */
4481
4482#ifdef CONFIG_OLD_SIGACTION
4483SYSCALL_DEFINE3(sigaction, int, sig,
4484		const struct old_sigaction __user *, act,
4485	        struct old_sigaction __user *, oact)
4486{
4487	struct k_sigaction new_ka, old_ka;
4488	int ret;
4489
4490	if (act) {
4491		old_sigset_t mask;
4492		if (!access_ok(act, sizeof(*act)) ||
4493		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4494		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4495		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4496		    __get_user(mask, &act->sa_mask))
4497			return -EFAULT;
4498#ifdef __ARCH_HAS_KA_RESTORER
4499		new_ka.ka_restorer = NULL;
4500#endif
4501		siginitset(&new_ka.sa.sa_mask, mask);
4502	}
4503
4504	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4505
4506	if (!ret && oact) {
4507		if (!access_ok(oact, sizeof(*oact)) ||
4508		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4509		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4510		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4511		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4512			return -EFAULT;
4513	}
4514
4515	return ret;
4516}
4517#endif
4518#ifdef CONFIG_COMPAT_OLD_SIGACTION
4519COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4520		const struct compat_old_sigaction __user *, act,
4521	        struct compat_old_sigaction __user *, oact)
4522{
4523	struct k_sigaction new_ka, old_ka;
4524	int ret;
4525	compat_old_sigset_t mask;
4526	compat_uptr_t handler, restorer;
4527
4528	if (act) {
4529		if (!access_ok(act, sizeof(*act)) ||
4530		    __get_user(handler, &act->sa_handler) ||
4531		    __get_user(restorer, &act->sa_restorer) ||
4532		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4533		    __get_user(mask, &act->sa_mask))
4534			return -EFAULT;
4535
4536#ifdef __ARCH_HAS_KA_RESTORER
4537		new_ka.ka_restorer = NULL;
4538#endif
4539		new_ka.sa.sa_handler = compat_ptr(handler);
4540		new_ka.sa.sa_restorer = compat_ptr(restorer);
4541		siginitset(&new_ka.sa.sa_mask, mask);
4542	}
4543
4544	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4545
4546	if (!ret && oact) {
4547		if (!access_ok(oact, sizeof(*oact)) ||
4548		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4549			       &oact->sa_handler) ||
4550		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4551			       &oact->sa_restorer) ||
4552		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4553		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4554			return -EFAULT;
4555	}
4556	return ret;
4557}
4558#endif
4559
4560#ifdef CONFIG_SGETMASK_SYSCALL
4561
4562/*
4563 * For backwards compatibility.  Functionality superseded by sigprocmask.
4564 */
4565SYSCALL_DEFINE0(sgetmask)
4566{
4567	/* SMP safe */
4568	return current->blocked.sig[0];
4569}
4570
4571SYSCALL_DEFINE1(ssetmask, int, newmask)
4572{
4573	int old = current->blocked.sig[0];
4574	sigset_t newset;
4575
4576	siginitset(&newset, newmask);
4577	set_current_blocked(&newset);
4578
4579	return old;
4580}
4581#endif /* CONFIG_SGETMASK_SYSCALL */
4582
4583#ifdef __ARCH_WANT_SYS_SIGNAL
4584/*
4585 * For backwards compatibility.  Functionality superseded by sigaction.
4586 */
4587SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4588{
4589	struct k_sigaction new_sa, old_sa;
4590	int ret;
4591
4592	new_sa.sa.sa_handler = handler;
4593	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4594	sigemptyset(&new_sa.sa.sa_mask);
4595
4596	ret = do_sigaction(sig, &new_sa, &old_sa);
4597
4598	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4599}
4600#endif /* __ARCH_WANT_SYS_SIGNAL */
4601
4602#ifdef __ARCH_WANT_SYS_PAUSE
4603
4604SYSCALL_DEFINE0(pause)
4605{
4606	while (!signal_pending(current)) {
4607		__set_current_state(TASK_INTERRUPTIBLE);
4608		schedule();
4609	}
4610	return -ERESTARTNOHAND;
4611}
4612
4613#endif
4614
4615static int sigsuspend(sigset_t *set)
4616{
4617	current->saved_sigmask = current->blocked;
4618	set_current_blocked(set);
4619
4620	while (!signal_pending(current)) {
4621		__set_current_state(TASK_INTERRUPTIBLE);
4622		schedule();
4623	}
4624	set_restore_sigmask();
4625	return -ERESTARTNOHAND;
4626}
4627
4628/**
4629 *  sys_rt_sigsuspend - replace the signal mask for a value with the
4630 *	@unewset value until a signal is received
4631 *  @unewset: new signal mask value
4632 *  @sigsetsize: size of sigset_t type
4633 */
4634SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4635{
4636	sigset_t newset;
4637
4638	/* XXX: Don't preclude handling different sized sigset_t's.  */
4639	if (sigsetsize != sizeof(sigset_t))
4640		return -EINVAL;
4641
4642	if (copy_from_user(&newset, unewset, sizeof(newset)))
4643		return -EFAULT;
4644	return sigsuspend(&newset);
4645}
4646 
4647#ifdef CONFIG_COMPAT
4648COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4649{
 
4650	sigset_t newset;
 
4651
4652	/* XXX: Don't preclude handling different sized sigset_t's.  */
4653	if (sigsetsize != sizeof(sigset_t))
4654		return -EINVAL;
4655
4656	if (get_compat_sigset(&newset, unewset))
4657		return -EFAULT;
 
4658	return sigsuspend(&newset);
 
 
 
 
4659}
4660#endif
4661
4662#ifdef CONFIG_OLD_SIGSUSPEND
4663SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4664{
4665	sigset_t blocked;
4666	siginitset(&blocked, mask);
4667	return sigsuspend(&blocked);
4668}
4669#endif
4670#ifdef CONFIG_OLD_SIGSUSPEND3
4671SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4672{
4673	sigset_t blocked;
4674	siginitset(&blocked, mask);
4675	return sigsuspend(&blocked);
4676}
4677#endif
4678
4679__weak const char *arch_vma_name(struct vm_area_struct *vma)
4680{
4681	return NULL;
4682}
4683
4684static inline void siginfo_buildtime_checks(void)
4685{
4686	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4687
4688	/* Verify the offsets in the two siginfos match */
4689#define CHECK_OFFSET(field) \
4690	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4691
4692	/* kill */
4693	CHECK_OFFSET(si_pid);
4694	CHECK_OFFSET(si_uid);
4695
4696	/* timer */
4697	CHECK_OFFSET(si_tid);
4698	CHECK_OFFSET(si_overrun);
4699	CHECK_OFFSET(si_value);
4700
4701	/* rt */
4702	CHECK_OFFSET(si_pid);
4703	CHECK_OFFSET(si_uid);
4704	CHECK_OFFSET(si_value);
4705
4706	/* sigchld */
4707	CHECK_OFFSET(si_pid);
4708	CHECK_OFFSET(si_uid);
4709	CHECK_OFFSET(si_status);
4710	CHECK_OFFSET(si_utime);
4711	CHECK_OFFSET(si_stime);
4712
4713	/* sigfault */
4714	CHECK_OFFSET(si_addr);
4715	CHECK_OFFSET(si_trapno);
4716	CHECK_OFFSET(si_addr_lsb);
4717	CHECK_OFFSET(si_lower);
4718	CHECK_OFFSET(si_upper);
4719	CHECK_OFFSET(si_pkey);
4720	CHECK_OFFSET(si_perf_data);
4721	CHECK_OFFSET(si_perf_type);
4722	CHECK_OFFSET(si_perf_flags);
4723
4724	/* sigpoll */
4725	CHECK_OFFSET(si_band);
4726	CHECK_OFFSET(si_fd);
4727
4728	/* sigsys */
4729	CHECK_OFFSET(si_call_addr);
4730	CHECK_OFFSET(si_syscall);
4731	CHECK_OFFSET(si_arch);
4732#undef CHECK_OFFSET
4733
4734	/* usb asyncio */
4735	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4736		     offsetof(struct siginfo, si_addr));
4737	if (sizeof(int) == sizeof(void __user *)) {
4738		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4739			     sizeof(void __user *));
4740	} else {
4741		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4742			      sizeof_field(struct siginfo, si_uid)) !=
4743			     sizeof(void __user *));
4744		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4745			     offsetof(struct siginfo, si_uid));
4746	}
4747#ifdef CONFIG_COMPAT
4748	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4749		     offsetof(struct compat_siginfo, si_addr));
4750	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4751		     sizeof(compat_uptr_t));
4752	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4753		     sizeof_field(struct siginfo, si_pid));
4754#endif
4755}
4756
4757void __init signals_init(void)
4758{
4759	siginfo_buildtime_checks();
 
 
4760
4761	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4762}
4763
4764#ifdef CONFIG_KGDB_KDB
4765#include <linux/kdb.h>
4766/*
4767 * kdb_send_sig - Allows kdb to send signals without exposing
4768 * signal internals.  This function checks if the required locks are
4769 * available before calling the main signal code, to avoid kdb
4770 * deadlocks.
4771 */
4772void kdb_send_sig(struct task_struct *t, int sig)
 
4773{
4774	static struct task_struct *kdb_prev_t;
4775	int new_t, ret;
4776	if (!spin_trylock(&t->sighand->siglock)) {
4777		kdb_printf("Can't do kill command now.\n"
4778			   "The sigmask lock is held somewhere else in "
4779			   "kernel, try again later\n");
4780		return;
4781	}
 
4782	new_t = kdb_prev_t != t;
4783	kdb_prev_t = t;
4784	if (!task_is_running(t) && new_t) {
4785		spin_unlock(&t->sighand->siglock);
4786		kdb_printf("Process is not RUNNING, sending a signal from "
4787			   "kdb risks deadlock\n"
4788			   "on the run queue locks. "
4789			   "The signal has _not_ been sent.\n"
4790			   "Reissue the kill command if you want to risk "
4791			   "the deadlock.\n");
4792		return;
4793	}
4794	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4795	spin_unlock(&t->sighand->siglock);
4796	if (ret)
4797		kdb_printf("Fail to deliver Signal %d to process %d.\n",
4798			   sig, t->pid);
4799	else
4800		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4801}
4802#endif	/* CONFIG_KGDB_KDB */